ilo: remove ilo_shader_internal.h from GEN6 pipeline
[mesa.git] / src / gallium / drivers / ilo / ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_3d.h"
33 #include "ilo_context.h"
34 #include "ilo_cp.h"
35 #include "ilo_gpe_gen6.h"
36 #include "ilo_shader.h"
37 #include "ilo_state.h"
38 #include "ilo_3d_pipeline.h"
39 #include "ilo_3d_pipeline_gen6.h"
40
41 /**
42 * This should be called before any depth stall flush (including those
43 * produced by non-pipelined state commands) or cache flush on GEN6.
44 *
45 * \see intel_emit_post_sync_nonzero_flush()
46 */
47 static void
48 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
49 bool caller_post_sync)
50 {
51 assert(p->dev->gen == ILO_GEN(6));
52
53 /* emit once */
54 if (p->state.has_gen6_wa_pipe_control)
55 return;
56
57 p->state.has_gen6_wa_pipe_control = true;
58
59 /*
60 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
61 *
62 * "Pipe-control with CS-stall bit set must be sent BEFORE the
63 * pipe-control with a post-sync op and no write-cache flushes."
64 *
 65  *     The post-sync-op PIPE_CONTROL that follows necessitates this workaround.
66 */
67 p->gen6_PIPE_CONTROL(p->dev,
68 PIPE_CONTROL_CS_STALL |
69 PIPE_CONTROL_STALL_AT_SCOREBOARD,
70 NULL, 0, false, p->cp);
71
72 /* the caller will emit the post-sync op */
73 if (caller_post_sync)
74 return;
75
76 /*
77 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
78 *
79 * "Before any depth stall flush (including those produced by
80 * non-pipelined state commands), software needs to first send a
81 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
82 *
83 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
84 * PIPE_CONTROL with any non-zero post-sync-op is required."
85 */
86 p->gen6_PIPE_CONTROL(p->dev,
87 PIPE_CONTROL_WRITE_IMMEDIATE,
88 p->workaround_bo, 0, false, p->cp);
89 }
90
91 static void
92 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
93 {
94 assert(p->dev->gen == ILO_GEN(6));
95
96 gen6_wa_pipe_control_post_sync(p, false);
97
98 /*
99 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
100 *
101 * "Driver must guarentee that all the caches in the depth pipe are
102 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
103 * requires driver to send a PIPE_CONTROL with a CS stall along with a
104 * Depth Flush prior to this command."
105 */
106 p->gen6_PIPE_CONTROL(p->dev,
107 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
108 PIPE_CONTROL_CS_STALL,
109 0, 0, false, p->cp);
110 }
111
112 static void
113 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
114 {
115 assert(p->dev->gen == ILO_GEN(6));
116
117 gen6_wa_pipe_control_post_sync(p, false);
118
119 /*
120 * According to intel_emit_depth_stall_flushes() of classic i965, we need
 121  * to emit a sequence of PIPE_CONTROLs prior to emitting depth-related
 122  * commands.
123 */
124 p->gen6_PIPE_CONTROL(p->dev,
125 PIPE_CONTROL_DEPTH_STALL,
126 NULL, 0, false, p->cp);
127
128 p->gen6_PIPE_CONTROL(p->dev,
129 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
130 NULL, 0, false, p->cp);
131
132 p->gen6_PIPE_CONTROL(p->dev,
133 PIPE_CONTROL_DEPTH_STALL,
134 NULL, 0, false, p->cp);
135 }
136
137 static void
138 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
139 {
140 assert(p->dev->gen == ILO_GEN(6));
141
142 /* the post-sync workaround should cover this already */
143 if (p->state.has_gen6_wa_pipe_control)
144 return;
145
146 /*
147 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
148 *
149 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
150 * field set (DW1 Bit 1), must be issued prior to any change to the
151 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
152 */
153 p->gen6_PIPE_CONTROL(p->dev,
154 PIPE_CONTROL_STALL_AT_SCOREBOARD,
155 NULL, 0, false, p->cp);
156
157 }
158
159 static void
160 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
161 {
162 assert(p->dev->gen == ILO_GEN(6));
163
164 gen6_wa_pipe_control_post_sync(p, false);
165
166 /*
167 * According to upload_vs_state() of classic i965, we need to emit
 168  * a PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command may be
 169  * kept buffered by the VS fixed-function unit until the unit hangs.
170 */
171 p->gen6_PIPE_CONTROL(p->dev,
172 PIPE_CONTROL_DEPTH_STALL |
173 PIPE_CONTROL_INSTRUCTION_FLUSH |
174 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
175 NULL, 0, false, p->cp);
176 }
177
178 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
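/*
 * For reference, DIRTY(VS) expands to (session->pipe_dirty & ILO_DIRTY_VS):
 * the helpers below re-emit a command only when one of the states it depends
 * on is flagged dirty in the current session.
 */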
179
180 void
181 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
182 const struct ilo_context *ilo,
183 struct gen6_pipeline_session *session)
184 {
185 /* PIPELINE_SELECT */
186 if (session->hw_ctx_changed) {
187 if (p->dev->gen == ILO_GEN(6))
188 gen6_wa_pipe_control_post_sync(p, false);
189
190 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
191 }
192 }
193
194 void
195 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
196 const struct ilo_context *ilo,
197 struct gen6_pipeline_session *session)
198 {
199 /* STATE_SIP */
200 if (session->hw_ctx_changed) {
201 if (p->dev->gen == ILO_GEN(6))
202 gen6_wa_pipe_control_post_sync(p, false);
203
204 p->gen6_STATE_SIP(p->dev, 0, p->cp);
205 }
206 }
207
208 void
209 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
210 const struct ilo_context *ilo,
211 struct gen6_pipeline_session *session)
212 {
213 /* STATE_BASE_ADDRESS */
214 if (session->state_bo_changed || session->kernel_bo_changed ||
215 session->batch_bo_changed) {
216 if (p->dev->gen == ILO_GEN(6))
217 gen6_wa_pipe_control_post_sync(p, false);
218
219 p->gen6_STATE_BASE_ADDRESS(p->dev,
220 NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
221 0, 0, 0, 0, p->cp);
222
223 /*
224 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
225 *
226 * "The following commands must be reissued following any change to
227 * the base addresses:
228 *
229 * * 3DSTATE_BINDING_TABLE_POINTERS
230 * * 3DSTATE_SAMPLER_STATE_POINTERS
231 * * 3DSTATE_VIEWPORT_STATE_POINTERS
232 * * 3DSTATE_CC_POINTERS
233 * * MEDIA_STATE_POINTERS"
234 *
235 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
 236  * reasonable to also reissue the command.  The same applies to the PCB.
237 */
238 session->viewport_state_changed = true;
239
240 session->cc_state_blend_changed = true;
241 session->cc_state_dsa_changed = true;
242 session->cc_state_cc_changed = true;
243
244 session->scissor_state_changed = true;
245
246 session->binding_table_vs_changed = true;
247 session->binding_table_gs_changed = true;
248 session->binding_table_fs_changed = true;
249
250 session->sampler_state_vs_changed = true;
251 session->sampler_state_gs_changed = true;
252 session->sampler_state_fs_changed = true;
253
254 session->pcb_state_vs_changed = true;
255 session->pcb_state_gs_changed = true;
256 session->pcb_state_fs_changed = true;
257 }
258 }
259
260 static void
261 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
262 const struct ilo_context *ilo,
263 struct gen6_pipeline_session *session)
264 {
265 /* 3DSTATE_URB */
266 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
267 const bool gs_active = (ilo->gs || (ilo->vs &&
268 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_GEN6_SO)));
269 int vs_entry_size, gs_entry_size;
270 int vs_total_size, gs_total_size;
271
272 vs_entry_size = (ilo->vs) ?
273 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;
274
275 /*
276 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
277 * share VUE handles. The VUE allocation size must be large enough to
 278  * store both the VF outputs (one per VERTEX_ELEMENT) and the VS outputs.
279 *
280 * I am not sure if the PRM explicitly states that VF and VS share VUE
281 * handles. But here is a citation that implies so:
282 *
283 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
284 *
285 * "Once a FF stage that spawn threads has sufficient input to
286 * initiate a thread, it must guarantee that it is safe to request
287 * the thread initiation. For all these FF stages, this check is
288 * based on :
289 *
290 * - The availability of output URB entries:
291 * - VS: As the input URB entries are overwritten with the
292 * VS-generated output data, output URB availability isn't a
293 * factor."
294 */
295 if (vs_entry_size < ilo->ve->count)
296 vs_entry_size = ilo->ve->count;
297
298 gs_entry_size = (ilo->gs) ?
299 ilo_shader_get_kernel_param(ilo->gs, ILO_KERNEL_OUTPUT_COUNT) :
300 (gs_active) ? vs_entry_size : 0;
301
302 /* in bytes */
303 vs_entry_size *= sizeof(float) * 4;
304 gs_entry_size *= sizeof(float) * 4;
305 vs_total_size = ilo->dev->urb_size;
306
307 if (gs_active) {
308 vs_total_size /= 2;
309 gs_total_size = vs_total_size;
310 }
311 else {
312 gs_total_size = 0;
313 }
314
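      /*
       * A sketch with made-up numbers: a VS writing 8 vec4 outputs gives
       * vs_entry_size = 8 * 16 = 128 bytes.  With the GS active and a
       * hypothetical 64KB URB, the VS and GS sections each get 32KB;
       * without a GS, the VS keeps the whole URB.
       */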
315 p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
316 vs_entry_size, gs_entry_size, p->cp);
317
318 /*
319 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
320 *
321 * "Because of a urb corruption caused by allocating a previous
322 * gsunit's urb entry to vsunit software is required to send a
323 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
324 * size == 0) plus a dummy DRAW call before any case where VS will
325 * be taking over GS URB space."
326 */
327 if (p->state.gs.active && !gs_active)
328 ilo_3d_pipeline_emit_flush_gen6(p);
329
330 p->state.gs.active = gs_active;
331 }
332 }
333
334 static void
335 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
336 const struct ilo_context *ilo,
337 struct gen6_pipeline_session *session)
338 {
339 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
340 if (session->viewport_state_changed) {
341 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
342 p->state.CLIP_VIEWPORT,
343 p->state.SF_VIEWPORT,
344 p->state.CC_VIEWPORT, p->cp);
345 }
346 }
347
348 static void
349 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
350 const struct ilo_context *ilo,
351 struct gen6_pipeline_session *session)
352 {
353 /* 3DSTATE_CC_STATE_POINTERS */
354 if (session->cc_state_blend_changed ||
355 session->cc_state_dsa_changed ||
356 session->cc_state_cc_changed) {
357 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
358 p->state.BLEND_STATE,
359 p->state.DEPTH_STENCIL_STATE,
360 p->state.COLOR_CALC_STATE, p->cp);
361 }
362
363 /* 3DSTATE_SAMPLER_STATE_POINTERS */
364 if (session->sampler_state_vs_changed ||
365 session->sampler_state_gs_changed ||
366 session->sampler_state_fs_changed) {
367 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
368 p->state.vs.SAMPLER_STATE,
369 0,
370 p->state.wm.SAMPLER_STATE, p->cp);
371 }
372 }
373
374 static void
375 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
376 const struct ilo_context *ilo,
377 struct gen6_pipeline_session *session)
378 {
379 /* 3DSTATE_SCISSOR_STATE_POINTERS */
380 if (session->scissor_state_changed) {
381 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
382 p->state.SCISSOR_RECT, p->cp);
383 }
384
385 /* 3DSTATE_BINDING_TABLE_POINTERS */
386 if (session->binding_table_vs_changed ||
387 session->binding_table_gs_changed ||
388 session->binding_table_fs_changed) {
389 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
390 p->state.vs.BINDING_TABLE_STATE,
391 p->state.gs.BINDING_TABLE_STATE,
392 p->state.wm.BINDING_TABLE_STATE, p->cp);
393 }
394 }
395
396 void
397 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
398 const struct ilo_context *ilo,
399 struct gen6_pipeline_session *session)
400 {
401 /* 3DSTATE_INDEX_BUFFER */
402 if (DIRTY(INDEX_BUFFER) || session->batch_bo_changed) {
403 p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
404 &ilo->ib.state, session->info->primitive_restart, p->cp);
405 }
406
407 /* 3DSTATE_VERTEX_BUFFERS */
408 if (DIRTY(VERTEX_BUFFERS) || DIRTY(VERTEX_ELEMENTS) ||
409 session->batch_bo_changed) {
410 p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
411 ilo->vb.states, ilo->vb.enabled_mask, ilo->ve, p->cp);
412 }
413
414 /* 3DSTATE_VERTEX_ELEMENTS */
415 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
416 const struct ilo_ve_state *ve = ilo->ve;
417 bool last_velement_edgeflag = false;
418 bool prepend_generate_ids = false;
419
420 if (ilo->vs) {
421 if (ilo_shader_get_kernel_param(ilo->vs,
422 ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
423 /* we rely on the state tracker here */
424 assert(ilo_shader_get_kernel_param(ilo->vs,
425 ILO_KERNEL_INPUT_COUNT) == ve->count);
426
427 last_velement_edgeflag = true;
428 }
429
430 if (ilo_shader_get_kernel_param(ilo->vs,
431 ILO_KERNEL_VS_INPUT_INSTANCEID) ||
432 ilo_shader_get_kernel_param(ilo->vs,
433 ILO_KERNEL_VS_INPUT_VERTEXID))
434 prepend_generate_ids = true;
435 }
436
437 p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
438 last_velement_edgeflag, prepend_generate_ids, p->cp);
439 }
440 }
441
442 void
443 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
444 const struct ilo_context *ilo,
445 struct gen6_pipeline_session *session)
446 {
447 /* 3DSTATE_VF_STATISTICS */
448 if (session->hw_ctx_changed)
449 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
450 }
451
452 void
453 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
454 const struct ilo_context *ilo,
455 struct gen6_pipeline_session *session)
456 {
457 /* 3DPRIMITIVE */
458 p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
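   /*
    * The GEN6 post-sync workaround is emitted at most once per draw; clear
    * the flag so that the next draw emits it again.
    */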
459 p->state.has_gen6_wa_pipe_control = false;
460 }
461
462 void
463 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
464 const struct ilo_context *ilo,
465 struct gen6_pipeline_session *session)
466 {
467 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS) ||
468 session->kernel_bo_changed);
469 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
470
471 /*
472 * the classic i965 does this in upload_vs_state(), citing a spec that I
473 * cannot find
474 */
475 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
476 gen6_wa_pipe_control_post_sync(p, false);
477
478 /* 3DSTATE_CONSTANT_VS */
479 if (emit_3dstate_constant_vs) {
480 p->gen6_3DSTATE_CONSTANT_VS(p->dev,
481 &p->state.vs.PUSH_CONSTANT_BUFFER,
482 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
483 1, p->cp);
484 }
485
486 /* 3DSTATE_VS */
487 if (emit_3dstate_vs) {
488 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
489
490 p->gen6_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
491 }
492
493 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
494 gen6_wa_pipe_control_vs_const_flush(p);
495 }
496
497 static void
498 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
499 const struct ilo_context *ilo,
500 struct gen6_pipeline_session *session)
501 {
502 /* 3DSTATE_CONSTANT_GS */
503 if (session->pcb_state_gs_changed)
504 p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
505
506 /* 3DSTATE_GS */
507 if (DIRTY(GS) || DIRTY(VS) ||
508 session->prim_changed || session->kernel_bo_changed) {
509 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
510
511 p->gen6_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
512 }
513 }
514
515 bool
516 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
517 const struct ilo_context *ilo,
518 struct gen6_pipeline_session *session)
519 {
520 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
521 const struct pipe_stream_output_info *so_info =
522 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
523 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
524 unsigned max_svbi = 0xffffffff;
525 int i;
526
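      /*
       * For each enabled SO output, work out how many vertices fit in its
       * target buffer; max_svbi is the minimum over all outputs.  With
       * made-up numbers, a 1000-byte buffer, a 16-byte vertex stride, and
       * an 8-byte element at dst_offset 0 give count = 1000 / 16 = 62,
       * plus one more because the 8-byte remainder still holds the
       * element, for a total of 63.
       */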
527 for (i = 0; i < so_info->num_outputs; i++) {
528 const int output_buffer = so_info->output[i].output_buffer;
529 const struct pipe_stream_output_target *so =
530 ilo->so.states[output_buffer];
531 const int struct_size = so_info->stride[output_buffer] * 4;
532 const int elem_size = so_info->output[i].num_components * 4;
533 int buf_size, count;
534
535 if (!so) {
536 max_svbi = 0;
537 break;
538 }
539
540 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
541
542 count = buf_size / struct_size;
543 if (buf_size % struct_size >= elem_size)
544 count++;
545
546 if (count < max_svbi)
547 max_svbi = count;
548 }
549
550 if (p->state.so_max_vertices != max_svbi) {
551 p->state.so_max_vertices = max_svbi;
552 return true;
553 }
554 }
555
556 return false;
557 }
558
559 static void
560 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
561 const struct ilo_context *ilo,
562 struct gen6_pipeline_session *session)
563 {
564 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
565
566 /* 3DSTATE_GS_SVB_INDEX */
567 if (emit) {
568 if (p->dev->gen == ILO_GEN(6))
569 gen6_wa_pipe_control_post_sync(p, false);
570
571 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
572 0, p->state.so_num_vertices, p->state.so_max_vertices,
573 false, p->cp);
574
575 if (session->hw_ctx_changed) {
576 int i;
577
578 /*
579 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
580 *
581 * "If a buffer is not enabled then the SVBI must be set to 0x0
582 * in order to not cause overflow in that SVBI."
583 *
584 * "If a buffer is not enabled then the MaxSVBI must be set to
585 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
586 */
587 for (i = 1; i < 4; i++) {
588 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
589 i, 0, 0xffffffff, false, p->cp);
590 }
591 }
592 }
593 }
594
595 void
596 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
597 const struct ilo_context *ilo,
598 struct gen6_pipeline_session *session)
599 {
600 /* 3DSTATE_CLIP */
601 if (DIRTY(RASTERIZER) || DIRTY(FS) ||
602 DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
603 bool enable_guardband = true;
604 unsigned i;
605
606 /*
 607  * We do not do 2D clipping yet.  The guard band test should be enabled
 608  * only when the viewport is at least as large as the framebuffer.
609 */
610 for (i = 0; i < ilo->viewport.count; i++) {
611 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
612
613 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
614 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
615 enable_guardband = false;
616 break;
617 }
618 }
619
620 p->gen6_3DSTATE_CLIP(p->dev, ilo->rasterizer,
621 ilo->fs, enable_guardband, 1, p->cp);
622 }
623 }
624
625 static void
626 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
627 const struct ilo_context *ilo,
628 struct gen6_pipeline_session *session)
629 {
630 /* 3DSTATE_SF */
631 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
632 p->gen6_3DSTATE_SF(p->dev, ilo->rasterizer, ilo->fs,
633 (ilo->gs) ? ilo->gs : ilo->vs, p->cp);
634 }
635 }
636
637 void
638 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
639 const struct ilo_context *ilo,
640 struct gen6_pipeline_session *session)
641 {
642 /* 3DSTATE_DRAWING_RECTANGLE */
643 if (DIRTY(FRAMEBUFFER)) {
644 if (p->dev->gen == ILO_GEN(6))
645 gen6_wa_pipe_control_post_sync(p, false);
646
647 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
648 ilo->fb.state.width, ilo->fb.state.height, p->cp);
649 }
650 }
651
652 static void
653 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
654 const struct ilo_context *ilo,
655 struct gen6_pipeline_session *session)
656 {
657 /* 3DSTATE_CONSTANT_PS */
658 if (session->pcb_state_fs_changed)
659 p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
660
661 /* 3DSTATE_WM */
662 if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
663 DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
664 DIRTY(RASTERIZER) || session->kernel_bo_changed) {
665 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
666 const bool dual_blend = ilo->blend->dual_blend;
667 const bool cc_may_kill = (ilo->dsa->alpha.enabled ||
668 ilo->blend->alpha_to_coverage);
669
670 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
671 gen6_wa_pipe_control_wm_max_threads_stall(p);
672
673 p->gen6_3DSTATE_WM(p->dev, ilo->fs, num_samplers,
674 ilo->rasterizer, dual_blend, cc_may_kill, p->cp);
675 }
676 }
677
678 static void
679 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
680 const struct ilo_context *ilo,
681 struct gen6_pipeline_session *session)
682 {
683 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
684 if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
685 const uint32_t *packed_sample_pos;
686
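      /*
       * Only the 1X and 4X sample position palettes are kept; any
       * multisampled framebuffer uses the 4X pattern here.
       */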
687 packed_sample_pos = (ilo->fb.num_samples > 1) ?
688 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
689
690 if (p->dev->gen == ILO_GEN(6)) {
691 gen6_wa_pipe_control_post_sync(p, false);
692 gen6_wa_pipe_control_wm_multisample_flush(p);
693 }
694
695 p->gen6_3DSTATE_MULTISAMPLE(p->dev,
696 ilo->fb.num_samples, packed_sample_pos,
697 ilo->rasterizer->state.half_pixel_center, p->cp);
698
699 p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
700 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
701 }
702 }
703
704 static void
705 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
706 const struct ilo_context *ilo,
707 struct gen6_pipeline_session *session)
708 {
709 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
710 if (DIRTY(FRAMEBUFFER) || session->batch_bo_changed) {
711 const struct ilo_zs_surface *zs;
712
713 if (ilo->fb.state.zsbuf) {
714 const struct ilo_surface_cso *surface =
715 (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;
716
717 assert(!surface->is_rt);
718 zs = &surface->u.zs;
719 }
720 else {
721 zs = &ilo->fb.null_zs;
722 }
723
724 if (p->dev->gen == ILO_GEN(6)) {
725 gen6_wa_pipe_control_post_sync(p, false);
726 gen6_wa_pipe_control_wm_depth_flush(p);
727 }
728
729 p->gen6_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);
730
731 /* TODO */
732 p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
733 }
734 }
735
736 void
737 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
738 const struct ilo_context *ilo,
739 struct gen6_pipeline_session *session)
740 {
741 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
742 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
743 ilo->rasterizer->state.poly_stipple_enable) {
744 if (p->dev->gen == ILO_GEN(6))
745 gen6_wa_pipe_control_post_sync(p, false);
746
747 p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
748 &ilo->poly_stipple, p->cp);
749
750 p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
751 }
752
753 /* 3DSTATE_LINE_STIPPLE */
754 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
755 if (p->dev->gen == ILO_GEN(6))
756 gen6_wa_pipe_control_post_sync(p, false);
757
758 p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
759 ilo->rasterizer->state.line_stipple_pattern,
760 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
761 }
762
763 /* 3DSTATE_AA_LINE_PARAMETERS */
764 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
765 if (p->dev->gen == ILO_GEN(6))
766 gen6_wa_pipe_control_post_sync(p, false);
767
768 p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
769 }
770 }
771
772 static void
773 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
774 const struct ilo_context *ilo,
775 struct gen6_pipeline_session *session)
776 {
777 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
778 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
779 p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
780 ilo->viewport.cso, ilo->viewport.count, p->cp);
781
782 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
783 ilo->viewport.cso, ilo->viewport.count, p->cp);
784
785 session->viewport_state_changed = true;
786 }
787 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
788 else if (DIRTY(VIEWPORT)) {
789 p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
790 ilo->viewport.cso, ilo->viewport.count, p->cp);
791
792 p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
793 ilo->viewport.cso, ilo->viewport.count, p->cp);
794
795 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
796 ilo->viewport.cso, ilo->viewport.count, p->cp);
797
798 session->viewport_state_changed = true;
799 }
800 }
801
802 static void
803 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
804 const struct ilo_context *ilo,
805 struct gen6_pipeline_session *session)
806 {
807 /* BLEND_STATE */
808 if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
809 p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
810 ilo->blend, &ilo->fb, &ilo->dsa->alpha, p->cp);
811
812 session->cc_state_blend_changed = true;
813 }
814
815 /* COLOR_CALC_STATE */
816 if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
817 p->state.COLOR_CALC_STATE =
818 p->gen6_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
819 ilo->dsa->alpha.ref_value, &ilo->blend_color, p->cp);
820
821 session->cc_state_cc_changed = true;
822 }
823
824 /* DEPTH_STENCIL_STATE */
825 if (DIRTY(DEPTH_STENCIL_ALPHA)) {
826 p->state.DEPTH_STENCIL_STATE =
827 p->gen6_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
828
829 session->cc_state_dsa_changed = true;
830 }
831 }
832
833 static void
834 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
835 const struct ilo_context *ilo,
836 struct gen6_pipeline_session *session)
837 {
838 /* SCISSOR_RECT */
839 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
840 /* there should be as many scissors as there are viewports */
841 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
842 &ilo->scissor, ilo->viewport.count, p->cp);
843
844 session->scissor_state_changed = true;
845 }
846 }
847
848 static void
849 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
850 const struct ilo_context *ilo,
851 struct gen6_pipeline_session *session)
852 {
853 /* SURFACE_STATEs for render targets */
854 if (DIRTY(FRAMEBUFFER)) {
855 const int offset = ILO_WM_DRAW_SURFACE(0);
856 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
857 int i;
858
859 for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
860 const struct ilo_surface_cso *surface =
861 (const struct ilo_surface_cso *) ilo->fb.state.cbufs[i];
862
863 assert(surface && surface->is_rt);
864 surface_state[i] =
865 p->gen6_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
866 }
867
868 /*
869 * Upload at least one render target, as
870 * brw_update_renderbuffer_surfaces() does. I don't know why.
871 */
872 if (i == 0) {
873 struct ilo_view_surface null_surface;
874
875 ilo_gpe_init_view_surface_null(p->dev,
876 ilo->fb.state.width, ilo->fb.state.height,
877 1, 0, &null_surface);
878
879 surface_state[i] =
880 p->gen6_SURFACE_STATE(p->dev, &null_surface, true, p->cp);
881
882 i++;
883 }
884
885 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
886
887 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
888 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
889
890 session->binding_table_fs_changed = true;
891 }
892 }
893
894 static void
895 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
896 const struct ilo_context *ilo,
897 struct gen6_pipeline_session *session)
898 {
899 const struct pipe_stream_output_target **so_targets =
900 (const struct pipe_stream_output_target **) ilo->so.states;
901 const int num_so_targets = ilo->so.count;
902
903 if (p->dev->gen != ILO_GEN(6))
904 return;
905
906 /* SURFACE_STATEs for stream output targets */
907 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
908 const struct pipe_stream_output_info *so_info =
909 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
910 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
911 const int offset = ILO_GS_SO_SURFACE(0);
912 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
913 int i;
914
915 for (i = 0; so_info && i < so_info->num_outputs; i++) {
916 const int target = so_info->output[i].output_buffer;
917 const struct pipe_stream_output_target *so_target =
918 (target < num_so_targets) ? so_targets[target] : NULL;
919
920 if (so_target) {
921 surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
922 so_target, so_info, i, p->cp);
923 }
924 else {
925 surface_state[i] = 0;
926 }
927 }
928
929 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
930
931 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
932 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
933
934 session->binding_table_gs_changed = true;
935 }
936 }
937
938 static void
939 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
940 const struct ilo_context *ilo,
941 int shader_type,
942 struct gen6_pipeline_session *session)
943 {
944 const struct pipe_sampler_view * const *views =
945 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
946 const int num_views = ilo->view[shader_type].count;
947 uint32_t *surface_state;
948 int offset, i;
949 bool skip = false;
950
951 /* SURFACE_STATEs for sampler views */
952 switch (shader_type) {
953 case PIPE_SHADER_VERTEX:
954 if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
955 offset = ILO_VS_TEXTURE_SURFACE(0);
956 surface_state = &p->state.vs.SURFACE_STATE[offset];
957
958 session->binding_table_vs_changed = true;
959 }
960 else {
961 skip = true;
962 }
963 break;
964 case PIPE_SHADER_FRAGMENT:
965 if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
966 offset = ILO_WM_TEXTURE_SURFACE(0);
967 surface_state = &p->state.wm.SURFACE_STATE[offset];
968
969 session->binding_table_fs_changed = true;
970 }
971 else {
972 skip = true;
973 }
974 break;
975 default:
976 skip = true;
977 break;
978 }
979
980 if (skip)
981 return;
982
983 for (i = 0; i < num_views; i++) {
984 if (views[i]) {
985 const struct ilo_view_cso *cso =
986 (const struct ilo_view_cso *) views[i];
987
988 surface_state[i] =
989 p->gen6_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
990 }
991 else {
992 surface_state[i] = 0;
993 }
994 }
995
996 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
997
998 if (i && session->num_surfaces[shader_type] < offset + i)
999 session->num_surfaces[shader_type] = offset + i;
1000 }
1001
1002 static void
1003 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1004 const struct ilo_context *ilo,
1005 int shader_type,
1006 struct gen6_pipeline_session *session)
1007 {
1008 const struct ilo_cbuf_cso *buffers = ilo->cbuf[shader_type].cso;
1009 const int num_buffers = ilo->cbuf[shader_type].count;
1010 uint32_t *surface_state;
1011 int offset, i;
1012 bool skip = false;
1013
1014 /* SURFACE_STATEs for constant buffers */
1015 switch (shader_type) {
1016 case PIPE_SHADER_VERTEX:
1017 if (DIRTY(CONSTANT_BUFFER)) {
1018 offset = ILO_VS_CONST_SURFACE(0);
1019 surface_state = &p->state.vs.SURFACE_STATE[offset];
1020
1021 session->binding_table_vs_changed = true;
1022 }
1023 else {
1024 skip = true;
1025 }
1026 break;
1027 case PIPE_SHADER_FRAGMENT:
1028 if (DIRTY(CONSTANT_BUFFER)) {
1029 offset = ILO_WM_CONST_SURFACE(0);
1030 surface_state = &p->state.wm.SURFACE_STATE[offset];
1031
1032 session->binding_table_fs_changed = true;
1033 }
1034 else {
1035 skip = true;
1036 }
1037 break;
1038 default:
1039 skip = true;
1040 break;
1041 }
1042
1043 if (skip)
1044 return;
1045
1046 for (i = 0; i < num_buffers; i++) {
1047 if (buffers[i].resource) {
1048 const struct ilo_view_surface *surf = &buffers[i].surface;
1049
1050 surface_state[i] =
1051 p->gen6_SURFACE_STATE(p->dev, surf, false, p->cp);
1052 }
1053 else {
1054 surface_state[i] = 0;
1055 }
1056 }
1057
1058 memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);
1059
1060 if (i && session->num_surfaces[shader_type] < offset + i)
1061 session->num_surfaces[shader_type] = offset + i;
1062 }
1063
1064 static void
1065 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1066 const struct ilo_context *ilo,
1067 int shader_type,
1068 struct gen6_pipeline_session *session)
1069 {
1070 uint32_t *binding_table_state, *surface_state;
1071 int *binding_table_state_size, size;
1072 bool skip = false;
1073
1074 /* BINDING_TABLE_STATE */
1075 switch (shader_type) {
1076 case PIPE_SHADER_VERTEX:
1077 surface_state = p->state.vs.SURFACE_STATE;
1078 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1079 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1080
1081 skip = !session->binding_table_vs_changed;
1082 break;
1083 case PIPE_SHADER_GEOMETRY:
1084 surface_state = p->state.gs.SURFACE_STATE;
1085 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1086 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1087
1088 skip = !session->binding_table_gs_changed;
1089 break;
1090 case PIPE_SHADER_FRAGMENT:
1091 surface_state = p->state.wm.SURFACE_STATE;
1092 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1093 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1094
1095 skip = !session->binding_table_fs_changed;
1096 break;
1097 default:
1098 skip = true;
1099 break;
1100 }
1101
1102 if (skip)
1103 return;
1104
1105 /*
 1106  * If we seemingly have fewer SURFACE_STATEs than before, it could be that
 1107  * we did not touch those residing at the tail in this upload.  Loop over
 1108  * them to figure out the real number of SURFACE_STATEs.
1109 */
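   /*
    * A hypothetical example: if the previous binding table had 8 entries
    * and only the first 5 surfaces were touched now, the loop below shrinks
    * the size only past trailing zero entries, so still-valid entries at
    * the tail keep the table at its old size.
    */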
1110 for (size = *binding_table_state_size;
1111 size > session->num_surfaces[shader_type]; size--) {
1112 if (surface_state[size - 1])
1113 break;
1114 }
1115 if (size < session->num_surfaces[shader_type])
1116 size = session->num_surfaces[shader_type];
1117
1118 *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
1119 surface_state, size, p->cp);
1120 *binding_table_state_size = size;
1121 }
1122
1123 static void
1124 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1125 const struct ilo_context *ilo,
1126 int shader_type,
1127 struct gen6_pipeline_session *session)
1128 {
1129 const struct ilo_sampler_cso * const *samplers =
1130 ilo->sampler[shader_type].cso;
1131 const struct pipe_sampler_view * const *views =
1132 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1133 const int num_samplers = ilo->sampler[shader_type].count;
1134 const int num_views = ilo->view[shader_type].count;
1135 uint32_t *sampler_state, *border_color_state;
1136 bool emit_border_color = false;
1137 bool skip = false;
1138
1139 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1140 switch (shader_type) {
1141 case PIPE_SHADER_VERTEX:
1142 if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
1143 sampler_state = &p->state.vs.SAMPLER_STATE;
1144 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1145
1146 if (DIRTY(VERTEX_SAMPLERS))
1147 emit_border_color = true;
1148
1149 session->sampler_state_vs_changed = true;
1150 }
1151 else {
1152 skip = true;
1153 }
1154 break;
1155 case PIPE_SHADER_FRAGMENT:
1156 if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
1157 sampler_state = &p->state.wm.SAMPLER_STATE;
1158 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1159
1160 if (DIRTY(FRAGMENT_SAMPLERS))
1161 emit_border_color = true;
1162
1163 session->sampler_state_fs_changed = true;
1164 }
1165 else {
1166 skip = true;
1167 }
1168 break;
1169 default:
1170 skip = true;
1171 break;
1172 }
1173
1174 if (skip)
1175 return;
1176
1177 if (emit_border_color) {
1178 int i;
1179
1180 for (i = 0; i < num_samplers; i++) {
1181 border_color_state[i] = (samplers[i]) ?
1182 p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
1183 samplers[i], p->cp) : 0;
1184 }
1185 }
1186
1187 /* should we take the minimum of num_samplers and num_views? */
1188 *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
1189 samplers, views,
1190 border_color_state,
1191 MIN2(num_samplers, num_views), p->cp);
1192 }
1193
1194 static void
1195 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1196 const struct ilo_context *ilo,
1197 struct gen6_pipeline_session *session)
1198 {
1199 /* push constant buffer for VS */
1200 if (DIRTY(VS) || DIRTY(CLIP)) {
1201 const int clip_state_size = (ilo->vs) ?
1202 ilo_shader_get_kernel_param(ilo->vs,
1203 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1204
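   /*
    * In this pipeline the only VS push constants are the user clip planes
    * (UCP); ilo->clip is copied into the buffer verbatim below.
    */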
1205 if (clip_state_size) {
1206 void *pcb;
1207
1208 p->state.vs.PUSH_CONSTANT_BUFFER_size = clip_state_size;
1209 p->state.vs.PUSH_CONSTANT_BUFFER =
1210 p->gen6_push_constant_buffer(p->dev,
1211 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
1212
1213 memcpy(pcb, &ilo->clip, clip_state_size);
1214 }
1215 else {
1216 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1217 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1218 }
1219
1220 session->pcb_state_vs_changed = true;
1221 }
1222 }
1223
1224 #undef DIRTY
1225
1226 static void
1227 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1228 const struct ilo_context *ilo,
1229 struct gen6_pipeline_session *session)
1230 {
1231 /*
 1232  * We try to keep the order of the commands matching, as closely as possible,
1233 * that of the classic i965 driver. It allows us to compare the command
1234 * streams easily.
1235 */
1236 gen6_pipeline_common_select(p, ilo, session);
1237 gen6_pipeline_gs_svbi(p, ilo, session);
1238 gen6_pipeline_common_sip(p, ilo, session);
1239 gen6_pipeline_vf_statistics(p, ilo, session);
1240 gen6_pipeline_common_base_address(p, ilo, session);
1241 gen6_pipeline_common_pointers_1(p, ilo, session);
1242 gen6_pipeline_common_urb(p, ilo, session);
1243 gen6_pipeline_common_pointers_2(p, ilo, session);
1244 gen6_pipeline_wm_multisample(p, ilo, session);
1245 gen6_pipeline_vs(p, ilo, session);
1246 gen6_pipeline_gs(p, ilo, session);
1247 gen6_pipeline_clip(p, ilo, session);
1248 gen6_pipeline_sf(p, ilo, session);
1249 gen6_pipeline_wm(p, ilo, session);
1250 gen6_pipeline_common_pointers_3(p, ilo, session);
1251 gen6_pipeline_wm_depth(p, ilo, session);
1252 gen6_pipeline_wm_raster(p, ilo, session);
1253 gen6_pipeline_sf_rect(p, ilo, session);
1254 gen6_pipeline_vf(p, ilo, session);
1255 gen6_pipeline_vf_draw(p, ilo, session);
1256 }
1257
1258 void
1259 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1260 const struct ilo_context *ilo,
1261 struct gen6_pipeline_session *session)
1262 {
1263 int shader_type;
1264
1265 gen6_pipeline_state_viewports(p, ilo, session);
1266 gen6_pipeline_state_cc(p, ilo, session);
1267 gen6_pipeline_state_scissors(p, ilo, session);
1268 gen6_pipeline_state_pcb(p, ilo, session);
1269
1270 /*
 1271  * upload all SURFACE_STATEs together so that we know there is minimal
 1272  * padding
1273 */
1274 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1275 gen6_pipeline_state_surfaces_so(p, ilo, session);
1276 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1277 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1278 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1279 }
1280
1281 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1282 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1283 /* this must be called after all SURFACE_STATEs are uploaded */
1284 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1285 }
1286 }
1287
1288 void
1289 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1290 const struct ilo_context *ilo,
1291 const struct pipe_draw_info *info,
1292 struct gen6_pipeline_session *session)
1293 {
1294 memset(session, 0, sizeof(*session));
1295 session->info = info;
1296 session->pipe_dirty = ilo->dirty;
1297 session->reduced_prim = u_reduced_prim(info->mode);
1298
1299 /* available space before the session */
1300 session->init_cp_space = ilo_cp_space(p->cp);
1301
1302 session->hw_ctx_changed =
1303 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1304
1305 if (session->hw_ctx_changed) {
1306 /* these should be enough to make everything uploaded */
1307 session->batch_bo_changed = true;
1308 session->state_bo_changed = true;
1309 session->kernel_bo_changed = true;
1310 session->prim_changed = true;
1311 }
1312 else {
1313 /*
1314 * Any state that involves resources needs to be re-emitted when the
 1315  * batch bo changes.  This is because we do not pin the resources, and
1316 * their offsets (or existence) may change between batch buffers.
1317 *
1318 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1319 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1320 * a temporary workaround.
1321 */
1322 session->batch_bo_changed =
1323 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1324
1325 session->state_bo_changed =
1326 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1327 session->kernel_bo_changed =
1328 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1329 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1330 }
1331 }
1332
1333 void
1334 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1335 const struct ilo_context *ilo,
1336 struct gen6_pipeline_session *session)
1337 {
1338 /* force all states to be uploaded if the state bo changed */
1339 if (session->state_bo_changed)
1340 session->pipe_dirty = ILO_DIRTY_ALL;
1341 else
1342 session->pipe_dirty = ilo->dirty;
1343
1344 session->emit_draw_states(p, ilo, session);
1345
1346 /* force all commands to be uploaded if the HW context changed */
1347 if (session->hw_ctx_changed)
1348 session->pipe_dirty = ILO_DIRTY_ALL;
1349 else
1350 session->pipe_dirty = ilo->dirty;
1351
1352 session->emit_draw_commands(p, ilo, session);
1353 }
1354
1355 void
1356 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1357 const struct ilo_context *ilo,
1358 struct gen6_pipeline_session *session)
1359 {
1360 int used, estimate;
1361
1362 /* sanity check size estimation */
1363 used = session->init_cp_space - ilo_cp_space(p->cp);
1364 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1365 assert(used <= estimate);
1366
1367 p->state.reduced_prim = session->reduced_prim;
1368 }
1369
1370 static void
1371 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1372 const struct ilo_context *ilo,
1373 const struct pipe_draw_info *info)
1374 {
1375 struct gen6_pipeline_session session;
1376
1377 gen6_pipeline_prepare(p, ilo, info, &session);
1378
1379 session.emit_draw_states = gen6_pipeline_states;
1380 session.emit_draw_commands = gen6_pipeline_commands;
1381
1382 gen6_pipeline_draw(p, ilo, &session);
1383 gen6_pipeline_end(p, ilo, &session);
1384 }
1385
1386 void
1387 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1388 {
1389 if (p->dev->gen == ILO_GEN(6))
1390 gen6_wa_pipe_control_post_sync(p, false);
1391
1392 p->gen6_PIPE_CONTROL(p->dev,
1393 PIPE_CONTROL_INSTRUCTION_FLUSH |
1394 PIPE_CONTROL_WRITE_FLUSH |
1395 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1396 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1397 PIPE_CONTROL_TC_FLUSH |
1398 PIPE_CONTROL_NO_WRITE |
1399 PIPE_CONTROL_CS_STALL,
1400 0, 0, false, p->cp);
1401 }
1402
1403 void
1404 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1405 struct intel_bo *bo, int index)
1406 {
1407 if (p->dev->gen == ILO_GEN(6))
1408 gen6_wa_pipe_control_post_sync(p, true);
1409
1410 p->gen6_PIPE_CONTROL(p->dev,
1411 PIPE_CONTROL_WRITE_TIMESTAMP,
1412 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1413 true, p->cp);
1414 }
1415
1416 void
1417 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1418 struct intel_bo *bo, int index)
1419 {
1420 if (p->dev->gen == ILO_GEN(6))
1421 gen6_wa_pipe_control_post_sync(p, false);
1422
1423 p->gen6_PIPE_CONTROL(p->dev,
1424 PIPE_CONTROL_DEPTH_STALL |
1425 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1426 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1427 true, p->cp);
1428 }
1429
1430 static int
1431 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1432 const struct ilo_gpe_gen6 *gen6,
1433 const struct ilo_context *ilo)
1434 {
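   /*
    * The per-command counts below do not depend on the context, so the
    * total is computed only once and cached in this static.
    */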
1435 static int size;
1436 enum ilo_gpe_gen6_command cmd;
1437
1438 if (size)
1439 return size;
1440
1441 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1442 int count;
1443
1444 switch (cmd) {
1445 case ILO_GPE_GEN6_PIPE_CONTROL:
1446 /* for the workaround */
1447 count = 2;
1448 /* another one after 3DSTATE_URB */
1449 count += 1;
1450 /* and another one after 3DSTATE_CONSTANT_VS */
1451 count += 1;
1452 break;
1453 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1454 /* there are 4 SVBIs */
1455 count = 4;
1456 break;
1457 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1458 count = 33;
1459 break;
1460 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1461 count = 34;
1462 break;
1463 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1464 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1465 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1466 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1467 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1468 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1469 /* media commands */
1470 count = 0;
1471 break;
1472 default:
1473 count = 1;
1474 break;
1475 }
1476
1477 if (count)
1478 size += gen6->estimate_command_size(p->dev, cmd, count);
1479 }
1480
1481 return size;
1482 }
1483
1484 static int
1485 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1486 const struct ilo_gpe_gen6 *gen6,
1487 const struct ilo_context *ilo)
1488 {
1489 static int static_size;
1490 int shader_type, count, size;
1491
1492 if (!static_size) {
1493 struct {
1494 enum ilo_gpe_gen6_state state;
1495 int count;
1496 } static_states[] = {
1497 /* viewports */
1498 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1499 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1500 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1501 /* cc */
1502 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1503 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1504 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1505 /* scissors */
1506 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1507 /* binding table (vs, gs, fs) */
1508 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1509 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1510 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
1511 };
1512 int i;
1513
1514 for (i = 0; i < Elements(static_states); i++) {
1515 static_size += gen6->estimate_state_size(p->dev,
1516 static_states[i].state,
1517 static_states[i].count);
1518 }
1519 }
1520
1521 size = static_size;
1522
1523 /*
1524 * render targets (fs)
1525 * stream outputs (gs)
1526 * sampler views (vs, fs)
1527 * constant buffers (vs, fs)
1528 */
1529 count = ilo->fb.state.nr_cbufs;
1530
1531 if (ilo->gs) {
1532 const struct pipe_stream_output_info *so_info =
1533 ilo_shader_get_kernel_so_info(ilo->gs);
1534
1535 count += so_info->num_outputs;
1536 }
1537 else if (ilo->vs) {
1538 const struct pipe_stream_output_info *so_info =
1539 ilo_shader_get_kernel_so_info(ilo->vs);
1540
1541 count += so_info->num_outputs;
1542 }
1543
1544 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1545 count += ilo->view[shader_type].count;
1546 count += ilo->cbuf[shader_type].count;
1547 }
1548
1549 if (count) {
1550 size += gen6->estimate_state_size(p->dev,
1551 ILO_GPE_GEN6_SURFACE_STATE, count);
1552 }
1553
1554 /* samplers (vs, fs) */
1555 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1556 count = ilo->sampler[shader_type].count;
1557 if (count) {
1558 size += gen6->estimate_state_size(p->dev,
1559 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1560 size += gen6->estimate_state_size(p->dev,
1561 ILO_GPE_GEN6_SAMPLER_STATE, count);
1562 }
1563 }
1564
1565 /* pcb (vs) */
1566 if (ilo->vs &&
1567 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE)) {
1568 const int pcb_size =
1569 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE);
1570
1571 size += gen6->estimate_state_size(p->dev,
1572 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
1573 }
1574
1575 return size;
1576 }
1577
1578 static int
1579 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1580 enum ilo_3d_pipeline_action action,
1581 const void *arg)
1582 {
1583 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1584 int size;
1585
1586 switch (action) {
1587 case ILO_3D_PIPELINE_DRAW:
1588 {
1589 const struct ilo_context *ilo = arg;
1590
1591 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1592 gen6_pipeline_estimate_states(p, gen6, ilo);
1593 }
1594 break;
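   /*
    * The multipliers below account for gen6_wa_pipe_control_post_sync(): a
    * flush or depth-count write may be preceded by up to two workaround
    * PIPE_CONTROLs, while a timestamp write (caller_post_sync) needs only
    * one.
    */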
1595 case ILO_3D_PIPELINE_FLUSH:
1596 size = gen6->estimate_command_size(p->dev,
1597 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1598 break;
1599 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1600 size = gen6->estimate_command_size(p->dev,
1601 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1602 break;
1603 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1604 size = gen6->estimate_command_size(p->dev,
1605 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1606 break;
1607 default:
1608 assert(!"unknown 3D pipeline action");
1609 size = 0;
1610 break;
1611 }
1612
1613 return size;
1614 }
1615
1616 void
1617 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1618 {
1619 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1620
1621 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1622 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1623 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1624 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1625 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1626
1627 #define GEN6_USE(p, name, from) \
1628 p->gen6_ ## name = from->emit_ ## name
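   /*
    * For reference, GEN6_USE(p, STATE_BASE_ADDRESS, gen6) expands to
    * p->gen6_STATE_BASE_ADDRESS = gen6->emit_STATE_BASE_ADDRESS.
    */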
1629 GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
1630 GEN6_USE(p, STATE_SIP, gen6);
1631 GEN6_USE(p, PIPELINE_SELECT, gen6);
1632 GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
1633 GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
1634 GEN6_USE(p, 3DSTATE_URB, gen6);
1635 GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
1636 GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
1637 GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
1638 GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
1639 GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
1640 GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
1641 GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
1642 GEN6_USE(p, 3DSTATE_VS, gen6);
1643 GEN6_USE(p, 3DSTATE_GS, gen6);
1644 GEN6_USE(p, 3DSTATE_CLIP, gen6);
1645 GEN6_USE(p, 3DSTATE_SF, gen6);
1646 GEN6_USE(p, 3DSTATE_WM, gen6);
1647 GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
1648 GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
1649 GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
1650 GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
1651 GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
1652 GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
1653 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
1654 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
1655 GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
1656 GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
1657 GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
1658 GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
1659 GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
1660 GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
1661 GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
1662 GEN6_USE(p, PIPE_CONTROL, gen6);
1663 GEN6_USE(p, 3DPRIMITIVE, gen6);
1664 GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
1665 GEN6_USE(p, SF_VIEWPORT, gen6);
1666 GEN6_USE(p, CLIP_VIEWPORT, gen6);
1667 GEN6_USE(p, CC_VIEWPORT, gen6);
1668 GEN6_USE(p, COLOR_CALC_STATE, gen6);
1669 GEN6_USE(p, BLEND_STATE, gen6);
1670 GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
1671 GEN6_USE(p, SCISSOR_RECT, gen6);
1672 GEN6_USE(p, BINDING_TABLE_STATE, gen6);
1673 GEN6_USE(p, SURFACE_STATE, gen6);
1674 GEN6_USE(p, so_SURFACE_STATE, gen6);
1675 GEN6_USE(p, SAMPLER_STATE, gen6);
1676 GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
1677 GEN6_USE(p, push_constant_buffer, gen6);
1678 #undef GEN6_USE
1679 }