ilo: Add support for HW primitive restart.
[mesa.git] / src / gallium / drivers / ilo / ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_context.h"
33 #include "ilo_cp.h"
34 #include "ilo_gpe_gen6.h"
35 #include "ilo_shader.h"
36 #include "ilo_state.h"
37 #include "ilo_3d_pipeline.h"
38 #include "ilo_3d_pipeline_gen6.h"
39
/**
 * This should be called before any depth stall flush (including those
 * produced by non-pipelined state commands) or cache flush on GEN6.
 *
 * \param caller_post_sync  true when the caller will emit the required
 *                          post-sync PIPE_CONTROL itself; only the CS-stall
 *                          prologue is emitted in that case.
 *
 * \see intel_emit_post_sync_nonzero_flush()
 */
static void
gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
                               bool caller_post_sync)
{
   assert(p->dev->gen == ILO_GEN(6));

   /* emit once; the flag is re-armed after each 3DPRIMITIVE
    * (see gen6_pipeline_vf_draw()) */
   if (p->state.has_gen6_wa_pipe_control)
      return;

   p->state.has_gen6_wa_pipe_control = true;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Pipe-control with CS-stall bit set must be sent BEFORE the
    *      pipe-control with a post-sync op and no write-cache flushes."
    *
    * The workaround below necessitates this workaround.
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_CS_STALL |
         PIPE_CONTROL_STALL_AT_SCOREBOARD,
         NULL, 0, false, p->cp);

   /* the caller will emit the post-sync op */
   if (caller_post_sync)
      return;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Before any depth stall flush (including those produced by
    *      non-pipelined state commands), software needs to first send a
    *      PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
    *
    *     "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
    *      PIPE_CONTROL with any non-zero post-sync-op is required."
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_IMMEDIATE,
         p->workaround_bo, 0, false, p->cp);
}
89
90 static void
91 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
92 {
93 assert(p->dev->gen == ILO_GEN(6));
94
95 gen6_wa_pipe_control_post_sync(p, false);
96
97 /*
98 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
99 *
100 * "Driver must guarentee that all the caches in the depth pipe are
101 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
102 * requires driver to send a PIPE_CONTROL with a CS stall along with a
103 * Depth Flush prior to this command."
104 */
105 p->gen6_PIPE_CONTROL(p->dev,
106 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
107 PIPE_CONTROL_CS_STALL,
108 0, 0, false, p->cp);
109 }
110
/**
 * Emit the GEN6 depth-stall / depth-flush / depth-stall PIPE_CONTROL
 * sequence required before depth-related commands.
 */
static void
gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
{
   assert(p->dev->gen == ILO_GEN(6));

   gen6_wa_pipe_control_post_sync(p, false);

   /*
    * According to intel_emit_depth_stall_flushes() of classic i965, we need
    * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
    * commands.
    */
   /* the stall / flush / stall ordering below is deliberate; do not merge
    * or reorder these three PIPE_CONTROLs */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL,
         NULL, 0, false, p->cp);

   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_CACHE_FLUSH,
         NULL, 0, false, p->cp);

   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL,
         NULL, 0, false, p->cp);
}
135
/**
 * Stall at the pixel scoreboard before changing the WM maximum thread
 * count on GEN6.
 */
static void
gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
{
   assert(p->dev->gen == ILO_GEN(6));

   /* the post-sync workaround should cover this already */
   if (p->state.has_gen6_wa_pipe_control)
      return;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 274:
    *
    *     "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
    *      field set (DW1 Bit 1), must be issued prior to any change to the
    *      value in this field (Maximum Number of Threads in 3DSTATE_WM)"
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_STALL_AT_SCOREBOARD,
         NULL, 0, false, p->cp);

}
157
/**
 * Flush after 3DSTATE_CONSTANT_VS on GEN6 so the command does not stay
 * buffered in the VS fixed-function unit.
 */
static void
gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
{
   assert(p->dev->gen == ILO_GEN(6));

   gen6_wa_pipe_control_post_sync(p, false);

   /*
    * According to upload_vs_state() of classic i965, we need to emit
    * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
    * buffered by VS FF, to the point that the FF dies.
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL |
         PIPE_CONTROL_INSTRUCTION_FLUSH |
         PIPE_CONTROL_STATE_CACHE_INVALIDATE,
         NULL, 0, false, p->cp);
}
176
177 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
178
179 void
180 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
181 const struct ilo_context *ilo,
182 struct gen6_pipeline_session *session)
183 {
184 /* PIPELINE_SELECT */
185 if (session->hw_ctx_changed) {
186 if (p->dev->gen == ILO_GEN(6))
187 gen6_wa_pipe_control_post_sync(p, false);
188
189 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
190 }
191 }
192
193 void
194 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
195 const struct ilo_context *ilo,
196 struct gen6_pipeline_session *session)
197 {
198 /* STATE_SIP */
199 if (session->hw_ctx_changed) {
200 if (p->dev->gen == ILO_GEN(6))
201 gen6_wa_pipe_control_post_sync(p, false);
202
203 p->gen6_STATE_SIP(p->dev, 0, p->cp);
204 }
205 }
206
/**
 * Emit STATE_BASE_ADDRESS when the state or instruction bo changed, and
 * mark all pointer-carrying state for re-emission as the PRM requires.
 */
void
gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
                                  const struct ilo_context *ilo,
                                  struct gen6_pipeline_session *session)
{
   /* STATE_BASE_ADDRESS */
   if (session->state_bo_changed || session->instruction_bo_changed) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      /* surface and dynamic state live in the batch bo; instructions come
       * from the shader cache bo */
      p->gen6_STATE_BASE_ADDRESS(p->dev,
            NULL, p->cp->bo, p->cp->bo, NULL, ilo->shader_cache->bo,
            0, 0, 0, 0, p->cp);

      /*
       * From the Sandy Bridge PRM, volume 1 part 1, page 28:
       *
       *     "The following commands must be reissued following any change to
       *      the base addresses:
       *
       *       * 3DSTATE_BINDING_TABLE_POINTERS
       *       * 3DSTATE_SAMPLER_STATE_POINTERS
       *       * 3DSTATE_VIEWPORT_STATE_POINTERS
       *       * 3DSTATE_CC_POINTERS
       *       * MEDIA_STATE_POINTERS"
       *
       * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
       * reasonable to also reissue the command.  Same to PCB.
       */
      session->viewport_state_changed = true;

      session->cc_state_blend_changed = true;
      session->cc_state_dsa_changed = true;
      session->cc_state_cc_changed = true;

      session->scissor_state_changed = true;

      session->binding_table_vs_changed = true;
      session->binding_table_gs_changed = true;
      session->binding_table_fs_changed = true;

      session->sampler_state_vs_changed = true;
      session->sampler_state_gs_changed = true;
      session->sampler_state_fs_changed = true;

      session->pcb_state_vs_changed = true;
      session->pcb_state_gs_changed = true;
      session->pcb_state_fs_changed = true;
   }
}
257
/**
 * Partition the URB between VS and GS and emit 3DSTATE_URB.
 */
static void
gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
                         const struct ilo_context *ilo,
                         struct gen6_pipeline_session *session)
{
   /* 3DSTATE_URB */
   if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
      const struct ilo_shader *vs = (ilo->vs) ? ilo->vs->shader : NULL;
      const struct ilo_shader *gs = (ilo->gs) ? ilo->gs->shader : NULL;
      /* the GS unit is also needed when the VS does stream output */
      const bool gs_active = (gs || (vs && vs->stream_output));
      int vs_entry_size, gs_entry_size;
      int vs_total_size, gs_total_size;

      vs_entry_size = (vs) ? vs->out.count : 0;

      /*
       * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
       * share VUE handles.  The VUE allocation size must be large enough to
       * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
       *
       * I am not sure if the PRM explicitly states that VF and VS share VUE
       * handles.  But here is a citation that implies so:
       *
       * From the Sandy Bridge PRM, volume 2 part 1, page 44:
       *
       *     "Once a FF stage that spawn threads has sufficient input to
       *      initiate a thread, it must guarantee that it is safe to request
       *      the thread initiation.  For all these FF stages, this check is
       *      based on :
       *
       *      - The availability of output URB entries:
       *        - VS: As the input URB entries are overwritten with the
       *          VS-generated output data, output URB availability isn't a
       *          factor."
       */
      if (vs_entry_size < ilo->vertex_elements->num_elements)
         vs_entry_size = ilo->vertex_elements->num_elements;

      /* a stream-output-only VS needs GS entries as large as its own */
      gs_entry_size = (gs) ? gs->out.count :
         (vs && vs->stream_output) ? vs_entry_size : 0;

      /* in bytes: each output attribute is a 4-component float vector */
      vs_entry_size *= sizeof(float) * 4;
      gs_entry_size *= sizeof(float) * 4;
      vs_total_size = ilo->dev->urb_size;

      /* split the URB evenly between VS and GS when GS is active */
      if (gs_active) {
         vs_total_size /= 2;
         gs_total_size = vs_total_size;
      }
      else {
         gs_total_size = 0;
      }

      p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
            vs_entry_size, gs_entry_size, p->cp);

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 27:
       *
       *     "Because of a urb corruption caused by allocating a previous
       *      gsunit's urb entry to vsunit software is required to send a
       *      "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
       *      size == 0) plus a dummy DRAW call before any case where VS will
       *      be taking over GS URB space."
       */
      if (p->state.gs.active && !gs_active)
         ilo_3d_pipeline_emit_flush_gen6(p);

      p->state.gs.active = gs_active;
   }
}
330
331 static void
332 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
333 const struct ilo_context *ilo,
334 struct gen6_pipeline_session *session)
335 {
336 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
337 if (session->viewport_state_changed) {
338 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
339 p->state.CLIP_VIEWPORT,
340 p->state.SF_VIEWPORT,
341 p->state.CC_VIEWPORT, p->cp);
342 }
343 }
344
345 static void
346 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
347 const struct ilo_context *ilo,
348 struct gen6_pipeline_session *session)
349 {
350 /* 3DSTATE_CC_STATE_POINTERS */
351 if (session->cc_state_blend_changed ||
352 session->cc_state_dsa_changed ||
353 session->cc_state_cc_changed) {
354 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
355 p->state.BLEND_STATE,
356 p->state.DEPTH_STENCIL_STATE,
357 p->state.COLOR_CALC_STATE, p->cp);
358 }
359
360 /* 3DSTATE_SAMPLER_STATE_POINTERS */
361 if (session->sampler_state_vs_changed ||
362 session->sampler_state_gs_changed ||
363 session->sampler_state_fs_changed) {
364 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
365 p->state.vs.SAMPLER_STATE,
366 0,
367 p->state.wm.SAMPLER_STATE, p->cp);
368 }
369 }
370
371 static void
372 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
373 const struct ilo_context *ilo,
374 struct gen6_pipeline_session *session)
375 {
376 /* 3DSTATE_SCISSOR_STATE_POINTERS */
377 if (session->scissor_state_changed) {
378 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
379 p->state.SCISSOR_RECT, p->cp);
380 }
381
382 /* 3DSTATE_BINDING_TABLE_POINTERS */
383 if (session->binding_table_vs_changed ||
384 session->binding_table_gs_changed ||
385 session->binding_table_fs_changed) {
386 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
387 p->state.vs.BINDING_TABLE_STATE,
388 p->state.gs.BINDING_TABLE_STATE,
389 p->state.wm.BINDING_TABLE_STATE, p->cp);
390 }
391 }
392
/**
 * Emit the vertex-fetch state: index buffer (with HW primitive restart),
 * vertex buffers, and vertex elements.
 */
void
gen6_pipeline_vf(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   /* 3DSTATE_INDEX_BUFFER: also carries the primitive restart enable */
   if (DIRTY(INDEX_BUFFER)) {
      p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
            &ilo->index_buffer, session->info->primitive_restart, p->cp);
   }

   /* 3DSTATE_VERTEX_BUFFERS */
   if (DIRTY(VERTEX_BUFFERS)) {
      /* NOTE(review): (1 << num_buffers) - 1 would be UB if num_buffers
       * could reach 32 -- presumably bounded well below that; verify */
      p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
            ilo->vertex_buffers.buffers, NULL,
            (1 << ilo->vertex_buffers.num_buffers) - 1, p->cp);
   }

   /* 3DSTATE_VERTEX_ELEMENTS */
   if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
      const struct ilo_vertex_element *ive = ilo->vertex_elements;
      bool last_velement_edgeflag = false;
      bool prepend_generate_ids = false;

      if (ilo->vs) {
         const struct ilo_shader_info *info = &ilo->vs->info;

         if (info->edgeflag_in >= 0) {
            /* we rely on the state tracker here */
            assert(info->edgeflag_in == ive->num_elements - 1);
            last_velement_edgeflag = true;
         }

         /* extra element to generate InstanceID/VertexID for the VS */
         prepend_generate_ids = (info->has_instanceid || info->has_vertexid);
      }

      p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev,
            ive->elements, ive->num_elements,
            last_velement_edgeflag, prepend_generate_ids, p->cp);
   }
}
434
435 void
436 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
437 const struct ilo_context *ilo,
438 struct gen6_pipeline_session *session)
439 {
440 /* 3DSTATE_VF_STATISTICS */
441 if (session->hw_ctx_changed)
442 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
443 }
444
/**
 * Emit 3DPRIMITIVE for the current draw and re-arm the GEN6 post-sync
 * workaround for the next draw.
 */
void
gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   /* 3DPRIMITIVE */
   p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
   /* allow gen6_wa_pipe_control_post_sync() to emit again */
   p->state.has_gen6_wa_pipe_control = false;
}
454
/**
 * Emit 3DSTATE_CONSTANT_VS and 3DSTATE_VS, with the GEN6 workarounds
 * required around each.
 */
void
gen6_pipeline_vs(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS));
   const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;

   /*
    * the classic i965 does this in upload_vs_state(), citing a spec that I
    * cannot find
    */
   if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   /* 3DSTATE_CONSTANT_VS: upload the VS push constant buffer */
   if (emit_3dstate_constant_vs) {
      p->gen6_3DSTATE_CONSTANT_VS(p->dev,
            &p->state.vs.PUSH_CONSTANT_BUFFER,
            &p->state.vs.PUSH_CONSTANT_BUFFER_size,
            1, p->cp);
   }

   /* 3DSTATE_VS */
   if (emit_3dstate_vs) {
      const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
      const int num_samplers = ilo->samplers[PIPE_SHADER_VERTEX].num_samplers;

      p->gen6_3DSTATE_VS(p->dev, vs, num_samplers, p->cp);
   }

   /* GEN6 requires a flush after 3DSTATE_CONSTANT_VS; see
    * gen6_wa_pipe_control_vs_const_flush() */
   if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_vs_const_flush(p);
}
489
/**
 * Emit 3DSTATE_CONSTANT_GS and 3DSTATE_GS.
 */
static void
gen6_pipeline_gs(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   /* 3DSTATE_CONSTANT_GS: no GS push constants are uploaded */
   if (session->pcb_state_gs_changed)
      p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);

   /* 3DSTATE_GS: also re-emitted when the reduced primitive changed, as the
    * offset below is indexed by the primitive's vertex count */
   if (DIRTY(GS) || DIRTY(VS) || session->prim_changed) {
      const struct ilo_shader *gs = (ilo->gs)? ilo->gs->shader : NULL;
      const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
      const int num_vertices = u_vertices_per_prim(session->reduced_prim);

      if (gs)
         assert(!gs->pcb.clip_state_size);

      /* vs->gs_offsets[] is presumably a per-primitive GS kernel derived
       * from the VS (for stream output) -- verify against ilo_shader.c */
      p->gen6_3DSTATE_GS(p->dev, gs, vs,
            (vs) ? vs->cache_offset + vs->gs_offsets[num_vertices - 1] : 0,
            p->cp);
   }
}
513
514 bool
515 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
516 const struct ilo_context *ilo,
517 struct gen6_pipeline_session *session)
518 {
519 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
520 const struct pipe_stream_output_info *so_info =
521 (ilo->gs) ? &ilo->gs->info.stream_output :
522 (ilo->vs) ? &ilo->vs->info.stream_output : NULL;
523 unsigned max_svbi = 0xffffffff;
524 int i;
525
526 for (i = 0; i < so_info->num_outputs; i++) {
527 const int output_buffer = so_info->output[i].output_buffer;
528 const struct pipe_stream_output_target *so =
529 ilo->stream_output_targets.targets[output_buffer];
530 const int struct_size = so_info->stride[output_buffer] * 4;
531 const int elem_size = so_info->output[i].num_components * 4;
532 int buf_size, count;
533
534 if (!so) {
535 max_svbi = 0;
536 break;
537 }
538
539 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
540
541 count = buf_size / struct_size;
542 if (buf_size % struct_size >= elem_size)
543 count++;
544
545 if (count < max_svbi)
546 max_svbi = count;
547 }
548
549 if (p->state.so_max_vertices != max_svbi) {
550 p->state.so_max_vertices = max_svbi;
551 return true;
552 }
553 }
554
555 return false;
556 }
557
/**
 * Emit 3DSTATE_GS_SVB_INDEX when the maximum SVBI changed, and initialize
 * the unused indices on a fresh HW context.
 */
static void
gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);

   /* 3DSTATE_GS_SVB_INDEX */
   if (emit) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
            0, p->state.so_num_vertices, p->state.so_max_vertices,
            false, p->cp);

      if (session->hw_ctx_changed) {
         int i;

         /*
          * From the Sandy Bridge PRM, volume 2 part 1, page 148:
          *
          *     "If a buffer is not enabled then the SVBI must be set to 0x0
          *      in order to not cause overflow in that SVBI."
          *
          *     "If a buffer is not enabled then the MaxSVBI must be set to
          *      0xFFFFFFFF in order to not cause overflow in that SVBI."
          */
         for (i = 1; i < 4; i++) {
            p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
                  i, 0, 0xffffffff, false, p->cp);
         }
      }
   }
}
593
/**
 * Emit 3DSTATE_CLIP, deciding whether the guard band test can be enabled.
 */
void
gen6_pipeline_clip(struct ilo_3d_pipeline *p,
                   const struct ilo_context *ilo,
                   struct gen6_pipeline_session *session)
{
   /* 3DSTATE_CLIP */
   if (DIRTY(RASTERIZER) || DIRTY(FS) ||
       DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
      bool enable_guardband;
      float x1, x2, y1, y2;

      /*
       * We do not do 2D clipping yet.  Guard band test should only be enabled
       * when the viewport is larger than the framebuffer.
       */
      /* compute the screen-space extent of the viewport */
      x1 = fabs(ilo->viewport.scale[0]) * -1.0f + ilo->viewport.translate[0];
      x2 = fabs(ilo->viewport.scale[0]) * 1.0f + ilo->viewport.translate[0];
      y1 = fabs(ilo->viewport.scale[1]) * -1.0f + ilo->viewport.translate[1];
      y2 = fabs(ilo->viewport.scale[1]) * 1.0f + ilo->viewport.translate[1];
      enable_guardband =
         (x1 <= 0.0f && x2 >= (float) ilo->framebuffer.width &&
          y1 <= 0.0f && y2 >= (float) ilo->framebuffer.height);

      p->gen6_3DSTATE_CLIP(p->dev,
            ilo->rasterizer,
            (ilo->fs && ilo->fs->shader->in.has_linear_interp),
            enable_guardband, 1, p->cp);
   }
}
623
624 static void
625 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
626 const struct ilo_context *ilo,
627 struct gen6_pipeline_session *session)
628 {
629 /* 3DSTATE_SF */
630 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
631 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
632 const struct ilo_shader *last_sh =
633 (ilo->gs)? ilo->gs->shader :
634 (ilo->vs)? ilo->vs->shader : NULL;
635
636 p->gen6_3DSTATE_SF(p->dev,
637 ilo->rasterizer, fs, last_sh, p->cp);
638 }
639 }
640
641 void
642 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
643 const struct ilo_context *ilo,
644 struct gen6_pipeline_session *session)
645 {
646 /* 3DSTATE_DRAWING_RECTANGLE */
647 if (DIRTY(FRAMEBUFFER)) {
648 if (p->dev->gen == ILO_GEN(6))
649 gen6_wa_pipe_control_post_sync(p, false);
650
651 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
652 ilo->framebuffer.width, ilo->framebuffer.height, p->cp);
653 }
654 }
655
/**
 * Emit 3DSTATE_CONSTANT_PS and 3DSTATE_WM.
 */
static void
gen6_pipeline_wm(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   /* 3DSTATE_CONSTANT_PS: no PS push constants are uploaded */
   if (session->pcb_state_fs_changed)
      p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);

   /* 3DSTATE_WM */
   if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
       DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
       DIRTY(RASTERIZER)) {
      const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
      const int num_samplers =
         ilo->samplers[PIPE_SHADER_FRAGMENT].num_samplers;
      /* dual-source blending on RT0, mutually exclusive with logic ops */
      const bool dual_blend = (!ilo->blend->logicop_enable &&
                               ilo->blend->rt[0].blend_enable &&
                               util_blend_state_is_dual(ilo->blend, 0));
      /* alpha test or alpha-to-coverage may kill the pixel after the FS */
      const bool cc_may_kill = (ilo->depth_stencil_alpha->alpha.enabled ||
                                ilo->blend->alpha_to_coverage);

      if (fs)
         assert(!fs->pcb.clip_state_size);

      if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
         gen6_wa_pipe_control_wm_max_threads_stall(p);

      p->gen6_3DSTATE_WM(p->dev, fs, num_samplers,
            ilo->rasterizer, dual_blend, cc_may_kill, p->cp);
   }
}
688
/**
 * Emit 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK.
 */
static void
gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
                             const struct ilo_context *ilo,
                             struct gen6_pipeline_session *session)
{
   /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
   if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
      const uint32_t *packed_sample_pos;
      int num_samples = 1;

      /* the sample count comes from the first color buffer */
      if (ilo->framebuffer.nr_cbufs)
         num_samples = ilo->framebuffer.cbufs[0]->texture->nr_samples;

      packed_sample_pos = (num_samples > 1) ?
         &p->packed_sample_position_4x : &p->packed_sample_position_1x;

      if (p->dev->gen == ILO_GEN(6)) {
         /* the explicit post-sync call is redundant (the flush helper
          * performs it too) but harmless: it is emit-once per draw */
         gen6_wa_pipe_control_post_sync(p, false);
         gen6_wa_pipe_control_wm_multisample_flush(p);
      }

      p->gen6_3DSTATE_MULTISAMPLE(p->dev, num_samples, packed_sample_pos,
            ilo->rasterizer->half_pixel_center, p->cp);

      /* the sample mask is meaningful only when multisampling */
      p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
            (num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
   }
}
717
/**
 * Emit 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS.
 */
static void
gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
   if (DIRTY(FRAMEBUFFER)) {
      if (p->dev->gen == ILO_GEN(6)) {
         /* the explicit post-sync call is redundant (the flush helper
          * performs it too) but harmless: it is emit-once per draw */
         gen6_wa_pipe_control_post_sync(p, false);
         gen6_wa_pipe_control_wm_depth_flush(p);
      }

      p->gen6_3DSTATE_DEPTH_BUFFER(p->dev,
            ilo->framebuffer.zsbuf, false, p->cp);

      /* TODO: HiZ clear value is hard-coded to 0 for now */
      p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
   }
}
737
/**
 * Emit the rasterizer-dependent stipple and AA line commands.  All three
 * are non-pipelined on GEN6 and thus need the post-sync workaround.
 */
void
gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
                        const struct ilo_context *ilo,
                        struct gen6_pipeline_session *session)
{
   /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
   if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
       ilo->rasterizer->poly_stipple_enable) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
            &ilo->poly_stipple, p->cp);

      /* no stipple offset is applied */
      p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
   }

   /* 3DSTATE_LINE_STIPPLE */
   if (DIRTY(RASTERIZER) && ilo->rasterizer->line_stipple_enable) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      /* gallium's factor is one less than the HW repeat count */
      p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
            ilo->rasterizer->line_stipple_pattern,
            ilo->rasterizer->line_stipple_factor + 1, p->cp);
   }

   /* 3DSTATE_AA_LINE_PARAMETERS */
   if (DIRTY(RASTERIZER) && ilo->rasterizer->line_smooth) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
   }
}
773
/**
 * Upload the viewport state: combined SF_CLIP_VIEWPORT on GEN7+,
 * separate SF/CLIP viewports on GEN6; CC_VIEWPORT on both.
 */
static void
gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
                              const struct ilo_context *ilo,
                              struct gen6_pipeline_session *session)
{
   /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
   if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
      p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
            &ilo->viewport, 1, p->cp);

      p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
            &ilo->viewport, 1, p->cp);

      session->viewport_state_changed = true;
   }
   /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
   else if (DIRTY(VIEWPORT)) {
      p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
            &ilo->viewport, 1, p->cp);

      p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
            &ilo->viewport, 1, p->cp);

      p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
            &ilo->viewport, 1, p->cp);

      session->viewport_state_changed = true;
   }
}
803
/**
 * Upload the color-calculator states (BLEND_STATE, COLOR_CALC_STATE,
 * DEPTH_STENCIL_STATE) and flag the pointer commands for re-emission.
 */
static void
gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* BLEND_STATE: also depends on the framebuffer formats and alpha test */
   if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
      p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
            ilo->blend, &ilo->framebuffer,
            &ilo->depth_stencil_alpha->alpha, p->cp);

      session->cc_state_blend_changed = true;
   }

   /* COLOR_CALC_STATE: stencil ref, alpha ref, and blend color */
   if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
      p->state.COLOR_CALC_STATE = p->gen6_COLOR_CALC_STATE(p->dev,
            &ilo->stencil_ref,
            ilo->depth_stencil_alpha->alpha.ref_value,
            &ilo->blend_color, p->cp);

      session->cc_state_cc_changed = true;
   }

   /* DEPTH_STENCIL_STATE */
   if (DIRTY(DEPTH_STENCIL_ALPHA)) {
      p->state.DEPTH_STENCIL_STATE =
         p->gen6_DEPTH_STENCIL_STATE(p->dev,
               ilo->depth_stencil_alpha, p->cp);

      session->cc_state_dsa_changed = true;
   }
}
837
838 static void
839 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
840 const struct ilo_context *ilo,
841 struct gen6_pipeline_session *session)
842 {
843 /* SCISSOR_RECT */
844 if (DIRTY(SCISSOR)) {
845 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
846 &ilo->scissor, 1, p->cp);
847
848 session->scissor_state_changed = true;
849 }
850 }
851
852 static void
853 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
854 const struct ilo_context *ilo,
855 struct gen6_pipeline_session *session)
856 {
857 /* SURFACE_STATEs for render targets */
858 if (DIRTY(FRAMEBUFFER)) {
859 const int offset = ILO_WM_DRAW_SURFACE(0);
860 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
861 int i;
862
863 for (i = 0; i < ilo->framebuffer.nr_cbufs; i++) {
864 const struct pipe_surface *surface = ilo->framebuffer.cbufs[i];
865
866 assert(surface);
867 surface_state[i] =
868 p->gen6_surf_SURFACE_STATE(p->dev, surface, p->cp);
869 }
870
871 /*
872 * Upload at least one render target, as
873 * brw_update_renderbuffer_surfaces() does. I don't know why.
874 */
875 if (i == 0) {
876 struct pipe_surface null_surface;
877
878 memset(&null_surface, 0, sizeof(null_surface));
879 null_surface.width = ilo->framebuffer.width;
880 null_surface.height = ilo->framebuffer.height;
881
882 surface_state[i] =
883 p->gen6_surf_SURFACE_STATE(p->dev, &null_surface, p->cp);
884
885 i++;
886 }
887
888 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
889
890 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
891 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
892
893 session->binding_table_fs_changed = true;
894 }
895 }
896
/**
 * Upload SURFACE_STATEs for the stream output targets (GEN6 only; SO is
 * done through the GS on GEN6) and flag the GS binding table for
 * re-emission.
 */
static void
gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo,
                                struct gen6_pipeline_session *session)
{
   const struct ilo_shader_state *vs = ilo->vs;
   const struct ilo_shader_state *gs = ilo->gs;
   const struct pipe_stream_output_target **so_targets =
      (const struct pipe_stream_output_target **)
      ilo->stream_output_targets.targets;
   const int num_so_targets = ilo->stream_output_targets.num_targets;

   if (p->dev->gen != ILO_GEN(6))
      return;

   /* SURFACE_STATEs for stream output targets */
   if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
      /* the GS's SO info wins over the VS's; may be NULL with neither */
      const struct pipe_stream_output_info *so_info =
         (gs) ? &gs->info.stream_output :
         (vs) ? &vs->info.stream_output : NULL;
      const int offset = ILO_GS_SO_SURFACE(0);
      uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
      int i;

      for (i = 0; so_info && i < so_info->num_outputs; i++) {
         const int target = so_info->output[i].output_buffer;
         const struct pipe_stream_output_target *so_target =
            (target < num_so_targets) ? so_targets[target] : NULL;

         if (so_target) {
            surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
                  so_target, so_info, i, p->cp);
         }
         else {
            surface_state[i] = 0;
         }
      }

      /* clear the remaining slots (entries are 4-byte handles) */
      memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);

      if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
         session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;

      session->binding_table_gs_changed = true;
   }
}
943
/**
 * Upload SURFACE_STATEs for the sampler views of the given shader stage
 * (VS or FS only) and flag its binding table for re-emission.
 */
static void
gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
                                  const struct ilo_context *ilo,
                                  int shader_type,
                                  struct gen6_pipeline_session *session)
{
   const struct pipe_sampler_view **views =
      (const struct pipe_sampler_view **)
      ilo->sampler_views[shader_type].views;
   const int num_views = ilo->sampler_views[shader_type].num_views;
   uint32_t *surface_state;
   int offset, i;
   bool skip = false;

   /* SURFACE_STATEs for sampler views; surface_state and offset are set
    * exactly when skip stays false */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
         offset = ILO_VS_TEXTURE_SURFACE(0);
         surface_state = &p->state.vs.SURFACE_STATE[offset];

         session->binding_table_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
         offset = ILO_WM_TEXTURE_SURFACE(0);
         surface_state = &p->state.wm.SURFACE_STATE[offset];

         session->binding_table_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      /* GS has no sampler views here */
      skip = true;
      break;
   }

   if (skip)
      return;

   for (i = 0; i < num_views; i++) {
      if (views[i]) {
         surface_state[i] =
            p->gen6_view_SURFACE_STATE(p->dev, views[i], p->cp);
      }
      else {
         surface_state[i] = 0;
      }
   }

   /* clear the remaining slots (entries are 4-byte handles) */
   memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);

   if (i && session->num_surfaces[shader_type] < offset + i)
      session->num_surfaces[shader_type] = offset + i;
}
1005
/**
 * Upload SURFACE_STATEs for the constant buffers of the given shader
 * stage (VS or FS only) and flag its binding table for re-emission.
 */
static void
gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
                                   const struct ilo_context *ilo,
                                   int shader_type,
                                   struct gen6_pipeline_session *session)
{
   const struct pipe_constant_buffer *buffers =
      ilo->constant_buffers[shader_type].buffers;
   const int num_buffers = ilo->constant_buffers[shader_type].num_buffers;
   uint32_t *surface_state;
   int offset, i;
   bool skip = false;

   /* SURFACE_STATEs for constant buffers; surface_state and offset are set
    * exactly when skip stays false */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(CONSTANT_BUFFER)) {
         offset = ILO_VS_CONST_SURFACE(0);
         surface_state = &p->state.vs.SURFACE_STATE[offset];

         session->binding_table_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(CONSTANT_BUFFER)) {
         offset = ILO_WM_CONST_SURFACE(0);
         surface_state = &p->state.wm.SURFACE_STATE[offset];

         session->binding_table_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      /* GS uses no constant buffer surfaces here */
      skip = true;
      break;
   }

   if (skip)
      return;

   for (i = 0; i < num_buffers; i++) {
      if (buffers[i].buffer) {
         surface_state[i] =
            p->gen6_cbuf_SURFACE_STATE(p->dev, &buffers[i], p->cp);
      }
      else {
         surface_state[i] = 0;
      }
   }

   /* clear the remaining slots (entries are 4-byte handles) */
   memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);

   if (i && session->num_surfaces[shader_type] < offset + i)
      session->num_surfaces[shader_type] = offset + i;
}
1066
/**
 * Upload the BINDING_TABLE_STATE of a shader stage, pointing at the
 * SURFACE_STATEs cached for that stage.
 *
 * This must be called after all SURFACE_STATEs of the stage have been
 * uploaded in this session, as it relies on session->num_surfaces[] and
 * the cached SURFACE_STATE offsets.  Skipped when nothing marked the
 * stage's binding table as changed.
 */
static void
gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
                                   const struct ilo_context *ilo,
                                   int shader_type,
                                   struct gen6_pipeline_session *session)
{
   uint32_t *binding_table_state, *surface_state;
   int *binding_table_state_size, size;
   bool skip = false;

   /* BINDING_TABLE_STATE: pick the per-stage storage to read/write */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      surface_state = p->state.vs.SURFACE_STATE;
      binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_vs_changed;
      break;
   case PIPE_SHADER_GEOMETRY:
      surface_state = p->state.gs.SURFACE_STATE;
      binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_gs_changed;
      break;
   case PIPE_SHADER_FRAGMENT:
      surface_state = p->state.wm.SURFACE_STATE;
      binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_fs_changed;
      break;
   default:
      skip = true;
      break;
   }

   if (skip)
      return;

   /*
    * If we seemingly have fewer SURFACE_STATEs than before, it could be
    * that the ones residing at the tail were simply not touched in this
    * upload.  Scan backwards over them to figure out the real number of
    * SURFACE_STATEs.
    */
   for (size = *binding_table_state_size;
        size > session->num_surfaces[shader_type]; size--) {
      if (surface_state[size - 1])
         break;
   }
   /* never shrink below what this session uploaded */
   if (size < session->num_surfaces[shader_type])
      size = session->num_surfaces[shader_type];

   *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
         surface_state, size, p->cp);
   *binding_table_state_size = size;
}
1125
/**
 * Upload SAMPLER_STATE (and, when the samplers themselves changed,
 * SAMPLER_BORDER_COLOR_STATEs) for a shader stage.
 *
 * Only PIPE_SHADER_VERTEX and PIPE_SHADER_FRAGMENT are handled; other
 * stages are skipped, as is any stage whose samplers and sampler views
 * are both clean.
 */
static void
gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
                             const struct ilo_context *ilo,
                             int shader_type,
                             struct gen6_pipeline_session *session)
{
   const struct pipe_sampler_state **samplers =
      (const struct pipe_sampler_state **)
      ilo->samplers[shader_type].samplers;
   const struct pipe_sampler_view **views =
      (const struct pipe_sampler_view **)
      ilo->sampler_views[shader_type].views;
   const int num_samplers = ilo->samplers[shader_type].num_samplers;
   const int num_views = ilo->sampler_views[shader_type].num_views;
   uint32_t *sampler_state, *border_color_state;
   bool emit_border_color = false;
   bool skip = false;

   /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
         sampler_state = &p->state.vs.SAMPLER_STATE;
         border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;

         /* border colors live in the sampler states, not the views */
         if (DIRTY(VERTEX_SAMPLERS))
            emit_border_color = true;

         session->sampler_state_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
         sampler_state = &p->state.wm.SAMPLER_STATE;
         border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;

         if (DIRTY(FRAGMENT_SAMPLERS))
            emit_border_color = true;

         session->sampler_state_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      skip = true;
      break;
   }

   if (skip)
      return;

   if (emit_border_color) {
      int i;

      /* a NULL sampler gets a zero (no border color) entry */
      for (i = 0; i < num_samplers; i++) {
         border_color_state[i] = (samplers[i]) ?
            p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
                  &samplers[i]->border_color, p->cp) : 0;
      }
   }

   /* should we take the minimum of num_samplers and num_views? */
   *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
         samplers, views,
         border_color_state,
         MIN2(num_samplers, num_views), p->cp);
}
1198
1199 static void
1200 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1201 const struct ilo_context *ilo,
1202 struct gen6_pipeline_session *session)
1203 {
1204 /* push constant buffer for VS */
1205 if (DIRTY(VS) || DIRTY(CLIP)) {
1206 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
1207
1208 if (vs && vs->pcb.clip_state_size) {
1209 void *pcb;
1210
1211 p->state.vs.PUSH_CONSTANT_BUFFER_size = vs->pcb.clip_state_size;
1212 p->state.vs.PUSH_CONSTANT_BUFFER =
1213 p->gen6_push_constant_buffer(p->dev,
1214 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
1215
1216 memcpy(pcb, &ilo->clip, vs->pcb.clip_state_size);
1217 }
1218 else {
1219 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1220 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1221 }
1222
1223 session->pcb_state_vs_changed = true;
1224 }
1225 }
1226
1227 #undef DIRTY
1228
/**
 * Emit all 3D commands needed for a draw.  The emission order is
 * significant; do not reorder these calls casually.
 */
static void
gen6_pipeline_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /*
    * We try to keep the command order matching, as closely as possible,
    * that of the classic i965 driver.  It allows us to compare the command
    * streams easily.
    */
   gen6_pipeline_common_select(p, ilo, session);
   gen6_pipeline_gs_svbi(p, ilo, session);
   gen6_pipeline_common_sip(p, ilo, session);
   gen6_pipeline_vf_statistics(p, ilo, session);
   gen6_pipeline_common_base_address(p, ilo, session);
   gen6_pipeline_common_pointers_1(p, ilo, session);
   gen6_pipeline_common_urb(p, ilo, session);
   gen6_pipeline_common_pointers_2(p, ilo, session);
   gen6_pipeline_wm_multisample(p, ilo, session);
   gen6_pipeline_vs(p, ilo, session);
   gen6_pipeline_gs(p, ilo, session);
   gen6_pipeline_clip(p, ilo, session);
   gen6_pipeline_sf(p, ilo, session);
   gen6_pipeline_wm(p, ilo, session);
   gen6_pipeline_common_pointers_3(p, ilo, session);
   gen6_pipeline_wm_depth(p, ilo, session);
   gen6_pipeline_wm_raster(p, ilo, session);
   gen6_pipeline_sf_rect(p, ilo, session);
   gen6_pipeline_vf(p, ilo, session);
   gen6_pipeline_vf_draw(p, ilo, session);
}
1260
/**
 * Upload all dynamic states needed for a draw: viewports, CC, scissors,
 * the VS push constant buffer, SURFACE_STATEs, samplers, and finally the
 * binding tables.
 */
void
gen6_pipeline_states(struct ilo_3d_pipeline *p,
                     const struct ilo_context *ilo,
                     struct gen6_pipeline_session *session)
{
   int shader_type;

   gen6_pipeline_state_viewports(p, ilo, session);
   gen6_pipeline_state_cc(p, ilo, session);
   gen6_pipeline_state_scissors(p, ilo, session);
   gen6_pipeline_state_pcb(p, ilo, session);

   /*
    * upload all SURFACE_STATEs together so that we know there are minimal
    * paddings
    */
   gen6_pipeline_state_surfaces_rt(p, ilo, session);
   gen6_pipeline_state_surfaces_so(p, ilo, session);
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
      gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_samplers(p, ilo, shader_type, session);
      /* this must be called after all SURFACE_STATEs are uploaded */
      gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
   }
}
1290
1291 void
1292 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1293 const struct ilo_context *ilo,
1294 const struct pipe_draw_info *info,
1295 struct gen6_pipeline_session *session)
1296 {
1297 memset(session, 0, sizeof(*session));
1298 session->info = info;
1299 session->pipe_dirty = ilo->dirty;
1300 session->reduced_prim = u_reduced_prim(info->mode);
1301
1302 /* available space before the session */
1303 session->init_cp_space = ilo_cp_space(p->cp);
1304
1305 session->hw_ctx_changed =
1306 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1307
1308 if (session->hw_ctx_changed) {
1309 /* these should be enough to make everything uploaded */
1310 session->state_bo_changed = true;
1311 session->instruction_bo_changed = true;
1312 session->prim_changed = true;
1313 }
1314 else {
1315 session->state_bo_changed =
1316 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1317 session->instruction_bo_changed =
1318 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1319 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1320 }
1321 }
1322
1323 void
1324 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1325 const struct ilo_context *ilo,
1326 struct gen6_pipeline_session *session)
1327 {
1328 /* force all states to be uploaded if the state bo changed */
1329 if (session->state_bo_changed)
1330 session->pipe_dirty = ILO_DIRTY_ALL;
1331 else
1332 session->pipe_dirty = ilo->dirty;
1333
1334 session->emit_draw_states(p, ilo, session);
1335
1336 /* force all commands to be uploaded if the HW context changed */
1337 if (session->hw_ctx_changed)
1338 session->pipe_dirty = ILO_DIRTY_ALL;
1339 else
1340 session->pipe_dirty = ilo->dirty;
1341
1342 session->emit_draw_commands(p, ilo, session);
1343 }
1344
1345 void
1346 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1347 const struct ilo_context *ilo,
1348 struct gen6_pipeline_session *session)
1349 {
1350 int used, estimate;
1351
1352 /* sanity check size estimation */
1353 used = session->init_cp_space - ilo_cp_space(p->cp);
1354 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1355 assert(used <= estimate);
1356
1357 p->state.reduced_prim = session->reduced_prim;
1358 }
1359
/**
 * Emit a complete draw on GEN6: prepare a session, hook up the GEN6
 * state/command emitters, then run the draw and finish the session.
 */
static void
ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
                               const struct ilo_context *ilo,
                               const struct pipe_draw_info *info)
{
   struct gen6_pipeline_session session;

   gen6_pipeline_prepare(p, ilo, info, &session);

   session.emit_draw_states = gen6_pipeline_states;
   session.emit_draw_commands = gen6_pipeline_commands;

   gen6_pipeline_draw(p, ilo, &session);
   gen6_pipeline_end(p, ilo, &session);
}
1375
1376 void
1377 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1378 {
1379 if (p->dev->gen == ILO_GEN(6))
1380 gen6_wa_pipe_control_post_sync(p, false);
1381
1382 p->gen6_PIPE_CONTROL(p->dev,
1383 PIPE_CONTROL_INSTRUCTION_FLUSH |
1384 PIPE_CONTROL_WRITE_FLUSH |
1385 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1386 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1387 PIPE_CONTROL_TC_FLUSH |
1388 PIPE_CONTROL_NO_WRITE |
1389 PIPE_CONTROL_CS_STALL,
1390 0, 0, false, p->cp);
1391 }
1392
/**
 * Emit a PIPE_CONTROL that writes the 64-bit timestamp into slot \p index
 * of \p bo.  On GEN6, the post-sync workaround is emitted first (with
 * caller_post_sync set, since this command itself performs a post-sync
 * write).
 */
void
ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
                                          struct intel_bo *bo, int index)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, true);

   /* the write offset is in 8-byte slots, with the GGTT write bit or'ed in */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_TIMESTAMP,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         true, p->cp);
}
1405
/**
 * Emit a PIPE_CONTROL that writes the depth (PS invocation) count into
 * slot \p index of \p bo, with the depth stall the write requires.  On
 * GEN6, the post-sync workaround is emitted first.
 */
void
ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
                                            struct intel_bo *bo, int index)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   /* the write offset is in 8-byte slots, with the GGTT write bit or'ed in */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL |
         PIPE_CONTROL_WRITE_DEPTH_COUNT,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         true, p->cp);
}
1419
/**
 * Estimate the maximum command-stream size a draw can emit, by summing a
 * worst-case instance count for every GEN6 command type.
 *
 * NOTE(review): the result is cached in a function-local static, so it is
 * computed once for the whole process — this assumes the per-command size
 * estimates do not vary across devices/contexts; confirm if multiple
 * devices can coexist.
 */
static int
gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
                                const struct ilo_gpe_gen6 *gen6,
                                const struct ilo_context *ilo)
{
   static int size;
   enum ilo_gpe_gen6_command cmd;

   if (size)
      return size;

   for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
      int count;

      switch (cmd) {
      case ILO_GPE_GEN6_PIPE_CONTROL:
         /* for the workaround */
         count = 2;
         /* another one after 3DSTATE_URB */
         count += 1;
         /* and another one after 3DSTATE_CONSTANT_VS */
         count += 1;
         break;
      case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
         /* there are 4 SVBIs */
         count = 4;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
         /* presumably the maximum number of vertex buffers — confirm */
         count = 33;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
         /* presumably the maximum number of vertex elements — confirm */
         count = 34;
         break;
      case ILO_GPE_GEN6_MEDIA_VFE_STATE:
      case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
      case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
      case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
      case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
      case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
         /* media commands are never emitted by the 3D pipeline */
         count = 0;
         break;
      default:
         count = 1;
         break;
      }

      if (count)
         size += gen6->estimate_command_size(p->dev, cmd, count);
   }

   return size;
}
1473
/**
 * Estimate the maximum dynamic-state size a draw can emit.  A fixed part
 * (viewports, cc, scissors, binding tables) is computed once and cached;
 * the variable part depends on the currently bound surfaces, samplers,
 * and the VS push constant buffer.
 *
 * NOTE(review): the fixed part is cached in a function-local static, so
 * it assumes the estimates do not vary across devices — confirm.
 */
static int
gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
                              const struct ilo_gpe_gen6 *gen6,
                              const struct ilo_context *ilo)
{
   static int static_size;
   int shader_type, count, size;

   if (!static_size) {
      struct {
         enum ilo_gpe_gen6_state state;
         int count;
      } static_states[] = {
         /* viewports */
         { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
         /* cc */
         { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
         { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
         { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
         /* scissors */
         { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
         /* binding table (vs, gs, fs) */
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
      };
      int i;

      for (i = 0; i < Elements(static_states); i++) {
         static_size += gen6->estimate_state_size(p->dev,
               static_states[i].state,
               static_states[i].count);
      }
   }

   size = static_size;

   /*
    * count every SURFACE_STATE the bound state can need:
    * render targets (fs)
    * stream outputs (gs)
    * sampler views (vs, fs)
    * constant buffers (vs, fs)
    */
   count = ilo->framebuffer.nr_cbufs;

   if (ilo->gs)
      count += ilo->gs->info.stream_output.num_outputs;
   else if (ilo->vs)
      count += ilo->vs->info.stream_output.num_outputs;

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count += ilo->sampler_views[shader_type].num_views;
      count += ilo->constant_buffers[shader_type].num_buffers;
   }

   if (count) {
      size += gen6->estimate_state_size(p->dev,
            ILO_GPE_GEN6_SURFACE_STATE, count);
   }

   /* samplers (vs, fs) */
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count = ilo->samplers[shader_type].num_samplers;
      if (count) {
         size += gen6->estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
         size += gen6->estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_STATE, count);
      }
   }

   /* pcb (vs) */
   if (ilo->vs && ilo->vs->shader->pcb.clip_state_size) {
      const int pcb_size = ilo->vs->shader->pcb.clip_state_size;

      size += gen6->estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
   }

   return size;
}
1557
1558 static int
1559 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1560 enum ilo_3d_pipeline_action action,
1561 const void *arg)
1562 {
1563 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1564 int size;
1565
1566 switch (action) {
1567 case ILO_3D_PIPELINE_DRAW:
1568 {
1569 const struct ilo_context *ilo = arg;
1570
1571 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1572 gen6_pipeline_estimate_states(p, gen6, ilo);
1573 }
1574 break;
1575 case ILO_3D_PIPELINE_FLUSH:
1576 size = gen6->estimate_command_size(p->dev,
1577 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1578 break;
1579 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1580 size = gen6->estimate_command_size(p->dev,
1581 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1582 break;
1583 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1584 size = gen6->estimate_command_size(p->dev,
1585 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1586 break;
1587 default:
1588 assert(!"unknown 3D pipeline action");
1589 size = 0;
1590 break;
1591 }
1592
1593 return size;
1594 }
1595
/**
 * Initialize the pipeline for GEN6: install the top-level entry points
 * and fill the emission vtable with the GEN6 GPE emitters.
 */
void
ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
{
   const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();

   /* top-level pipeline entry points */
   p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
   p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
   p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
   p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
   p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;

   /* wire each p->gen6_<name> hook to the matching GPE emitter */
#define GEN6_USE(p, name, from) \
   p->gen6_ ## name = from->emit_ ## name
   /* commands */
   GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
   GEN6_USE(p, STATE_SIP, gen6);
   GEN6_USE(p, PIPELINE_SELECT, gen6);
   GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_URB, gen6);
   GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
   GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
   GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
   GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_VS, gen6);
   GEN6_USE(p, 3DSTATE_GS, gen6);
   GEN6_USE(p, 3DSTATE_CLIP, gen6);
   GEN6_USE(p, 3DSTATE_SF, gen6);
   GEN6_USE(p, 3DSTATE_WM, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
   GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
   GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
   GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
   GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
   GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
   GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
   GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
   GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
   GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
   GEN6_USE(p, PIPE_CONTROL, gen6);
   GEN6_USE(p, 3DPRIMITIVE, gen6);
   GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
   /* dynamic states */
   GEN6_USE(p, SF_VIEWPORT, gen6);
   GEN6_USE(p, CLIP_VIEWPORT, gen6);
   GEN6_USE(p, CC_VIEWPORT, gen6);
   GEN6_USE(p, COLOR_CALC_STATE, gen6);
   GEN6_USE(p, BLEND_STATE, gen6);
   GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
   GEN6_USE(p, SCISSOR_RECT, gen6);
   GEN6_USE(p, BINDING_TABLE_STATE, gen6);
   GEN6_USE(p, surf_SURFACE_STATE, gen6);
   GEN6_USE(p, view_SURFACE_STATE, gen6);
   GEN6_USE(p, cbuf_SURFACE_STATE, gen6);
   GEN6_USE(p, so_SURFACE_STATE, gen6);
   GEN6_USE(p, SAMPLER_STATE, gen6);
   GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
   GEN6_USE(p, push_constant_buffer, gen6);
#undef GEN6_USE
}