ilo: mapping a resource may make some states dirty
src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_context.h"
33 #include "ilo_cp.h"
34 #include "ilo_gpe_gen6.h"
35 #include "ilo_shader.h"
36 #include "ilo_state.h"
37 #include "ilo_3d_pipeline.h"
38 #include "ilo_3d_pipeline_gen6.h"
39
40 /**
41 * This should be called before any depth stall flush (including those
42 * produced by non-pipelined state commands) or cache flush on GEN6.
43 *
44 * \see intel_emit_post_sync_nonzero_flush()
45 */
46 static void
47 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
48 bool caller_post_sync)
49 {
50 assert(p->dev->gen == ILO_GEN(6));
51
52 /* emit only once; the flag is reset after each 3DPRIMITIVE */
53 if (p->state.has_gen6_wa_pipe_control)
54 return;
55
56 p->state.has_gen6_wa_pipe_control = true;
57
58 /*
59 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
60 *
61 * "Pipe-control with CS-stall bit set must be sent BEFORE the
62 * pipe-control with a post-sync op and no write-cache flushes."
63 *
64 * The post-sync PIPE_CONTROL below (or the one emitted by the caller) makes this CS-stall workaround necessary.
65 */
66 p->gen6_PIPE_CONTROL(p->dev,
67 PIPE_CONTROL_CS_STALL |
68 PIPE_CONTROL_STALL_AT_SCOREBOARD,
69 NULL, 0, false, p->cp);
70
71 /* the caller will emit the post-sync op */
72 if (caller_post_sync)
73 return;
74
75 /*
76 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
77 *
78 * "Before any depth stall flush (including those produced by
79 * non-pipelined state commands), software needs to first send a
80 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
81 *
82 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
83 * PIPE_CONTROL with any non-zero post-sync-op is required."
84 */
85 p->gen6_PIPE_CONTROL(p->dev,
86 PIPE_CONTROL_WRITE_IMMEDIATE,
87 p->workaround_bo, 0, false, p->cp);
88 }
89
90 static void
91 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
92 {
93 assert(p->dev->gen == ILO_GEN(6));
94
95 gen6_wa_pipe_control_post_sync(p, false);
96
97 /*
98 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
99 *
100 * "Driver must guarentee that all the caches in the depth pipe are
101 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
102 * requires driver to send a PIPE_CONTROL with a CS stall along with a
103 * Depth Flush prior to this command."
104 */
105 p->gen6_PIPE_CONTROL(p->dev,
106 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
107 PIPE_CONTROL_CS_STALL,
108 0, 0, false, p->cp);
109 }
110
111 static void
112 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
113 {
114 assert(p->dev->gen == ILO_GEN(6));
115
116 gen6_wa_pipe_control_post_sync(p, false);
117
118 /*
119 * According to intel_emit_depth_stall_flushes() of classic i965, we need
120 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
121 * commands.
122 */
123 p->gen6_PIPE_CONTROL(p->dev,
124 PIPE_CONTROL_DEPTH_STALL,
125 NULL, 0, false, p->cp);
126
127 p->gen6_PIPE_CONTROL(p->dev,
128 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
129 NULL, 0, false, p->cp);
130
131 p->gen6_PIPE_CONTROL(p->dev,
132 PIPE_CONTROL_DEPTH_STALL,
133 NULL, 0, false, p->cp);
134 }
135
136 static void
137 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
138 {
139 assert(p->dev->gen == ILO_GEN(6));
140
141 /* the post-sync workaround should cover this already */
142 if (p->state.has_gen6_wa_pipe_control)
143 return;
144
145 /*
146 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
147 *
148 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
149 * field set (DW1 Bit 1), must be issued prior to any change to the
150 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
151 */
152 p->gen6_PIPE_CONTROL(p->dev,
153 PIPE_CONTROL_STALL_AT_SCOREBOARD,
154 NULL, 0, false, p->cp);
155
156 }
157
158 static void
159 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
160 {
161 assert(p->dev->gen == ILO_GEN(6));
162
163 gen6_wa_pipe_control_post_sync(p, false);
164
165 /*
166 * According to upload_vs_state() of classic i965, we need to emit a
167 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command may stay
168 * buffered by the VS fixed-function unit to the point that the FF hangs.
169 */
170 p->gen6_PIPE_CONTROL(p->dev,
171 PIPE_CONTROL_DEPTH_STALL |
172 PIPE_CONTROL_INSTRUCTION_FLUSH |
173 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
174 NULL, 0, false, p->cp);
175 }
176
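/*
 * Convenience macro: true when the named context state is marked dirty in
 * this session's pipe_dirty bits.
 */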
177 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
178
179 void
180 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
181 const struct ilo_context *ilo,
182 struct gen6_pipeline_session *session)
183 {
184 /* PIPELINE_SELECT */
185 if (session->hw_ctx_changed) {
186 if (p->dev->gen == ILO_GEN(6))
187 gen6_wa_pipe_control_post_sync(p, false);
188
189 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
190 }
191 }
192
193 void
194 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
195 const struct ilo_context *ilo,
196 struct gen6_pipeline_session *session)
197 {
198 /* STATE_SIP */
199 if (session->hw_ctx_changed) {
200 if (p->dev->gen == ILO_GEN(6))
201 gen6_wa_pipe_control_post_sync(p, false);
202
203 p->gen6_STATE_SIP(p->dev, 0, p->cp);
204 }
205 }
206
207 void
208 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
209 const struct ilo_context *ilo,
210 struct gen6_pipeline_session *session)
211 {
212 /* STATE_BASE_ADDRESS */
213 if (session->state_bo_changed || session->instruction_bo_changed ||
214 session->batch_bo_changed) {
215 if (p->dev->gen == ILO_GEN(6))
216 gen6_wa_pipe_control_post_sync(p, false);
217
218 p->gen6_STATE_BASE_ADDRESS(p->dev,
219 NULL, p->cp->bo, p->cp->bo, NULL, ilo->shader_cache->bo,
220 0, 0, 0, 0, p->cp);
221
222 /*
223 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
224 *
225 * "The following commands must be reissued following any change to
226 * the base addresses:
227 *
228 * * 3DSTATE_BINDING_TABLE_POINTERS
229 * * 3DSTATE_SAMPLER_STATE_POINTERS
230 * * 3DSTATE_VIEWPORT_STATE_POINTERS
231 * * 3DSTATE_CC_POINTERS
232 * * MEDIA_STATE_POINTERS"
233 *
234 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
235 * reasonable to also reissue the command. The same goes for the PCBs.
236 */
237 session->viewport_state_changed = true;
238
239 session->cc_state_blend_changed = true;
240 session->cc_state_dsa_changed = true;
241 session->cc_state_cc_changed = true;
242
243 session->scissor_state_changed = true;
244
245 session->binding_table_vs_changed = true;
246 session->binding_table_gs_changed = true;
247 session->binding_table_fs_changed = true;
248
249 session->sampler_state_vs_changed = true;
250 session->sampler_state_gs_changed = true;
251 session->sampler_state_fs_changed = true;
252
253 session->pcb_state_vs_changed = true;
254 session->pcb_state_gs_changed = true;
255 session->pcb_state_fs_changed = true;
256 }
257 }
258
259 static void
260 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
261 const struct ilo_context *ilo,
262 struct gen6_pipeline_session *session)
263 {
264 /* 3DSTATE_URB */
265 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
266 const struct ilo_shader *vs = (ilo->vs) ? ilo->vs->shader : NULL;
267 const struct ilo_shader *gs = (ilo->gs) ? ilo->gs->shader : NULL;
268 const bool gs_active = (gs || (vs && vs->stream_output));
269 int vs_entry_size, gs_entry_size;
270 int vs_total_size, gs_total_size;
271
272 vs_entry_size = (vs) ? vs->out.count : 0;
273
274 /*
275 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
276 * share VUE handles. The VUE allocation size must be large enough to
277 * store either the VF outputs (the number of VERTEX_ELEMENTs) or the VS outputs, whichever is larger.
278 *
279 * I am not sure if the PRM explicitly states that VF and VS share VUE
280 * handles. But here is a citation that implies so:
281 *
282 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
283 *
284 * "Once a FF stage that spawn threads has sufficient input to
285 * initiate a thread, it must guarantee that it is safe to request
286 * the thread initiation. For all these FF stages, this check is
287 * based on :
288 *
289 * - The availability of output URB entries:
290 * - VS: As the input URB entries are overwritten with the
291 * VS-generated output data, output URB availability isn't a
292 * factor."
293 */
294 if (vs_entry_size < ilo->ve->count)
295 vs_entry_size = ilo->ve->count;
296
297 gs_entry_size = (gs) ? gs->out.count :
298 (vs && vs->stream_output) ? vs_entry_size : 0;
299
300 /* in bytes */
301 vs_entry_size *= sizeof(float) * 4;
302 gs_entry_size *= sizeof(float) * 4;
303 vs_total_size = ilo->dev->urb_size;
304
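/*
 * Partition the URB: when the GS (or VS stream output) is active, split
 * the URB space evenly between VS and GS; otherwise the VS gets all of it.
 */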
305 if (gs_active) {
306 vs_total_size /= 2;
307 gs_total_size = vs_total_size;
308 }
309 else {
310 gs_total_size = 0;
311 }
312
313 p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
314 vs_entry_size, gs_entry_size, p->cp);
315
316 /*
317 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
318 *
319 * "Because of a urb corruption caused by allocating a previous
320 * gsunit's urb entry to vsunit software is required to send a
321 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
322 * size == 0) plus a dummy DRAW call before any case where VS will
323 * be taking over GS URB space."
324 */
325 if (p->state.gs.active && !gs_active)
326 ilo_3d_pipeline_emit_flush_gen6(p);
327
328 p->state.gs.active = gs_active;
329 }
330 }
331
332 static void
333 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
334 const struct ilo_context *ilo,
335 struct gen6_pipeline_session *session)
336 {
337 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
338 if (session->viewport_state_changed) {
339 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
340 p->state.CLIP_VIEWPORT,
341 p->state.SF_VIEWPORT,
342 p->state.CC_VIEWPORT, p->cp);
343 }
344 }
345
346 static void
347 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
348 const struct ilo_context *ilo,
349 struct gen6_pipeline_session *session)
350 {
351 /* 3DSTATE_CC_STATE_POINTERS */
352 if (session->cc_state_blend_changed ||
353 session->cc_state_dsa_changed ||
354 session->cc_state_cc_changed) {
355 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
356 p->state.BLEND_STATE,
357 p->state.DEPTH_STENCIL_STATE,
358 p->state.COLOR_CALC_STATE, p->cp);
359 }
360
361 /* 3DSTATE_SAMPLER_STATE_POINTERS */
362 if (session->sampler_state_vs_changed ||
363 session->sampler_state_gs_changed ||
364 session->sampler_state_fs_changed) {
365 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
366 p->state.vs.SAMPLER_STATE,
367 0,
368 p->state.wm.SAMPLER_STATE, p->cp);
369 }
370 }
371
372 static void
373 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
374 const struct ilo_context *ilo,
375 struct gen6_pipeline_session *session)
376 {
377 /* 3DSTATE_SCISSOR_STATE_POINTERS */
378 if (session->scissor_state_changed) {
379 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
380 p->state.SCISSOR_RECT, p->cp);
381 }
382
383 /* 3DSTATE_BINDING_TABLE_POINTERS */
384 if (session->binding_table_vs_changed ||
385 session->binding_table_gs_changed ||
386 session->binding_table_fs_changed) {
387 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
388 p->state.vs.BINDING_TABLE_STATE,
389 p->state.gs.BINDING_TABLE_STATE,
390 p->state.wm.BINDING_TABLE_STATE, p->cp);
391 }
392 }
393
394 void
395 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
396 const struct ilo_context *ilo,
397 struct gen6_pipeline_session *session)
398 {
399 /* 3DSTATE_INDEX_BUFFER */
400 if (DIRTY(INDEX_BUFFER) || session->batch_bo_changed) {
401 p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
402 &ilo->ib.state, session->info->primitive_restart, p->cp);
403 }
404
405 /* 3DSTATE_VERTEX_BUFFERS */
406 if (DIRTY(VERTEX_BUFFERS) || DIRTY(VERTEX_ELEMENTS) ||
407 session->batch_bo_changed) {
408 p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
409 ilo->vb.states, ilo->vb.enabled_mask, ilo->ve, p->cp);
410 }
411
412 /* 3DSTATE_VERTEX_ELEMENTS */
413 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
414 const struct ilo_ve_state *ve = ilo->ve;
415 bool last_velement_edgeflag = false;
416 bool prepend_generate_ids = false;
417
418 if (ilo->vs) {
419 const struct ilo_shader_info *info = &ilo->vs->info;
420
421 if (info->edgeflag_in >= 0) {
422 /* we rely on the state tracker here */
423 assert(info->edgeflag_in == ve->count - 1);
424 last_velement_edgeflag = true;
425 }
426
427 prepend_generate_ids = (info->has_instanceid || info->has_vertexid);
428 }
429
430 p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
431 last_velement_edgeflag, prepend_generate_ids, p->cp);
432 }
433 }
434
435 void
436 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
437 const struct ilo_context *ilo,
438 struct gen6_pipeline_session *session)
439 {
440 /* 3DSTATE_VF_STATISTICS */
441 if (session->hw_ctx_changed)
442 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
443 }
444
445 void
446 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
447 const struct ilo_context *ilo,
448 struct gen6_pipeline_session *session)
449 {
450 /* 3DPRIMITIVE */
451 p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
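/* clear the flag so the post-sync workaround is re-emitted for the next draw */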
452 p->state.has_gen6_wa_pipe_control = false;
453 }
454
455 void
456 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
457 const struct ilo_context *ilo,
458 struct gen6_pipeline_session *session)
459 {
460 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS));
461 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
462
463 /*
464 * the classic i965 does this in upload_vs_state(), citing a spec that I
465 * cannot find
466 */
467 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
468 gen6_wa_pipe_control_post_sync(p, false);
469
470 /* 3DSTATE_CONSTANT_VS */
471 if (emit_3dstate_constant_vs) {
472 p->gen6_3DSTATE_CONSTANT_VS(p->dev,
473 &p->state.vs.PUSH_CONSTANT_BUFFER,
474 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
475 1, p->cp);
476 }
477
478 /* 3DSTATE_VS */
479 if (emit_3dstate_vs) {
480 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
481 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
482
483 p->gen6_3DSTATE_VS(p->dev, vs, num_samplers, p->cp);
484 }
485
486 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
487 gen6_wa_pipe_control_vs_const_flush(p);
488 }
489
490 static void
491 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
492 const struct ilo_context *ilo,
493 struct gen6_pipeline_session *session)
494 {
495 /* 3DSTATE_CONSTANT_GS */
496 if (session->pcb_state_gs_changed)
497 p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
498
499 /* 3DSTATE_GS */
500 if (DIRTY(GS) || DIRTY(VS) || session->prim_changed) {
501 const struct ilo_shader *gs = (ilo->gs)? ilo->gs->shader : NULL;
502 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
503 const int num_vertices = u_vertices_per_prim(session->reduced_prim);
504
505 if (gs)
506 assert(!gs->pcb.clip_state_size);
507
508 p->gen6_3DSTATE_GS(p->dev, gs, vs,
509 (vs) ? vs->cache_offset + vs->gs_offsets[num_vertices - 1] : 0,
510 p->cp);
511 }
512 }
513
514 bool
515 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
516 const struct ilo_context *ilo,
517 struct gen6_pipeline_session *session)
518 {
519 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
520 const struct pipe_stream_output_info *so_info =
521 (ilo->gs) ? &ilo->gs->info.stream_output :
522 (ilo->vs) ? &ilo->vs->info.stream_output : NULL;
523 unsigned max_svbi = 0xffffffff;
524 int i;
525
526 for (i = 0; i < so_info->num_outputs; i++) {
527 const int output_buffer = so_info->output[i].output_buffer;
528 const struct pipe_stream_output_target *so =
529 ilo->so.states[output_buffer];
530 const int struct_size = so_info->stride[output_buffer] * 4;
531 const int elem_size = so_info->output[i].num_components * 4;
532 int buf_size, count;
533
534 if (!so) {
535 max_svbi = 0;
536 break;
537 }
538
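/*
 * Cap the SVBI by the number of complete vertices that fit in the space
 * remaining in the bound buffer: whole structs first, plus one more if the
 * leftover space can still hold this element.  With hypothetical numbers,
 * 100 remaining bytes, a 16-byte stride, and an 8-byte element give
 * 100 / 16 = 6 with a remainder of 4 < 8, so the cap is 6.
 */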
539 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
540
541 count = buf_size / struct_size;
542 if (buf_size % struct_size >= elem_size)
543 count++;
544
545 if (count < max_svbi)
546 max_svbi = count;
547 }
548
549 if (p->state.so_max_vertices != max_svbi) {
550 p->state.so_max_vertices = max_svbi;
551 return true;
552 }
553 }
554
555 return false;
556 }
557
558 static void
559 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
560 const struct ilo_context *ilo,
561 struct gen6_pipeline_session *session)
562 {
563 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
564
565 /* 3DSTATE_GS_SVB_INDEX */
566 if (emit) {
567 if (p->dev->gen == ILO_GEN(6))
568 gen6_wa_pipe_control_post_sync(p, false);
569
570 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
571 0, p->state.so_num_vertices, p->state.so_max_vertices,
572 false, p->cp);
573
574 if (session->hw_ctx_changed) {
575 int i;
576
577 /*
578 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
579 *
580 * "If a buffer is not enabled then the SVBI must be set to 0x0
581 * in order to not cause overflow in that SVBI."
582 *
583 * "If a buffer is not enabled then the MaxSVBI must be set to
584 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
585 */
586 for (i = 1; i < 4; i++) {
587 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
588 i, 0, 0xffffffff, false, p->cp);
589 }
590 }
591 }
592 }
593
594 void
595 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
596 const struct ilo_context *ilo,
597 struct gen6_pipeline_session *session)
598 {
599 /* 3DSTATE_CLIP */
600 if (DIRTY(RASTERIZER) || DIRTY(FS) ||
601 DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
602 bool enable_guardband = true;
603 unsigned i;
604
605 /*
606 * We do not do 2D clipping yet. Guard band test should only be enabled
607 * when the viewport is at least as large as the framebuffer.
608 */
609 for (i = 0; i < ilo->viewport.count; i++) {
610 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
611
612 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
613 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
614 enable_guardband = false;
615 break;
616 }
617 }
618
619 p->gen6_3DSTATE_CLIP(p->dev,
620 ilo->rasterizer,
621 (ilo->fs && ilo->fs->shader->in.has_linear_interp),
622 enable_guardband, 1, p->cp);
623 }
624 }
625
626 static void
627 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
628 const struct ilo_context *ilo,
629 struct gen6_pipeline_session *session)
630 {
631 /* 3DSTATE_SF */
632 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
633 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
634 const struct ilo_shader *last_sh =
635 (ilo->gs)? ilo->gs->shader :
636 (ilo->vs)? ilo->vs->shader : NULL;
637
638 p->gen6_3DSTATE_SF(p->dev, ilo->rasterizer, fs, last_sh, p->cp);
639 }
640 }
641
642 void
643 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
644 const struct ilo_context *ilo,
645 struct gen6_pipeline_session *session)
646 {
647 /* 3DSTATE_DRAWING_RECTANGLE */
648 if (DIRTY(FRAMEBUFFER)) {
649 if (p->dev->gen == ILO_GEN(6))
650 gen6_wa_pipe_control_post_sync(p, false);
651
652 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
653 ilo->fb.state.width, ilo->fb.state.height, p->cp);
654 }
655 }
656
657 static void
658 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
659 const struct ilo_context *ilo,
660 struct gen6_pipeline_session *session)
661 {
662 /* 3DSTATE_CONSTANT_PS */
663 if (session->pcb_state_fs_changed)
664 p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
665
666 /* 3DSTATE_WM */
667 if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
668 DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
669 DIRTY(RASTERIZER)) {
670 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
671 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
672 const bool dual_blend = ilo->blend->dual_blend;
673 const bool cc_may_kill = (ilo->dsa->alpha.enabled ||
674 ilo->blend->alpha_to_coverage);
675
676 if (fs)
677 assert(!fs->pcb.clip_state_size);
678
679 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
680 gen6_wa_pipe_control_wm_max_threads_stall(p);
681
682 p->gen6_3DSTATE_WM(p->dev, fs, num_samplers,
683 &ilo->rasterizer->state, dual_blend, cc_may_kill, p->cp);
684 }
685 }
686
687 static void
688 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
689 const struct ilo_context *ilo,
690 struct gen6_pipeline_session *session)
691 {
692 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
693 if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
694 const uint32_t *packed_sample_pos;
695
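/* only 1x and 4x sample positions are kept; use the 4x pattern for any MSAA */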
696 packed_sample_pos = (ilo->fb.num_samples > 1) ?
697 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
698
699 if (p->dev->gen == ILO_GEN(6)) {
700 gen6_wa_pipe_control_post_sync(p, false);
701 gen6_wa_pipe_control_wm_multisample_flush(p);
702 }
703
704 p->gen6_3DSTATE_MULTISAMPLE(p->dev,
705 ilo->fb.num_samples, packed_sample_pos,
706 ilo->rasterizer->state.half_pixel_center, p->cp);
707
708 p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
709 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
710 }
711 }
712
713 static void
714 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
715 const struct ilo_context *ilo,
716 struct gen6_pipeline_session *session)
717 {
718 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
719 if (DIRTY(FRAMEBUFFER) || session->batch_bo_changed) {
720 if (p->dev->gen == ILO_GEN(6)) {
721 gen6_wa_pipe_control_post_sync(p, false);
722 gen6_wa_pipe_control_wm_depth_flush(p);
723 }
724
725 p->gen6_3DSTATE_DEPTH_BUFFER(p->dev, ilo->fb.state.zsbuf, p->cp);
726
727 /* TODO */
728 p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
729 }
730 }
731
732 void
733 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
734 const struct ilo_context *ilo,
735 struct gen6_pipeline_session *session)
736 {
737 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
738 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
739 ilo->rasterizer->state.poly_stipple_enable) {
740 if (p->dev->gen == ILO_GEN(6))
741 gen6_wa_pipe_control_post_sync(p, false);
742
743 p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
744 &ilo->poly_stipple, p->cp);
745
746 p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
747 }
748
749 /* 3DSTATE_LINE_STIPPLE */
750 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
751 if (p->dev->gen == ILO_GEN(6))
752 gen6_wa_pipe_control_post_sync(p, false);
753
754 p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
755 ilo->rasterizer->state.line_stipple_pattern,
756 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
757 }
758
759 /* 3DSTATE_AA_LINE_PARAMETERS */
760 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
761 if (p->dev->gen == ILO_GEN(6))
762 gen6_wa_pipe_control_post_sync(p, false);
763
764 p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
765 }
766 }
767
768 static void
769 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
770 const struct ilo_context *ilo,
771 struct gen6_pipeline_session *session)
772 {
773 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
774 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
775 p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
776 ilo->viewport.cso, ilo->viewport.count, p->cp);
777
778 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
779 ilo->viewport.cso, ilo->viewport.count, p->cp);
780
781 session->viewport_state_changed = true;
782 }
783 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
784 else if (DIRTY(VIEWPORT)) {
785 p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
786 ilo->viewport.cso, ilo->viewport.count, p->cp);
787
788 p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
789 ilo->viewport.cso, ilo->viewport.count, p->cp);
790
791 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
792 ilo->viewport.cso, ilo->viewport.count, p->cp);
793
794 session->viewport_state_changed = true;
795 }
796 }
797
798 static void
799 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
800 const struct ilo_context *ilo,
801 struct gen6_pipeline_session *session)
802 {
803 /* BLEND_STATE */
804 if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
805 p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
806 ilo->blend, &ilo->fb, &ilo->dsa->alpha, p->cp);
807
808 session->cc_state_blend_changed = true;
809 }
810
811 /* COLOR_CALC_STATE */
812 if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
813 p->state.COLOR_CALC_STATE =
814 p->gen6_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
815 ilo->dsa->alpha.ref_value, &ilo->blend_color, p->cp);
816
817 session->cc_state_cc_changed = true;
818 }
819
820 /* DEPTH_STENCIL_STATE */
821 if (DIRTY(DEPTH_STENCIL_ALPHA)) {
822 p->state.DEPTH_STENCIL_STATE =
823 p->gen6_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
824
825 session->cc_state_dsa_changed = true;
826 }
827 }
828
829 static void
830 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
831 const struct ilo_context *ilo,
832 struct gen6_pipeline_session *session)
833 {
834 /* SCISSOR_RECT */
835 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
836 /* there should be as many scissors as there are viewports */
837 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
838 &ilo->scissor, ilo->viewport.count, p->cp);
839
840 session->scissor_state_changed = true;
841 }
842 }
843
844 static void
845 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
846 const struct ilo_context *ilo,
847 struct gen6_pipeline_session *session)
848 {
849 /* SURFACE_STATEs for render targets */
850 if (DIRTY(FRAMEBUFFER)) {
851 const int offset = ILO_WM_DRAW_SURFACE(0);
852 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
853 int i;
854
855 for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
856 const struct ilo_surface_cso *surface =
857 (const struct ilo_surface_cso *) ilo->fb.state.cbufs[i];
858
859 assert(surface && surface->is_rt);
860 surface_state[i] =
861 p->gen6_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
862 }
863
864 /*
865 * Upload at least one render target, as
866 * brw_update_renderbuffer_surfaces() does. I don't know why.
867 */
868 if (i == 0) {
869 struct ilo_view_surface null_surface;
870
871 ilo_gpe_init_view_surface_null(p->dev,
872 ilo->fb.state.width, ilo->fb.state.height,
873 1, 0, &null_surface);
874
875 surface_state[i] =
876 p->gen6_SURFACE_STATE(p->dev, &null_surface, true, p->cp);
877
878 i++;
879 }
880
881 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
882
883 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
884 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
885
886 session->binding_table_fs_changed = true;
887 }
888 }
889
890 static void
891 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
892 const struct ilo_context *ilo,
893 struct gen6_pipeline_session *session)
894 {
895 const struct ilo_shader_state *vs = ilo->vs;
896 const struct ilo_shader_state *gs = ilo->gs;
897 const struct pipe_stream_output_target **so_targets =
898 (const struct pipe_stream_output_target **) ilo->so.states;
899 const int num_so_targets = ilo->so.count;
900
901 if (p->dev->gen != ILO_GEN(6))
902 return;
903
904 /* SURFACE_STATEs for stream output targets */
905 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
906 const struct pipe_stream_output_info *so_info =
907 (gs) ? &gs->info.stream_output :
908 (vs) ? &vs->info.stream_output : NULL;
909 const int offset = ILO_GS_SO_SURFACE(0);
910 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
911 int i;
912
913 for (i = 0; so_info && i < so_info->num_outputs; i++) {
914 const int target = so_info->output[i].output_buffer;
915 const struct pipe_stream_output_target *so_target =
916 (target < num_so_targets) ? so_targets[target] : NULL;
917
918 if (so_target) {
919 surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
920 so_target, so_info, i, p->cp);
921 }
922 else {
923 surface_state[i] = 0;
924 }
925 }
926
927 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
928
929 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
930 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
931
932 session->binding_table_gs_changed = true;
933 }
934 }
935
936 static void
937 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
938 const struct ilo_context *ilo,
939 int shader_type,
940 struct gen6_pipeline_session *session)
941 {
942 const struct pipe_sampler_view * const *views =
943 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
944 const int num_views = ilo->view[shader_type].count;
945 uint32_t *surface_state;
946 int offset, i;
947 bool skip = false;
948
949 /* SURFACE_STATEs for sampler views */
950 switch (shader_type) {
951 case PIPE_SHADER_VERTEX:
952 if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
953 offset = ILO_VS_TEXTURE_SURFACE(0);
954 surface_state = &p->state.vs.SURFACE_STATE[offset];
955
956 session->binding_table_vs_changed = true;
957 }
958 else {
959 skip = true;
960 }
961 break;
962 case PIPE_SHADER_FRAGMENT:
963 if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
964 offset = ILO_WM_TEXTURE_SURFACE(0);
965 surface_state = &p->state.wm.SURFACE_STATE[offset];
966
967 session->binding_table_fs_changed = true;
968 }
969 else {
970 skip = true;
971 }
972 break;
973 default:
974 skip = true;
975 break;
976 }
977
978 if (skip)
979 return;
980
981 for (i = 0; i < num_views; i++) {
982 if (views[i]) {
983 const struct ilo_view_cso *cso =
984 (const struct ilo_view_cso *) views[i];
985
986 surface_state[i] =
987 p->gen6_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
988 }
989 else {
990 surface_state[i] = 0;
991 }
992 }
993
994 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
995
996 if (i && session->num_surfaces[shader_type] < offset + i)
997 session->num_surfaces[shader_type] = offset + i;
998 }
999
1000 static void
1001 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1002 const struct ilo_context *ilo,
1003 int shader_type,
1004 struct gen6_pipeline_session *session)
1005 {
1006 const struct ilo_cbuf_cso *buffers = ilo->cbuf[shader_type].cso;
1007 const int num_buffers = ilo->cbuf[shader_type].count;
1008 uint32_t *surface_state;
1009 int offset, i;
1010 bool skip = false;
1011
1012 /* SURFACE_STATEs for constant buffers */
1013 switch (shader_type) {
1014 case PIPE_SHADER_VERTEX:
1015 if (DIRTY(CONSTANT_BUFFER)) {
1016 offset = ILO_VS_CONST_SURFACE(0);
1017 surface_state = &p->state.vs.SURFACE_STATE[offset];
1018
1019 session->binding_table_vs_changed = true;
1020 }
1021 else {
1022 skip = true;
1023 }
1024 break;
1025 case PIPE_SHADER_FRAGMENT:
1026 if (DIRTY(CONSTANT_BUFFER)) {
1027 offset = ILO_WM_CONST_SURFACE(0);
1028 surface_state = &p->state.wm.SURFACE_STATE[offset];
1029
1030 session->binding_table_fs_changed = true;
1031 }
1032 else {
1033 skip = true;
1034 }
1035 break;
1036 default:
1037 skip = true;
1038 break;
1039 }
1040
1041 if (skip)
1042 return;
1043
1044 for (i = 0; i < num_buffers; i++) {
1045 if (buffers[i].resource) {
1046 const struct ilo_view_surface *surf = &buffers[i].surface;
1047
1048 surface_state[i] =
1049 p->gen6_SURFACE_STATE(p->dev, surf, false, p->cp);
1050 }
1051 else {
1052 surface_state[i] = 0;
1053 }
1054 }
1055
1056 memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);
1057
1058 if (i && session->num_surfaces[shader_type] < offset + i)
1059 session->num_surfaces[shader_type] = offset + i;
1060 }
1061
1062 static void
1063 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1064 const struct ilo_context *ilo,
1065 int shader_type,
1066 struct gen6_pipeline_session *session)
1067 {
1068 uint32_t *binding_table_state, *surface_state;
1069 int *binding_table_state_size, size;
1070 bool skip = false;
1071
1072 /* BINDING_TABLE_STATE */
1073 switch (shader_type) {
1074 case PIPE_SHADER_VERTEX:
1075 surface_state = p->state.vs.SURFACE_STATE;
1076 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1077 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1078
1079 skip = !session->binding_table_vs_changed;
1080 break;
1081 case PIPE_SHADER_GEOMETRY:
1082 surface_state = p->state.gs.SURFACE_STATE;
1083 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1084 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1085
1086 skip = !session->binding_table_gs_changed;
1087 break;
1088 case PIPE_SHADER_FRAGMENT:
1089 surface_state = p->state.wm.SURFACE_STATE;
1090 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1091 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1092
1093 skip = !session->binding_table_fs_changed;
1094 break;
1095 default:
1096 skip = true;
1097 break;
1098 }
1099
1100 if (skip)
1101 return;
1102
1103 /*
1104 * If there seem to be fewer SURFACE_STATEs than before, it could be that
1105 * we did not touch those residing at the tail in this upload. Loop over
1106 * them to figure out the real number of SURFACE_STATEs.
1107 */
1108 for (size = *binding_table_state_size;
1109 size > session->num_surfaces[shader_type]; size--) {
1110 if (surface_state[size - 1])
1111 break;
1112 }
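/* never let the binding table be smaller than the number of SURFACE_STATEs written in this upload */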
1113 if (size < session->num_surfaces[shader_type])
1114 size = session->num_surfaces[shader_type];
1115
1116 *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
1117 surface_state, size, p->cp);
1118 *binding_table_state_size = size;
1119 }
1120
1121 static void
1122 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1123 const struct ilo_context *ilo,
1124 int shader_type,
1125 struct gen6_pipeline_session *session)
1126 {
1127 const struct ilo_sampler_cso * const *samplers =
1128 ilo->sampler[shader_type].cso;
1129 const struct pipe_sampler_view * const *views =
1130 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1131 const int num_samplers = ilo->sampler[shader_type].count;
1132 const int num_views = ilo->view[shader_type].count;
1133 uint32_t *sampler_state, *border_color_state;
1134 bool emit_border_color = false;
1135 bool skip = false;
1136
1137 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1138 switch (shader_type) {
1139 case PIPE_SHADER_VERTEX:
1140 if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
1141 sampler_state = &p->state.vs.SAMPLER_STATE;
1142 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1143
1144 if (DIRTY(VERTEX_SAMPLERS))
1145 emit_border_color = true;
1146
1147 session->sampler_state_vs_changed = true;
1148 }
1149 else {
1150 skip = true;
1151 }
1152 break;
1153 case PIPE_SHADER_FRAGMENT:
1154 if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
1155 sampler_state = &p->state.wm.SAMPLER_STATE;
1156 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1157
1158 if (DIRTY(FRAGMENT_SAMPLERS))
1159 emit_border_color = true;
1160
1161 session->sampler_state_fs_changed = true;
1162 }
1163 else {
1164 skip = true;
1165 }
1166 break;
1167 default:
1168 skip = true;
1169 break;
1170 }
1171
1172 if (skip)
1173 return;
1174
1175 if (emit_border_color) {
1176 int i;
1177
1178 for (i = 0; i < num_samplers; i++) {
1179 border_color_state[i] = (samplers[i]) ?
1180 p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
1181 samplers[i], p->cp) : 0;
1182 }
1183 }
1184
1185 /* should we take the minimum of num_samplers and num_views? */
1186 *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
1187 samplers, views,
1188 border_color_state,
1189 MIN2(num_samplers, num_views), p->cp);
1190 }
1191
1192 static void
1193 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1194 const struct ilo_context *ilo,
1195 struct gen6_pipeline_session *session)
1196 {
1197 /* push constant buffer for VS */
1198 if (DIRTY(VS) || DIRTY(CLIP)) {
1199 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
1200
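/* the user clip planes are passed to the VS through its push constant buffer */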
1201 if (vs && vs->pcb.clip_state_size) {
1202 void *pcb;
1203
1204 p->state.vs.PUSH_CONSTANT_BUFFER_size = vs->pcb.clip_state_size;
1205 p->state.vs.PUSH_CONSTANT_BUFFER =
1206 p->gen6_push_constant_buffer(p->dev,
1207 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
1208
1209 memcpy(pcb, &ilo->clip, vs->pcb.clip_state_size);
1210 }
1211 else {
1212 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1213 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1214 }
1215
1216 session->pcb_state_vs_changed = true;
1217 }
1218 }
1219
1220 #undef DIRTY
1221
1222 static void
1223 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1224 const struct ilo_context *ilo,
1225 struct gen6_pipeline_session *session)
1226 {
1227 /*
1228 * We try to keep the order of the commands matching, as closely as
1229 * possible, that of the classic i965 driver, which makes it easy to
1230 * compare the two command streams.
1231 */
1232 gen6_pipeline_common_select(p, ilo, session);
1233 gen6_pipeline_gs_svbi(p, ilo, session);
1234 gen6_pipeline_common_sip(p, ilo, session);
1235 gen6_pipeline_vf_statistics(p, ilo, session);
1236 gen6_pipeline_common_base_address(p, ilo, session);
1237 gen6_pipeline_common_pointers_1(p, ilo, session);
1238 gen6_pipeline_common_urb(p, ilo, session);
1239 gen6_pipeline_common_pointers_2(p, ilo, session);
1240 gen6_pipeline_wm_multisample(p, ilo, session);
1241 gen6_pipeline_vs(p, ilo, session);
1242 gen6_pipeline_gs(p, ilo, session);
1243 gen6_pipeline_clip(p, ilo, session);
1244 gen6_pipeline_sf(p, ilo, session);
1245 gen6_pipeline_wm(p, ilo, session);
1246 gen6_pipeline_common_pointers_3(p, ilo, session);
1247 gen6_pipeline_wm_depth(p, ilo, session);
1248 gen6_pipeline_wm_raster(p, ilo, session);
1249 gen6_pipeline_sf_rect(p, ilo, session);
1250 gen6_pipeline_vf(p, ilo, session);
1251 gen6_pipeline_vf_draw(p, ilo, session);
1252 }
1253
1254 void
1255 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1256 const struct ilo_context *ilo,
1257 struct gen6_pipeline_session *session)
1258 {
1259 int shader_type;
1260
1261 gen6_pipeline_state_viewports(p, ilo, session);
1262 gen6_pipeline_state_cc(p, ilo, session);
1263 gen6_pipeline_state_scissors(p, ilo, session);
1264 gen6_pipeline_state_pcb(p, ilo, session);
1265
1266 /*
1267 * upload all SURFACE_STATEs together so that we know there is minimal
1268 * padding
1269 */
1270 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1271 gen6_pipeline_state_surfaces_so(p, ilo, session);
1272 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1273 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1274 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1275 }
1276
1277 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1278 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1279 /* this must be called after all SURFACE_STATEs are uploaded */
1280 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1281 }
1282 }
1283
1284 void
1285 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1286 const struct ilo_context *ilo,
1287 const struct pipe_draw_info *info,
1288 struct gen6_pipeline_session *session)
1289 {
1290 memset(session, 0, sizeof(*session));
1291 session->info = info;
1292 session->pipe_dirty = ilo->dirty;
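/* u_reduced_prim() collapses the primitive type to points, lines, or triangles */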
1293 session->reduced_prim = u_reduced_prim(info->mode);
1294
1295 /* available space before the session */
1296 session->init_cp_space = ilo_cp_space(p->cp);
1297
1298 session->hw_ctx_changed =
1299 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1300
1301 if (session->hw_ctx_changed) {
1302 /* these should be enough to force everything to be re-uploaded */
1303 session->batch_bo_changed = true;
1304 session->state_bo_changed = true;
1305 session->instruction_bo_changed = true;
1306 session->prim_changed = true;
1307 }
1308 else {
1309 /*
1310 * Any state that involves resources needs to be re-emitted when the
1311 * batch bo changed. This is because we do not pin the resources and
1312 * their offsets (or existence) may change between batch buffers.
1313 *
1314 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1315 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1316 * a temporary workaround.
1317 */
1318 session->batch_bo_changed =
1319 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1320
1321 session->state_bo_changed =
1322 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1323 session->instruction_bo_changed =
1324 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1325 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1326 }
1327 }
1328
1329 void
1330 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1331 const struct ilo_context *ilo,
1332 struct gen6_pipeline_session *session)
1333 {
1334 /* force all states to be uploaded if the state bo changed */
1335 if (session->state_bo_changed)
1336 session->pipe_dirty = ILO_DIRTY_ALL;
1337 else
1338 session->pipe_dirty = ilo->dirty;
1339
1340 session->emit_draw_states(p, ilo, session);
1341
1342 /* force all commands to be uploaded if the HW context changed */
1343 if (session->hw_ctx_changed)
1344 session->pipe_dirty = ILO_DIRTY_ALL;
1345 else
1346 session->pipe_dirty = ilo->dirty;
1347
1348 session->emit_draw_commands(p, ilo, session);
1349 }
1350
1351 void
1352 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1353 const struct ilo_context *ilo,
1354 struct gen6_pipeline_session *session)
1355 {
1356 int used, estimate;
1357
1358 /* sanity check size estimation */
1359 used = session->init_cp_space - ilo_cp_space(p->cp);
1360 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1361 assert(used <= estimate);
1362
1363 p->state.reduced_prim = session->reduced_prim;
1364 }
1365
1366 static void
1367 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1368 const struct ilo_context *ilo,
1369 const struct pipe_draw_info *info)
1370 {
1371 struct gen6_pipeline_session session;
1372
1373 gen6_pipeline_prepare(p, ilo, info, &session);
1374
1375 session.emit_draw_states = gen6_pipeline_states;
1376 session.emit_draw_commands = gen6_pipeline_commands;
1377
1378 gen6_pipeline_draw(p, ilo, &session);
1379 gen6_pipeline_end(p, ilo, &session);
1380 }
1381
1382 void
1383 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1384 {
1385 if (p->dev->gen == ILO_GEN(6))
1386 gen6_wa_pipe_control_post_sync(p, false);
1387
1388 p->gen6_PIPE_CONTROL(p->dev,
1389 PIPE_CONTROL_INSTRUCTION_FLUSH |
1390 PIPE_CONTROL_WRITE_FLUSH |
1391 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1392 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1393 PIPE_CONTROL_TC_FLUSH |
1394 PIPE_CONTROL_NO_WRITE |
1395 PIPE_CONTROL_CS_STALL,
1396 0, 0, false, p->cp);
1397 }
1398
1399 void
1400 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1401 struct intel_bo *bo, int index)
1402 {
1403 if (p->dev->gen == ILO_GEN(6))
1404 gen6_wa_pipe_control_post_sync(p, true);
1405
1406 p->gen6_PIPE_CONTROL(p->dev,
1407 PIPE_CONTROL_WRITE_TIMESTAMP,
1408 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1409 true, p->cp);
1410 }
1411
1412 void
1413 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1414 struct intel_bo *bo, int index)
1415 {
1416 if (p->dev->gen == ILO_GEN(6))
1417 gen6_wa_pipe_control_post_sync(p, false);
1418
1419 p->gen6_PIPE_CONTROL(p->dev,
1420 PIPE_CONTROL_DEPTH_STALL |
1421 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1422 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1423 true, p->cp);
1424 }
1425
1426 static int
1427 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1428 const struct ilo_gpe_gen6 *gen6,
1429 const struct ilo_context *ilo)
1430 {
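/* the worst-case command size does not depend on the context; compute it once and cache it */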
1431 static int size;
1432 enum ilo_gpe_gen6_command cmd;
1433
1434 if (size)
1435 return size;
1436
1437 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1438 int count;
1439
1440 switch (cmd) {
1441 case ILO_GPE_GEN6_PIPE_CONTROL:
1442 /* for the workaround */
1443 count = 2;
1444 /* another one after 3DSTATE_URB */
1445 count += 1;
1446 /* and another one after 3DSTATE_CONSTANT_VS */
1447 count += 1;
1448 break;
1449 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1450 /* there are 4 SVBIs */
1451 count = 4;
1452 break;
1453 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1454 count = 33;
1455 break;
1456 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1457 count = 34;
1458 break;
1459 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1460 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1461 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1462 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1463 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1464 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1465 /* media commands */
1466 count = 0;
1467 break;
1468 default:
1469 count = 1;
1470 break;
1471 }
1472
1473 if (count)
1474 size += gen6->estimate_command_size(p->dev, cmd, count);
1475 }
1476
1477 return size;
1478 }
1479
1480 static int
1481 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1482 const struct ilo_gpe_gen6 *gen6,
1483 const struct ilo_context *ilo)
1484 {
1485 static int static_size;
1486 int shader_type, count, size;
1487
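/* the sizes of the fixed set of states are context-independent; compute them only once */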
1488 if (!static_size) {
1489 struct {
1490 enum ilo_gpe_gen6_state state;
1491 int count;
1492 } static_states[] = {
1493 /* viewports */
1494 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1495 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1496 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1497 /* cc */
1498 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1499 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1500 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1501 /* scissors */
1502 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1503 /* binding table (vs, gs, fs) */
1504 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1505 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1506 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
1507 };
1508 int i;
1509
1510 for (i = 0; i < Elements(static_states); i++) {
1511 static_size += gen6->estimate_state_size(p->dev,
1512 static_states[i].state,
1513 static_states[i].count);
1514 }
1515 }
1516
1517 size = static_size;
1518
1519 /*
1520 * render targets (fs)
1521 * stream outputs (gs)
1522 * sampler views (vs, fs)
1523 * constant buffers (vs, fs)
1524 */
1525 count = ilo->fb.state.nr_cbufs;
1526
1527 if (ilo->gs)
1528 count += ilo->gs->info.stream_output.num_outputs;
1529 else if (ilo->vs)
1530 count += ilo->vs->info.stream_output.num_outputs;
1531
1532 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1533 count += ilo->view[shader_type].count;
1534 count += ilo->cbuf[shader_type].count;
1535 }
1536
1537 if (count) {
1538 size += gen6->estimate_state_size(p->dev,
1539 ILO_GPE_GEN6_SURFACE_STATE, count);
1540 }
1541
1542 /* samplers (vs, fs) */
1543 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1544 count = ilo->sampler[shader_type].count;
1545 if (count) {
1546 size += gen6->estimate_state_size(p->dev,
1547 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1548 size += gen6->estimate_state_size(p->dev,
1549 ILO_GPE_GEN6_SAMPLER_STATE, count);
1550 }
1551 }
1552
1553 /* pcb (vs) */
1554 if (ilo->vs && ilo->vs->shader->pcb.clip_state_size) {
1555 const int pcb_size = ilo->vs->shader->pcb.clip_state_size;
1556
1557 size += gen6->estimate_state_size(p->dev,
1558 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
1559 }
1560
1561 return size;
1562 }
1563
1564 static int
1565 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1566 enum ilo_3d_pipeline_action action,
1567 const void *arg)
1568 {
1569 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1570 int size;
1571
1572 switch (action) {
1573 case ILO_3D_PIPELINE_DRAW:
1574 {
1575 const struct ilo_context *ilo = arg;
1576
1577 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1578 gen6_pipeline_estimate_states(p, gen6, ilo);
1579 }
1580 break;
1581 case ILO_3D_PIPELINE_FLUSH:
1582 size = gen6->estimate_command_size(p->dev,
1583 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1584 break;
1585 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1586 size = gen6->estimate_command_size(p->dev,
1587 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1588 break;
1589 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1590 size = gen6->estimate_command_size(p->dev,
1591 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1592 break;
1593 default:
1594 assert(!"unknown 3D pipeline action");
1595 size = 0;
1596 break;
1597 }
1598
1599 return size;
1600 }
1601
1602 void
1603 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1604 {
1605 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1606
1607 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1608 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1609 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1610 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1611 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1612
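/*
 * Hook up the GEN6 GPE emission functions: each p->gen6_<name> pointer is
 * bound to the corresponding gen6->emit_<name> callback.
 */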
1613 #define GEN6_USE(p, name, from) \
1614 p->gen6_ ## name = from->emit_ ## name
1615 GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
1616 GEN6_USE(p, STATE_SIP, gen6);
1617 GEN6_USE(p, PIPELINE_SELECT, gen6);
1618 GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
1619 GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
1620 GEN6_USE(p, 3DSTATE_URB, gen6);
1621 GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
1622 GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
1623 GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
1624 GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
1625 GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
1626 GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
1627 GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
1628 GEN6_USE(p, 3DSTATE_VS, gen6);
1629 GEN6_USE(p, 3DSTATE_GS, gen6);
1630 GEN6_USE(p, 3DSTATE_CLIP, gen6);
1631 GEN6_USE(p, 3DSTATE_SF, gen6);
1632 GEN6_USE(p, 3DSTATE_WM, gen6);
1633 GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
1634 GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
1635 GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
1636 GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
1637 GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
1638 GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
1639 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
1640 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
1641 GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
1642 GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
1643 GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
1644 GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
1645 GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
1646 GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
1647 GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
1648 GEN6_USE(p, PIPE_CONTROL, gen6);
1649 GEN6_USE(p, 3DPRIMITIVE, gen6);
1650 GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
1651 GEN6_USE(p, SF_VIEWPORT, gen6);
1652 GEN6_USE(p, CLIP_VIEWPORT, gen6);
1653 GEN6_USE(p, CC_VIEWPORT, gen6);
1654 GEN6_USE(p, COLOR_CALC_STATE, gen6);
1655 GEN6_USE(p, BLEND_STATE, gen6);
1656 GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
1657 GEN6_USE(p, SCISSOR_RECT, gen6);
1658 GEN6_USE(p, BINDING_TABLE_STATE, gen6);
1659 GEN6_USE(p, SURFACE_STATE, gen6);
1660 GEN6_USE(p, so_SURFACE_STATE, gen6);
1661 GEN6_USE(p, SAMPLER_STATE, gen6);
1662 GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
1663 GEN6_USE(p, push_constant_buffer, gen6);
1664 #undef GEN6_USE
1665 }