ilo: enable dumping of WM PCB
[mesa.git] / src / gallium / drivers / ilo / ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_3d.h"
33 #include "ilo_context.h"
34 #include "ilo_cp.h"
35 #include "ilo_gpe_gen6.h"
36 #include "ilo_gpe_gen7.h"
37 #include "ilo_shader.h"
38 #include "ilo_state.h"
39 #include "ilo_3d_pipeline.h"
40 #include "ilo_3d_pipeline_gen6.h"
41
42 /**
43 * This should be called before any depth stall flush (including those
44 * produced by non-pipelined state commands) or cache flush on GEN6.
45 *
46 * \see intel_emit_post_sync_nonzero_flush()
47 */
48 static void
49 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
50 bool caller_post_sync)
51 {
52 assert(p->dev->gen == ILO_GEN(6));
53
54 /* emit once */
55 if (p->state.has_gen6_wa_pipe_control)
56 return;
57
58 p->state.has_gen6_wa_pipe_control = true;
59
60 /*
61 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
62 *
63 * "Pipe-control with CS-stall bit set must be sent BEFORE the
64 * pipe-control with a post-sync op and no write-cache flushes."
65 *
66 * The post-sync-op PIPE_CONTROL that follows necessitates this one.
67 */
68 gen6_emit_PIPE_CONTROL(p->dev,
69 PIPE_CONTROL_CS_STALL |
70 PIPE_CONTROL_STALL_AT_SCOREBOARD,
71 NULL, 0, false, p->cp);
72
73 /* the caller will emit the post-sync op */
74 if (caller_post_sync)
75 return;
76
77 /*
78 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
79 *
80 * "Before any depth stall flush (including those produced by
81 * non-pipelined state commands), software needs to first send a
82 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
83 *
84 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
85 * PIPE_CONTROL with any non-zero post-sync-op is required."
86 */
87 gen6_emit_PIPE_CONTROL(p->dev,
88 PIPE_CONTROL_WRITE_IMMEDIATE,
89 p->workaround_bo, 0, false, p->cp);
90 }
91
92 static void
93 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
94 {
95 assert(p->dev->gen == ILO_GEN(6));
96
97 gen6_wa_pipe_control_post_sync(p, false);
98
99 /*
100 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
101 *
102 * "Driver must guarentee that all the caches in the depth pipe are
103 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
104 * requires driver to send a PIPE_CONTROL with a CS stall along with a
105 * Depth Flush prior to this command."
106 */
107 gen6_emit_PIPE_CONTROL(p->dev,
108 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
109 PIPE_CONTROL_CS_STALL,
110 0, 0, false, p->cp);
111 }
112
113 static void
114 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
115 {
116 assert(p->dev->gen == ILO_GEN(6));
117
118 gen6_wa_pipe_control_post_sync(p, false);
119
120 /*
121 * According to intel_emit_depth_stall_flushes() of classic i965, we need
122 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
123 * commands.
124 */
125 gen6_emit_PIPE_CONTROL(p->dev,
126 PIPE_CONTROL_DEPTH_STALL,
127 NULL, 0, false, p->cp);
128
129 gen6_emit_PIPE_CONTROL(p->dev,
130 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
131 NULL, 0, false, p->cp);
132
133 gen6_emit_PIPE_CONTROL(p->dev,
134 PIPE_CONTROL_DEPTH_STALL,
135 NULL, 0, false, p->cp);
136 }
137
138 static void
139 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
140 {
141 assert(p->dev->gen == ILO_GEN(6));
142
143 /* the post-sync workaround should cover this already */
144 if (p->state.has_gen6_wa_pipe_control)
145 return;
146
147 /*
148 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
149 *
150 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
151 * field set (DW1 Bit 1), must be issued prior to any change to the
152 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
153 */
154 gen6_emit_PIPE_CONTROL(p->dev,
155 PIPE_CONTROL_STALL_AT_SCOREBOARD,
156 NULL, 0, false, p->cp);
157
158 }
159
160 static void
161 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
162 {
163 assert(p->dev->gen == ILO_GEN(6));
164
165 gen6_wa_pipe_control_post_sync(p, false);
166
167 /*
168 * According to upload_vs_state() of classic i965, we need to emit a
169 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command is kept
170 * buffered by the VS FF unit, to the point that the FF dies.
171 */
172 gen6_emit_PIPE_CONTROL(p->dev,
173 PIPE_CONTROL_DEPTH_STALL |
174 PIPE_CONTROL_INSTRUCTION_FLUSH |
175 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
176 NULL, 0, false, p->cp);
177 }
178
179 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
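/* e.g., DIRTY(VS) expands to (session->pipe_dirty & ILO_DIRTY_VS) */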
180
181 void
182 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
183 const struct ilo_context *ilo,
184 struct gen6_pipeline_session *session)
185 {
186 /* PIPELINE_SELECT */
187 if (session->hw_ctx_changed) {
188 if (p->dev->gen == ILO_GEN(6))
189 gen6_wa_pipe_control_post_sync(p, false);
190
191 gen6_emit_PIPELINE_SELECT(p->dev, 0x0, p->cp);
192 }
193 }
194
195 void
196 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
197 const struct ilo_context *ilo,
198 struct gen6_pipeline_session *session)
199 {
200 /* STATE_SIP */
201 if (session->hw_ctx_changed) {
202 if (p->dev->gen == ILO_GEN(6))
203 gen6_wa_pipe_control_post_sync(p, false);
204
205 gen6_emit_STATE_SIP(p->dev, 0, p->cp);
206 }
207 }
208
209 void
210 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
211 const struct ilo_context *ilo,
212 struct gen6_pipeline_session *session)
213 {
214 /* STATE_BASE_ADDRESS */
215 if (session->state_bo_changed || session->kernel_bo_changed ||
216 session->batch_bo_changed) {
217 if (p->dev->gen == ILO_GEN(6))
218 gen6_wa_pipe_control_post_sync(p, false);
219
220 gen6_emit_STATE_BASE_ADDRESS(p->dev,
221 NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
222 0, 0, 0, 0, p->cp);
223
224 /*
225 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
226 *
227 * "The following commands must be reissued following any change to
228 * the base addresses:
229 *
230 * * 3DSTATE_BINDING_TABLE_POINTERS
231 * * 3DSTATE_SAMPLER_STATE_POINTERS
232 * * 3DSTATE_VIEWPORT_STATE_POINTERS
233 * * 3DSTATE_CC_POINTERS
234 * * MEDIA_STATE_POINTERS"
235 *
236 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
237 * reasonable to also reissue the command. The same applies to the PCBs.
238 */
239 session->viewport_state_changed = true;
240
241 session->cc_state_blend_changed = true;
242 session->cc_state_dsa_changed = true;
243 session->cc_state_cc_changed = true;
244
245 session->scissor_state_changed = true;
246
247 session->binding_table_vs_changed = true;
248 session->binding_table_gs_changed = true;
249 session->binding_table_fs_changed = true;
250
251 session->sampler_state_vs_changed = true;
252 session->sampler_state_gs_changed = true;
253 session->sampler_state_fs_changed = true;
254
255 session->pcb_state_vs_changed = true;
256 session->pcb_state_gs_changed = true;
257 session->pcb_state_fs_changed = true;
258 }
259 }
260
261 static void
262 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
263 const struct ilo_context *ilo,
264 struct gen6_pipeline_session *session)
265 {
266 /* 3DSTATE_URB */
267 if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
268 const bool gs_active = (ilo->gs || (ilo->vs &&
269 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_GEN6_SO)));
270 int vs_entry_size, gs_entry_size;
271 int vs_total_size, gs_total_size;
272
273 vs_entry_size = (ilo->vs) ?
274 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;
275
276 /*
277 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
278 * share VUE handles. The VUE allocation size must be large enough to
279 * store either the VF outputs (number of VERTEX_ELEMENTs) or the VS outputs.
280 *
281 * I am not sure if the PRM explicitly states that VF and VS share VUE
282 * handles. But here is a citation that implies so:
283 *
284 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
285 *
286 * "Once a FF stage that spawn threads has sufficient input to
287 * initiate a thread, it must guarantee that it is safe to request
288 * the thread initiation. For all these FF stages, this check is
289 * based on :
290 *
291 * - The availability of output URB entries:
292 * - VS: As the input URB entries are overwritten with the
293 * VS-generated output data, output URB availability isn't a
294 * factor."
295 */
296 if (vs_entry_size < ilo->ve->count)
297 vs_entry_size = ilo->ve->count;
298
299 gs_entry_size = (ilo->gs) ?
300 ilo_shader_get_kernel_param(ilo->gs, ILO_KERNEL_OUTPUT_COUNT) :
301 (gs_active) ? vs_entry_size : 0;
302
303 /* in bytes */
304 vs_entry_size *= sizeof(float) * 4;
305 gs_entry_size *= sizeof(float) * 4;
306 vs_total_size = ilo->dev->urb_size;
307
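/*
 * Partition the URB between VS and GS: an even split when the GS is
 * active, everything to the VS otherwise.  Illustrative example (the
 * URB size here is hypothetical): with a 64 KB URB and an active GS,
 * each stage gets 32 KB for its entries.
 */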
308 if (gs_active) {
309 vs_total_size /= 2;
310 gs_total_size = vs_total_size;
311 }
312 else {
313 gs_total_size = 0;
314 }
315
316 gen6_emit_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
317 vs_entry_size, gs_entry_size, p->cp);
318
319 /*
320 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
321 *
322 * "Because of a urb corruption caused by allocating a previous
323 * gsunit's urb entry to vsunit software is required to send a
324 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
325 * size == 0) plus a dummy DRAW call before any case where VS will
326 * be taking over GS URB space."
327 */
328 if (p->state.gs.active && !gs_active)
329 ilo_3d_pipeline_emit_flush_gen6(p);
330
331 p->state.gs.active = gs_active;
332 }
333 }
334
335 static void
336 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
337 const struct ilo_context *ilo,
338 struct gen6_pipeline_session *session)
339 {
340 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
341 if (session->viewport_state_changed) {
342 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
343 p->state.CLIP_VIEWPORT,
344 p->state.SF_VIEWPORT,
345 p->state.CC_VIEWPORT, p->cp);
346 }
347 }
348
349 static void
350 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
351 const struct ilo_context *ilo,
352 struct gen6_pipeline_session *session)
353 {
354 /* 3DSTATE_CC_STATE_POINTERS */
355 if (session->cc_state_blend_changed ||
356 session->cc_state_dsa_changed ||
357 session->cc_state_cc_changed) {
358 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev,
359 p->state.BLEND_STATE,
360 p->state.DEPTH_STENCIL_STATE,
361 p->state.COLOR_CALC_STATE, p->cp);
362 }
363
364 /* 3DSTATE_SAMPLER_STATE_POINTERS */
365 if (session->sampler_state_vs_changed ||
366 session->sampler_state_gs_changed ||
367 session->sampler_state_fs_changed) {
368 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
369 p->state.vs.SAMPLER_STATE,
370 0,
371 p->state.wm.SAMPLER_STATE, p->cp);
372 }
373 }
374
375 static void
376 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
377 const struct ilo_context *ilo,
378 struct gen6_pipeline_session *session)
379 {
380 /* 3DSTATE_SCISSOR_STATE_POINTERS */
381 if (session->scissor_state_changed) {
382 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
383 p->state.SCISSOR_RECT, p->cp);
384 }
385
386 /* 3DSTATE_BINDING_TABLE_POINTERS */
387 if (session->binding_table_vs_changed ||
388 session->binding_table_gs_changed ||
389 session->binding_table_fs_changed) {
390 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
391 p->state.vs.BINDING_TABLE_STATE,
392 p->state.gs.BINDING_TABLE_STATE,
393 p->state.wm.BINDING_TABLE_STATE, p->cp);
394 }
395 }
396
397 void
398 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
399 const struct ilo_context *ilo,
400 struct gen6_pipeline_session *session)
401 {
402 /* 3DSTATE_INDEX_BUFFER */
403 if (DIRTY(IB) || session->primitive_restart_changed ||
404 session->batch_bo_changed) {
405 gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
406 &ilo->ib, ilo->draw->primitive_restart, p->cp);
407 }
408
409 /* 3DSTATE_VERTEX_BUFFERS */
410 if (DIRTY(VB) || DIRTY(VE) || session->batch_bo_changed)
411 gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev, ilo->ve, &ilo->vb, p->cp);
412
413 /* 3DSTATE_VERTEX_ELEMENTS */
414 if (DIRTY(VE) || DIRTY(VS)) {
415 const struct ilo_ve_state *ve = ilo->ve;
416 bool last_velement_edgeflag = false;
417 bool prepend_generate_ids = false;
418
419 if (ilo->vs) {
420 if (ilo_shader_get_kernel_param(ilo->vs,
421 ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
422 /* we rely on the state tracker here */
423 assert(ilo_shader_get_kernel_param(ilo->vs,
424 ILO_KERNEL_INPUT_COUNT) == ve->count);
425
426 last_velement_edgeflag = true;
427 }
428
429 if (ilo_shader_get_kernel_param(ilo->vs,
430 ILO_KERNEL_VS_INPUT_INSTANCEID) ||
431 ilo_shader_get_kernel_param(ilo->vs,
432 ILO_KERNEL_VS_INPUT_VERTEXID))
433 prepend_generate_ids = true;
434 }
435
436 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
437 last_velement_edgeflag, prepend_generate_ids, p->cp);
438 }
439 }
440
441 void
442 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
443 const struct ilo_context *ilo,
444 struct gen6_pipeline_session *session)
445 {
446 /* 3DSTATE_VF_STATISTICS */
447 if (session->hw_ctx_changed)
448 gen6_emit_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
449 }
450
451 static void
452 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
453 const struct ilo_context *ilo,
454 struct gen6_pipeline_session *session)
455 {
456 /* 3DPRIMITIVE */
457 gen6_emit_3DPRIMITIVE(p->dev, ilo->draw, &ilo->ib, false, p->cp);
458 p->state.has_gen6_wa_pipe_control = false;
459 }
460
461 void
462 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
463 const struct ilo_context *ilo,
464 struct gen6_pipeline_session *session)
465 {
466 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(SAMPLER_VS) ||
467 session->kernel_bo_changed);
468 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
469
470 /*
471 * the classic i965 does this in upload_vs_state(), citing a spec that I
472 * cannot find
473 */
474 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
475 gen6_wa_pipe_control_post_sync(p, false);
476
477 /* 3DSTATE_CONSTANT_VS */
478 if (emit_3dstate_constant_vs) {
479 gen6_emit_3DSTATE_CONSTANT_VS(p->dev,
480 &p->state.vs.PUSH_CONSTANT_BUFFER,
481 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
482 1, p->cp);
483 }
484
485 /* 3DSTATE_VS */
486 if (emit_3dstate_vs) {
487 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
488
489 gen6_emit_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
490 }
491
492 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
493 gen6_wa_pipe_control_vs_const_flush(p);
494 }
495
496 static void
497 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
498 const struct ilo_context *ilo,
499 struct gen6_pipeline_session *session)
500 {
501 /* 3DSTATE_CONSTANT_GS */
502 if (session->pcb_state_gs_changed)
503 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
504
505 /* 3DSTATE_GS */
506 if (DIRTY(GS) || DIRTY(VS) ||
507 session->prim_changed || session->kernel_bo_changed) {
508 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
509
510 gen6_emit_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
511 }
512 }
513
514 bool
515 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
516 const struct ilo_context *ilo,
517 struct gen6_pipeline_session *session)
518 {
519 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
520 const struct pipe_stream_output_info *so_info =
521 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
522 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
523 unsigned max_svbi = 0xffffffff;
524 int i;
525
526 for (i = 0; i < so_info->num_outputs; i++) {
527 const int output_buffer = so_info->output[i].output_buffer;
528 const struct pipe_stream_output_target *so =
529 ilo->so.states[output_buffer];
530 const int struct_size = so_info->stride[output_buffer] * 4;
531 const int elem_size = so_info->output[i].num_components * 4;
532 int buf_size, count;
533
534 if (!so) {
535 max_svbi = 0;
536 break;
537 }
538
539 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
540
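/*
 * count is how many vertices can be written before this output would
 * overflow the buffer: one per complete struct, plus one more if the
 * leftover space can still hold this element.  Hypothetical numbers: a
 * 100-byte buffer with a 16-byte struct and an 8-byte element gives
 * 100 / 16 = 6, and the 4 leftover bytes cannot hold another element,
 * so count stays 6.
 */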
541 count = buf_size / struct_size;
542 if (buf_size % struct_size >= elem_size)
543 count++;
544
545 if (count < max_svbi)
546 max_svbi = count;
547 }
548
549 if (p->state.so_max_vertices != max_svbi) {
550 p->state.so_max_vertices = max_svbi;
551 return true;
552 }
553 }
554
555 return false;
556 }
557
558 static void
559 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
560 const struct ilo_context *ilo,
561 struct gen6_pipeline_session *session)
562 {
563 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
564
565 /* 3DSTATE_GS_SVB_INDEX */
566 if (emit) {
567 if (p->dev->gen == ILO_GEN(6))
568 gen6_wa_pipe_control_post_sync(p, false);
569
570 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
571 0, p->state.so_num_vertices, p->state.so_max_vertices,
572 false, p->cp);
573
574 if (session->hw_ctx_changed) {
575 int i;
576
577 /*
578 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
579 *
580 * "If a buffer is not enabled then the SVBI must be set to 0x0
581 * in order to not cause overflow in that SVBI."
582 *
583 * "If a buffer is not enabled then the MaxSVBI must be set to
584 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
585 */
586 for (i = 1; i < 4; i++) {
587 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
588 i, 0, 0xffffffff, false, p->cp);
589 }
590 }
591 }
592 }
593
594 void
595 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
596 const struct ilo_context *ilo,
597 struct gen6_pipeline_session *session)
598 {
599 /* 3DSTATE_CLIP */
600 if (DIRTY(RASTERIZER) || DIRTY(FS) || DIRTY(VIEWPORT) || DIRTY(FB)) {
601 bool enable_guardband = true;
602 unsigned i;
603
604 /*
605 * We do not do 2D clipping yet. Guard band test should only be enabled
606 * when the viewport is larger than the framebuffer.
607 */
608 for (i = 0; i < ilo->viewport.count; i++) {
609 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
610
611 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
612 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
613 enable_guardband = false;
614 break;
615 }
616 }
617
618 gen6_emit_3DSTATE_CLIP(p->dev, ilo->rasterizer,
619 ilo->fs, enable_guardband, 1, p->cp);
620 }
621 }
622
623 static void
624 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
625 const struct ilo_context *ilo,
626 struct gen6_pipeline_session *session)
627 {
628 /* 3DSTATE_SF */
629 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
630 gen6_emit_3DSTATE_SF(p->dev, ilo->rasterizer, ilo->fs,
631 (ilo->gs) ? ilo->gs : ilo->vs, p->cp);
632 }
633 }
634
635 void
636 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
637 const struct ilo_context *ilo,
638 struct gen6_pipeline_session *session)
639 {
640 /* 3DSTATE_DRAWING_RECTANGLE */
641 if (DIRTY(FB)) {
642 if (p->dev->gen == ILO_GEN(6))
643 gen6_wa_pipe_control_post_sync(p, false);
644
645 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
646 ilo->fb.state.width, ilo->fb.state.height, p->cp);
647 }
648 }
649
650 static void
651 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
652 const struct ilo_context *ilo,
653 struct gen6_pipeline_session *session)
654 {
655 /* 3DSTATE_CONSTANT_PS */
656 if (session->pcb_state_fs_changed) {
657 gen6_emit_3DSTATE_CONSTANT_PS(p->dev,
658 &p->state.wm.PUSH_CONSTANT_BUFFER,
659 &p->state.wm.PUSH_CONSTANT_BUFFER_size,
660 1, p->cp);
661 }
662
663 /* 3DSTATE_WM */
664 if (DIRTY(FS) || DIRTY(SAMPLER_FS) || DIRTY(BLEND) || DIRTY(DSA) ||
665 DIRTY(RASTERIZER) || session->kernel_bo_changed) {
666 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
667 const bool dual_blend = ilo->blend->dual_blend;
668 const bool cc_may_kill = (ilo->dsa->dw_alpha ||
669 ilo->blend->alpha_to_coverage);
670
671 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
672 gen6_wa_pipe_control_wm_max_threads_stall(p);
673
674 gen6_emit_3DSTATE_WM(p->dev, ilo->fs, num_samplers,
675 ilo->rasterizer, dual_blend, cc_may_kill, p->cp);
676 }
677 }
678
679 static void
680 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
681 const struct ilo_context *ilo,
682 struct gen6_pipeline_session *session)
683 {
684 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
685 if (DIRTY(SAMPLE_MASK) || DIRTY(FB)) {
686 const uint32_t *packed_sample_pos;
687
688 packed_sample_pos = (ilo->fb.num_samples > 1) ?
689 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
690
691 if (p->dev->gen == ILO_GEN(6)) {
692 gen6_wa_pipe_control_post_sync(p, false);
693 gen6_wa_pipe_control_wm_multisample_flush(p);
694 }
695
696 gen6_emit_3DSTATE_MULTISAMPLE(p->dev,
697 ilo->fb.num_samples, packed_sample_pos,
698 ilo->rasterizer->state.half_pixel_center, p->cp);
699
700 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
701 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
702 }
703 }
704
705 static void
706 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
707 const struct ilo_context *ilo,
708 struct gen6_pipeline_session *session)
709 {
710 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
711 if (DIRTY(FB) || session->batch_bo_changed) {
712 const struct ilo_zs_surface *zs;
713
714 if (ilo->fb.state.zsbuf) {
715 const struct ilo_surface_cso *surface =
716 (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;
717
718 assert(!surface->is_rt);
719 zs = &surface->u.zs;
720 }
721 else {
722 zs = &ilo->fb.null_zs;
723 }
724
725 if (p->dev->gen == ILO_GEN(6)) {
726 gen6_wa_pipe_control_post_sync(p, false);
727 gen6_wa_pipe_control_wm_depth_flush(p);
728 }
729
730 gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);
731
732 /* TODO */
733 gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
734 }
735 }
736
737 void
738 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
739 const struct ilo_context *ilo,
740 struct gen6_pipeline_session *session)
741 {
742 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
743 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
744 ilo->rasterizer->state.poly_stipple_enable) {
745 if (p->dev->gen == ILO_GEN(6))
746 gen6_wa_pipe_control_post_sync(p, false);
747
748 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
749 &ilo->poly_stipple, p->cp);
750
751 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
752 }
753
754 /* 3DSTATE_LINE_STIPPLE */
755 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
756 if (p->dev->gen == ILO_GEN(6))
757 gen6_wa_pipe_control_post_sync(p, false);
758
759 gen6_emit_3DSTATE_LINE_STIPPLE(p->dev,
760 ilo->rasterizer->state.line_stipple_pattern,
761 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
762 }
763
764 /* 3DSTATE_AA_LINE_PARAMETERS */
765 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
766 if (p->dev->gen == ILO_GEN(6))
767 gen6_wa_pipe_control_post_sync(p, false);
768
769 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
770 }
771 }
772
773 static void
774 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
775 const struct ilo_context *ilo,
776 struct gen6_pipeline_session *session)
777 {
778 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
779 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
780 p->state.SF_CLIP_VIEWPORT = gen7_emit_SF_CLIP_VIEWPORT(p->dev,
781 ilo->viewport.cso, ilo->viewport.count, p->cp);
782
783 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
784 ilo->viewport.cso, ilo->viewport.count, p->cp);
785
786 session->viewport_state_changed = true;
787 }
788 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
789 else if (DIRTY(VIEWPORT)) {
790 p->state.CLIP_VIEWPORT = gen6_emit_CLIP_VIEWPORT(p->dev,
791 ilo->viewport.cso, ilo->viewport.count, p->cp);
792
793 p->state.SF_VIEWPORT = gen6_emit_SF_VIEWPORT(p->dev,
794 ilo->viewport.cso, ilo->viewport.count, p->cp);
795
796 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
797 ilo->viewport.cso, ilo->viewport.count, p->cp);
798
799 session->viewport_state_changed = true;
800 }
801 }
802
803 static void
804 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
805 const struct ilo_context *ilo,
806 struct gen6_pipeline_session *session)
807 {
808 /* BLEND_STATE */
809 if (DIRTY(BLEND) || DIRTY(FB) || DIRTY(DSA)) {
810 p->state.BLEND_STATE = gen6_emit_BLEND_STATE(p->dev,
811 ilo->blend, &ilo->fb, ilo->dsa, p->cp);
812
813 session->cc_state_blend_changed = true;
814 }
815
816 /* COLOR_CALC_STATE */
817 if (DIRTY(DSA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
818 p->state.COLOR_CALC_STATE =
819 gen6_emit_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
820 ilo->dsa->alpha_ref, &ilo->blend_color, p->cp);
821
822 session->cc_state_cc_changed = true;
823 }
824
825 /* DEPTH_STENCIL_STATE */
826 if (DIRTY(DSA)) {
827 p->state.DEPTH_STENCIL_STATE =
828 gen6_emit_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
829
830 session->cc_state_dsa_changed = true;
831 }
832 }
833
834 static void
835 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
836 const struct ilo_context *ilo,
837 struct gen6_pipeline_session *session)
838 {
839 /* SCISSOR_RECT */
840 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
841 /* there should be as many scissors as there are viewports */
842 p->state.SCISSOR_RECT = gen6_emit_SCISSOR_RECT(p->dev,
843 &ilo->scissor, ilo->viewport.count, p->cp);
844
845 session->scissor_state_changed = true;
846 }
847 }
848
849 static void
850 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
851 const struct ilo_context *ilo,
852 struct gen6_pipeline_session *session)
853 {
854 /* SURFACE_STATEs for render targets */
855 if (DIRTY(FB)) {
856 const struct ilo_fb_state *fb = &ilo->fb;
857 const int offset = ILO_WM_DRAW_SURFACE(0);
858 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
859 int i;
860
861 for (i = 0; i < fb->state.nr_cbufs; i++) {
862 const struct ilo_surface_cso *surface =
863 (const struct ilo_surface_cso *) fb->state.cbufs[i];
864
865 assert(surface && surface->is_rt);
866 surface_state[i] =
867 gen6_emit_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
868 }
869
870 /*
871 * Upload at least one render target, as
872 * brw_update_renderbuffer_surfaces() does. I don't know why.
873 */
874 if (i == 0) {
875 struct ilo_view_surface null_surface;
876
877 ilo_gpe_init_view_surface_null(p->dev,
878 fb->state.width, fb->state.height,
879 1, 0, &null_surface);
880
881 surface_state[i] =
882 gen6_emit_SURFACE_STATE(p->dev, &null_surface, true, p->cp);
883
884 i++;
885 }
886
887 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
888
889 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
890 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
891
892 session->binding_table_fs_changed = true;
893 }
894 }
895
896 static void
897 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
898 const struct ilo_context *ilo,
899 struct gen6_pipeline_session *session)
900 {
901 const struct ilo_so_state *so = &ilo->so;
902
903 if (p->dev->gen != ILO_GEN(6))
904 return;
905
906 /* SURFACE_STATEs for stream output targets */
907 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
908 const struct pipe_stream_output_info *so_info =
909 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
910 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
911 const int offset = ILO_GS_SO_SURFACE(0);
912 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
913 int i;
914
915 for (i = 0; so_info && i < so_info->num_outputs; i++) {
916 const int target = so_info->output[i].output_buffer;
917 const struct pipe_stream_output_target *so_target =
918 (target < so->count) ? so->states[target] : NULL;
919
920 if (so_target) {
921 surface_state[i] = gen6_emit_so_SURFACE_STATE(p->dev,
922 so_target, so_info, i, p->cp);
923 }
924 else {
925 surface_state[i] = 0;
926 }
927 }
928
929 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
930
931 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
932 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
933
934 session->binding_table_gs_changed = true;
935 }
936 }
937
938 static void
939 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
940 const struct ilo_context *ilo,
941 int shader_type,
942 struct gen6_pipeline_session *session)
943 {
944 const struct ilo_view_state *view = &ilo->view[shader_type];
945 uint32_t *surface_state;
946 int offset, i;
947 bool skip = false;
948
949 /* SURFACE_STATEs for sampler views */
950 switch (shader_type) {
951 case PIPE_SHADER_VERTEX:
952 if (DIRTY(VIEW_VS)) {
953 offset = ILO_VS_TEXTURE_SURFACE(0);
954 surface_state = &p->state.vs.SURFACE_STATE[offset];
955
956 session->binding_table_vs_changed = true;
957 }
958 else {
959 skip = true;
960 }
961 break;
962 case PIPE_SHADER_FRAGMENT:
963 if (DIRTY(VIEW_FS)) {
964 offset = ILO_WM_TEXTURE_SURFACE(0);
965 surface_state = &p->state.wm.SURFACE_STATE[offset];
966
967 session->binding_table_fs_changed = true;
968 }
969 else {
970 skip = true;
971 }
972 break;
973 default:
974 skip = true;
975 break;
976 }
977
978 if (skip)
979 return;
980
981 for (i = 0; i < view->count; i++) {
982 if (view->states[i]) {
983 const struct ilo_view_cso *cso =
984 (const struct ilo_view_cso *) view->states[i];
985
986 surface_state[i] =
987 gen6_emit_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
988 }
989 else {
990 surface_state[i] = 0;
991 }
992 }
993
994 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
995
996 if (i && session->num_surfaces[shader_type] < offset + i)
997 session->num_surfaces[shader_type] = offset + i;
998 }
999
1000 static void
1001 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1002 const struct ilo_context *ilo,
1003 int shader_type,
1004 struct gen6_pipeline_session *session)
1005 {
1006 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[shader_type];
1007 uint32_t *surface_state;
1008 bool *binding_table_changed;
1009 int offset, count, i;
1010
1011 if (!DIRTY(CBUF))
1012 return;
1013
1014 /* SURFACE_STATEs for constant buffers */
1015 switch (shader_type) {
1016 case PIPE_SHADER_VERTEX:
1017 offset = ILO_VS_CONST_SURFACE(0);
1018 surface_state = &p->state.vs.SURFACE_STATE[offset];
1019 binding_table_changed = &session->binding_table_vs_changed;
1020 break;
1021 case PIPE_SHADER_FRAGMENT:
1022 offset = ILO_WM_CONST_SURFACE(0);
1023 surface_state = &p->state.wm.SURFACE_STATE[offset];
1024 binding_table_changed = &session->binding_table_fs_changed;
1025 break;
1026 default:
1027 return;
1028 break;
1029 }
1030
1031 /* constants are pushed via PCB */
1032 if (cbuf->enabled_mask == 0x1 && !cbuf->cso[0].resource) {
1033 memset(surface_state, 0, ILO_MAX_CONST_BUFFERS * 4);
1034 return;
1035 }
1036
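/* util_last_bit() gives one past the highest enabled constant buffer slot */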
1037 count = util_last_bit(cbuf->enabled_mask);
1038 for (i = 0; i < count; i++) {
1039 if (cbuf->cso[i].resource) {
1040 surface_state[i] = gen6_emit_SURFACE_STATE(p->dev,
1041 &cbuf->cso[i].surface, false, p->cp);
1042 }
1043 else {
1044 surface_state[i] = 0;
1045 }
1046 }
1047
1048 memset(&surface_state[count], 0, (ILO_MAX_CONST_BUFFERS - count) * 4);
1049
1050 if (count && session->num_surfaces[shader_type] < offset + count)
1051 session->num_surfaces[shader_type] = offset + count;
1052
1053 *binding_table_changed = true;
1054 }
1055
1056 static void
1057 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1058 const struct ilo_context *ilo,
1059 int shader_type,
1060 struct gen6_pipeline_session *session)
1061 {
1062 uint32_t *binding_table_state, *surface_state;
1063 int *binding_table_state_size, size;
1064 bool skip = false;
1065
1066 /* BINDING_TABLE_STATE */
1067 switch (shader_type) {
1068 case PIPE_SHADER_VERTEX:
1069 surface_state = p->state.vs.SURFACE_STATE;
1070 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1071 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1072
1073 skip = !session->binding_table_vs_changed;
1074 break;
1075 case PIPE_SHADER_GEOMETRY:
1076 surface_state = p->state.gs.SURFACE_STATE;
1077 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1078 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1079
1080 skip = !session->binding_table_gs_changed;
1081 break;
1082 case PIPE_SHADER_FRAGMENT:
1083 surface_state = p->state.wm.SURFACE_STATE;
1084 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1085 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1086
1087 skip = !session->binding_table_fs_changed;
1088 break;
1089 default:
1090 skip = true;
1091 break;
1092 }
1093
1094 if (skip)
1095 return;
1096
1097 /*
1098 * If we seemingly have fewer SURFACE_STATEs than before, it could be that
1099 * we did not touch those that reside at the tail in this upload. Loop
1100 * over them to figure out the real number of SURFACE_STATEs.
1101 */
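/*
 * The final size is thus at least session->num_surfaces[shader_type] and
 * also covers any trailing SURFACE_STATEs from a previous upload that are
 * still non-zero.
 */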
1102 for (size = *binding_table_state_size;
1103 size > session->num_surfaces[shader_type]; size--) {
1104 if (surface_state[size - 1])
1105 break;
1106 }
1107 if (size < session->num_surfaces[shader_type])
1108 size = session->num_surfaces[shader_type];
1109
1110 *binding_table_state = gen6_emit_BINDING_TABLE_STATE(p->dev,
1111 surface_state, size, p->cp);
1112 *binding_table_state_size = size;
1113 }
1114
1115 static void
1116 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1117 const struct ilo_context *ilo,
1118 int shader_type,
1119 struct gen6_pipeline_session *session)
1120 {
1121 const struct ilo_sampler_cso * const *samplers =
1122 ilo->sampler[shader_type].cso;
1123 const struct pipe_sampler_view * const *views =
1124 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1125 const int num_samplers = ilo->sampler[shader_type].count;
1126 const int num_views = ilo->view[shader_type].count;
1127 uint32_t *sampler_state, *border_color_state;
1128 bool emit_border_color = false;
1129 bool skip = false;
1130
1131 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1132 switch (shader_type) {
1133 case PIPE_SHADER_VERTEX:
1134 if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
1135 sampler_state = &p->state.vs.SAMPLER_STATE;
1136 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1137
1138 if (DIRTY(SAMPLER_VS))
1139 emit_border_color = true;
1140
1141 session->sampler_state_vs_changed = true;
1142 }
1143 else {
1144 skip = true;
1145 }
1146 break;
1147 case PIPE_SHADER_FRAGMENT:
1148 if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
1149 sampler_state = &p->state.wm.SAMPLER_STATE;
1150 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1151
1152 if (DIRTY(SAMPLER_FS))
1153 emit_border_color = true;
1154
1155 session->sampler_state_fs_changed = true;
1156 }
1157 else {
1158 skip = true;
1159 }
1160 break;
1161 default:
1162 skip = true;
1163 break;
1164 }
1165
1166 if (skip)
1167 return;
1168
1169 if (emit_border_color) {
1170 int i;
1171
1172 for (i = 0; i < num_samplers; i++) {
1173 border_color_state[i] = (samplers[i]) ?
1174 gen6_emit_SAMPLER_BORDER_COLOR_STATE(p->dev,
1175 samplers[i], p->cp) : 0;
1176 }
1177 }
1178
1179 /* should we take the minimum of num_samplers and num_views? */
1180 *sampler_state = gen6_emit_SAMPLER_STATE(p->dev,
1181 samplers, views,
1182 border_color_state,
1183 MIN2(num_samplers, num_views), p->cp);
1184 }
1185
1186 static void
1187 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1188 const struct ilo_context *ilo,
1189 struct gen6_pipeline_session *session)
1190 {
1191 /* push constant buffer for VS */
1192 if (DIRTY(VS) || DIRTY(CBUF) || DIRTY(CLIP)) {
1193 const int cbuf0_size = (ilo->vs) ?
1194 ilo_shader_get_kernel_param(ilo->vs,
1195 ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1196 const int clip_state_size = (ilo->vs) ?
1197 ilo_shader_get_kernel_param(ilo->vs,
1198 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1199 const int total_size = cbuf0_size + clip_state_size;
1200
1201 if (total_size) {
1202 void *pcb;
1203
1204 p->state.vs.PUSH_CONSTANT_BUFFER =
1205 gen6_emit_push_constant_buffer(p->dev, total_size, &pcb, p->cp);
1206 p->state.vs.PUSH_CONSTANT_BUFFER_size = total_size;
1207
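/*
 * The VS PCB layout below is cbuf0 first (zero-padded when the bound
 * user buffer is smaller than cbuf0_size), followed by the user clip
 * planes.
 */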
1208 if (cbuf0_size) {
1209 const struct ilo_cbuf_state *cbuf =
1210 &ilo->cbuf[PIPE_SHADER_VERTEX];
1211
1212 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1213 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1214 }
1215 else {
1216 memcpy(pcb, cbuf->cso[0].user_buffer,
1217 cbuf->cso[0].user_buffer_size);
1218 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1219 cbuf0_size - cbuf->cso[0].user_buffer_size);
1220 }
1221
1222 pcb += cbuf0_size;
1223 }
1224
1225 if (clip_state_size)
1226 memcpy(pcb, &ilo->clip, clip_state_size);
1227
1228 session->pcb_state_vs_changed = true;
1229 }
1230 else if (p->state.vs.PUSH_CONSTANT_BUFFER_size) {
1231 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1232 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1233
1234 session->pcb_state_vs_changed = true;
1235 }
1236 }
1237
1238 /* push constant buffer for FS */
1239 if (DIRTY(FS) || DIRTY(CBUF)) {
1240 const int cbuf0_size = (ilo->fs) ?
1241 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1242
1243 if (cbuf0_size) {
1244 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[PIPE_SHADER_FRAGMENT];
1245 void *pcb;
1246
1247 p->state.wm.PUSH_CONSTANT_BUFFER =
1248 gen6_emit_push_constant_buffer(p->dev, cbuf0_size, &pcb, p->cp);
1249 p->state.wm.PUSH_CONSTANT_BUFFER_size = cbuf0_size;
1250
1251 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1252 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1253 }
1254 else {
1255 memcpy(pcb, cbuf->cso[0].user_buffer,
1256 cbuf->cso[0].user_buffer_size);
1257 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1258 cbuf0_size - cbuf->cso[0].user_buffer_size);
1259 }
1260
1261 session->pcb_state_fs_changed = true;
1262 }
1263 else if (p->state.wm.PUSH_CONSTANT_BUFFER_size) {
1264 p->state.wm.PUSH_CONSTANT_BUFFER = 0;
1265 p->state.wm.PUSH_CONSTANT_BUFFER_size = 0;
1266
1267 session->pcb_state_fs_changed = true;
1268 }
1269 }
1270 }
1271
1272 #undef DIRTY
1273
1274 static void
1275 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1276 const struct ilo_context *ilo,
1277 struct gen6_pipeline_session *session)
1278 {
1279 /*
1280 * We try to make the order of the commands match, as closely as possible,
1281 * that of the classic i965 driver. It allows us to compare the command
1282 * streams easily.
1283 */
1284 gen6_pipeline_common_select(p, ilo, session);
1285 gen6_pipeline_gs_svbi(p, ilo, session);
1286 gen6_pipeline_common_sip(p, ilo, session);
1287 gen6_pipeline_vf_statistics(p, ilo, session);
1288 gen6_pipeline_common_base_address(p, ilo, session);
1289 gen6_pipeline_common_pointers_1(p, ilo, session);
1290 gen6_pipeline_common_urb(p, ilo, session);
1291 gen6_pipeline_common_pointers_2(p, ilo, session);
1292 gen6_pipeline_wm_multisample(p, ilo, session);
1293 gen6_pipeline_vs(p, ilo, session);
1294 gen6_pipeline_gs(p, ilo, session);
1295 gen6_pipeline_clip(p, ilo, session);
1296 gen6_pipeline_sf(p, ilo, session);
1297 gen6_pipeline_wm(p, ilo, session);
1298 gen6_pipeline_common_pointers_3(p, ilo, session);
1299 gen6_pipeline_wm_depth(p, ilo, session);
1300 gen6_pipeline_wm_raster(p, ilo, session);
1301 gen6_pipeline_sf_rect(p, ilo, session);
1302 gen6_pipeline_vf(p, ilo, session);
1303 gen6_pipeline_vf_draw(p, ilo, session);
1304 }
1305
1306 void
1307 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1308 const struct ilo_context *ilo,
1309 struct gen6_pipeline_session *session)
1310 {
1311 int shader_type;
1312
1313 gen6_pipeline_state_viewports(p, ilo, session);
1314 gen6_pipeline_state_cc(p, ilo, session);
1315 gen6_pipeline_state_scissors(p, ilo, session);
1316 gen6_pipeline_state_pcb(p, ilo, session);
1317
1318 /*
1319 * upload all SURFACE_STATEs together so that we know there is minimal
1320 * padding
1321 */
1322 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1323 gen6_pipeline_state_surfaces_so(p, ilo, session);
1324 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1325 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1326 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1327 }
1328
1329 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1330 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1331 /* this must be called after all SURFACE_STATEs are uploaded */
1332 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1333 }
1334 }
1335
1336 void
1337 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1338 const struct ilo_context *ilo,
1339 struct gen6_pipeline_session *session)
1340 {
1341 memset(session, 0, sizeof(*session));
1342 session->pipe_dirty = ilo->dirty;
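/* u_reduced_prim() maps the primitive type down to POINTS, LINES, or TRIANGLES */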
1343 session->reduced_prim = u_reduced_prim(ilo->draw->mode);
1344
1345 /* available space before the session */
1346 session->init_cp_space = ilo_cp_space(p->cp);
1347
1348 session->hw_ctx_changed =
1349 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1350
1351 if (session->hw_ctx_changed) {
1352 /* these should be enough to force everything to be uploaded */
1353 session->batch_bo_changed = true;
1354 session->state_bo_changed = true;
1355 session->kernel_bo_changed = true;
1356 session->prim_changed = true;
1357 session->primitive_restart_changed = true;
1358 }
1359 else {
1360 /*
1361 * Any state that involves resources needs to be re-emitted when the
1362 * batch bo changes. This is because we do not pin the resources and
1363 * their offsets (or existence) may change between batch buffers.
1364 *
1365 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1366 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1367 * a temporary workaround.
1368 */
1369 session->batch_bo_changed =
1370 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1371
1372 session->state_bo_changed =
1373 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1374 session->kernel_bo_changed =
1375 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1376 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1377 session->primitive_restart_changed =
1378 (p->state.primitive_restart != ilo->draw->primitive_restart);
1379 }
1380 }
1381
1382 void
1383 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1384 const struct ilo_context *ilo,
1385 struct gen6_pipeline_session *session)
1386 {
1387 /* force all states to be uploaded if the state bo changed */
1388 if (session->state_bo_changed)
1389 session->pipe_dirty = ILO_DIRTY_ALL;
1390 else
1391 session->pipe_dirty = ilo->dirty;
1392
1393 session->emit_draw_states(p, ilo, session);
1394
1395 /* force all commands to be uploaded if the HW context changed */
1396 if (session->hw_ctx_changed)
1397 session->pipe_dirty = ILO_DIRTY_ALL;
1398 else
1399 session->pipe_dirty = ilo->dirty;
1400
1401 session->emit_draw_commands(p, ilo, session);
1402 }
1403
1404 void
1405 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1406 const struct ilo_context *ilo,
1407 struct gen6_pipeline_session *session)
1408 {
1409 /* sanity check size estimation */
1410 assert(session->init_cp_space - ilo_cp_space(p->cp) <=
1411 ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo));
1412
1413 p->state.reduced_prim = session->reduced_prim;
1414 p->state.primitive_restart = ilo->draw->primitive_restart;
1415 }
1416
1417 static void
1418 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1419 const struct ilo_context *ilo)
1420 {
1421 struct gen6_pipeline_session session;
1422
1423 gen6_pipeline_prepare(p, ilo, &session);
1424
1425 session.emit_draw_states = gen6_pipeline_states;
1426 session.emit_draw_commands = gen6_pipeline_commands;
1427
1428 gen6_pipeline_draw(p, ilo, &session);
1429 gen6_pipeline_end(p, ilo, &session);
1430 }
1431
1432 void
1433 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1434 {
1435 if (p->dev->gen == ILO_GEN(6))
1436 gen6_wa_pipe_control_post_sync(p, false);
1437
1438 gen6_emit_PIPE_CONTROL(p->dev,
1439 PIPE_CONTROL_INSTRUCTION_FLUSH |
1440 PIPE_CONTROL_WRITE_FLUSH |
1441 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1442 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1443 PIPE_CONTROL_TC_FLUSH |
1444 PIPE_CONTROL_NO_WRITE |
1445 PIPE_CONTROL_CS_STALL,
1446 0, 0, false, p->cp);
1447 }
1448
1449 void
1450 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1451 struct intel_bo *bo, int index)
1452 {
1453 if (p->dev->gen == ILO_GEN(6))
1454 gen6_wa_pipe_control_post_sync(p, true);
1455
1456 gen6_emit_PIPE_CONTROL(p->dev,
1457 PIPE_CONTROL_WRITE_TIMESTAMP,
1458 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1459 true, p->cp);
1460 }
1461
1462 void
1463 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1464 struct intel_bo *bo, int index)
1465 {
1466 if (p->dev->gen == ILO_GEN(6))
1467 gen6_wa_pipe_control_post_sync(p, false);
1468
1469 gen6_emit_PIPE_CONTROL(p->dev,
1470 PIPE_CONTROL_DEPTH_STALL |
1471 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1472 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1473 true, p->cp);
1474 }
1475
1476 static int
1477 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1478 const struct ilo_context *ilo)
1479 {
1480 static int size;
1481 enum ilo_gpe_gen6_command cmd;
1482
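/*
 * The counts below are worst-case and do not depend on the context, so
 * the total is computed once and cached in the function-local static.
 */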
1483 if (size)
1484 return size;
1485
1486 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1487 int count;
1488
1489 switch (cmd) {
1490 case ILO_GPE_GEN6_PIPE_CONTROL:
1491 /* for the workaround */
1492 count = 2;
1493 /* another one after 3DSTATE_URB */
1494 count += 1;
1495 /* and another one after 3DSTATE_CONSTANT_VS */
1496 count += 1;
1497 break;
1498 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1499 /* there are 4 SVBIs */
1500 count = 4;
1501 break;
1502 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1503 count = 33;
1504 break;
1505 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1506 count = 34;
1507 break;
1508 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1509 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1510 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1511 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1512 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1513 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1514 /* media commands */
1515 count = 0;
1516 break;
1517 default:
1518 count = 1;
1519 break;
1520 }
1521
1522 if (count)
1523 size += ilo_gpe_gen6_estimate_command_size(p->dev, cmd, count);
1524 }
1525
1526 return size;
1527 }
1528
1529 static int
1530 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1531 const struct ilo_context *ilo)
1532 {
1533 static int static_size;
1534 int shader_type, count, size;
1535
1536 if (!static_size) {
1537 struct {
1538 enum ilo_gpe_gen6_state state;
1539 int count;
1540 } static_states[] = {
1541 /* viewports */
1542 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1543 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1544 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1545 /* cc */
1546 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1547 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1548 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1549 /* scissors */
1550 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1551 /* binding table (vs, gs, fs) */
1552 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1553 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1554 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
1555 };
1556 int i;
1557
1558 for (i = 0; i < Elements(static_states); i++) {
1559 static_size += ilo_gpe_gen6_estimate_state_size(p->dev,
1560 static_states[i].state,
1561 static_states[i].count);
1562 }
1563 }
1564
1565 size = static_size;
1566
1567 /*
1568 * render targets (fs)
1569 * stream outputs (gs)
1570 * sampler views (vs, fs)
1571 * constant buffers (vs, fs)
1572 */
1573 count = ilo->fb.state.nr_cbufs;
1574
1575 if (ilo->gs) {
1576 const struct pipe_stream_output_info *so_info =
1577 ilo_shader_get_kernel_so_info(ilo->gs);
1578
1579 count += so_info->num_outputs;
1580 }
1581 else if (ilo->vs) {
1582 const struct pipe_stream_output_info *so_info =
1583 ilo_shader_get_kernel_so_info(ilo->vs);
1584
1585 count += so_info->num_outputs;
1586 }
1587
1588 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1589 count += ilo->view[shader_type].count;
1590 count += util_bitcount(ilo->cbuf[shader_type].enabled_mask);
1591 }
1592
1593 if (count) {
1594 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1595 ILO_GPE_GEN6_SURFACE_STATE, count);
1596 }
1597
1598 /* samplers (vs, fs) */
1599 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1600 count = ilo->sampler[shader_type].count;
1601 if (count) {
1602 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1603 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1604 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1605 ILO_GPE_GEN6_SAMPLER_STATE, count);
1606 }
1607 }
1608
1609 /* pcb (vs) */
1610 if (ilo->vs) {
1611 const int cbuf0_size =
1612 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_PCB_CBUF0_SIZE);
1613 const int ucp_size =
1614 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE);
1615
1616 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1617 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size + ucp_size);
1618 }
1619
1620 /* pcb (fs) */
1621 if (ilo->fs) {
1622 const int cbuf0_size =
1623 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE);
1624
1625 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1626 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size);
1627 }
1628
1629 return size;
1630 }
1631
1632 static int
1633 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1634 enum ilo_3d_pipeline_action action,
1635 const void *arg)
1636 {
1637 int size;
1638
1639 switch (action) {
1640 case ILO_3D_PIPELINE_DRAW:
1641 {
1642 const struct ilo_context *ilo = arg;
1643
1644 size = gen6_pipeline_estimate_commands(p, ilo) +
1645 gen6_pipeline_estimate_states(p, ilo);
1646 }
1647 break;
1648 case ILO_3D_PIPELINE_FLUSH:
1649 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1650 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1651 break;
1652 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1653 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1654 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1655 break;
1656 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1657 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1658 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1659 break;
1660 default:
1661 assert(!"unknown 3D pipeline action");
1662 size = 0;
1663 break;
1664 }
1665
1666 return size;
1667 }
1668
1669 void
1670 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1671 {
1672 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1673 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1674 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1675 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1676 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1677 }