[mesa.git] / src / gallium / drivers / ilo / ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "genhw/genhw.h"
29 #include "util/u_dual_blend.h"
30 #include "util/u_prim.h"
31
32 #include "ilo_blitter.h"
33 #include "ilo_3d.h"
34 #include "ilo_context.h"
35 #include "ilo_cp.h"
36 #include "ilo_gpe_gen6.h"
37 #include "ilo_gpe_gen7.h"
38 #include "ilo_shader.h"
39 #include "ilo_state.h"
40 #include "ilo_3d_pipeline.h"
41 #include "ilo_3d_pipeline_gen6.h"
42
43 /**
44 * This should be called before any depth stall flush (including those
45 * produced by non-pipelined state commands) or cache flush on GEN6.
46 *
47 * \see intel_emit_post_sync_nonzero_flush()
48 */
49 static void
50 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
51 bool caller_post_sync)
52 {
53 assert(p->dev->gen == ILO_GEN(6));
54
55 /* emit once */
56 if (p->state.has_gen6_wa_pipe_control)
57 return;
58
59 p->state.has_gen6_wa_pipe_control = true;
60
61 /*
62 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
63 *
64 * "Pipe-control with CS-stall bit set must be sent BEFORE the
65 * pipe-control with a post-sync op and no write-cache flushes."
66 *
68 * The post-sync PIPE_CONTROL emitted below itself requires this CS-stall flush first.
68 */
69 gen6_emit_PIPE_CONTROL(p->dev,
70 GEN6_PIPE_CONTROL_CS_STALL |
71 GEN6_PIPE_CONTROL_PIXEL_SCOREBOARD_STALL,
72 NULL, 0, false, p->cp);
73
74 /* the caller will emit the post-sync op */
75 if (caller_post_sync)
76 return;
77
78 /*
79 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
80 *
81 * "Before any depth stall flush (including those produced by
82 * non-pipelined state commands), software needs to first send a
83 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
84 *
85 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
86 * PIPE_CONTROL with any non-zero post-sync-op is required."
87 */
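   /* the post-sync write below goes to a scratch bo kept for this workaround */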
88 gen6_emit_PIPE_CONTROL(p->dev,
89 GEN6_PIPE_CONTROL_WRITE_IMM,
90 p->workaround_bo, 0, false, p->cp);
91 }
92
93 static void
94 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
95 {
96 assert(p->dev->gen == ILO_GEN(6));
97
98 gen6_wa_pipe_control_post_sync(p, false);
99
100 /*
101 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
102 *
103 * "Driver must guarentee that all the caches in the depth pipe are
104 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
105 * requires driver to send a PIPE_CONTROL with a CS stall along with a
106 * Depth Flush prior to this command."
107 */
108 gen6_emit_PIPE_CONTROL(p->dev,
109 GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH |
110 GEN6_PIPE_CONTROL_CS_STALL,
111 0, 0, false, p->cp);
112 }
113
114 static void
115 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
116 {
117 assert(p->dev->gen == ILO_GEN(6));
118
119 gen6_wa_pipe_control_post_sync(p, false);
120
121 /*
122 * According to intel_emit_depth_stall_flushes() of classic i965, we need
123 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
124 * commands.
125 */
126 gen6_emit_PIPE_CONTROL(p->dev,
127 GEN6_PIPE_CONTROL_DEPTH_STALL,
128 NULL, 0, false, p->cp);
129
130 gen6_emit_PIPE_CONTROL(p->dev,
131 GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH,
132 NULL, 0, false, p->cp);
133
134 gen6_emit_PIPE_CONTROL(p->dev,
135 GEN6_PIPE_CONTROL_DEPTH_STALL,
136 NULL, 0, false, p->cp);
137 }
138
139 static void
140 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
141 {
142 assert(p->dev->gen == ILO_GEN(6));
143
144 /* the post-sync workaround should cover this already */
145 if (p->state.has_gen6_wa_pipe_control)
146 return;
147
148 /*
149 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
150 *
151 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
152 * field set (DW1 Bit 1), must be issued prior to any change to the
153 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
154 */
155 gen6_emit_PIPE_CONTROL(p->dev,
156 GEN6_PIPE_CONTROL_PIXEL_SCOREBOARD_STALL,
157 NULL, 0, false, p->cp);
158
159 }
160
161 static void
162 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
163 {
164 assert(p->dev->gen == ILO_GEN(6));
165
166 gen6_wa_pipe_control_post_sync(p, false);
167
168 /*
169 * According to upload_vs_state() of classic i965, we need to emit a
170 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command may remain
171 * buffered by the VS fixed-function unit until that unit goes idle.
172 */
173 gen6_emit_PIPE_CONTROL(p->dev,
174 GEN6_PIPE_CONTROL_DEPTH_STALL |
175 GEN6_PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
176 GEN6_PIPE_CONTROL_STATE_CACHE_INVALIDATE,
177 NULL, 0, false, p->cp);
178 }
179
180 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
181
182 void
183 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
184 const struct ilo_context *ilo,
185 struct gen6_pipeline_session *session)
186 {
187 /* PIPELINE_SELECT */
188 if (session->hw_ctx_changed) {
189 if (p->dev->gen == ILO_GEN(6))
190 gen6_wa_pipe_control_post_sync(p, false);
191
192 gen6_emit_PIPELINE_SELECT(p->dev, 0x0, p->cp);
193 }
194 }
195
196 void
197 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
198 const struct ilo_context *ilo,
199 struct gen6_pipeline_session *session)
200 {
201 /* STATE_SIP */
202 if (session->hw_ctx_changed) {
203 if (p->dev->gen == ILO_GEN(6))
204 gen6_wa_pipe_control_post_sync(p, false);
205
206 gen6_emit_STATE_SIP(p->dev, 0, p->cp);
207 }
208 }
209
210 void
211 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
212 const struct ilo_context *ilo,
213 struct gen6_pipeline_session *session)
214 {
215 /* STATE_BASE_ADDRESS */
216 if (session->state_bo_changed || session->kernel_bo_changed ||
217 session->batch_bo_changed) {
218 if (p->dev->gen == ILO_GEN(6))
219 gen6_wa_pipe_control_post_sync(p, false);
220
221 gen6_emit_STATE_BASE_ADDRESS(p->dev,
222 NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
223 0, 0, 0, 0, p->cp);
224
225 /*
226 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
227 *
228 * "The following commands must be reissued following any change to
229 * the base addresses:
230 *
231 * * 3DSTATE_BINDING_TABLE_POINTERS
232 * * 3DSTATE_SAMPLER_STATE_POINTERS
233 * * 3DSTATE_VIEWPORT_STATE_POINTERS
234 * * 3DSTATE_CC_POINTERS
235 * * MEDIA_STATE_POINTERS"
236 *
237 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
238 * reasonable to also reissue the command. The same applies to the PCB.
239 */
240 session->viewport_state_changed = true;
241
242 session->cc_state_blend_changed = true;
243 session->cc_state_dsa_changed = true;
244 session->cc_state_cc_changed = true;
245
246 session->scissor_state_changed = true;
247
248 session->binding_table_vs_changed = true;
249 session->binding_table_gs_changed = true;
250 session->binding_table_fs_changed = true;
251
252 session->sampler_state_vs_changed = true;
253 session->sampler_state_gs_changed = true;
254 session->sampler_state_fs_changed = true;
255
256 session->pcb_state_vs_changed = true;
257 session->pcb_state_gs_changed = true;
258 session->pcb_state_fs_changed = true;
259 }
260 }
261
262 static void
263 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
264 const struct ilo_context *ilo,
265 struct gen6_pipeline_session *session)
266 {
267 /* 3DSTATE_URB */
268 if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
269 const bool gs_active = (ilo->gs || (ilo->vs &&
270 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_GEN6_SO)));
271 int vs_entry_size, gs_entry_size;
272 int vs_total_size, gs_total_size;
273
274 vs_entry_size = (ilo->vs) ?
275 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;
276
277 /*
278 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
279 * share VUE handles. The VUE allocation size must be large enough to
280 * store either the VF outputs (number of VERTEX_ELEMENTs) or the VS outputs.
281 *
282 * I am not sure if the PRM explicitly states that VF and VS share VUE
283 * handles. But here is a citation that implies so:
284 *
285 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
286 *
287 * "Once a FF stage that spawn threads has sufficient input to
288 * initiate a thread, it must guarantee that it is safe to request
289 * the thread initiation. For all these FF stages, this check is
290 * based on :
291 *
292 * - The availability of output URB entries:
293 * - VS: As the input URB entries are overwritten with the
294 * VS-generated output data, output URB availability isn't a
295 * factor."
296 */
297 if (vs_entry_size < ilo->ve->count)
298 vs_entry_size = ilo->ve->count;
299
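      /*
       * When there is no user GS but GEN6 stream output is active, a
       * pass-through GS handles the SO writes; its URB entries are sized
       * like the VS entries below.
       */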
300 gs_entry_size = (ilo->gs) ?
301 ilo_shader_get_kernel_param(ilo->gs, ILO_KERNEL_OUTPUT_COUNT) :
302 (gs_active) ? vs_entry_size : 0;
303
304 /* in bytes */
305 vs_entry_size *= sizeof(float) * 4;
306 gs_entry_size *= sizeof(float) * 4;
307 vs_total_size = ilo->dev->urb_size;
308
309 if (gs_active) {
310 vs_total_size /= 2;
311 gs_total_size = vs_total_size;
312 }
313 else {
314 gs_total_size = 0;
315 }
316
317 gen6_emit_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
318 vs_entry_size, gs_entry_size, p->cp);
319
320 /*
321 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
322 *
323 * "Because of a urb corruption caused by allocating a previous
324 * gsunit's urb entry to vsunit software is required to send a
325 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
326 * size == 0) plus a dummy DRAW call before any case where VS will
327 * be taking over GS URB space."
328 */
329 if (p->state.gs.active && !gs_active)
330 ilo_3d_pipeline_emit_flush_gen6(p);
331
332 p->state.gs.active = gs_active;
333 }
334 }
335
336 static void
337 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
338 const struct ilo_context *ilo,
339 struct gen6_pipeline_session *session)
340 {
341 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
342 if (session->viewport_state_changed) {
343 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
344 p->state.CLIP_VIEWPORT,
345 p->state.SF_VIEWPORT,
346 p->state.CC_VIEWPORT, p->cp);
347 }
348 }
349
350 static void
351 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
352 const struct ilo_context *ilo,
353 struct gen6_pipeline_session *session)
354 {
355 /* 3DSTATE_CC_STATE_POINTERS */
356 if (session->cc_state_blend_changed ||
357 session->cc_state_dsa_changed ||
358 session->cc_state_cc_changed) {
359 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev,
360 p->state.BLEND_STATE,
361 p->state.DEPTH_STENCIL_STATE,
362 p->state.COLOR_CALC_STATE, p->cp);
363 }
364
365 /* 3DSTATE_SAMPLER_STATE_POINTERS */
366 if (session->sampler_state_vs_changed ||
367 session->sampler_state_gs_changed ||
368 session->sampler_state_fs_changed) {
369 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
370 p->state.vs.SAMPLER_STATE,
371 0,
372 p->state.wm.SAMPLER_STATE, p->cp);
373 }
374 }
375
376 static void
377 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
378 const struct ilo_context *ilo,
379 struct gen6_pipeline_session *session)
380 {
381 /* 3DSTATE_SCISSOR_STATE_POINTERS */
382 if (session->scissor_state_changed) {
383 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
384 p->state.SCISSOR_RECT, p->cp);
385 }
386
387 /* 3DSTATE_BINDING_TABLE_POINTERS */
388 if (session->binding_table_vs_changed ||
389 session->binding_table_gs_changed ||
390 session->binding_table_fs_changed) {
391 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
392 p->state.vs.BINDING_TABLE_STATE,
393 p->state.gs.BINDING_TABLE_STATE,
394 p->state.wm.BINDING_TABLE_STATE, p->cp);
395 }
396 }
397
398 void
399 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
400 const struct ilo_context *ilo,
401 struct gen6_pipeline_session *session)
402 {
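   /*
    * On GEN7.5+, primitive restart is programmed through the new 3DSTATE_VF
    * command instead of 3DSTATE_INDEX_BUFFER.
    */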
403 if (p->dev->gen >= ILO_GEN(7.5)) {
404 /* 3DSTATE_INDEX_BUFFER */
405 if (DIRTY(IB) || session->batch_bo_changed) {
406 gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
407 &ilo->ib, false, p->cp);
408 }
409
410 /* 3DSTATE_VF */
411 if (session->primitive_restart_changed) {
412 gen7_emit_3DSTATE_VF(p->dev, ilo->draw->primitive_restart,
413 ilo->draw->restart_index, p->cp);
414 }
415 }
416 else {
417 /* 3DSTATE_INDEX_BUFFER */
418 if (DIRTY(IB) || session->primitive_restart_changed ||
419 session->batch_bo_changed) {
420 gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
421 &ilo->ib, ilo->draw->primitive_restart, p->cp);
422 }
423 }
424
425 /* 3DSTATE_VERTEX_BUFFERS */
426 if (DIRTY(VB) || DIRTY(VE) || session->batch_bo_changed)
427 gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev, ilo->ve, &ilo->vb, p->cp);
428
429 /* 3DSTATE_VERTEX_ELEMENTS */
430 if (DIRTY(VE) || DIRTY(VS)) {
431 const struct ilo_ve_state *ve = ilo->ve;
432 bool last_velement_edgeflag = false;
433 bool prepend_generate_ids = false;
434
435 if (ilo->vs) {
436 if (ilo_shader_get_kernel_param(ilo->vs,
437 ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
438 /* we rely on the state tracker here */
439 assert(ilo_shader_get_kernel_param(ilo->vs,
440 ILO_KERNEL_INPUT_COUNT) == ve->count);
441
442 last_velement_edgeflag = true;
443 }
444
445 if (ilo_shader_get_kernel_param(ilo->vs,
446 ILO_KERNEL_VS_INPUT_INSTANCEID) ||
447 ilo_shader_get_kernel_param(ilo->vs,
448 ILO_KERNEL_VS_INPUT_VERTEXID))
449 prepend_generate_ids = true;
450 }
451
452 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
453 last_velement_edgeflag, prepend_generate_ids, p->cp);
454 }
455 }
456
457 void
458 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
459 const struct ilo_context *ilo,
460 struct gen6_pipeline_session *session)
461 {
462 /* 3DSTATE_VF_STATISTICS */
463 if (session->hw_ctx_changed)
464 gen6_emit_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
465 }
466
467 static void
468 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
469 const struct ilo_context *ilo,
470 struct gen6_pipeline_session *session)
471 {
472 /* 3DPRIMITIVE */
473 gen6_emit_3DPRIMITIVE(p->dev, ilo->draw, &ilo->ib, false, p->cp);
474 p->state.has_gen6_wa_pipe_control = false;
475 }
476
477 void
478 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
479 const struct ilo_context *ilo,
480 struct gen6_pipeline_session *session)
481 {
482 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(SAMPLER_VS) ||
483 session->kernel_bo_changed);
484 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
485
486 /*
487 * the classic i965 does this in upload_vs_state(), citing a spec that I
488 * cannot find
489 */
490 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
491 gen6_wa_pipe_control_post_sync(p, false);
492
493 /* 3DSTATE_CONSTANT_VS */
494 if (emit_3dstate_constant_vs) {
495 gen6_emit_3DSTATE_CONSTANT_VS(p->dev,
496 &p->state.vs.PUSH_CONSTANT_BUFFER,
497 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
498 1, p->cp);
499 }
500
501 /* 3DSTATE_VS */
502 if (emit_3dstate_vs) {
503 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
504
505 gen6_emit_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
506 }
507
508 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
509 gen6_wa_pipe_control_vs_const_flush(p);
510 }
511
512 static void
513 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
514 const struct ilo_context *ilo,
515 struct gen6_pipeline_session *session)
516 {
517 /* 3DSTATE_CONSTANT_GS */
518 if (session->pcb_state_gs_changed)
519 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
520
521 /* 3DSTATE_GS */
522 if (DIRTY(GS) || DIRTY(VS) ||
523 session->prim_changed || session->kernel_bo_changed) {
524 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
525
526 gen6_emit_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
527 }
528 }
529
530 bool
531 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
532 const struct ilo_context *ilo,
533 struct gen6_pipeline_session *session)
534 {
535 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
536 const struct pipe_stream_output_info *so_info =
537 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
538 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
539 unsigned max_svbi = 0xffffffff;
540 int i;
541
542 for (i = 0; i < so_info->num_outputs; i++) {
543 const int output_buffer = so_info->output[i].output_buffer;
544 const struct pipe_stream_output_target *so =
545 ilo->so.states[output_buffer];
546 const int struct_size = so_info->stride[output_buffer] * 4;
547 const int elem_size = so_info->output[i].num_components * 4;
548 int buf_size, count;
549
550 if (!so) {
551 max_svbi = 0;
552 break;
553 }
554
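         /*
          * count the complete vertices that fit in the remaining buffer space;
          * a trailing partial struct still counts when it can hold this element
          */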
555 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
556
557 count = buf_size / struct_size;
558 if (buf_size % struct_size >= elem_size)
559 count++;
560
561 if (count < max_svbi)
562 max_svbi = count;
563 }
564
565 if (p->state.so_max_vertices != max_svbi) {
566 p->state.so_max_vertices = max_svbi;
567 return true;
568 }
569 }
570
571 return false;
572 }
573
574 static void
575 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
576 const struct ilo_context *ilo,
577 struct gen6_pipeline_session *session)
578 {
579 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
580
581 /* 3DSTATE_GS_SVB_INDEX */
582 if (emit) {
583 if (p->dev->gen == ILO_GEN(6))
584 gen6_wa_pipe_control_post_sync(p, false);
585
586 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
587 0, p->state.so_num_vertices, p->state.so_max_vertices,
588 false, p->cp);
589
590 if (session->hw_ctx_changed) {
591 int i;
592
593 /*
594 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
595 *
596 * "If a buffer is not enabled then the SVBI must be set to 0x0
597 * in order to not cause overflow in that SVBI."
598 *
599 * "If a buffer is not enabled then the MaxSVBI must be set to
600 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
601 */
602 for (i = 1; i < 4; i++) {
603 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
604 i, 0, 0xffffffff, false, p->cp);
605 }
606 }
607 }
608 }
609
610 void
611 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
612 const struct ilo_context *ilo,
613 struct gen6_pipeline_session *session)
614 {
615 /* 3DSTATE_CLIP */
616 if (DIRTY(RASTERIZER) || DIRTY(FS) || DIRTY(VIEWPORT) || DIRTY(FB)) {
617 bool enable_guardband = true;
618 unsigned i;
619
620 /*
621 * We do not do 2D clipping yet. Guard band test should only be enabled
622 * when the viewport is larger than the framebuffer.
623 */
624 for (i = 0; i < ilo->viewport.count; i++) {
625 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
626
627 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
628 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
629 enable_guardband = false;
630 break;
631 }
632 }
633
634 gen6_emit_3DSTATE_CLIP(p->dev, ilo->rasterizer,
635 ilo->fs, enable_guardband, 1, p->cp);
636 }
637 }
638
639 static void
640 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
641 const struct ilo_context *ilo,
642 struct gen6_pipeline_session *session)
643 {
644 /* 3DSTATE_SF */
645 if (DIRTY(RASTERIZER) || DIRTY(FS))
646 gen6_emit_3DSTATE_SF(p->dev, ilo->rasterizer, ilo->fs, p->cp);
647 }
648
649 void
650 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
651 const struct ilo_context *ilo,
652 struct gen6_pipeline_session *session)
653 {
654 /* 3DSTATE_DRAWING_RECTANGLE */
655 if (DIRTY(FB)) {
656 if (p->dev->gen == ILO_GEN(6))
657 gen6_wa_pipe_control_post_sync(p, false);
658
659 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
660 ilo->fb.state.width, ilo->fb.state.height, p->cp);
661 }
662 }
663
664 static void
665 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
666 const struct ilo_context *ilo,
667 struct gen6_pipeline_session *session)
668 {
669 /* 3DSTATE_CONSTANT_PS */
670 if (session->pcb_state_fs_changed) {
671 gen6_emit_3DSTATE_CONSTANT_PS(p->dev,
672 &p->state.wm.PUSH_CONSTANT_BUFFER,
673 &p->state.wm.PUSH_CONSTANT_BUFFER_size,
674 1, p->cp);
675 }
676
677 /* 3DSTATE_WM */
678 if (DIRTY(FS) || DIRTY(SAMPLER_FS) || DIRTY(BLEND) || DIRTY(DSA) ||
679 DIRTY(RASTERIZER) || session->kernel_bo_changed) {
680 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
681 const bool dual_blend = ilo->blend->dual_blend;
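      /*
       * fragments may be discarded late, by alpha test or alpha-to-coverage,
       * which 3DSTATE_WM needs to know about
       */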
682 const bool cc_may_kill = (ilo->dsa->dw_alpha ||
683 ilo->blend->alpha_to_coverage);
684
685 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
686 gen6_wa_pipe_control_wm_max_threads_stall(p);
687
688 gen6_emit_3DSTATE_WM(p->dev, ilo->fs, num_samplers,
689 ilo->rasterizer, dual_blend, cc_may_kill, 0, p->cp);
690 }
691 }
692
693 static void
694 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
695 const struct ilo_context *ilo,
696 struct gen6_pipeline_session *session)
697 {
698 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
699 if (DIRTY(SAMPLE_MASK) || DIRTY(FB)) {
700 const uint32_t *packed_sample_pos;
701
702 packed_sample_pos = (ilo->fb.num_samples > 1) ?
703 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
704
705 if (p->dev->gen == ILO_GEN(6)) {
706 gen6_wa_pipe_control_post_sync(p, false);
707 gen6_wa_pipe_control_wm_multisample_flush(p);
708 }
709
710 gen6_emit_3DSTATE_MULTISAMPLE(p->dev,
711 ilo->fb.num_samples, packed_sample_pos,
712 ilo->rasterizer->state.half_pixel_center, p->cp);
713
714 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
715 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
716 }
717 }
718
719 static void
720 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
721 const struct ilo_context *ilo,
722 struct gen6_pipeline_session *session)
723 {
724 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
725 if (DIRTY(FB) || session->batch_bo_changed) {
726 const struct ilo_zs_surface *zs;
727 uint32_t clear_params;
728
729 if (ilo->fb.state.zsbuf) {
730 const struct ilo_surface_cso *surface =
731 (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;
732 const struct ilo_texture_slice *slice =
733 ilo_texture_get_slice(ilo_texture(surface->base.texture),
734 surface->base.u.tex.level, surface->base.u.tex.first_layer);
735
736 assert(!surface->is_rt);
737
738 zs = &surface->u.zs;
739 clear_params = slice->clear_value;
740 }
741 else {
742 zs = &ilo->fb.null_zs;
743 clear_params = 0;
744 }
745
746 if (p->dev->gen == ILO_GEN(6)) {
747 gen6_wa_pipe_control_post_sync(p, false);
748 gen6_wa_pipe_control_wm_depth_flush(p);
749 }
750
751 gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);
752 gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev, zs, p->cp);
753 gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev, zs, p->cp);
754 gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev, clear_params, p->cp);
755 }
756 }
757
758 void
759 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
760 const struct ilo_context *ilo,
761 struct gen6_pipeline_session *session)
762 {
763 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
764 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
765 ilo->rasterizer->state.poly_stipple_enable) {
766 if (p->dev->gen == ILO_GEN(6))
767 gen6_wa_pipe_control_post_sync(p, false);
768
769 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
770 &ilo->poly_stipple, p->cp);
771
772 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
773 }
774
775 /* 3DSTATE_LINE_STIPPLE */
776 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
777 if (p->dev->gen == ILO_GEN(6))
778 gen6_wa_pipe_control_post_sync(p, false);
779
780 gen6_emit_3DSTATE_LINE_STIPPLE(p->dev,
781 ilo->rasterizer->state.line_stipple_pattern,
782 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
783 }
784
785 /* 3DSTATE_AA_LINE_PARAMETERS */
786 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
787 if (p->dev->gen == ILO_GEN(6))
788 gen6_wa_pipe_control_post_sync(p, false);
789
790 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
791 }
792 }
793
794 static void
795 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
796 const struct ilo_context *ilo,
797 struct gen6_pipeline_session *session)
798 {
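   /*
    * this function is also reused by the GEN7 pipeline, which programs the
    * combined SF_CLIP_VIEWPORT instead of separate SF and CLIP viewports
    */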
799 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
800 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
801 p->state.SF_CLIP_VIEWPORT = gen7_emit_SF_CLIP_VIEWPORT(p->dev,
802 ilo->viewport.cso, ilo->viewport.count, p->cp);
803
804 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
805 ilo->viewport.cso, ilo->viewport.count, p->cp);
806
807 session->viewport_state_changed = true;
808 }
809 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
810 else if (DIRTY(VIEWPORT)) {
811 p->state.CLIP_VIEWPORT = gen6_emit_CLIP_VIEWPORT(p->dev,
812 ilo->viewport.cso, ilo->viewport.count, p->cp);
813
814 p->state.SF_VIEWPORT = gen6_emit_SF_VIEWPORT(p->dev,
815 ilo->viewport.cso, ilo->viewport.count, p->cp);
816
817 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
818 ilo->viewport.cso, ilo->viewport.count, p->cp);
819
820 session->viewport_state_changed = true;
821 }
822 }
823
824 static void
825 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
826 const struct ilo_context *ilo,
827 struct gen6_pipeline_session *session)
828 {
829 /* BLEND_STATE */
830 if (DIRTY(BLEND) || DIRTY(FB) || DIRTY(DSA)) {
831 p->state.BLEND_STATE = gen6_emit_BLEND_STATE(p->dev,
832 ilo->blend, &ilo->fb, ilo->dsa, p->cp);
833
834 session->cc_state_blend_changed = true;
835 }
836
837 /* COLOR_CALC_STATE */
838 if (DIRTY(DSA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
839 p->state.COLOR_CALC_STATE =
840 gen6_emit_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
841 ilo->dsa->alpha_ref, &ilo->blend_color, p->cp);
842
843 session->cc_state_cc_changed = true;
844 }
845
846 /* DEPTH_STENCIL_STATE */
847 if (DIRTY(DSA)) {
848 p->state.DEPTH_STENCIL_STATE =
849 gen6_emit_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
850
851 session->cc_state_dsa_changed = true;
852 }
853 }
854
855 static void
856 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
857 const struct ilo_context *ilo,
858 struct gen6_pipeline_session *session)
859 {
860 /* SCISSOR_RECT */
861 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
862 /* there should be as many scissors as there are viewports */
863 p->state.SCISSOR_RECT = gen6_emit_SCISSOR_RECT(p->dev,
864 &ilo->scissor, ilo->viewport.count, p->cp);
865
866 session->scissor_state_changed = true;
867 }
868 }
869
870 static void
871 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
872 const struct ilo_context *ilo,
873 struct gen6_pipeline_session *session)
874 {
875 /* SURFACE_STATEs for render targets */
876 if (DIRTY(FB)) {
877 const struct ilo_fb_state *fb = &ilo->fb;
878 const int offset = ILO_WM_DRAW_SURFACE(0);
879 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
880 int i;
881
882 for (i = 0; i < fb->state.nr_cbufs; i++) {
883 const struct ilo_surface_cso *surface =
884 (const struct ilo_surface_cso *) fb->state.cbufs[i];
885
886 if (!surface) {
887 surface_state[i] =
888 gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);
889 }
890 else {
891 assert(surface && surface->is_rt);
892 surface_state[i] =
893 gen6_emit_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
894 }
895 }
896
897 /*
898 * Upload at least one render target, as
899 * brw_update_renderbuffer_surfaces() does. I don't know why.
900 */
901 if (i == 0) {
902 surface_state[i] =
903 gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);
904
905 i++;
906 }
907
908 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
909
910 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
911 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
912
913 session->binding_table_fs_changed = true;
914 }
915 }
916
917 static void
918 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
919 const struct ilo_context *ilo,
920 struct gen6_pipeline_session *session)
921 {
922 const struct ilo_so_state *so = &ilo->so;
923
924 if (p->dev->gen != ILO_GEN(6))
925 return;
926
927 /* SURFACE_STATEs for stream output targets */
928 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
929 const struct pipe_stream_output_info *so_info =
930 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
931 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
932 const int offset = ILO_GS_SO_SURFACE(0);
933 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
934 int i;
935
936 for (i = 0; so_info && i < so_info->num_outputs; i++) {
937 const int target = so_info->output[i].output_buffer;
938 const struct pipe_stream_output_target *so_target =
939 (target < so->count) ? so->states[target] : NULL;
940
941 if (so_target) {
942 surface_state[i] = gen6_emit_so_SURFACE_STATE(p->dev,
943 so_target, so_info, i, p->cp);
944 }
945 else {
946 surface_state[i] = 0;
947 }
948 }
949
950 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
951
952 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
953 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
954
955 session->binding_table_gs_changed = true;
956 }
957 }
958
959 static void
960 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
961 const struct ilo_context *ilo,
962 int shader_type,
963 struct gen6_pipeline_session *session)
964 {
965 const struct ilo_view_state *view = &ilo->view[shader_type];
966 uint32_t *surface_state;
967 int offset, i;
968 bool skip = false;
969
970 /* SURFACE_STATEs for sampler views */
971 switch (shader_type) {
972 case PIPE_SHADER_VERTEX:
973 if (DIRTY(VIEW_VS)) {
974 offset = ILO_VS_TEXTURE_SURFACE(0);
975 surface_state = &p->state.vs.SURFACE_STATE[offset];
976
977 session->binding_table_vs_changed = true;
978 }
979 else {
980 skip = true;
981 }
982 break;
983 case PIPE_SHADER_FRAGMENT:
984 if (DIRTY(VIEW_FS)) {
985 offset = ILO_WM_TEXTURE_SURFACE(0);
986 surface_state = &p->state.wm.SURFACE_STATE[offset];
987
988 session->binding_table_fs_changed = true;
989 }
990 else {
991 skip = true;
992 }
993 break;
994 default:
995 skip = true;
996 break;
997 }
998
999 if (skip)
1000 return;
1001
1002 for (i = 0; i < view->count; i++) {
1003 if (view->states[i]) {
1004 const struct ilo_view_cso *cso =
1005 (const struct ilo_view_cso *) view->states[i];
1006
1007 surface_state[i] =
1008 gen6_emit_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
1009 }
1010 else {
1011 surface_state[i] = 0;
1012 }
1013 }
1014
1015 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
1016
1017 if (i && session->num_surfaces[shader_type] < offset + i)
1018 session->num_surfaces[shader_type] = offset + i;
1019 }
1020
1021 static void
1022 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1023 const struct ilo_context *ilo,
1024 int shader_type,
1025 struct gen6_pipeline_session *session)
1026 {
1027 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[shader_type];
1028 uint32_t *surface_state;
1029 bool *binding_table_changed;
1030 int offset, count, i;
1031
1032 if (!DIRTY(CBUF))
1033 return;
1034
1035 /* SURFACE_STATEs for constant buffers */
1036 switch (shader_type) {
1037 case PIPE_SHADER_VERTEX:
1038 offset = ILO_VS_CONST_SURFACE(0);
1039 surface_state = &p->state.vs.SURFACE_STATE[offset];
1040 binding_table_changed = &session->binding_table_vs_changed;
1041 break;
1042 case PIPE_SHADER_FRAGMENT:
1043 offset = ILO_WM_CONST_SURFACE(0);
1044 surface_state = &p->state.wm.SURFACE_STATE[offset];
1045 binding_table_changed = &session->binding_table_fs_changed;
1046 break;
1047 default:
1048 return;
1049 break;
1050 }
1051
1052 /* constants are pushed via PCB */
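   /* i.e., only CBUF0 is bound and it is a user buffer with no backing resource */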
1053 if (cbuf->enabled_mask == 0x1 && !cbuf->cso[0].resource) {
1054 memset(surface_state, 0, ILO_MAX_CONST_BUFFERS * 4);
1055 return;
1056 }
1057
1058 count = util_last_bit(cbuf->enabled_mask);
1059 for (i = 0; i < count; i++) {
1060 if (cbuf->cso[i].resource) {
1061 surface_state[i] = gen6_emit_SURFACE_STATE(p->dev,
1062 &cbuf->cso[i].surface, false, p->cp);
1063 }
1064 else {
1065 surface_state[i] = 0;
1066 }
1067 }
1068
1069 memset(&surface_state[count], 0, (ILO_MAX_CONST_BUFFERS - count) * 4);
1070
1071 if (count && session->num_surfaces[shader_type] < offset + count)
1072 session->num_surfaces[shader_type] = offset + count;
1073
1074 *binding_table_changed = true;
1075 }
1076
1077 static void
1078 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1079 const struct ilo_context *ilo,
1080 int shader_type,
1081 struct gen6_pipeline_session *session)
1082 {
1083 uint32_t *binding_table_state, *surface_state;
1084 int *binding_table_state_size, size;
1085 bool skip = false;
1086
1087 /* BINDING_TABLE_STATE */
1088 switch (shader_type) {
1089 case PIPE_SHADER_VERTEX:
1090 surface_state = p->state.vs.SURFACE_STATE;
1091 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1092 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1093
1094 skip = !session->binding_table_vs_changed;
1095 break;
1096 case PIPE_SHADER_GEOMETRY:
1097 surface_state = p->state.gs.SURFACE_STATE;
1098 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1099 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1100
1101 skip = !session->binding_table_gs_changed;
1102 break;
1103 case PIPE_SHADER_FRAGMENT:
1104 surface_state = p->state.wm.SURFACE_STATE;
1105 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1106 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1107
1108 skip = !session->binding_table_fs_changed;
1109 break;
1110 default:
1111 skip = true;
1112 break;
1113 }
1114
1115 if (skip)
1116 return;
1117
1118 /*
1119 * If we seemingly have fewer SURFACE_STATEs than before, it could be that
1120 * we did not touch the ones residing at the tail in this upload. Loop
1121 * over them to figure out the real number of SURFACE_STATEs.
1122 */
1123 for (size = *binding_table_state_size;
1124 size > session->num_surfaces[shader_type]; size--) {
1125 if (surface_state[size - 1])
1126 break;
1127 }
1128 if (size < session->num_surfaces[shader_type])
1129 size = session->num_surfaces[shader_type];
1130
1131 *binding_table_state = gen6_emit_BINDING_TABLE_STATE(p->dev,
1132 surface_state, size, p->cp);
1133 *binding_table_state_size = size;
1134 }
1135
1136 static void
1137 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1138 const struct ilo_context *ilo,
1139 int shader_type,
1140 struct gen6_pipeline_session *session)
1141 {
1142 const struct ilo_sampler_cso * const *samplers =
1143 ilo->sampler[shader_type].cso;
1144 const struct pipe_sampler_view * const *views =
1145 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1146 const int num_samplers = ilo->sampler[shader_type].count;
1147 const int num_views = ilo->view[shader_type].count;
1148 uint32_t *sampler_state, *border_color_state;
1149 bool emit_border_color = false;
1150 bool skip = false;
1151
1152 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1153 switch (shader_type) {
1154 case PIPE_SHADER_VERTEX:
1155 if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
1156 sampler_state = &p->state.vs.SAMPLER_STATE;
1157 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1158
1159 if (DIRTY(SAMPLER_VS))
1160 emit_border_color = true;
1161
1162 session->sampler_state_vs_changed = true;
1163 }
1164 else {
1165 skip = true;
1166 }
1167 break;
1168 case PIPE_SHADER_FRAGMENT:
1169 if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
1170 sampler_state = &p->state.wm.SAMPLER_STATE;
1171 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1172
1173 if (DIRTY(SAMPLER_FS))
1174 emit_border_color = true;
1175
1176 session->sampler_state_fs_changed = true;
1177 }
1178 else {
1179 skip = true;
1180 }
1181 break;
1182 default:
1183 skip = true;
1184 break;
1185 }
1186
1187 if (skip)
1188 return;
1189
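   /* border colors depend only on the sampler CSOs and are re-emitted only when the samplers are dirty */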
1190 if (emit_border_color) {
1191 int i;
1192
1193 for (i = 0; i < num_samplers; i++) {
1194 border_color_state[i] = (samplers[i]) ?
1195 gen6_emit_SAMPLER_BORDER_COLOR_STATE(p->dev,
1196 samplers[i], p->cp) : 0;
1197 }
1198 }
1199
1200 /* should we take the minimum of num_samplers and num_views? */
1201 *sampler_state = gen6_emit_SAMPLER_STATE(p->dev,
1202 samplers, views,
1203 border_color_state,
1204 MIN2(num_samplers, num_views), p->cp);
1205 }
1206
1207 static void
1208 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1209 const struct ilo_context *ilo,
1210 struct gen6_pipeline_session *session)
1211 {
1212 /* push constant buffer for VS */
1213 if (DIRTY(VS) || DIRTY(CBUF) || DIRTY(CLIP)) {
1214 const int cbuf0_size = (ilo->vs) ?
1215 ilo_shader_get_kernel_param(ilo->vs,
1216 ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1217 const int clip_state_size = (ilo->vs) ?
1218 ilo_shader_get_kernel_param(ilo->vs,
1219 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1220 const int total_size = cbuf0_size + clip_state_size;
1221
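      /*
       * the VS push constant buffer holds the CBUF0 contents first, followed
       * by the user clip planes
       */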
1222 if (total_size) {
1223 void *pcb;
1224
1225 p->state.vs.PUSH_CONSTANT_BUFFER =
1226 gen6_emit_push_constant_buffer(p->dev, total_size, &pcb, p->cp);
1227 p->state.vs.PUSH_CONSTANT_BUFFER_size = total_size;
1228
1229 if (cbuf0_size) {
1230 const struct ilo_cbuf_state *cbuf =
1231 &ilo->cbuf[PIPE_SHADER_VERTEX];
1232
1233 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1234 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1235 }
1236 else {
1237 memcpy(pcb, cbuf->cso[0].user_buffer,
1238 cbuf->cso[0].user_buffer_size);
1239 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1240 cbuf0_size - cbuf->cso[0].user_buffer_size);
1241 }
1242
1243 pcb += cbuf0_size;
1244 }
1245
1246 if (clip_state_size)
1247 memcpy(pcb, &ilo->clip, clip_state_size);
1248
1249 session->pcb_state_vs_changed = true;
1250 }
1251 else if (p->state.vs.PUSH_CONSTANT_BUFFER_size) {
1252 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1253 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1254
1255 session->pcb_state_vs_changed = true;
1256 }
1257 }
1258
1259 /* push constant buffer for FS */
1260 if (DIRTY(FS) || DIRTY(CBUF)) {
1261 const int cbuf0_size = (ilo->fs) ?
1262 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1263
1264 if (cbuf0_size) {
1265 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[PIPE_SHADER_FRAGMENT];
1266 void *pcb;
1267
1268 p->state.wm.PUSH_CONSTANT_BUFFER =
1269 gen6_emit_push_constant_buffer(p->dev, cbuf0_size, &pcb, p->cp);
1270 p->state.wm.PUSH_CONSTANT_BUFFER_size = cbuf0_size;
1271
1272 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1273 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1274 }
1275 else {
1276 memcpy(pcb, cbuf->cso[0].user_buffer,
1277 cbuf->cso[0].user_buffer_size);
1278 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1279 cbuf0_size - cbuf->cso[0].user_buffer_size);
1280 }
1281
1282 session->pcb_state_fs_changed = true;
1283 }
1284 else if (p->state.wm.PUSH_CONSTANT_BUFFER_size) {
1285 p->state.wm.PUSH_CONSTANT_BUFFER = 0;
1286 p->state.wm.PUSH_CONSTANT_BUFFER_size = 0;
1287
1288 session->pcb_state_fs_changed = true;
1289 }
1290 }
1291 }
1292
1293 #undef DIRTY
1294
1295 static void
1296 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1297 const struct ilo_context *ilo,
1298 struct gen6_pipeline_session *session)
1299 {
1300 /*
1301 * We try to make the order of the commands match, as closely as possible,
1302 * that of the classic i965 driver. It allows us to compare the command
1303 * streams easily.
1304 */
1305 gen6_pipeline_common_select(p, ilo, session);
1306 gen6_pipeline_gs_svbi(p, ilo, session);
1307 gen6_pipeline_common_sip(p, ilo, session);
1308 gen6_pipeline_vf_statistics(p, ilo, session);
1309 gen6_pipeline_common_base_address(p, ilo, session);
1310 gen6_pipeline_common_pointers_1(p, ilo, session);
1311 gen6_pipeline_common_urb(p, ilo, session);
1312 gen6_pipeline_common_pointers_2(p, ilo, session);
1313 gen6_pipeline_wm_multisample(p, ilo, session);
1314 gen6_pipeline_vs(p, ilo, session);
1315 gen6_pipeline_gs(p, ilo, session);
1316 gen6_pipeline_clip(p, ilo, session);
1317 gen6_pipeline_sf(p, ilo, session);
1318 gen6_pipeline_wm(p, ilo, session);
1319 gen6_pipeline_common_pointers_3(p, ilo, session);
1320 gen6_pipeline_wm_depth(p, ilo, session);
1321 gen6_pipeline_wm_raster(p, ilo, session);
1322 gen6_pipeline_sf_rect(p, ilo, session);
1323 gen6_pipeline_vf(p, ilo, session);
1324 gen6_pipeline_vf_draw(p, ilo, session);
1325 }
1326
1327 void
1328 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1329 const struct ilo_context *ilo,
1330 struct gen6_pipeline_session *session)
1331 {
1332 int shader_type;
1333
1334 gen6_pipeline_state_viewports(p, ilo, session);
1335 gen6_pipeline_state_cc(p, ilo, session);
1336 gen6_pipeline_state_scissors(p, ilo, session);
1337 gen6_pipeline_state_pcb(p, ilo, session);
1338
1339 /*
1340 * upload all SURFACE_STATEs together so that we know there is minimal
1341 * padding
1342 */
1343 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1344 gen6_pipeline_state_surfaces_so(p, ilo, session);
1345 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1346 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1347 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1348 }
1349
1350 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1351 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1352 /* this must be called after all SURFACE_STATEs are uploaded */
1353 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1354 }
1355 }
1356
1357 void
1358 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1359 const struct ilo_context *ilo,
1360 struct gen6_pipeline_session *session)
1361 {
1362 memset(session, 0, sizeof(*session));
1363 session->pipe_dirty = ilo->dirty;
1364 session->reduced_prim = u_reduced_prim(ilo->draw->mode);
1365
1366 /* available space before the session */
1367 session->init_cp_space = ilo_cp_space(p->cp);
1368
1369 session->hw_ctx_changed =
1370 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1371
1372 if (session->hw_ctx_changed) {
1373 /* these should be enough to force everything to be uploaded again */
1374 session->batch_bo_changed = true;
1375 session->state_bo_changed = true;
1376 session->kernel_bo_changed = true;
1377 session->prim_changed = true;
1378 session->primitive_restart_changed = true;
1379 }
1380 else {
1381 /*
1382 * Any state that involves resources needs to be re-emitted when the
1383 * batch bo changed. This is because we do not pin the resources and
1384 * their offsets (or existence) may change between batch buffers.
1385 *
1386 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1387 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1388 * a temporary workaround.
1389 */
1390 session->batch_bo_changed =
1391 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1392
1393 session->state_bo_changed =
1394 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1395 session->kernel_bo_changed =
1396 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1397 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1398 session->primitive_restart_changed =
1399 (p->state.primitive_restart != ilo->draw->primitive_restart);
1400 }
1401 }
1402
1403 void
1404 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1405 const struct ilo_context *ilo,
1406 struct gen6_pipeline_session *session)
1407 {
1408 /* force all states to be uploaded if the state bo changed */
1409 if (session->state_bo_changed)
1410 session->pipe_dirty = ILO_DIRTY_ALL;
1411 else
1412 session->pipe_dirty = ilo->dirty;
1413
1414 session->emit_draw_states(p, ilo, session);
1415
1416 /* force all commands to be uploaded if the HW context changed */
1417 if (session->hw_ctx_changed)
1418 session->pipe_dirty = ILO_DIRTY_ALL;
1419 else
1420 session->pipe_dirty = ilo->dirty;
1421
1422 session->emit_draw_commands(p, ilo, session);
1423 }
1424
1425 void
1426 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1427 const struct ilo_context *ilo,
1428 struct gen6_pipeline_session *session)
1429 {
1430 /* sanity check size estimation */
1431 assert(session->init_cp_space - ilo_cp_space(p->cp) <=
1432 ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo));
1433
1434 p->state.reduced_prim = session->reduced_prim;
1435 p->state.primitive_restart = ilo->draw->primitive_restart;
1436 }
1437
1438 static void
1439 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1440 const struct ilo_context *ilo)
1441 {
1442 struct gen6_pipeline_session session;
1443
1444 gen6_pipeline_prepare(p, ilo, &session);
1445
1446 session.emit_draw_states = gen6_pipeline_states;
1447 session.emit_draw_commands = gen6_pipeline_commands;
1448
1449 gen6_pipeline_draw(p, ilo, &session);
1450 gen6_pipeline_end(p, ilo, &session);
1451 }
1452
1453 void
1454 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1455 {
1456 if (p->dev->gen == ILO_GEN(6))
1457 gen6_wa_pipe_control_post_sync(p, false);
1458
1459 gen6_emit_PIPE_CONTROL(p->dev,
1460 GEN6_PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
1461 GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH |
1462 GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1463 GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE |
1464 GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1465 GEN6_PIPE_CONTROL_WRITE_NONE |
1466 GEN6_PIPE_CONTROL_CS_STALL,
1467 0, 0, false, p->cp);
1468 }
1469
1470 void
1471 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1472 struct intel_bo *bo, int index)
1473 {
1474 if (p->dev->gen == ILO_GEN(6))
1475 gen6_wa_pipe_control_post_sync(p, true);
1476
1477 gen6_emit_PIPE_CONTROL(p->dev,
1478 GEN6_PIPE_CONTROL_WRITE_TIMESTAMP,
1479 bo, index * sizeof(uint64_t),
1480 true, p->cp);
1481 }
1482
1483 void
1484 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1485 struct intel_bo *bo, int index)
1486 {
1487 if (p->dev->gen == ILO_GEN(6))
1488 gen6_wa_pipe_control_post_sync(p, false);
1489
1490 gen6_emit_PIPE_CONTROL(p->dev,
1491 GEN6_PIPE_CONTROL_DEPTH_STALL |
1492 GEN6_PIPE_CONTROL_WRITE_PS_DEPTH_COUNT,
1493 bo, index * sizeof(uint64_t),
1494 true, p->cp);
1495 }
1496
1497 void
1498 ilo_3d_pipeline_emit_write_statistics_gen6(struct ilo_3d_pipeline *p,
1499 struct intel_bo *bo, int index)
1500 {
1501 uint32_t regs[] = {
1502 GEN6_REG_IA_VERTICES_COUNT,
1503 GEN6_REG_IA_PRIMITIVES_COUNT,
1504 GEN6_REG_VS_INVOCATION_COUNT,
1505 GEN6_REG_GS_INVOCATION_COUNT,
1506 GEN6_REG_GS_PRIMITIVES_COUNT,
1507 GEN6_REG_CL_INVOCATION_COUNT,
1508 GEN6_REG_CL_PRIMITIVES_COUNT,
1509 GEN6_REG_PS_INVOCATION_COUNT,
1510 p->dev->gen >= ILO_GEN(7) ? GEN6_REG_HS_INVOCATION_COUNT : 0,
1511 p->dev->gen >= ILO_GEN(7) ? GEN6_REG_DS_INVOCATION_COUNT : 0,
1512 0,
1513 };
1514 int i;
1515
1516 p->emit_flush(p);
1517
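   /*
    * Each counter is a 64-bit register pair stored with two
    * MI_STORE_REGISTER_MEMs.  Zero entries in regs[] are padding slots and
    * are written as immediate zeros to keep the destination layout fixed.
    */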
1518 for (i = 0; i < Elements(regs); i++) {
1519 const uint32_t bo_offset = (index + i) * sizeof(uint64_t);
1520
1521 if (regs[i]) {
1522 /* store lower 32 bits */
1523 gen6_emit_MI_STORE_REGISTER_MEM(p->dev,
1524 bo, bo_offset, regs[i], p->cp);
1525 /* store higher 32 bits */
1526 gen6_emit_MI_STORE_REGISTER_MEM(p->dev,
1527 bo, bo_offset + 4, regs[i] + 4, p->cp);
1528 }
1529 else {
1530 gen6_emit_MI_STORE_DATA_IMM(p->dev,
1531 bo, bo_offset, 0, true, p->cp);
1532 }
1533 }
1534 }
1535
1536 static void
1537 gen6_rectlist_vs_to_sf(struct ilo_3d_pipeline *p,
1538 const struct ilo_blitter *blitter,
1539 struct gen6_rectlist_session *session)
1540 {
1541 gen6_emit_3DSTATE_CONSTANT_VS(p->dev, NULL, NULL, 0, p->cp);
1542 gen6_emit_3DSTATE_VS(p->dev, NULL, 0, p->cp);
1543
1544 gen6_wa_pipe_control_vs_const_flush(p);
1545
1546 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
1547 gen6_emit_3DSTATE_GS(p->dev, NULL, NULL, 0, p->cp);
1548
1549 gen6_emit_3DSTATE_CLIP(p->dev, NULL, NULL, false, 0, p->cp);
1550 gen6_emit_3DSTATE_SF(p->dev, NULL, NULL, p->cp);
1551 }
1552
1553 static void
1554 gen6_rectlist_wm(struct ilo_3d_pipeline *p,
1555 const struct ilo_blitter *blitter,
1556 struct gen6_rectlist_session *session)
1557 {
1558 uint32_t hiz_op;
1559
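   /* map the blitter op to the HiZ operation to be programmed in 3DSTATE_WM */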
1560 switch (blitter->op) {
1561 case ILO_BLITTER_RECTLIST_CLEAR_ZS:
1562 hiz_op = GEN6_WM_DW4_DEPTH_CLEAR;
1563 break;
1564 case ILO_BLITTER_RECTLIST_RESOLVE_Z:
1565 hiz_op = GEN6_WM_DW4_DEPTH_RESOLVE;
1566 break;
1567 case ILO_BLITTER_RECTLIST_RESOLVE_HIZ:
1568 hiz_op = GEN6_WM_DW4_HIZ_RESOLVE;
1569 break;
1570 default:
1571 hiz_op = 0;
1572 break;
1573 }
1574
1575 gen6_emit_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
1576
1577 gen6_wa_pipe_control_wm_max_threads_stall(p);
1578 gen6_emit_3DSTATE_WM(p->dev, NULL, 0, NULL, false, false, hiz_op, p->cp);
1579 }
1580
1581 static void
1582 gen6_rectlist_wm_depth(struct ilo_3d_pipeline *p,
1583 const struct ilo_blitter *blitter,
1584 struct gen6_rectlist_session *session)
1585 {
1586 gen6_wa_pipe_control_wm_depth_flush(p);
1587
1588 if (blitter->uses & (ILO_BLITTER_USE_FB_DEPTH |
1589 ILO_BLITTER_USE_FB_STENCIL)) {
1590 gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev,
1591 &blitter->fb.dst.u.zs, p->cp);
1592 }
1593
1594 if (blitter->uses & ILO_BLITTER_USE_FB_DEPTH) {
1595 gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev,
1596 &blitter->fb.dst.u.zs, p->cp);
1597 }
1598
1599 if (blitter->uses & ILO_BLITTER_USE_FB_STENCIL) {
1600 gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev,
1601 &blitter->fb.dst.u.zs, p->cp);
1602 }
1603
1604 gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev,
1605 blitter->depth_clear_value, p->cp);
1606 }
1607
1608 static void
1609 gen6_rectlist_wm_multisample(struct ilo_3d_pipeline *p,
1610 const struct ilo_blitter *blitter,
1611 struct gen6_rectlist_session *session)
1612 {
1613 const uint32_t *packed_sample_pos = (blitter->fb.num_samples > 1) ?
1614 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
1615
1616 gen6_wa_pipe_control_wm_multisample_flush(p);
1617
1618 gen6_emit_3DSTATE_MULTISAMPLE(p->dev, blitter->fb.num_samples,
1619 packed_sample_pos, true, p->cp);
1620
1621 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
1622 (1 << blitter->fb.num_samples) - 1, p->cp);
1623 }
1624
1625 static void
1626 gen6_rectlist_commands(struct ilo_3d_pipeline *p,
1627 const struct ilo_blitter *blitter,
1628 struct gen6_rectlist_session *session)
1629 {
1630 gen6_wa_pipe_control_post_sync(p, false);
1631
1632 gen6_rectlist_wm_multisample(p, blitter, session);
1633
1634 gen6_emit_STATE_BASE_ADDRESS(p->dev,
1635 NULL, /* General State Base */
1636 p->cp->bo, /* Surface State Base */
1637 p->cp->bo, /* Dynamic State Base */
1638 NULL, /* Indirect Object Base */
1639 NULL, /* Instruction Base */
1640 0, 0, 0, 0, p->cp);
1641
1642 gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev,
1643 &blitter->ve, &blitter->vb, p->cp);
1644
1645 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev,
1646 &blitter->ve, false, false, p->cp);
1647
1648 gen6_emit_3DSTATE_URB(p->dev,
1649 p->dev->urb_size, 0, blitter->ve.count * 4 * sizeof(float), 0, p->cp);
1650 /* 3DSTATE_URB workaround */
1651 if (p->state.gs.active) {
1652 ilo_3d_pipeline_emit_flush_gen6(p);
1653 p->state.gs.active = false;
1654 }
1655
1656 if (blitter->uses &
1657 (ILO_BLITTER_USE_DSA | ILO_BLITTER_USE_CC)) {
1658 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev, 0,
1659 session->DEPTH_STENCIL_STATE, session->COLOR_CALC_STATE, p->cp);
1660 }
1661
1662 gen6_rectlist_vs_to_sf(p, blitter, session);
1663 gen6_rectlist_wm(p, blitter, session);
1664
1665 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1666 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
1667 0, 0, session->CC_VIEWPORT, p->cp);
1668 }
1669
1670 gen6_rectlist_wm_depth(p, blitter, session);
1671
1672 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
1673 blitter->fb.width, blitter->fb.height, p->cp);
1674
1675 gen6_emit_3DPRIMITIVE(p->dev, &blitter->draw, NULL, true, p->cp);
1676 }
1677
1678 static void
1679 gen6_rectlist_states(struct ilo_3d_pipeline *p,
1680 const struct ilo_blitter *blitter,
1681 struct gen6_rectlist_session *session)
1682 {
1683 if (blitter->uses & ILO_BLITTER_USE_DSA) {
1684 session->DEPTH_STENCIL_STATE =
1685 gen6_emit_DEPTH_STENCIL_STATE(p->dev, &blitter->dsa, p->cp);
1686 }
1687
1688 if (blitter->uses & ILO_BLITTER_USE_CC) {
1689 session->COLOR_CALC_STATE =
1690 gen6_emit_COLOR_CALC_STATE(p->dev, &blitter->cc.stencil_ref,
1691 blitter->cc.alpha_ref, &blitter->cc.blend_color, p->cp);
1692 }
1693
1694 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1695 session->CC_VIEWPORT =
1696 gen6_emit_CC_VIEWPORT(p->dev, &blitter->viewport, 1, p->cp);
1697 }
1698 }
1699
1700 static void
1701 ilo_3d_pipeline_emit_rectlist_gen6(struct ilo_3d_pipeline *p,
1702 const struct ilo_blitter *blitter)
1703 {
1704 struct gen6_rectlist_session session;
1705
1706 memset(&session, 0, sizeof(session));
1707 gen6_rectlist_states(p, blitter, &session);
1708 gen6_rectlist_commands(p, blitter, &session);
1709 }
1710
1711 static int
1712 gen6_pipeline_max_command_size(const struct ilo_3d_pipeline *p)
1713 {
1714 static int size;
1715
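   /*
    * sum the worst-case sizes of all commands a single draw may emit; the
    * result is computed once and cached
    */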
1716 if (!size) {
1717 size += GEN6_3DSTATE_CONSTANT_ANY__SIZE * 3;
1718 size += GEN6_3DSTATE_GS_SVB_INDEX__SIZE * 4;
1719 size += GEN6_PIPE_CONTROL__SIZE * 5;
1720
1721 size +=
1722 GEN6_STATE_BASE_ADDRESS__SIZE +
1723 GEN6_STATE_SIP__SIZE +
1724 GEN6_3DSTATE_VF_STATISTICS__SIZE +
1725 GEN6_PIPELINE_SELECT__SIZE +
1726 GEN6_3DSTATE_BINDING_TABLE_POINTERS__SIZE +
1727 GEN6_3DSTATE_SAMPLER_STATE_POINTERS__SIZE +
1728 GEN6_3DSTATE_URB__SIZE +
1729 GEN6_3DSTATE_VERTEX_BUFFERS__SIZE +
1730 GEN6_3DSTATE_VERTEX_ELEMENTS__SIZE +
1731 GEN6_3DSTATE_INDEX_BUFFER__SIZE +
1732 GEN6_3DSTATE_VIEWPORT_STATE_POINTERS__SIZE +
1733 GEN6_3DSTATE_CC_STATE_POINTERS__SIZE +
1734 GEN6_3DSTATE_SCISSOR_STATE_POINTERS__SIZE +
1735 GEN6_3DSTATE_VS__SIZE +
1736 GEN6_3DSTATE_GS__SIZE +
1737 GEN6_3DSTATE_CLIP__SIZE +
1738 GEN6_3DSTATE_SF__SIZE +
1739 GEN6_3DSTATE_WM__SIZE +
1740 GEN6_3DSTATE_SAMPLE_MASK__SIZE +
1741 GEN6_3DSTATE_DRAWING_RECTANGLE__SIZE +
1742 GEN6_3DSTATE_DEPTH_BUFFER__SIZE +
1743 GEN6_3DSTATE_POLY_STIPPLE_OFFSET__SIZE +
1744 GEN6_3DSTATE_POLY_STIPPLE_PATTERN__SIZE +
1745 GEN6_3DSTATE_LINE_STIPPLE__SIZE +
1746 GEN6_3DSTATE_AA_LINE_PARAMETERS__SIZE +
1747 GEN6_3DSTATE_MULTISAMPLE__SIZE +
1748 GEN6_3DSTATE_STENCIL_BUFFER__SIZE +
1749 GEN6_3DSTATE_HIER_DEPTH_BUFFER__SIZE +
1750 GEN6_3DSTATE_CLEAR_PARAMS__SIZE +
1751 GEN6_3DPRIMITIVE__SIZE;
1752 }
1753
1754 return size;
1755 }
1756
1757 int
1758 gen6_pipeline_estimate_state_size(const struct ilo_3d_pipeline *p,
1759 const struct ilo_context *ilo)
1760 {
1761 static int static_size;
1762 int sh_type, size;
1763
1764 if (!static_size) {
1765 /* 64 bytes, or 16 dwords */
1766 const int alignment = 64 / 4;
1767
1768 /* pad first */
1769 size = alignment - 1;
1770
1771 /* CC states */
1772 size += align(GEN6_BLEND_STATE__SIZE * ILO_MAX_DRAW_BUFFERS, alignment);
1773 size += align(GEN6_DEPTH_STENCIL_STATE__SIZE, alignment);
1774 size += align(GEN6_COLOR_CALC_STATE__SIZE, alignment);
1775
1776 /* viewport arrays */
1777 if (p->dev->gen >= ILO_GEN(7)) {
1778 size +=
1779 align(GEN7_SF_CLIP_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 16) +
1780 align(GEN6_CC_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1781 align(GEN6_SCISSOR_RECT__SIZE * ILO_MAX_VIEWPORTS, 8);
1782 }
1783 else {
1784 size +=
1785 align(GEN6_SF_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1786 align(GEN6_CLIP_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1787 align(GEN6_CC_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1788 align(GEN6_SCISSOR_RECT__SIZE * ILO_MAX_VIEWPORTS, 8);
1789 }
1790
1791 static_size = size;
1792 }
1793
1794 size = static_size;
1795
1796 for (sh_type = 0; sh_type < PIPE_SHADER_TYPES; sh_type++) {
1797 const int alignment = 32 / 4;
1798 int num_samplers, num_surfaces, pcb_size;
1799
1800 /* samplers */
1801 num_samplers = ilo->sampler[sh_type].count;
1802
1803 /* sampler views and constant buffers */
1804 num_surfaces = ilo->view[sh_type].count +
1805 util_bitcount(ilo->cbuf[sh_type].enabled_mask);
1806
1807 pcb_size = 0;
1808
1809 switch (sh_type) {
1810 case PIPE_SHADER_VERTEX:
1811 if (ilo->vs) {
1812 if (p->dev->gen == ILO_GEN(6)) {
1813 const struct pipe_stream_output_info *so_info =
1814 ilo_shader_get_kernel_so_info(ilo->vs);
1815
1816 /* stream outputs */
1817 num_surfaces += so_info->num_outputs;
1818 }
1819
1820 pcb_size = ilo_shader_get_kernel_param(ilo->vs,
1821 ILO_KERNEL_PCB_CBUF0_SIZE);
1822 pcb_size += ilo_shader_get_kernel_param(ilo->vs,
1823 ILO_KERNEL_VS_PCB_UCP_SIZE);
1824 }
1825 break;
1826 case PIPE_SHADER_GEOMETRY:
1827 if (ilo->gs && p->dev->gen == ILO_GEN(6)) {
1828 const struct pipe_stream_output_info *so_info =
1829 ilo_shader_get_kernel_so_info(ilo->gs);
1830
1831 /* stream outputs */
1832 num_surfaces += so_info->num_outputs;
1833 }
1834 break;
1835 case PIPE_SHADER_FRAGMENT:
1836 /* render targets */
1837 num_surfaces += ilo->fb.state.nr_cbufs;
1838
1839 if (ilo->fs) {
1840 pcb_size = ilo_shader_get_kernel_param(ilo->fs,
1841 ILO_KERNEL_PCB_CBUF0_SIZE);
1842 }
1843 break;
1844 default:
1845 break;
1846 }
1847
1848 /* SAMPLER_STATE array and SAMPLER_BORDER_COLORs */
1849 if (num_samplers) {
1850 size += align(GEN6_SAMPLER_STATE__SIZE * num_samplers, alignment) +
1851 align(GEN6_SAMPLER_BORDER_COLOR__SIZE, alignment) * num_samplers;
1852 }
1853
1854 /* BINDING_TABLE_STATE and SURFACE_STATEs */
1855 if (num_surfaces) {
1856 size += align(num_surfaces, alignment) +
1857 align(GEN6_SURFACE_STATE__SIZE, alignment) * num_surfaces;
1858 }
1859
1860 /* PCB */
1861 if (pcb_size)
1862 size += align(pcb_size, alignment);
1863 }
1864
1865 return size;
1866 }
1867
1868 static int
1869 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1870 enum ilo_3d_pipeline_action action,
1871 const void *arg)
1872 {
1873 int size;
1874
1875 switch (action) {
1876 case ILO_3D_PIPELINE_DRAW:
1877 {
1878 const struct ilo_context *ilo = arg;
1879
1880 size = gen6_pipeline_max_command_size(p) +
1881 gen6_pipeline_estimate_state_size(p, ilo);
1882 }
1883 break;
1884 case ILO_3D_PIPELINE_FLUSH:
1885 size = GEN6_PIPE_CONTROL__SIZE * 3;
1886 break;
1887 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1888 size = GEN6_PIPE_CONTROL__SIZE * 2;
1889 break;
1890 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1891 size = GEN6_PIPE_CONTROL__SIZE * 3;
1892 break;
1893 case ILO_3D_PIPELINE_WRITE_STATISTICS:
1894 {
1895 const int num_regs = 8;
1896 const int num_pads = 3;
1897
1898 size = GEN6_PIPE_CONTROL__SIZE;
1899 size += GEN6_MI_STORE_REGISTER_MEM__SIZE * 2 * num_regs;
1900 size += GEN6_MI_STORE_DATA_IMM__SIZE * num_pads;
1901 }
1902 break;
1903 case ILO_3D_PIPELINE_RECTLIST:
1904 size = 64 + 256; /* states + commands */
1905 break;
1906 default:
1907 assert(!"unknown 3D pipeline action");
1908 size = 0;
1909 break;
1910 }
1911
1912 return size;
1913 }
1914
1915 void
1916 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1917 {
1918 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1919 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1920 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1921 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1922 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1923 p->emit_write_statistics = ilo_3d_pipeline_emit_write_statistics_gen6;
1924 p->emit_rectlist = ilo_3d_pipeline_emit_rectlist_gen6;
1925 }