1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "genhw/genhw.h"
29 #include "util/u_dual_blend.h"
30 #include "util/u_prim.h"
31
32 #include "ilo_blitter.h"
33 #include "ilo_3d.h"
34 #include "ilo_context.h"
35 #include "ilo_cp.h"
36 #include "ilo_gpe_gen6.h"
37 #include "ilo_gpe_gen7.h"
38 #include "ilo_shader.h"
39 #include "ilo_state.h"
40 #include "ilo_3d_pipeline.h"
41 #include "ilo_3d_pipeline_gen6.h"
42
43 /**
44 * This should be called before any depth stall flush (including those
45 * produced by non-pipelined state commands) or cache flush on GEN6.
46 *
47 * \see intel_emit_post_sync_nonzero_flush()
48 */
49 static void
50 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
51 bool caller_post_sync)
52 {
53 assert(p->dev->gen == ILO_GEN(6));
54
55 /* emit once */
56 if (p->state.has_gen6_wa_pipe_control)
57 return;
58
59 p->state.has_gen6_wa_pipe_control = true;
60
61 /*
62 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
63 *
64 * "Pipe-control with CS-stall bit set must be sent BEFORE the
65 * pipe-control with a post-sync op and no write-cache flushes."
66 *
67 * The post-sync PIPE_CONTROL emitted below necessitates this CS-stall one.
68 */
69 gen6_emit_PIPE_CONTROL(p->dev,
70 PIPE_CONTROL_CS_STALL |
71 PIPE_CONTROL_STALL_AT_SCOREBOARD,
72 NULL, 0, false, p->cp);
73
74 /* the caller will emit the post-sync op */
75 if (caller_post_sync)
76 return;
77
78 /*
79 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
80 *
81 * "Before any depth stall flush (including those produced by
82 * non-pipelined state commands), software needs to first send a
83 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
84 *
85 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
86 * PIPE_CONTROL with any non-zero post-sync-op is required."
87 */
88 gen6_emit_PIPE_CONTROL(p->dev,
89 PIPE_CONTROL_WRITE_IMMEDIATE,
90 p->workaround_bo, 0, false, p->cp);
91 }
92
93 static void
94 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
95 {
96 assert(p->dev->gen == ILO_GEN(6));
97
98 gen6_wa_pipe_control_post_sync(p, false);
99
100 /*
101 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
102 *
103 * "Driver must guarentee that all the caches in the depth pipe are
104 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
105 * requires driver to send a PIPE_CONTROL with a CS stall along with a
106 * Depth Flush prior to this command."
107 */
108 gen6_emit_PIPE_CONTROL(p->dev,
109 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
110 PIPE_CONTROL_CS_STALL,
111 NULL, 0, false, p->cp);
112 }
113
114 static void
115 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
116 {
117 assert(p->dev->gen == ILO_GEN(6));
118
119 gen6_wa_pipe_control_post_sync(p, false);
120
121 /*
122 * According to intel_emit_depth_stall_flushes() of classic i965, we need
123 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
124 * commands.
125 */
126 gen6_emit_PIPE_CONTROL(p->dev,
127 PIPE_CONTROL_DEPTH_STALL,
128 NULL, 0, false, p->cp);
129
130 gen6_emit_PIPE_CONTROL(p->dev,
131 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
132 NULL, 0, false, p->cp);
133
134 gen6_emit_PIPE_CONTROL(p->dev,
135 PIPE_CONTROL_DEPTH_STALL,
136 NULL, 0, false, p->cp);
137 }
138
139 static void
140 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
141 {
142 assert(p->dev->gen == ILO_GEN(6));
143
144 /* the post-sync workaround should cover this already */
145 if (p->state.has_gen6_wa_pipe_control)
146 return;
147
148 /*
149 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
150 *
151 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
152 * field set (DW1 Bit 1), must be issued prior to any change to the
153 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
154 */
155 gen6_emit_PIPE_CONTROL(p->dev,
156 PIPE_CONTROL_STALL_AT_SCOREBOARD,
157 NULL, 0, false, p->cp);
158
159 }
160
161 static void
162 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
163 {
164 assert(p->dev->gen == ILO_GEN(6));
165
166 gen6_wa_pipe_control_post_sync(p, false);
167
168 /*
169 * According to upload_vs_state() of classic i965, we need to emit a
170 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command is kept
171 * buffered by the VS FF, to the point that the FF dies.
172 */
173 gen6_emit_PIPE_CONTROL(p->dev,
174 PIPE_CONTROL_DEPTH_STALL |
175 PIPE_CONTROL_INSTRUCTION_FLUSH |
176 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
177 NULL, 0, false, p->cp);
178 }
179
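/* true when the given pipe state, e.g. DIRTY(FB), is flagged dirty in this session */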
180 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
181
182 void
183 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
184 const struct ilo_context *ilo,
185 struct gen6_pipeline_session *session)
186 {
187 /* PIPELINE_SELECT */
188 if (session->hw_ctx_changed) {
189 if (p->dev->gen == ILO_GEN(6))
190 gen6_wa_pipe_control_post_sync(p, false);
191
192 gen6_emit_PIPELINE_SELECT(p->dev, 0x0, p->cp);
193 }
194 }
195
196 void
197 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
198 const struct ilo_context *ilo,
199 struct gen6_pipeline_session *session)
200 {
201 /* STATE_SIP */
202 if (session->hw_ctx_changed) {
203 if (p->dev->gen == ILO_GEN(6))
204 gen6_wa_pipe_control_post_sync(p, false);
205
206 gen6_emit_STATE_SIP(p->dev, 0, p->cp);
207 }
208 }
209
210 void
211 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
212 const struct ilo_context *ilo,
213 struct gen6_pipeline_session *session)
214 {
215 /* STATE_BASE_ADDRESS */
216 if (session->state_bo_changed || session->kernel_bo_changed ||
217 session->batch_bo_changed) {
218 if (p->dev->gen == ILO_GEN(6))
219 gen6_wa_pipe_control_post_sync(p, false);
220
221 gen6_emit_STATE_BASE_ADDRESS(p->dev,
222 NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
223 0, 0, 0, 0, p->cp);
224
225 /*
226 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
227 *
228 * "The following commands must be reissued following any change to
229 * the base addresses:
230 *
231 * * 3DSTATE_BINDING_TABLE_POINTERS
232 * * 3DSTATE_SAMPLER_STATE_POINTERS
233 * * 3DSTATE_VIEWPORT_STATE_POINTERS
234 * * 3DSTATE_CC_POINTERS
235 * * MEDIA_STATE_POINTERS"
236 *
237 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
238 * reasonable to also reissue the command.  The same applies to the PCB.
239 */
240 session->viewport_state_changed = true;
241
242 session->cc_state_blend_changed = true;
243 session->cc_state_dsa_changed = true;
244 session->cc_state_cc_changed = true;
245
246 session->scissor_state_changed = true;
247
248 session->binding_table_vs_changed = true;
249 session->binding_table_gs_changed = true;
250 session->binding_table_fs_changed = true;
251
252 session->sampler_state_vs_changed = true;
253 session->sampler_state_gs_changed = true;
254 session->sampler_state_fs_changed = true;
255
256 session->pcb_state_vs_changed = true;
257 session->pcb_state_gs_changed = true;
258 session->pcb_state_fs_changed = true;
259 }
260 }
261
262 static void
263 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
264 const struct ilo_context *ilo,
265 struct gen6_pipeline_session *session)
266 {
267 /* 3DSTATE_URB */
268 if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
269 const bool gs_active = (ilo->gs || (ilo->vs &&
270 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_GEN6_SO)));
271 int vs_entry_size, gs_entry_size;
272 int vs_total_size, gs_total_size;
273
274 vs_entry_size = (ilo->vs) ?
275 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;
276
277 /*
278 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
279 * share VUE handles. The VUE allocation size must be large enough to
280 * store both VF outputs (the number of VERTEX_ELEMENTs) and VS outputs.
281 *
282 * I am not sure if the PRM explicitly states that VF and VS share VUE
283 * handles. But here is a citation that implies so:
284 *
285 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
286 *
287 * "Once a FF stage that spawn threads has sufficient input to
288 * initiate a thread, it must guarantee that it is safe to request
289 * the thread initiation. For all these FF stages, this check is
290 * based on :
291 *
292 * - The availability of output URB entries:
293 * - VS: As the input URB entries are overwritten with the
294 * VS-generated output data, output URB availability isn't a
295 * factor."
296 */
297 if (vs_entry_size < ilo->ve->count)
298 vs_entry_size = ilo->ve->count;
299
300 gs_entry_size = (ilo->gs) ?
301 ilo_shader_get_kernel_param(ilo->gs, ILO_KERNEL_OUTPUT_COUNT) :
302 (gs_active) ? vs_entry_size : 0;
303
304 /* in bytes */
305 vs_entry_size *= sizeof(float) * 4;
306 gs_entry_size *= sizeof(float) * 4;
307 vs_total_size = ilo->dev->urb_size;
308
309 if (gs_active) {
310 vs_total_size /= 2;
311 gs_total_size = vs_total_size;
312 }
313 else {
314 gs_total_size = 0;
315 }
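/*
 * A worked example with hypothetical numbers: a VS entry of 8 attributes
 * takes 8 * 16 = 128 bytes.  With a 64 KB URB and GS active, VS and GS
 * each get 32 KB of it; without GS, VS gets the whole URB.
 */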
316
317 gen6_emit_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
318 vs_entry_size, gs_entry_size, p->cp);
319
320 /*
321 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
322 *
323 * "Because of a urb corruption caused by allocating a previous
324 * gsunit's urb entry to vsunit software is required to send a
325 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
326 * size == 0) plus a dummy DRAW call before any case where VS will
327 * be taking over GS URB space."
328 */
329 if (p->state.gs.active && !gs_active)
330 ilo_3d_pipeline_emit_flush_gen6(p);
331
332 p->state.gs.active = gs_active;
333 }
334 }
335
336 static void
337 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
338 const struct ilo_context *ilo,
339 struct gen6_pipeline_session *session)
340 {
341 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
342 if (session->viewport_state_changed) {
343 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
344 p->state.CLIP_VIEWPORT,
345 p->state.SF_VIEWPORT,
346 p->state.CC_VIEWPORT, p->cp);
347 }
348 }
349
350 static void
351 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
352 const struct ilo_context *ilo,
353 struct gen6_pipeline_session *session)
354 {
355 /* 3DSTATE_CC_STATE_POINTERS */
356 if (session->cc_state_blend_changed ||
357 session->cc_state_dsa_changed ||
358 session->cc_state_cc_changed) {
359 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev,
360 p->state.BLEND_STATE,
361 p->state.DEPTH_STENCIL_STATE,
362 p->state.COLOR_CALC_STATE, p->cp);
363 }
364
365 /* 3DSTATE_SAMPLER_STATE_POINTERS */
366 if (session->sampler_state_vs_changed ||
367 session->sampler_state_gs_changed ||
368 session->sampler_state_fs_changed) {
369 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
370 p->state.vs.SAMPLER_STATE,
371 0,
372 p->state.wm.SAMPLER_STATE, p->cp);
373 }
374 }
375
376 static void
377 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
378 const struct ilo_context *ilo,
379 struct gen6_pipeline_session *session)
380 {
381 /* 3DSTATE_SCISSOR_STATE_POINTERS */
382 if (session->scissor_state_changed) {
383 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
384 p->state.SCISSOR_RECT, p->cp);
385 }
386
387 /* 3DSTATE_BINDING_TABLE_POINTERS */
388 if (session->binding_table_vs_changed ||
389 session->binding_table_gs_changed ||
390 session->binding_table_fs_changed) {
391 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
392 p->state.vs.BINDING_TABLE_STATE,
393 p->state.gs.BINDING_TABLE_STATE,
394 p->state.wm.BINDING_TABLE_STATE, p->cp);
395 }
396 }
397
398 void
399 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
400 const struct ilo_context *ilo,
401 struct gen6_pipeline_session *session)
402 {
403 if (p->dev->gen >= ILO_GEN(7.5)) {
404 /* 3DSTATE_INDEX_BUFFER */
405 if (DIRTY(IB) || session->batch_bo_changed) {
406 gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
407 &ilo->ib, false, p->cp);
408 }
409
410 /* 3DSTATE_VF */
411 if (session->primitive_restart_changed) {
412 gen7_emit_3DSTATE_VF(p->dev, ilo->draw->primitive_restart,
413 ilo->draw->restart_index, p->cp);
414 }
415 }
416 else {
417 /* 3DSTATE_INDEX_BUFFER */
418 if (DIRTY(IB) || session->primitive_restart_changed ||
419 session->batch_bo_changed) {
420 gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
421 &ilo->ib, ilo->draw->primitive_restart, p->cp);
422 }
423 }
424
425 /* 3DSTATE_VERTEX_BUFFERS */
426 if (DIRTY(VB) || DIRTY(VE) || session->batch_bo_changed)
427 gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev, ilo->ve, &ilo->vb, p->cp);
428
429 /* 3DSTATE_VERTEX_ELEMENTS */
430 if (DIRTY(VE) || DIRTY(VS)) {
431 const struct ilo_ve_state *ve = ilo->ve;
432 bool last_velement_edgeflag = false;
433 bool prepend_generate_ids = false;
434
435 if (ilo->vs) {
436 if (ilo_shader_get_kernel_param(ilo->vs,
437 ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
438 /* we rely on the state tracker here */
439 assert(ilo_shader_get_kernel_param(ilo->vs,
440 ILO_KERNEL_INPUT_COUNT) == ve->count);
441
442 last_velement_edgeflag = true;
443 }
444
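/*
 * when the VS reads InstanceID or VertexID, ask the 3DSTATE_VERTEX_ELEMENTS
 * setup to prepend an element that generates them
 */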
445 if (ilo_shader_get_kernel_param(ilo->vs,
446 ILO_KERNEL_VS_INPUT_INSTANCEID) ||
447 ilo_shader_get_kernel_param(ilo->vs,
448 ILO_KERNEL_VS_INPUT_VERTEXID))
449 prepend_generate_ids = true;
450 }
451
452 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
453 last_velement_edgeflag, prepend_generate_ids, p->cp);
454 }
455 }
456
457 void
458 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
459 const struct ilo_context *ilo,
460 struct gen6_pipeline_session *session)
461 {
462 /* 3DSTATE_VF_STATISTICS */
463 if (session->hw_ctx_changed)
464 gen6_emit_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
465 }
466
467 static void
468 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
469 const struct ilo_context *ilo,
470 struct gen6_pipeline_session *session)
471 {
472 /* 3DPRIMITIVE */
473 gen6_emit_3DPRIMITIVE(p->dev, ilo->draw, &ilo->ib, false, p->cp);
474 p->state.has_gen6_wa_pipe_control = false;
475 }
476
477 void
478 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
479 const struct ilo_context *ilo,
480 struct gen6_pipeline_session *session)
481 {
482 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(SAMPLER_VS) ||
483 session->kernel_bo_changed);
484 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
485
486 /*
487 * the classic i965 does this in upload_vs_state(), citing a spec that I
488 * cannot find
489 */
490 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
491 gen6_wa_pipe_control_post_sync(p, false);
492
493 /* 3DSTATE_CONSTANT_VS */
494 if (emit_3dstate_constant_vs) {
495 gen6_emit_3DSTATE_CONSTANT_VS(p->dev,
496 &p->state.vs.PUSH_CONSTANT_BUFFER,
497 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
498 1, p->cp);
499 }
500
501 /* 3DSTATE_VS */
502 if (emit_3dstate_vs) {
503 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
504
505 gen6_emit_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
506 }
507
508 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
509 gen6_wa_pipe_control_vs_const_flush(p);
510 }
511
512 static void
513 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
514 const struct ilo_context *ilo,
515 struct gen6_pipeline_session *session)
516 {
517 /* 3DSTATE_CONSTANT_GS */
518 if (session->pcb_state_gs_changed)
519 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
520
521 /* 3DSTATE_GS */
522 if (DIRTY(GS) || DIRTY(VS) ||
523 session->prim_changed || session->kernel_bo_changed) {
524 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
525
526 gen6_emit_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
527 }
528 }
529
530 bool
531 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
532 const struct ilo_context *ilo,
533 struct gen6_pipeline_session *session)
534 {
535 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
536 const struct pipe_stream_output_info *so_info =
537 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
538 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
539 unsigned max_svbi = 0xffffffff;
540 int i;
541
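/*
 * A worked example with hypothetical numbers: a 1000-byte buffer, a 16-byte
 * vertex stride, and a 4-byte element give 62 complete vertices plus an
 * 8-byte remainder that still fits the element, so the count is 63.  The
 * maximum SVBI is the smallest such count over all outputs.
 */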
542 for (i = 0; i < so_info->num_outputs; i++) {
543 const int output_buffer = so_info->output[i].output_buffer;
544 const struct pipe_stream_output_target *so =
545 ilo->so.states[output_buffer];
546 const int struct_size = so_info->stride[output_buffer] * 4;
547 const int elem_size = so_info->output[i].num_components * 4;
548 int buf_size, count;
549
550 if (!so) {
551 max_svbi = 0;
552 break;
553 }
554
555 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
556
557 count = buf_size / struct_size;
558 if (buf_size % struct_size >= elem_size)
559 count++;
560
561 if (count < max_svbi)
562 max_svbi = count;
563 }
564
565 if (p->state.so_max_vertices != max_svbi) {
566 p->state.so_max_vertices = max_svbi;
567 return true;
568 }
569 }
570
571 return false;
572 }
573
574 static void
575 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
576 const struct ilo_context *ilo,
577 struct gen6_pipeline_session *session)
578 {
579 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
580
581 /* 3DSTATE_GS_SVB_INDEX */
582 if (emit) {
583 if (p->dev->gen == ILO_GEN(6))
584 gen6_wa_pipe_control_post_sync(p, false);
585
586 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
587 0, p->state.so_num_vertices, p->state.so_max_vertices,
588 false, p->cp);
589
590 if (session->hw_ctx_changed) {
591 int i;
592
593 /*
594 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
595 *
596 * "If a buffer is not enabled then the SVBI must be set to 0x0
597 * in order to not cause overflow in that SVBI."
598 *
599 * "If a buffer is not enabled then the MaxSVBI must be set to
600 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
601 */
602 for (i = 1; i < 4; i++) {
603 gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
604 i, 0, 0xffffffff, false, p->cp);
605 }
606 }
607 }
608 }
609
610 void
611 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
612 const struct ilo_context *ilo,
613 struct gen6_pipeline_session *session)
614 {
615 /* 3DSTATE_CLIP */
616 if (DIRTY(RASTERIZER) || DIRTY(FS) || DIRTY(VIEWPORT) || DIRTY(FB)) {
617 bool enable_guardband = true;
618 unsigned i;
619
620 /*
621 * We do not do 2D clipping yet.  The guard band test should only be
622 * enabled when every viewport is at least as large as the framebuffer.
623 */
624 for (i = 0; i < ilo->viewport.count; i++) {
625 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
626
627 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
628 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
629 enable_guardband = false;
630 break;
631 }
632 }
633
634 gen6_emit_3DSTATE_CLIP(p->dev, ilo->rasterizer,
635 ilo->fs, enable_guardband, 1, p->cp);
636 }
637 }
638
639 static void
640 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
641 const struct ilo_context *ilo,
642 struct gen6_pipeline_session *session)
643 {
644 /* 3DSTATE_SF */
645 if (DIRTY(RASTERIZER) || DIRTY(FS))
646 gen6_emit_3DSTATE_SF(p->dev, ilo->rasterizer, ilo->fs, p->cp);
647 }
648
649 void
650 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
651 const struct ilo_context *ilo,
652 struct gen6_pipeline_session *session)
653 {
654 /* 3DSTATE_DRAWING_RECTANGLE */
655 if (DIRTY(FB)) {
656 if (p->dev->gen == ILO_GEN(6))
657 gen6_wa_pipe_control_post_sync(p, false);
658
659 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
660 ilo->fb.state.width, ilo->fb.state.height, p->cp);
661 }
662 }
663
664 static void
665 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
666 const struct ilo_context *ilo,
667 struct gen6_pipeline_session *session)
668 {
669 /* 3DSTATE_CONSTANT_PS */
670 if (session->pcb_state_fs_changed) {
671 gen6_emit_3DSTATE_CONSTANT_PS(p->dev,
672 &p->state.wm.PUSH_CONSTANT_BUFFER,
673 &p->state.wm.PUSH_CONSTANT_BUFFER_size,
674 1, p->cp);
675 }
676
677 /* 3DSTATE_WM */
678 if (DIRTY(FS) || DIRTY(SAMPLER_FS) || DIRTY(BLEND) || DIRTY(DSA) ||
679 DIRTY(RASTERIZER) || session->kernel_bo_changed) {
680 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
681 const bool dual_blend = ilo->blend->dual_blend;
682 const bool cc_may_kill = (ilo->dsa->dw_alpha ||
683 ilo->blend->alpha_to_coverage);
684
685 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
686 gen6_wa_pipe_control_wm_max_threads_stall(p);
687
688 gen6_emit_3DSTATE_WM(p->dev, ilo->fs, num_samplers,
689 ilo->rasterizer, dual_blend, cc_may_kill, 0, p->cp);
690 }
691 }
692
693 static void
694 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
695 const struct ilo_context *ilo,
696 struct gen6_pipeline_session *session)
697 {
698 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
699 if (DIRTY(SAMPLE_MASK) || DIRTY(FB)) {
700 const uint32_t *packed_sample_pos;
701
702 packed_sample_pos = (ilo->fb.num_samples > 1) ?
703 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
704
705 if (p->dev->gen == ILO_GEN(6)) {
706 gen6_wa_pipe_control_post_sync(p, false);
707 gen6_wa_pipe_control_wm_multisample_flush(p);
708 }
709
710 gen6_emit_3DSTATE_MULTISAMPLE(p->dev,
711 ilo->fb.num_samples, packed_sample_pos,
712 ilo->rasterizer->state.half_pixel_center, p->cp);
713
714 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
715 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
716 }
717 }
718
719 static void
720 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
721 const struct ilo_context *ilo,
722 struct gen6_pipeline_session *session)
723 {
724 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
725 if (DIRTY(FB) || session->batch_bo_changed) {
726 const struct ilo_zs_surface *zs;
727 struct ilo_zs_surface layer;
728 uint32_t clear_params;
729
730 if (ilo->fb.state.zsbuf) {
731 const struct ilo_surface_cso *surface =
732 (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;
733 const struct ilo_texture_slice *slice =
734 ilo_texture_get_slice(ilo_texture(surface->base.texture),
735 surface->base.u.tex.level, surface->base.u.tex.first_layer);
736
737 if (ilo->fb.offset_to_layers) {
738 assert(surface->base.u.tex.first_layer ==
739 surface->base.u.tex.last_layer);
740
741 ilo_gpe_init_zs_surface(ilo->dev,
742 ilo_texture(surface->base.texture),
743 surface->base.format, surface->base.u.tex.level,
744 surface->base.u.tex.first_layer, 1, true, &layer);
745
746 zs = &layer;
747 }
748 else {
749 assert(!surface->is_rt);
750 zs = &surface->u.zs;
751 }
752
753 clear_params = slice->clear_value;
754 }
755 else {
756 zs = &ilo->fb.null_zs;
757 clear_params = 0;
758 }
759
760 if (p->dev->gen == ILO_GEN(6)) {
761 gen6_wa_pipe_control_post_sync(p, false);
762 gen6_wa_pipe_control_wm_depth_flush(p);
763 }
764
765 gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);
766 gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev, zs, p->cp);
767 gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev, zs, p->cp);
768 gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev, clear_params, p->cp);
769 }
770 }
771
772 void
773 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
774 const struct ilo_context *ilo,
775 struct gen6_pipeline_session *session)
776 {
777 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
778 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
779 ilo->rasterizer->state.poly_stipple_enable) {
780 if (p->dev->gen == ILO_GEN(6))
781 gen6_wa_pipe_control_post_sync(p, false);
782
783 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
784 &ilo->poly_stipple, p->cp);
785
786 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
787 }
788
789 /* 3DSTATE_LINE_STIPPLE */
790 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
791 if (p->dev->gen == ILO_GEN(6))
792 gen6_wa_pipe_control_post_sync(p, false);
793
794 gen6_emit_3DSTATE_LINE_STIPPLE(p->dev,
795 ilo->rasterizer->state.line_stipple_pattern,
796 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
797 }
798
799 /* 3DSTATE_AA_LINE_PARAMETERS */
800 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
801 if (p->dev->gen == ILO_GEN(6))
802 gen6_wa_pipe_control_post_sync(p, false);
803
804 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
805 }
806 }
807
808 static void
809 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
810 const struct ilo_context *ilo,
811 struct gen6_pipeline_session *session)
812 {
813 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
814 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
815 p->state.SF_CLIP_VIEWPORT = gen7_emit_SF_CLIP_VIEWPORT(p->dev,
816 ilo->viewport.cso, ilo->viewport.count, p->cp);
817
818 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
819 ilo->viewport.cso, ilo->viewport.count, p->cp);
820
821 session->viewport_state_changed = true;
822 }
823 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
824 else if (DIRTY(VIEWPORT)) {
825 p->state.CLIP_VIEWPORT = gen6_emit_CLIP_VIEWPORT(p->dev,
826 ilo->viewport.cso, ilo->viewport.count, p->cp);
827
828 p->state.SF_VIEWPORT = gen6_emit_SF_VIEWPORT(p->dev,
829 ilo->viewport.cso, ilo->viewport.count, p->cp);
830
831 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
832 ilo->viewport.cso, ilo->viewport.count, p->cp);
833
834 session->viewport_state_changed = true;
835 }
836 }
837
838 static void
839 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
840 const struct ilo_context *ilo,
841 struct gen6_pipeline_session *session)
842 {
843 /* BLEND_STATE */
844 if (DIRTY(BLEND) || DIRTY(FB) || DIRTY(DSA)) {
845 p->state.BLEND_STATE = gen6_emit_BLEND_STATE(p->dev,
846 ilo->blend, &ilo->fb, ilo->dsa, p->cp);
847
848 session->cc_state_blend_changed = true;
849 }
850
851 /* COLOR_CALC_STATE */
852 if (DIRTY(DSA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
853 p->state.COLOR_CALC_STATE =
854 gen6_emit_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
855 ilo->dsa->alpha_ref, &ilo->blend_color, p->cp);
856
857 session->cc_state_cc_changed = true;
858 }
859
860 /* DEPTH_STENCIL_STATE */
861 if (DIRTY(DSA)) {
862 p->state.DEPTH_STENCIL_STATE =
863 gen6_emit_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
864
865 session->cc_state_dsa_changed = true;
866 }
867 }
868
869 static void
870 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
871 const struct ilo_context *ilo,
872 struct gen6_pipeline_session *session)
873 {
874 /* SCISSOR_RECT */
875 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
876 /* there should be as many scissors as there are viewports */
877 p->state.SCISSOR_RECT = gen6_emit_SCISSOR_RECT(p->dev,
878 &ilo->scissor, ilo->viewport.count, p->cp);
879
880 session->scissor_state_changed = true;
881 }
882 }
883
884 static void
885 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
886 const struct ilo_context *ilo,
887 struct gen6_pipeline_session *session)
888 {
889 /* SURFACE_STATEs for render targets */
890 if (DIRTY(FB)) {
891 const struct ilo_fb_state *fb = &ilo->fb;
892 const int offset = ILO_WM_DRAW_SURFACE(0);
893 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
894 int i;
895
896 for (i = 0; i < fb->state.nr_cbufs; i++) {
897 const struct ilo_surface_cso *surface =
898 (const struct ilo_surface_cso *) fb->state.cbufs[i];
899
900 if (!surface) {
901 surface_state[i] =
902 gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);
903 }
904 else if (fb->offset_to_layers) {
905 struct ilo_view_surface layer;
906
907 assert(surface->base.u.tex.first_layer ==
908 surface->base.u.tex.last_layer);
909
910 ilo_gpe_init_view_surface_for_texture(ilo->dev,
911 ilo_texture(surface->base.texture),
912 surface->base.format,
913 surface->base.u.tex.level, 1,
914 surface->base.u.tex.first_layer, 1,
915 true, true, &layer);
916
917 surface_state[i] =
918 gen6_emit_SURFACE_STATE(p->dev, &layer, true, p->cp);
919 }
920 else {
921 assert(surface && surface->is_rt);
922 surface_state[i] =
923 gen6_emit_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
924 }
925 }
926
927 /*
928 * Upload at least one render target, as
929 * brw_update_renderbuffer_surfaces() does. I don't know why.
930 */
931 if (i == 0) {
932 surface_state[i] =
933 gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);
934
935 i++;
936 }
937
938 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
939
940 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
941 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
942
943 session->binding_table_fs_changed = true;
944 }
945 }
946
947 static void
948 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
949 const struct ilo_context *ilo,
950 struct gen6_pipeline_session *session)
951 {
952 const struct ilo_so_state *so = &ilo->so;
953
954 if (p->dev->gen != ILO_GEN(6))
955 return;
956
957 /* SURFACE_STATEs for stream output targets */
958 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
959 const struct pipe_stream_output_info *so_info =
960 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
961 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
962 const int offset = ILO_GS_SO_SURFACE(0);
963 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
964 int i;
965
966 for (i = 0; so_info && i < so_info->num_outputs; i++) {
967 const int target = so_info->output[i].output_buffer;
968 const struct pipe_stream_output_target *so_target =
969 (target < so->count) ? so->states[target] : NULL;
970
971 if (so_target) {
972 surface_state[i] = gen6_emit_so_SURFACE_STATE(p->dev,
973 so_target, so_info, i, p->cp);
974 }
975 else {
976 surface_state[i] = 0;
977 }
978 }
979
980 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
981
982 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
983 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
984
985 session->binding_table_gs_changed = true;
986 }
987 }
988
989 static void
990 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
991 const struct ilo_context *ilo,
992 int shader_type,
993 struct gen6_pipeline_session *session)
994 {
995 const struct ilo_view_state *view = &ilo->view[shader_type];
996 uint32_t *surface_state;
997 int offset, i;
998 bool skip = false;
999
1000 /* SURFACE_STATEs for sampler views */
1001 switch (shader_type) {
1002 case PIPE_SHADER_VERTEX:
1003 if (DIRTY(VIEW_VS)) {
1004 offset = ILO_VS_TEXTURE_SURFACE(0);
1005 surface_state = &p->state.vs.SURFACE_STATE[offset];
1006
1007 session->binding_table_vs_changed = true;
1008 }
1009 else {
1010 skip = true;
1011 }
1012 break;
1013 case PIPE_SHADER_FRAGMENT:
1014 if (DIRTY(VIEW_FS)) {
1015 offset = ILO_WM_TEXTURE_SURFACE(0);
1016 surface_state = &p->state.wm.SURFACE_STATE[offset];
1017
1018 session->binding_table_fs_changed = true;
1019 }
1020 else {
1021 skip = true;
1022 }
1023 break;
1024 default:
1025 skip = true;
1026 break;
1027 }
1028
1029 if (skip)
1030 return;
1031
1032 for (i = 0; i < view->count; i++) {
1033 if (view->states[i]) {
1034 const struct ilo_view_cso *cso =
1035 (const struct ilo_view_cso *) view->states[i];
1036
1037 surface_state[i] =
1038 gen6_emit_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
1039 }
1040 else {
1041 surface_state[i] = 0;
1042 }
1043 }
1044
1045 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
1046
1047 if (i && session->num_surfaces[shader_type] < offset + i)
1048 session->num_surfaces[shader_type] = offset + i;
1049 }
1050
1051 static void
1052 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1053 const struct ilo_context *ilo,
1054 int shader_type,
1055 struct gen6_pipeline_session *session)
1056 {
1057 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[shader_type];
1058 uint32_t *surface_state;
1059 bool *binding_table_changed;
1060 int offset, count, i;
1061
1062 if (!DIRTY(CBUF))
1063 return;
1064
1065 /* SURFACE_STATEs for constant buffers */
1066 switch (shader_type) {
1067 case PIPE_SHADER_VERTEX:
1068 offset = ILO_VS_CONST_SURFACE(0);
1069 surface_state = &p->state.vs.SURFACE_STATE[offset];
1070 binding_table_changed = &session->binding_table_vs_changed;
1071 break;
1072 case PIPE_SHADER_FRAGMENT:
1073 offset = ILO_WM_CONST_SURFACE(0);
1074 surface_state = &p->state.wm.SURFACE_STATE[offset];
1075 binding_table_changed = &session->binding_table_fs_changed;
1076 break;
1077 default:
1078 return;
1079 break;
1080 }
1081
1082 /* constants are pushed via PCB */
1083 if (cbuf->enabled_mask == 0x1 && !cbuf->cso[0].resource) {
1084 memset(surface_state, 0, ILO_MAX_CONST_BUFFERS * 4);
1085 return;
1086 }
1087
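/*
 * util_last_bit() returns one past the index of the highest enabled
 * constant buffer; disabled slots in between still get a zeroed entry below
 */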
1088 count = util_last_bit(cbuf->enabled_mask);
1089 for (i = 0; i < count; i++) {
1090 if (cbuf->cso[i].resource) {
1091 surface_state[i] = gen6_emit_SURFACE_STATE(p->dev,
1092 &cbuf->cso[i].surface, false, p->cp);
1093 }
1094 else {
1095 surface_state[i] = 0;
1096 }
1097 }
1098
1099 memset(&surface_state[count], 0, (ILO_MAX_CONST_BUFFERS - count) * 4);
1100
1101 if (count && session->num_surfaces[shader_type] < offset + count)
1102 session->num_surfaces[shader_type] = offset + count;
1103
1104 *binding_table_changed = true;
1105 }
1106
1107 static void
1108 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1109 const struct ilo_context *ilo,
1110 int shader_type,
1111 struct gen6_pipeline_session *session)
1112 {
1113 uint32_t *binding_table_state, *surface_state;
1114 int *binding_table_state_size, size;
1115 bool skip = false;
1116
1117 /* BINDING_TABLE_STATE */
1118 switch (shader_type) {
1119 case PIPE_SHADER_VERTEX:
1120 surface_state = p->state.vs.SURFACE_STATE;
1121 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1122 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1123
1124 skip = !session->binding_table_vs_changed;
1125 break;
1126 case PIPE_SHADER_GEOMETRY:
1127 surface_state = p->state.gs.SURFACE_STATE;
1128 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1129 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1130
1131 skip = !session->binding_table_gs_changed;
1132 break;
1133 case PIPE_SHADER_FRAGMENT:
1134 surface_state = p->state.wm.SURFACE_STATE;
1135 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1136 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1137
1138 skip = !session->binding_table_fs_changed;
1139 break;
1140 default:
1141 skip = true;
1142 break;
1143 }
1144
1145 if (skip)
1146 return;
1147
1148 /*
1149 * If we seemingly have fewer SURFACE_STATEs than before, it could be that
1150 * we did not touch those residing at the tail in this upload.  Loop over
1151 * them to figure out the real number of SURFACE_STATEs.
1152 */
1153 for (size = *binding_table_state_size;
1154 size > session->num_surfaces[shader_type]; size--) {
1155 if (surface_state[size - 1])
1156 break;
1157 }
1158 if (size < session->num_surfaces[shader_type])
1159 size = session->num_surfaces[shader_type];
1160
1161 *binding_table_state = gen6_emit_BINDING_TABLE_STATE(p->dev,
1162 surface_state, size, p->cp);
1163 *binding_table_state_size = size;
1164 }
1165
1166 static void
1167 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1168 const struct ilo_context *ilo,
1169 int shader_type,
1170 struct gen6_pipeline_session *session)
1171 {
1172 const struct ilo_sampler_cso * const *samplers =
1173 ilo->sampler[shader_type].cso;
1174 const struct pipe_sampler_view * const *views =
1175 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1176 const int num_samplers = ilo->sampler[shader_type].count;
1177 const int num_views = ilo->view[shader_type].count;
1178 uint32_t *sampler_state, *border_color_state;
1179 bool emit_border_color = false;
1180 bool skip = false;
1181
1182 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1183 switch (shader_type) {
1184 case PIPE_SHADER_VERTEX:
1185 if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
1186 sampler_state = &p->state.vs.SAMPLER_STATE;
1187 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1188
1189 if (DIRTY(SAMPLER_VS))
1190 emit_border_color = true;
1191
1192 session->sampler_state_vs_changed = true;
1193 }
1194 else {
1195 skip = true;
1196 }
1197 break;
1198 case PIPE_SHADER_FRAGMENT:
1199 if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
1200 sampler_state = &p->state.wm.SAMPLER_STATE;
1201 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1202
1203 if (DIRTY(SAMPLER_FS))
1204 emit_border_color = true;
1205
1206 session->sampler_state_fs_changed = true;
1207 }
1208 else {
1209 skip = true;
1210 }
1211 break;
1212 default:
1213 skip = true;
1214 break;
1215 }
1216
1217 if (skip)
1218 return;
1219
1220 if (emit_border_color) {
1221 int i;
1222
1223 for (i = 0; i < num_samplers; i++) {
1224 border_color_state[i] = (samplers[i]) ?
1225 gen6_emit_SAMPLER_BORDER_COLOR_STATE(p->dev,
1226 samplers[i], p->cp) : 0;
1227 }
1228 }
1229
1230 /* should we take the minimum of num_samplers and num_views? */
1231 *sampler_state = gen6_emit_SAMPLER_STATE(p->dev,
1232 samplers, views,
1233 border_color_state,
1234 MIN2(num_samplers, num_views), p->cp);
1235 }
1236
1237 static void
1238 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1239 const struct ilo_context *ilo,
1240 struct gen6_pipeline_session *session)
1241 {
1242 /* push constant buffer for VS */
1243 if (DIRTY(VS) || DIRTY(CBUF) || DIRTY(CLIP)) {
1244 const int cbuf0_size = (ilo->vs) ?
1245 ilo_shader_get_kernel_param(ilo->vs,
1246 ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1247 const int clip_state_size = (ilo->vs) ?
1248 ilo_shader_get_kernel_param(ilo->vs,
1249 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1250 const int total_size = cbuf0_size + clip_state_size;
1251
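/*
 * the VS push constant buffer is laid out as the contents of constant
 * buffer 0 followed by the user clip planes, as copied below
 */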
1252 if (total_size) {
1253 void *pcb;
1254
1255 p->state.vs.PUSH_CONSTANT_BUFFER =
1256 gen6_emit_push_constant_buffer(p->dev, total_size, &pcb, p->cp);
1257 p->state.vs.PUSH_CONSTANT_BUFFER_size = total_size;
1258
1259 if (cbuf0_size) {
1260 const struct ilo_cbuf_state *cbuf =
1261 &ilo->cbuf[PIPE_SHADER_VERTEX];
1262
1263 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1264 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1265 }
1266 else {
1267 memcpy(pcb, cbuf->cso[0].user_buffer,
1268 cbuf->cso[0].user_buffer_size);
1269 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1270 cbuf0_size - cbuf->cso[0].user_buffer_size);
1271 }
1272
1273 pcb += cbuf0_size;
1274 }
1275
1276 if (clip_state_size)
1277 memcpy(pcb, &ilo->clip, clip_state_size);
1278
1279 session->pcb_state_vs_changed = true;
1280 }
1281 else if (p->state.vs.PUSH_CONSTANT_BUFFER_size) {
1282 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1283 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1284
1285 session->pcb_state_vs_changed = true;
1286 }
1287 }
1288
1289 /* push constant buffer for FS */
1290 if (DIRTY(FS) || DIRTY(CBUF)) {
1291 const int cbuf0_size = (ilo->fs) ?
1292 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1293
1294 if (cbuf0_size) {
1295 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[PIPE_SHADER_FRAGMENT];
1296 void *pcb;
1297
1298 p->state.wm.PUSH_CONSTANT_BUFFER =
1299 gen6_emit_push_constant_buffer(p->dev, cbuf0_size, &pcb, p->cp);
1300 p->state.wm.PUSH_CONSTANT_BUFFER_size = cbuf0_size;
1301
1302 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1303 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1304 }
1305 else {
1306 memcpy(pcb, cbuf->cso[0].user_buffer,
1307 cbuf->cso[0].user_buffer_size);
1308 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1309 cbuf0_size - cbuf->cso[0].user_buffer_size);
1310 }
1311
1312 session->pcb_state_fs_changed = true;
1313 }
1314 else if (p->state.wm.PUSH_CONSTANT_BUFFER_size) {
1315 p->state.wm.PUSH_CONSTANT_BUFFER = 0;
1316 p->state.wm.PUSH_CONSTANT_BUFFER_size = 0;
1317
1318 session->pcb_state_fs_changed = true;
1319 }
1320 }
1321 }
1322
1323 #undef DIRTY
1324
1325 static void
1326 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1327 const struct ilo_context *ilo,
1328 struct gen6_pipeline_session *session)
1329 {
1330 /*
1331 * We try to keep the order of the commands matching that of the classic
1332 * i965 driver as closely as possible, so that the command streams can be
1333 * compared easily.
1334 */
1335 gen6_pipeline_common_select(p, ilo, session);
1336 gen6_pipeline_gs_svbi(p, ilo, session);
1337 gen6_pipeline_common_sip(p, ilo, session);
1338 gen6_pipeline_vf_statistics(p, ilo, session);
1339 gen6_pipeline_common_base_address(p, ilo, session);
1340 gen6_pipeline_common_pointers_1(p, ilo, session);
1341 gen6_pipeline_common_urb(p, ilo, session);
1342 gen6_pipeline_common_pointers_2(p, ilo, session);
1343 gen6_pipeline_wm_multisample(p, ilo, session);
1344 gen6_pipeline_vs(p, ilo, session);
1345 gen6_pipeline_gs(p, ilo, session);
1346 gen6_pipeline_clip(p, ilo, session);
1347 gen6_pipeline_sf(p, ilo, session);
1348 gen6_pipeline_wm(p, ilo, session);
1349 gen6_pipeline_common_pointers_3(p, ilo, session);
1350 gen6_pipeline_wm_depth(p, ilo, session);
1351 gen6_pipeline_wm_raster(p, ilo, session);
1352 gen6_pipeline_sf_rect(p, ilo, session);
1353 gen6_pipeline_vf(p, ilo, session);
1354 gen6_pipeline_vf_draw(p, ilo, session);
1355 }
1356
1357 void
1358 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1359 const struct ilo_context *ilo,
1360 struct gen6_pipeline_session *session)
1361 {
1362 int shader_type;
1363
1364 gen6_pipeline_state_viewports(p, ilo, session);
1365 gen6_pipeline_state_cc(p, ilo, session);
1366 gen6_pipeline_state_scissors(p, ilo, session);
1367 gen6_pipeline_state_pcb(p, ilo, session);
1368
1369 /*
1370 * upload all SURFACE_STATEs together so that we know there is minimal
1371 * padding
1372 */
1373 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1374 gen6_pipeline_state_surfaces_so(p, ilo, session);
1375 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1376 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1377 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1378 }
1379
1380 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1381 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1382 /* this must be called after all SURFACE_STATEs are uploaded */
1383 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1384 }
1385 }
1386
1387 void
1388 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1389 const struct ilo_context *ilo,
1390 struct gen6_pipeline_session *session)
1391 {
1392 memset(session, 0, sizeof(*session));
1393 session->pipe_dirty = ilo->dirty;
1394 session->reduced_prim = u_reduced_prim(ilo->draw->mode);
1395
1396 /* available space before the session */
1397 session->init_cp_space = ilo_cp_space(p->cp);
1398
1399 session->hw_ctx_changed =
1400 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1401
1402 if (session->hw_ctx_changed) {
1403 /* these should be enough to force everything to be re-uploaded */
1404 session->batch_bo_changed = true;
1405 session->state_bo_changed = true;
1406 session->kernel_bo_changed = true;
1407 session->prim_changed = true;
1408 session->primitive_restart_changed = true;
1409 }
1410 else {
1411 /*
1412 * Any state that involves resources needs to be re-emitted when the
1413 * batch bo changed. This is because we do not pin the resources and
1414 * their offsets (or existence) may change between batch buffers.
1415 *
1416 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1417 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1418 * a temporary workaround.
1419 */
1420 session->batch_bo_changed =
1421 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1422
1423 session->state_bo_changed =
1424 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1425 session->kernel_bo_changed =
1426 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1427 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1428 session->primitive_restart_changed =
1429 (p->state.primitive_restart != ilo->draw->primitive_restart);
1430 }
1431 }
1432
1433 void
1434 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1435 const struct ilo_context *ilo,
1436 struct gen6_pipeline_session *session)
1437 {
1438 /* force all states to be uploaded if the state bo changed */
1439 if (session->state_bo_changed)
1440 session->pipe_dirty = ILO_DIRTY_ALL;
1441 else
1442 session->pipe_dirty = ilo->dirty;
1443
1444 session->emit_draw_states(p, ilo, session);
1445
1446 /* force all commands to be uploaded if the HW context changed */
1447 if (session->hw_ctx_changed)
1448 session->pipe_dirty = ILO_DIRTY_ALL;
1449 else
1450 session->pipe_dirty = ilo->dirty;
1451
1452 session->emit_draw_commands(p, ilo, session);
1453 }
1454
1455 void
1456 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1457 const struct ilo_context *ilo,
1458 struct gen6_pipeline_session *session)
1459 {
1460 /* sanity check size estimation */
1461 assert(session->init_cp_space - ilo_cp_space(p->cp) <=
1462 ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo));
1463
1464 p->state.reduced_prim = session->reduced_prim;
1465 p->state.primitive_restart = ilo->draw->primitive_restart;
1466 }
1467
1468 static void
1469 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1470 const struct ilo_context *ilo)
1471 {
1472 struct gen6_pipeline_session session;
1473
1474 gen6_pipeline_prepare(p, ilo, &session);
1475
1476 session.emit_draw_states = gen6_pipeline_states;
1477 session.emit_draw_commands = gen6_pipeline_commands;
1478
1479 gen6_pipeline_draw(p, ilo, &session);
1480 gen6_pipeline_end(p, ilo, &session);
1481 }
1482
1483 void
1484 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1485 {
1486 if (p->dev->gen == ILO_GEN(6))
1487 gen6_wa_pipe_control_post_sync(p, false);
1488
1489 gen6_emit_PIPE_CONTROL(p->dev,
1490 PIPE_CONTROL_INSTRUCTION_FLUSH |
1491 PIPE_CONTROL_WRITE_FLUSH |
1492 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1493 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1494 PIPE_CONTROL_TC_FLUSH |
1495 PIPE_CONTROL_NO_WRITE |
1496 PIPE_CONTROL_CS_STALL,
1497 NULL, 0, false, p->cp);
1498 }
1499
1500 void
1501 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1502 struct intel_bo *bo, int index)
1503 {
1504 if (p->dev->gen == ILO_GEN(6))
1505 gen6_wa_pipe_control_post_sync(p, true);
1506
1507 gen6_emit_PIPE_CONTROL(p->dev,
1508 PIPE_CONTROL_WRITE_TIMESTAMP,
1509 bo, index * sizeof(uint64_t),
1510 true, p->cp);
1511 }
1512
1513 void
1514 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1515 struct intel_bo *bo, int index)
1516 {
1517 if (p->dev->gen == ILO_GEN(6))
1518 gen6_wa_pipe_control_post_sync(p, false);
1519
1520 gen6_emit_PIPE_CONTROL(p->dev,
1521 PIPE_CONTROL_DEPTH_STALL |
1522 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1523 bo, index * sizeof(uint64_t),
1524 true, p->cp);
1525 }
1526
1527 void
1528 ilo_3d_pipeline_emit_write_statistics_gen6(struct ilo_3d_pipeline *p,
1529 struct intel_bo *bo, int index)
1530 {
1531 uint32_t regs[] = {
1532 IA_VERTICES_COUNT,
1533 IA_PRIMITIVES_COUNT,
1534 VS_INVOCATION_COUNT,
1535 GS_INVOCATION_COUNT,
1536 GS_PRIMITIVES_COUNT,
1537 CL_INVOCATION_COUNT,
1538 CL_PRIMITIVES_COUNT,
1539 PS_INVOCATION_COUNT,
1540 p->dev->gen >= ILO_GEN(7) ? HS_INVOCATION_COUNT : 0,
1541 p->dev->gen >= ILO_GEN(7) ? DS_INVOCATION_COUNT : 0,
1542 0,
1543 };
1544 int i;
1545
1546 p->emit_flush(p);
1547
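/*
 * a zero in regs[] marks a counter that does not exist on this gen; write a
 * zero to its slot instead so that the destination buffer keeps a fixed
 * layout
 */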
1548 for (i = 0; i < Elements(regs); i++) {
1549 const uint32_t bo_offset = (index + i) * sizeof(uint64_t);
1550
1551 if (regs[i]) {
1552 /* store lower 32 bits */
1553 gen6_emit_MI_STORE_REGISTER_MEM(p->dev,
1554 bo, bo_offset, regs[i], p->cp);
1555 /* store higher 32 bits */
1556 gen6_emit_MI_STORE_REGISTER_MEM(p->dev,
1557 bo, bo_offset + 4, regs[i] + 4, p->cp);
1558 }
1559 else {
1560 gen6_emit_MI_STORE_DATA_IMM(p->dev,
1561 bo, bo_offset, 0, true, p->cp);
1562 }
1563 }
1564 }
1565
1566 static void
1567 gen6_rectlist_vs_to_sf(struct ilo_3d_pipeline *p,
1568 const struct ilo_blitter *blitter,
1569 struct gen6_rectlist_session *session)
1570 {
1571 gen6_emit_3DSTATE_CONSTANT_VS(p->dev, NULL, NULL, 0, p->cp);
1572 gen6_emit_3DSTATE_VS(p->dev, NULL, 0, p->cp);
1573
1574 gen6_wa_pipe_control_vs_const_flush(p);
1575
1576 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
1577 gen6_emit_3DSTATE_GS(p->dev, NULL, NULL, 0, p->cp);
1578
1579 gen6_emit_3DSTATE_CLIP(p->dev, NULL, NULL, false, 0, p->cp);
1580 gen6_emit_3DSTATE_SF(p->dev, NULL, NULL, p->cp);
1581 }
1582
1583 static void
1584 gen6_rectlist_wm(struct ilo_3d_pipeline *p,
1585 const struct ilo_blitter *blitter,
1586 struct gen6_rectlist_session *session)
1587 {
1588 uint32_t hiz_op;
1589
1590 switch (blitter->op) {
1591 case ILO_BLITTER_RECTLIST_CLEAR_ZS:
1592 hiz_op = GEN6_WM_DEPTH_CLEAR;
1593 break;
1594 case ILO_BLITTER_RECTLIST_RESOLVE_Z:
1595 hiz_op = GEN6_WM_DEPTH_RESOLVE;
1596 break;
1597 case ILO_BLITTER_RECTLIST_RESOLVE_HIZ:
1598 hiz_op = GEN6_WM_HIERARCHICAL_DEPTH_RESOLVE;
1599 break;
1600 default:
1601 hiz_op = 0;
1602 break;
1603 }
1604
1605 gen6_emit_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
1606
1607 gen6_wa_pipe_control_wm_max_threads_stall(p);
1608 gen6_emit_3DSTATE_WM(p->dev, NULL, 0, NULL, false, false, hiz_op, p->cp);
1609 }
1610
1611 static void
1612 gen6_rectlist_wm_depth(struct ilo_3d_pipeline *p,
1613 const struct ilo_blitter *blitter,
1614 struct gen6_rectlist_session *session)
1615 {
1616 gen6_wa_pipe_control_wm_depth_flush(p);
1617
1618 if (blitter->uses & (ILO_BLITTER_USE_FB_DEPTH |
1619 ILO_BLITTER_USE_FB_STENCIL)) {
1620 gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev,
1621 &blitter->fb.dst.u.zs, p->cp);
1622 }
1623
1624 if (blitter->uses & ILO_BLITTER_USE_FB_DEPTH) {
1625 gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev,
1626 &blitter->fb.dst.u.zs, p->cp);
1627 }
1628
1629 if (blitter->uses & ILO_BLITTER_USE_FB_STENCIL) {
1630 gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev,
1631 &blitter->fb.dst.u.zs, p->cp);
1632 }
1633
1634 gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev,
1635 blitter->depth_clear_value, p->cp);
1636 }
1637
1638 static void
1639 gen6_rectlist_wm_multisample(struct ilo_3d_pipeline *p,
1640 const struct ilo_blitter *blitter,
1641 struct gen6_rectlist_session *session)
1642 {
1643 const uint32_t *packed_sample_pos = (blitter->fb.num_samples > 1) ?
1644 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
1645
1646 gen6_wa_pipe_control_wm_multisample_flush(p);
1647
1648 gen6_emit_3DSTATE_MULTISAMPLE(p->dev, blitter->fb.num_samples,
1649 packed_sample_pos, true, p->cp);
1650
1651 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
1652 (1 << blitter->fb.num_samples) - 1, p->cp);
1653 }
1654
1655 static void
1656 gen6_rectlist_commands(struct ilo_3d_pipeline *p,
1657 const struct ilo_blitter *blitter,
1658 struct gen6_rectlist_session *session)
1659 {
1660 gen6_wa_pipe_control_post_sync(p, false);
1661
1662 gen6_rectlist_wm_multisample(p, blitter, session);
1663
1664 gen6_emit_STATE_BASE_ADDRESS(p->dev,
1665 NULL, /* General State Base */
1666 p->cp->bo, /* Surface State Base */
1667 p->cp->bo, /* Dynamic State Base */
1668 NULL, /* Indirect Object Base */
1669 NULL, /* Instruction Base */
1670 0, 0, 0, 0, p->cp);
1671
1672 gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev,
1673 &blitter->ve, &blitter->vb, p->cp);
1674
1675 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev,
1676 &blitter->ve, false, false, p->cp);
1677
1678 gen6_emit_3DSTATE_URB(p->dev,
1679 p->dev->urb_size, 0, blitter->ve.count * 4 * sizeof(float), 0, p->cp);
1680 /* 3DSTATE_URB workaround */
1681 if (p->state.gs.active) {
1682 ilo_3d_pipeline_emit_flush_gen6(p);
1683 p->state.gs.active = false;
1684 }
1685
1686 if (blitter->uses &
1687 (ILO_BLITTER_USE_DSA | ILO_BLITTER_USE_CC)) {
1688 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev, 0,
1689 session->DEPTH_STENCIL_STATE, session->COLOR_CALC_STATE, p->cp);
1690 }
1691
1692 gen6_rectlist_vs_to_sf(p, blitter, session);
1693 gen6_rectlist_wm(p, blitter, session);
1694
1695 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1696 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
1697 0, 0, session->CC_VIEWPORT, p->cp);
1698 }
1699
1700 gen6_rectlist_wm_depth(p, blitter, session);
1701
1702 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
1703 blitter->fb.width, blitter->fb.height, p->cp);
1704
1705 gen6_emit_3DPRIMITIVE(p->dev, &blitter->draw, NULL, true, p->cp);
1706 }
1707
1708 static void
1709 gen6_rectlist_states(struct ilo_3d_pipeline *p,
1710 const struct ilo_blitter *blitter,
1711 struct gen6_rectlist_session *session)
1712 {
1713 if (blitter->uses & ILO_BLITTER_USE_DSA) {
1714 session->DEPTH_STENCIL_STATE =
1715 gen6_emit_DEPTH_STENCIL_STATE(p->dev, &blitter->dsa, p->cp);
1716 }
1717
1718 if (blitter->uses & ILO_BLITTER_USE_CC) {
1719 session->COLOR_CALC_STATE =
1720 gen6_emit_COLOR_CALC_STATE(p->dev, &blitter->cc.stencil_ref,
1721 blitter->cc.alpha_ref, &blitter->cc.blend_color, p->cp);
1722 }
1723
1724 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1725 session->CC_VIEWPORT =
1726 gen6_emit_CC_VIEWPORT(p->dev, &blitter->viewport, 1, p->cp);
1727 }
1728 }
1729
1730 static void
1731 ilo_3d_pipeline_emit_rectlist_gen6(struct ilo_3d_pipeline *p,
1732 const struct ilo_blitter *blitter)
1733 {
1734 struct gen6_rectlist_session session;
1735
1736 memset(&session, 0, sizeof(session));
1737 gen6_rectlist_states(p, blitter, &session);
1738 gen6_rectlist_commands(p, blitter, &session);
1739 }
1740
1741 static int
1742 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1743 const struct ilo_context *ilo)
1744 {
1745 static int size;
1746 enum ilo_gpe_gen6_command cmd;
1747
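/* the command estimate does not depend on the context, so compute it once and cache it */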
1748 if (size)
1749 return size;
1750
1751 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1752 int count;
1753
1754 switch (cmd) {
1755 case ILO_GPE_GEN6_PIPE_CONTROL:
1756 /* for the workaround */
1757 count = 2;
1758 /* another one after 3DSTATE_URB */
1759 count += 1;
1760 /* and another one after 3DSTATE_CONSTANT_VS */
1761 count += 1;
1762 break;
1763 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1764 /* there are 4 SVBIs */
1765 count = 4;
1766 break;
1767 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1768 count = 33;
1769 break;
1770 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1771 count = 34;
1772 break;
1773 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1774 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1775 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1776 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1777 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1778 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1779 /* media commands */
1780 count = 0;
1781 break;
1782 default:
1783 count = 1;
1784 break;
1785 }
1786
1787 if (count)
1788 size += ilo_gpe_gen6_estimate_command_size(p->dev, cmd, count);
1789 }
1790
1791 return size;
1792 }
1793
1794 static int
1795 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1796 const struct ilo_context *ilo)
1797 {
1798 static int static_size;
1799 int shader_type, count, size;
1800
1801 if (!static_size) {
1802 struct {
1803 enum ilo_gpe_gen6_state state;
1804 int count;
1805 } static_states[] = {
1806 /* viewports */
1807 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1808 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1809 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1810 /* cc */
1811 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1812 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1813 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1814 /* scissors */
1815 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1816 /* binding table (vs, gs, fs) */
1817 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1818 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1819 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
1820 };
1821 int i;
1822
1823 for (i = 0; i < Elements(static_states); i++) {
1824 static_size += ilo_gpe_gen6_estimate_state_size(p->dev,
1825 static_states[i].state,
1826 static_states[i].count);
1827 }
1828 }
1829
1830 size = static_size;
1831
1832 /*
1833 * render targets (fs)
1834 * stream outputs (gs)
1835 * sampler views (vs, fs)
1836 * constant buffers (vs, fs)
1837 */
1838 count = ilo->fb.state.nr_cbufs;
1839
1840 if (ilo->gs) {
1841 const struct pipe_stream_output_info *so_info =
1842 ilo_shader_get_kernel_so_info(ilo->gs);
1843
1844 count += so_info->num_outputs;
1845 }
1846 else if (ilo->vs) {
1847 const struct pipe_stream_output_info *so_info =
1848 ilo_shader_get_kernel_so_info(ilo->vs);
1849
1850 count += so_info->num_outputs;
1851 }
1852
1853 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1854 count += ilo->view[shader_type].count;
1855 count += util_bitcount(ilo->cbuf[shader_type].enabled_mask);
1856 }
1857
1858 if (count) {
1859 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1860 ILO_GPE_GEN6_SURFACE_STATE, count);
1861 }
1862
1863 /* samplers (vs, fs) */
1864 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1865 count = ilo->sampler[shader_type].count;
1866 if (count) {
1867 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1868 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1869 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1870 ILO_GPE_GEN6_SAMPLER_STATE, count);
1871 }
1872 }
1873
1874 /* pcb (vs) */
1875 if (ilo->vs) {
1876 const int cbuf0_size =
1877 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_PCB_CBUF0_SIZE);
1878 const int ucp_size =
1879 ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE);
1880
1881 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1882 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size + ucp_size);
1883 }
1884
1885 /* pcb (fs) */
1886 if (ilo->fs) {
1887 const int cbuf0_size =
1888 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE);
1889
1890 size += ilo_gpe_gen6_estimate_state_size(p->dev,
1891 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size);
1892 }
1893
1894 return size;
1895 }
1896
1897 static int
1898 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1899 enum ilo_3d_pipeline_action action,
1900 const void *arg)
1901 {
1902 int size;
1903
1904 switch (action) {
1905 case ILO_3D_PIPELINE_DRAW:
1906 {
1907 const struct ilo_context *ilo = arg;
1908
1909 size = gen6_pipeline_estimate_commands(p, ilo) +
1910 gen6_pipeline_estimate_states(p, ilo);
1911 }
1912 break;
1913 case ILO_3D_PIPELINE_FLUSH:
1914 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1915 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1916 break;
1917 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1918 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1919 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1920 break;
1921 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1922 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1923 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1924 break;
1925 case ILO_3D_PIPELINE_WRITE_STATISTICS:
1926 {
1927 const int num_regs = 8;
1928 const int num_pads = 3;
1929
1930 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1931 ILO_GPE_GEN6_PIPE_CONTROL, 1);
1932 size += ilo_gpe_gen6_estimate_command_size(p->dev,
1933 ILO_GPE_GEN6_MI_STORE_REGISTER_MEM, 1) * 2 * num_regs;
1934 size += ilo_gpe_gen6_estimate_command_size(p->dev,
1935 ILO_GPE_GEN6_MI_STORE_DATA_IMM, 1) * num_pads;
1936 }
1937 break;
1938 case ILO_3D_PIPELINE_RECTLIST:
1939 size = 64 + 256; /* states + commands */
1940 break;
1941 default:
1942 assert(!"unknown 3D pipeline action");
1943 size = 0;
1944 break;
1945 }
1946
1947 return size;
1948 }
1949
1950 void
1951 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1952 {
1953 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1954 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1955 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1956 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1957 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1958 p->emit_write_statistics = ilo_3d_pipeline_emit_write_statistics_gen6;
1959 p->emit_rectlist = ilo_3d_pipeline_emit_rectlist_gen6;
1960 }