src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c — GEN6 3D pipeline state emission for the Mesa ilo driver
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_blitter.h"
33 #include "ilo_3d.h"
34 #include "ilo_context.h"
35 #include "ilo_cp.h"
36 #include "ilo_gpe_gen6.h"
37 #include "ilo_gpe_gen7.h"
38 #include "ilo_shader.h"
39 #include "ilo_state.h"
40 #include "ilo_3d_pipeline.h"
41 #include "ilo_3d_pipeline_gen6.h"
42
43 /**
44 * This should be called before any depth stall flush (including those
45 * produced by non-pipelined state commands) or cache flush on GEN6.
46 *
47 * \see intel_emit_post_sync_nonzero_flush()
48 */
static void
gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
                               bool caller_post_sync)
{
   assert(p->dev->gen == ILO_GEN(6));

   /* emit once (until cleared after the next 3DPRIMITIVE) */
   if (p->state.has_gen6_wa_pipe_control)
      return;

   p->state.has_gen6_wa_pipe_control = true;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Pipe-control with CS-stall bit set must be sent BEFORE the
    *      pipe-control with a post-sync op and no write-cache flushes."
    *
    * The workaround below necessitates this workaround.
    */
   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_CS_STALL |
         PIPE_CONTROL_STALL_AT_SCOREBOARD,
         NULL, 0, false, p->cp);

   /* the caller will emit the post-sync op; skip the one below */
   if (caller_post_sync)
      return;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Before any depth stall flush (including those produced by
    *      non-pipelined state commands), software needs to first send a
    *      PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
    *
    *     "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
    *      PIPE_CONTROL with any non-zero post-sync-op is required."
    */
   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_IMMEDIATE,
         p->workaround_bo, 0, false, p->cp);
}
92
93 static void
94 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
95 {
96 assert(p->dev->gen == ILO_GEN(6));
97
98 gen6_wa_pipe_control_post_sync(p, false);
99
100 /*
101 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
102 *
103 * "Driver must guarentee that all the caches in the depth pipe are
104 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
105 * requires driver to send a PIPE_CONTROL with a CS stall along with a
106 * Depth Flush prior to this command."
107 */
108 gen6_emit_PIPE_CONTROL(p->dev,
109 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
110 PIPE_CONTROL_CS_STALL,
111 0, 0, false, p->cp);
112 }
113
114 static void
115 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
116 {
117 assert(p->dev->gen == ILO_GEN(6));
118
119 gen6_wa_pipe_control_post_sync(p, false);
120
121 /*
122 * According to intel_emit_depth_stall_flushes() of classic i965, we need
123 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
124 * commands.
125 */
126 gen6_emit_PIPE_CONTROL(p->dev,
127 PIPE_CONTROL_DEPTH_STALL,
128 NULL, 0, false, p->cp);
129
130 gen6_emit_PIPE_CONTROL(p->dev,
131 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
132 NULL, 0, false, p->cp);
133
134 gen6_emit_PIPE_CONTROL(p->dev,
135 PIPE_CONTROL_DEPTH_STALL,
136 NULL, 0, false, p->cp);
137 }
138
139 static void
140 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
141 {
142 assert(p->dev->gen == ILO_GEN(6));
143
144 /* the post-sync workaround should cover this already */
145 if (p->state.has_gen6_wa_pipe_control)
146 return;
147
148 /*
149 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
150 *
151 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
152 * field set (DW1 Bit 1), must be issued prior to any change to the
153 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
154 */
155 gen6_emit_PIPE_CONTROL(p->dev,
156 PIPE_CONTROL_STALL_AT_SCOREBOARD,
157 NULL, 0, false, p->cp);
158
159 }
160
161 static void
162 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
163 {
164 assert(p->dev->gen == ILO_GEN(6));
165
166 gen6_wa_pipe_control_post_sync(p, false);
167
168 /*
169 * According to upload_vs_state() of classic i965, we need to emit
170 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
171 * buffered by VS FF, to the point that the FF dies.
172 */
173 gen6_emit_PIPE_CONTROL(p->dev,
174 PIPE_CONTROL_DEPTH_STALL |
175 PIPE_CONTROL_INSTRUCTION_FLUSH |
176 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
177 NULL, 0, false, p->cp);
178 }
179
180 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
181
182 void
183 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
184 const struct ilo_context *ilo,
185 struct gen6_pipeline_session *session)
186 {
187 /* PIPELINE_SELECT */
188 if (session->hw_ctx_changed) {
189 if (p->dev->gen == ILO_GEN(6))
190 gen6_wa_pipe_control_post_sync(p, false);
191
192 gen6_emit_PIPELINE_SELECT(p->dev, 0x0, p->cp);
193 }
194 }
195
196 void
197 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
198 const struct ilo_context *ilo,
199 struct gen6_pipeline_session *session)
200 {
201 /* STATE_SIP */
202 if (session->hw_ctx_changed) {
203 if (p->dev->gen == ILO_GEN(6))
204 gen6_wa_pipe_control_post_sync(p, false);
205
206 gen6_emit_STATE_SIP(p->dev, 0, p->cp);
207 }
208 }
209
/*
 * Emit STATE_BASE_ADDRESS and mark every base-address-relative state for
 * re-emission.
 */
void
gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
                                  const struct ilo_context *ilo,
                                  struct gen6_pipeline_session *session)
{
   /* STATE_BASE_ADDRESS */
   if (session->state_bo_changed || session->kernel_bo_changed ||
       session->batch_bo_changed) {
      /* GEN6 wants the post-sync workaround before this command */
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      gen6_emit_STATE_BASE_ADDRESS(p->dev,
            NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
            0, 0, 0, 0, p->cp);

      /*
       * From the Sandy Bridge PRM, volume 1 part 1, page 28:
       *
       *     "The following commands must be reissued following any change to
       *      the base addresses:
       *
       *       * 3DSTATE_BINDING_TABLE_POINTERS
       *       * 3DSTATE_SAMPLER_STATE_POINTERS
       *       * 3DSTATE_VIEWPORT_STATE_POINTERS
       *       * 3DSTATE_CC_POINTERS
       *       * MEDIA_STATE_POINTERS"
       *
       * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
       * reasonable to also reissue the command.  Same to PCB.
       */
      session->viewport_state_changed = true;

      session->cc_state_blend_changed = true;
      session->cc_state_dsa_changed = true;
      session->cc_state_cc_changed = true;

      session->scissor_state_changed = true;

      session->binding_table_vs_changed = true;
      session->binding_table_gs_changed = true;
      session->binding_table_fs_changed = true;

      session->sampler_state_vs_changed = true;
      session->sampler_state_gs_changed = true;
      session->sampler_state_fs_changed = true;

      session->pcb_state_vs_changed = true;
      session->pcb_state_gs_changed = true;
      session->pcb_state_fs_changed = true;
   }
}
261
/*
 * Emit 3DSTATE_URB, (re)partitioning URB space between the VS and GS stages.
 */
static void
gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
                         const struct ilo_context *ilo,
                         struct gen6_pipeline_session *session)
{
   /* 3DSTATE_URB */
   if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
      /* GS also counts as active when the VS performs GEN6 stream output */
      const bool gs_active = (ilo->gs || (ilo->vs &&
               ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_GEN6_SO)));
      int vs_entry_size, gs_entry_size;
      int vs_total_size, gs_total_size;

      /* entry sizes start in units of vec4 attributes; scaled to bytes below */
      vs_entry_size = (ilo->vs) ?
         ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;

      /*
       * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
       * share VUE handles.  The VUE allocation size must be large enough to
       * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
       *
       * I am not sure if the PRM explicitly states that VF and VS share VUE
       * handles.  But here is a citation that implies so:
       *
       * From the Sandy Bridge PRM, volume 2 part 1, page 44:
       *
       *     "Once a FF stage that spawn threads has sufficient input to
       *      initiate a thread, it must guarantee that it is safe to request
       *      the thread initiation. For all these FF stages, this check is
       *      based on :
       *
       *      - The availability of output URB entries:
       *        - VS: As the input URB entries are overwritten with the
       *          VS-generated output data, output URB availability isn't a
       *          factor."
       */
      if (vs_entry_size < ilo->ve->count)
         vs_entry_size = ilo->ve->count;

      /* a GS that only exists for VS stream output reuses the VS entry size */
      gs_entry_size = (ilo->gs) ?
         ilo_shader_get_kernel_param(ilo->gs, ILO_KERNEL_OUTPUT_COUNT) :
         (gs_active) ? vs_entry_size : 0;

      /* in bytes */
      vs_entry_size *= sizeof(float) * 4;
      gs_entry_size *= sizeof(float) * 4;
      vs_total_size = ilo->dev->urb_size;

      /* split the URB evenly between VS and GS when GS is active */
      if (gs_active) {
         vs_total_size /= 2;
         gs_total_size = vs_total_size;
      }
      else {
         gs_total_size = 0;
      }

      gen6_emit_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
            vs_entry_size, gs_entry_size, p->cp);

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 27:
       *
       *     "Because of a urb corruption caused by allocating a previous
       *      gsunit's urb entry to vsunit software is required to send a
       *      "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
       *      size == 0) plus a dummy DRAW call before any case where VS will
       *      be taking over GS URB space."
       */
      if (p->state.gs.active && !gs_active)
         ilo_3d_pipeline_emit_flush_gen6(p);

      p->state.gs.active = gs_active;
   }
}
335
336 static void
337 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
338 const struct ilo_context *ilo,
339 struct gen6_pipeline_session *session)
340 {
341 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
342 if (session->viewport_state_changed) {
343 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
344 p->state.CLIP_VIEWPORT,
345 p->state.SF_VIEWPORT,
346 p->state.CC_VIEWPORT, p->cp);
347 }
348 }
349
350 static void
351 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
352 const struct ilo_context *ilo,
353 struct gen6_pipeline_session *session)
354 {
355 /* 3DSTATE_CC_STATE_POINTERS */
356 if (session->cc_state_blend_changed ||
357 session->cc_state_dsa_changed ||
358 session->cc_state_cc_changed) {
359 gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev,
360 p->state.BLEND_STATE,
361 p->state.DEPTH_STENCIL_STATE,
362 p->state.COLOR_CALC_STATE, p->cp);
363 }
364
365 /* 3DSTATE_SAMPLER_STATE_POINTERS */
366 if (session->sampler_state_vs_changed ||
367 session->sampler_state_gs_changed ||
368 session->sampler_state_fs_changed) {
369 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
370 p->state.vs.SAMPLER_STATE,
371 0,
372 p->state.wm.SAMPLER_STATE, p->cp);
373 }
374 }
375
376 static void
377 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
378 const struct ilo_context *ilo,
379 struct gen6_pipeline_session *session)
380 {
381 /* 3DSTATE_SCISSOR_STATE_POINTERS */
382 if (session->scissor_state_changed) {
383 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
384 p->state.SCISSOR_RECT, p->cp);
385 }
386
387 /* 3DSTATE_BINDING_TABLE_POINTERS */
388 if (session->binding_table_vs_changed ||
389 session->binding_table_gs_changed ||
390 session->binding_table_fs_changed) {
391 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
392 p->state.vs.BINDING_TABLE_STATE,
393 p->state.gs.BINDING_TABLE_STATE,
394 p->state.wm.BINDING_TABLE_STATE, p->cp);
395 }
396 }
397
/*
 * Emit vertex fetch state: index buffer, (GEN7.5+) 3DSTATE_VF, vertex
 * buffers, and vertex elements.
 */
void
gen6_pipeline_vf(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   if (p->dev->gen >= ILO_GEN(7.5)) {
      /* 3DSTATE_INDEX_BUFFER */
      if (DIRTY(IB) || session->batch_bo_changed) {
         gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
               &ilo->ib, false, p->cp);
      }

      /* 3DSTATE_VF: primitive restart has its own command on GEN7.5+ */
      if (session->primitive_restart_changed) {
         gen7_emit_3DSTATE_VF(p->dev, ilo->draw->primitive_restart,
               ilo->draw->restart_index, p->cp);
      }
   }
   else {
      /* 3DSTATE_INDEX_BUFFER: carries the restart flag before GEN7.5 */
      if (DIRTY(IB) || session->primitive_restart_changed ||
          session->batch_bo_changed) {
         gen6_emit_3DSTATE_INDEX_BUFFER(p->dev,
               &ilo->ib, ilo->draw->primitive_restart, p->cp);
      }
   }

   /* 3DSTATE_VERTEX_BUFFERS */
   if (DIRTY(VB) || DIRTY(VE) || session->batch_bo_changed)
      gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev, ilo->ve, &ilo->vb, p->cp);

   /* 3DSTATE_VERTEX_ELEMENTS */
   if (DIRTY(VE) || DIRTY(VS)) {
      const struct ilo_ve_state *ve = ilo->ve;
      bool last_velement_edgeflag = false;
      bool prepend_generate_ids = false;

      if (ilo->vs) {
         if (ilo_shader_get_kernel_param(ilo->vs,
                  ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
            /* we rely on the state tracker here */
            assert(ilo_shader_get_kernel_param(ilo->vs,
                     ILO_KERNEL_INPUT_COUNT) == ve->count);

            last_velement_edgeflag = true;
         }

         /* have the VF generate IDs when the VS reads InstanceID/VertexID */
         if (ilo_shader_get_kernel_param(ilo->vs,
                  ILO_KERNEL_VS_INPUT_INSTANCEID) ||
             ilo_shader_get_kernel_param(ilo->vs,
                  ILO_KERNEL_VS_INPUT_VERTEXID))
            prepend_generate_ids = true;
      }

      gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
            last_velement_edgeflag, prepend_generate_ids, p->cp);
   }
}
456
457 void
458 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
459 const struct ilo_context *ilo,
460 struct gen6_pipeline_session *session)
461 {
462 /* 3DSTATE_VF_STATISTICS */
463 if (session->hw_ctx_changed)
464 gen6_emit_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
465 }
466
/*
 * Emit 3DPRIMITIVE for the current draw.
 */
static void
gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   /* 3DPRIMITIVE */
   gen6_emit_3DPRIMITIVE(p->dev, ilo->draw, &ilo->ib, false, p->cp);
   /* clear the flag so gen6_wa_pipe_control_post_sync() re-emits its
    * workaround before the next batch of state commands */
   p->state.has_gen6_wa_pipe_control = false;
}
476
/*
 * Emit 3DSTATE_CONSTANT_VS and 3DSTATE_VS, bracketed by the GEN6
 * workarounds they require.
 */
void
gen6_pipeline_vs(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(SAMPLER_VS) ||
                                 session->kernel_bo_changed);
   const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;

   /*
    * the classic i965 does this in upload_vs_state(), citing a spec that I
    * cannot find
    */
   if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   /* 3DSTATE_CONSTANT_VS: a single push constant buffer */
   if (emit_3dstate_constant_vs) {
      gen6_emit_3DSTATE_CONSTANT_VS(p->dev,
            &p->state.vs.PUSH_CONSTANT_BUFFER,
            &p->state.vs.PUSH_CONSTANT_BUFFER_size,
            1, p->cp);
   }

   /* 3DSTATE_VS */
   if (emit_3dstate_vs) {
      const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;

      gen6_emit_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
   }

   /* flush so the new VS constants take effect (GEN6 workaround) */
   if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_vs_const_flush(p);
}
511
512 static void
513 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
514 const struct ilo_context *ilo,
515 struct gen6_pipeline_session *session)
516 {
517 /* 3DSTATE_CONSTANT_GS */
518 if (session->pcb_state_gs_changed)
519 gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
520
521 /* 3DSTATE_GS */
522 if (DIRTY(GS) || DIRTY(VS) ||
523 session->prim_changed || session->kernel_bo_changed) {
524 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
525
526 gen6_emit_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
527 }
528 }
529
530 bool
531 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
532 const struct ilo_context *ilo,
533 struct gen6_pipeline_session *session)
534 {
535 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
536 const struct pipe_stream_output_info *so_info =
537 (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
538 (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
539 unsigned max_svbi = 0xffffffff;
540 int i;
541
542 for (i = 0; i < so_info->num_outputs; i++) {
543 const int output_buffer = so_info->output[i].output_buffer;
544 const struct pipe_stream_output_target *so =
545 ilo->so.states[output_buffer];
546 const int struct_size = so_info->stride[output_buffer] * 4;
547 const int elem_size = so_info->output[i].num_components * 4;
548 int buf_size, count;
549
550 if (!so) {
551 max_svbi = 0;
552 break;
553 }
554
555 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
556
557 count = buf_size / struct_size;
558 if (buf_size % struct_size >= elem_size)
559 count++;
560
561 if (count < max_svbi)
562 max_svbi = count;
563 }
564
565 if (p->state.so_max_vertices != max_svbi) {
566 p->state.so_max_vertices = max_svbi;
567 return true;
568 }
569 }
570
571 return false;
572 }
573
/*
 * Emit 3DSTATE_GS_SVB_INDEX when the maximum SVBI changed.
 */
static void
gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);

   /* 3DSTATE_GS_SVB_INDEX */
   if (emit) {
      /* GEN6 wants the post-sync workaround before this command */
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
            0, p->state.so_num_vertices, p->state.so_max_vertices,
            false, p->cp);

      if (session->hw_ctx_changed) {
         int i;

         /*
          * From the Sandy Bridge PRM, volume 2 part 1, page 148:
          *
          *     "If a buffer is not enabled then the SVBI must be set to 0x0
          *      in order to not cause overflow in that SVBI."
          *
          *     "If a buffer is not enabled then the MaxSVBI must be set to
          *      0xFFFFFFFF in order to not cause overflow in that SVBI."
          */
         for (i = 1; i < 4; i++) {
            gen6_emit_3DSTATE_GS_SVB_INDEX(p->dev,
                  i, 0, 0xffffffff, false, p->cp);
         }
      }
   }
}
609
610 void
611 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
612 const struct ilo_context *ilo,
613 struct gen6_pipeline_session *session)
614 {
615 /* 3DSTATE_CLIP */
616 if (DIRTY(RASTERIZER) || DIRTY(FS) || DIRTY(VIEWPORT) || DIRTY(FB)) {
617 bool enable_guardband = true;
618 unsigned i;
619
620 /*
621 * We do not do 2D clipping yet. Guard band test should only be enabled
622 * when the viewport is larger than the framebuffer.
623 */
624 for (i = 0; i < ilo->viewport.count; i++) {
625 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
626
627 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
628 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
629 enable_guardband = false;
630 break;
631 }
632 }
633
634 gen6_emit_3DSTATE_CLIP(p->dev, ilo->rasterizer,
635 ilo->fs, enable_guardband, 1, p->cp);
636 }
637 }
638
639 static void
640 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
641 const struct ilo_context *ilo,
642 struct gen6_pipeline_session *session)
643 {
644 /* 3DSTATE_SF */
645 if (DIRTY(RASTERIZER) || DIRTY(FS))
646 gen6_emit_3DSTATE_SF(p->dev, ilo->rasterizer, ilo->fs, p->cp);
647 }
648
649 void
650 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
651 const struct ilo_context *ilo,
652 struct gen6_pipeline_session *session)
653 {
654 /* 3DSTATE_DRAWING_RECTANGLE */
655 if (DIRTY(FB)) {
656 if (p->dev->gen == ILO_GEN(6))
657 gen6_wa_pipe_control_post_sync(p, false);
658
659 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
660 ilo->fb.state.width, ilo->fb.state.height, p->cp);
661 }
662 }
663
/*
 * Emit 3DSTATE_CONSTANT_PS and 3DSTATE_WM.
 */
static void
gen6_pipeline_wm(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   /* 3DSTATE_CONSTANT_PS */
   if (session->pcb_state_fs_changed) {
      gen6_emit_3DSTATE_CONSTANT_PS(p->dev,
            &p->state.wm.PUSH_CONSTANT_BUFFER,
            &p->state.wm.PUSH_CONSTANT_BUFFER_size,
            1, p->cp);
   }

   /* 3DSTATE_WM */
   if (DIRTY(FS) || DIRTY(SAMPLER_FS) || DIRTY(BLEND) || DIRTY(DSA) ||
       DIRTY(RASTERIZER) || session->kernel_bo_changed) {
      const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
      const bool dual_blend = ilo->blend->dual_blend;
      /* alpha test or alpha-to-coverage may discard pixels in the CC stage */
      const bool cc_may_kill = (ilo->dsa->dw_alpha ||
                                ilo->blend->alpha_to_coverage);

      /* changing the WM max-thread count needs a scoreboard stall on GEN6 */
      if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
         gen6_wa_pipe_control_wm_max_threads_stall(p);

      gen6_emit_3DSTATE_WM(p->dev, ilo->fs, num_samplers,
            ilo->rasterizer, dual_blend, cc_may_kill, 0, p->cp);
   }
}
692
/*
 * Emit 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK.
 */
static void
gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
                             const struct ilo_context *ilo,
                             struct gen6_pipeline_session *session)
{
   /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
   if (DIRTY(SAMPLE_MASK) || DIRTY(FB)) {
      const uint32_t *packed_sample_pos;

      /* pick the 4x sample positions when multisampled, 1x otherwise */
      packed_sample_pos = (ilo->fb.num_samples > 1) ?
         &p->packed_sample_position_4x : &p->packed_sample_position_1x;

      /* 3DSTATE_MULTISAMPLE requires a depth flush on GEN6 */
      if (p->dev->gen == ILO_GEN(6)) {
         gen6_wa_pipe_control_post_sync(p, false);
         gen6_wa_pipe_control_wm_multisample_flush(p);
      }

      gen6_emit_3DSTATE_MULTISAMPLE(p->dev,
            ilo->fb.num_samples, packed_sample_pos,
            ilo->rasterizer->state.half_pixel_center, p->cp);

      /* the user sample mask applies only when multisampled */
      gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
            (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
   }
}
718
/*
 * Emit the depth/stencil buffer commands: 3DSTATE_DEPTH_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER, and
 * 3DSTATE_CLEAR_PARAMS.
 */
static void
gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
   if (DIRTY(FB) || session->batch_bo_changed) {
      const struct ilo_zs_surface *zs;
      struct ilo_zs_surface layer;   /* temporary single-layer view */

      if (ilo->fb.state.zsbuf) {
         const struct ilo_surface_cso *surface =
            (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;

         if (ilo->fb.offset_to_layers) {
            /* rebuild a surface that addresses the single bound layer */
            assert(surface->base.u.tex.first_layer ==
                   surface->base.u.tex.last_layer);

            ilo_gpe_init_zs_surface(ilo->dev,
                  ilo_texture(surface->base.texture),
                  surface->base.format, surface->base.u.tex.level,
                  surface->base.u.tex.first_layer, 1, true, &layer);

            zs = &layer;
         }
         else {
            assert(!surface->is_rt);
            zs = &surface->u.zs;
         }
      }
      else {
         /* no depth/stencil attachment: use the null surface */
         zs = &ilo->fb.null_zs;
      }

      /* depth commands need the GEN6 depth-flush workaround sequence */
      if (p->dev->gen == ILO_GEN(6)) {
         gen6_wa_pipe_control_post_sync(p, false);
         gen6_wa_pipe_control_wm_depth_flush(p);
      }

      gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);
      gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev, zs, p->cp);
      gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev, zs, p->cp);

      /* TODO */
      gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
   }
}
766
767 void
768 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
769 const struct ilo_context *ilo,
770 struct gen6_pipeline_session *session)
771 {
772 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
773 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
774 ilo->rasterizer->state.poly_stipple_enable) {
775 if (p->dev->gen == ILO_GEN(6))
776 gen6_wa_pipe_control_post_sync(p, false);
777
778 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
779 &ilo->poly_stipple, p->cp);
780
781 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
782 }
783
784 /* 3DSTATE_LINE_STIPPLE */
785 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
786 if (p->dev->gen == ILO_GEN(6))
787 gen6_wa_pipe_control_post_sync(p, false);
788
789 gen6_emit_3DSTATE_LINE_STIPPLE(p->dev,
790 ilo->rasterizer->state.line_stipple_pattern,
791 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
792 }
793
794 /* 3DSTATE_AA_LINE_PARAMETERS */
795 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
796 if (p->dev->gen == ILO_GEN(6))
797 gen6_wa_pipe_control_post_sync(p, false);
798
799 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
800 }
801 }
802
803 static void
804 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
805 const struct ilo_context *ilo,
806 struct gen6_pipeline_session *session)
807 {
808 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
809 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
810 p->state.SF_CLIP_VIEWPORT = gen7_emit_SF_CLIP_VIEWPORT(p->dev,
811 ilo->viewport.cso, ilo->viewport.count, p->cp);
812
813 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
814 ilo->viewport.cso, ilo->viewport.count, p->cp);
815
816 session->viewport_state_changed = true;
817 }
818 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
819 else if (DIRTY(VIEWPORT)) {
820 p->state.CLIP_VIEWPORT = gen6_emit_CLIP_VIEWPORT(p->dev,
821 ilo->viewport.cso, ilo->viewport.count, p->cp);
822
823 p->state.SF_VIEWPORT = gen6_emit_SF_VIEWPORT(p->dev,
824 ilo->viewport.cso, ilo->viewport.count, p->cp);
825
826 p->state.CC_VIEWPORT = gen6_emit_CC_VIEWPORT(p->dev,
827 ilo->viewport.cso, ilo->viewport.count, p->cp);
828
829 session->viewport_state_changed = true;
830 }
831 }
832
/*
 * Upload BLEND_STATE, COLOR_CALC_STATE, and DEPTH_STENCIL_STATE, recording
 * their offsets and marking the CC pointers for re-emission.
 */
static void
gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* BLEND_STATE */
   if (DIRTY(BLEND) || DIRTY(FB) || DIRTY(DSA)) {
      p->state.BLEND_STATE = gen6_emit_BLEND_STATE(p->dev,
            ilo->blend, &ilo->fb, ilo->dsa, p->cp);

      session->cc_state_blend_changed = true;
   }

   /* COLOR_CALC_STATE */
   if (DIRTY(DSA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
      p->state.COLOR_CALC_STATE =
         gen6_emit_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
               ilo->dsa->alpha_ref, &ilo->blend_color, p->cp);

      session->cc_state_cc_changed = true;
   }

   /* DEPTH_STENCIL_STATE */
   if (DIRTY(DSA)) {
      p->state.DEPTH_STENCIL_STATE =
         gen6_emit_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);

      session->cc_state_dsa_changed = true;
   }
}
863
864 static void
865 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
866 const struct ilo_context *ilo,
867 struct gen6_pipeline_session *session)
868 {
869 /* SCISSOR_RECT */
870 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
871 /* there should be as many scissors as there are viewports */
872 p->state.SCISSOR_RECT = gen6_emit_SCISSOR_RECT(p->dev,
873 &ilo->scissor, ilo->viewport.count, p->cp);
874
875 session->scissor_state_changed = true;
876 }
877 }
878
/*
 * Upload SURFACE_STATEs for the framebuffer's render targets.
 */
static void
gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo,
                                struct gen6_pipeline_session *session)
{
   /* SURFACE_STATEs for render targets */
   if (DIRTY(FB)) {
      const struct ilo_fb_state *fb = &ilo->fb;
      const int offset = ILO_WM_DRAW_SURFACE(0);
      uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
      int i;

      for (i = 0; i < fb->state.nr_cbufs; i++) {
         const struct ilo_surface_cso *surface =
            (const struct ilo_surface_cso *) fb->state.cbufs[i];

         if (!surface) {
            /* unbound color buffer: bind the null render target */
            surface_state[i] =
               gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);
         }
         else if (fb->offset_to_layers) {
            /* build a view that addresses the single bound layer directly */
            struct ilo_view_surface layer;

            assert(surface->base.u.tex.first_layer ==
                   surface->base.u.tex.last_layer);

            ilo_gpe_init_view_surface_for_texture(ilo->dev,
                  ilo_texture(surface->base.texture),
                  surface->base.format,
                  surface->base.u.tex.level, 1,
                  surface->base.u.tex.first_layer, 1,
                  true, true, &layer);

            surface_state[i] =
               gen6_emit_SURFACE_STATE(p->dev, &layer, true, p->cp);
         }
         else {
            assert(surface && surface->is_rt);
            surface_state[i] =
               gen6_emit_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
         }
      }

      /*
       * Upload at least one render target, as
       * brw_update_renderbuffer_surfaces() does.  I don't know why.
       */
      if (i == 0) {
         surface_state[i] =
            gen6_emit_SURFACE_STATE(p->dev, &fb->null_rt, true, p->cp);

         i++;
      }

      /* zero the unused entries (4 bytes per uint32_t handle) */
      memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);

      if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
         session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;

      session->binding_table_fs_changed = true;
   }
}
941
/*
 * Upload SURFACE_STATEs for the stream output targets.  GEN6 only.
 */
static void
gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo,
                                struct gen6_pipeline_session *session)
{
   const struct ilo_so_state *so = &ilo->so;

   if (p->dev->gen != ILO_GEN(6))
      return;

   /* SURFACE_STATEs for stream output targets */
   if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
      /* the GS, or the VS in its absence, defines the SO outputs */
      const struct pipe_stream_output_info *so_info =
         (ilo->gs) ? ilo_shader_get_kernel_so_info(ilo->gs) :
         (ilo->vs) ? ilo_shader_get_kernel_so_info(ilo->vs) : NULL;
      const int offset = ILO_GS_SO_SURFACE(0);
      uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
      int i;

      /* so_info may be NULL; the loop condition guards the dereference */
      for (i = 0; so_info && i < so_info->num_outputs; i++) {
         const int target = so_info->output[i].output_buffer;
         const struct pipe_stream_output_target *so_target =
            (target < so->count) ? so->states[target] : NULL;

         if (so_target) {
            surface_state[i] = gen6_emit_so_SURFACE_STATE(p->dev,
                  so_target, so_info, i, p->cp);
         }
         else {
            surface_state[i] = 0;
         }
      }

      /* zero the unused entries (4 bytes per uint32_t handle) */
      memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);

      if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
         session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;

      session->binding_table_gs_changed = true;
   }
}
983
/*
 * Upload SURFACE_STATEs for the sampler views of the given shader stage.
 * Only the vertex and fragment stages are handled; others are skipped.
 */
static void
gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
                                  const struct ilo_context *ilo,
                                  int shader_type,
                                  struct gen6_pipeline_session *session)
{
   const struct ilo_view_state *view = &ilo->view[shader_type];
   uint32_t *surface_state;
   int offset, i;
   bool skip = false;

   /* SURFACE_STATEs for sampler views */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(VIEW_VS)) {
         offset = ILO_VS_TEXTURE_SURFACE(0);
         surface_state = &p->state.vs.SURFACE_STATE[offset];

         session->binding_table_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(VIEW_FS)) {
         offset = ILO_WM_TEXTURE_SURFACE(0);
         surface_state = &p->state.wm.SURFACE_STATE[offset];

         session->binding_table_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      /* no sampler-view surfaces for other stages */
      skip = true;
      break;
   }

   if (skip)
      return;

   for (i = 0; i < view->count; i++) {
      if (view->states[i]) {
         const struct ilo_view_cso *cso =
            (const struct ilo_view_cso *) view->states[i];

         surface_state[i] =
            gen6_emit_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
      }
      else {
         /* unbound slot */
         surface_state[i] = 0;
      }
   }

   /* zero the unused entries (4 bytes per uint32_t handle) */
   memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);

   if (i && session->num_surfaces[shader_type] < offset + i)
      session->num_surfaces[shader_type] = offset + i;
}
1045
1046 static void
1047 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1048 const struct ilo_context *ilo,
1049 int shader_type,
1050 struct gen6_pipeline_session *session)
1051 {
1052 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[shader_type];
1053 uint32_t *surface_state;
1054 bool *binding_table_changed;
1055 int offset, count, i;
1056
1057 if (!DIRTY(CBUF))
1058 return;
1059
1060 /* SURFACE_STATEs for constant buffers */
1061 switch (shader_type) {
1062 case PIPE_SHADER_VERTEX:
1063 offset = ILO_VS_CONST_SURFACE(0);
1064 surface_state = &p->state.vs.SURFACE_STATE[offset];
1065 binding_table_changed = &session->binding_table_vs_changed;
1066 break;
1067 case PIPE_SHADER_FRAGMENT:
1068 offset = ILO_WM_CONST_SURFACE(0);
1069 surface_state = &p->state.wm.SURFACE_STATE[offset];
1070 binding_table_changed = &session->binding_table_fs_changed;
1071 break;
1072 default:
1073 return;
1074 break;
1075 }
1076
1077 /* constants are pushed via PCB */
1078 if (cbuf->enabled_mask == 0x1 && !cbuf->cso[0].resource) {
1079 memset(surface_state, 0, ILO_MAX_CONST_BUFFERS * 4);
1080 return;
1081 }
1082
1083 count = util_last_bit(cbuf->enabled_mask);
1084 for (i = 0; i < count; i++) {
1085 if (cbuf->cso[i].resource) {
1086 surface_state[i] = gen6_emit_SURFACE_STATE(p->dev,
1087 &cbuf->cso[i].surface, false, p->cp);
1088 }
1089 else {
1090 surface_state[i] = 0;
1091 }
1092 }
1093
1094 memset(&surface_state[count], 0, (ILO_MAX_CONST_BUFFERS - count) * 4);
1095
1096 if (count && session->num_surfaces[shader_type] < offset + count)
1097 session->num_surfaces[shader_type] = offset + count;
1098
1099 *binding_table_changed = true;
1100 }
1101
/**
 * Upload the BINDING_TABLE_STATE of a shader stage, sized to cover every
 * SURFACE_STATE that is still in use.  Must run after all SURFACE_STATEs
 * for the stage have been uploaded.
 */
static void
gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
                                   const struct ilo_context *ilo,
                                   int shader_type,
                                   struct gen6_pipeline_session *session)
{
   uint32_t *binding_table_state, *surface_state;
   int *binding_table_state_size, size;
   bool skip = false;

   /* BINDING_TABLE_STATE */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      surface_state = p->state.vs.SURFACE_STATE;
      binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_vs_changed;
      break;
   case PIPE_SHADER_GEOMETRY:
      surface_state = p->state.gs.SURFACE_STATE;
      binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_gs_changed;
      break;
   case PIPE_SHADER_FRAGMENT:
      surface_state = p->state.wm.SURFACE_STATE;
      binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_fs_changed;
      break;
   default:
      skip = true;
      break;
   }

   if (skip)
      return;

   /*
    * If we have seemingly less SURFACE_STATEs than before, it could be that
    * we did not touch those that reside at the tail in this upload.  Loop
    * over them to figure out the real number of SURFACE_STATEs.
    */
   for (size = *binding_table_state_size;
         size > session->num_surfaces[shader_type]; size--) {
      if (surface_state[size - 1])
         break;
   }
   /* never shrink below what this upload actually wrote */
   if (size < session->num_surfaces[shader_type])
      size = session->num_surfaces[shader_type];

   *binding_table_state = gen6_emit_BINDING_TABLE_STATE(p->dev,
         surface_state, size, p->cp);
   *binding_table_state_size = size;
}
1160
/**
 * Upload SAMPLER_BORDER_COLOR_STATEs (only when the samplers themselves
 * changed) and the SAMPLER_STATE table for a shader stage.
 *
 * Only VS and FS are handled; other stages are skipped.
 */
static void
gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
                             const struct ilo_context *ilo,
                             int shader_type,
                             struct gen6_pipeline_session *session)
{
   const struct ilo_sampler_cso * const *samplers =
      ilo->sampler[shader_type].cso;
   const struct pipe_sampler_view * const *views =
      (const struct pipe_sampler_view **) ilo->view[shader_type].states;
   const int num_samplers = ilo->sampler[shader_type].count;
   const int num_views = ilo->view[shader_type].count;
   uint32_t *sampler_state, *border_color_state;
   bool emit_border_color = false;
   bool skip = false;

   /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      /* views matter too: SAMPLER_STATE depends on the bound views */
      if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
         sampler_state = &p->state.vs.SAMPLER_STATE;
         border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;

         if (DIRTY(SAMPLER_VS))
            emit_border_color = true;

         session->sampler_state_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
         sampler_state = &p->state.wm.SAMPLER_STATE;
         border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;

         if (DIRTY(SAMPLER_FS))
            emit_border_color = true;

         session->sampler_state_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      skip = true;
      break;
   }

   if (skip)
      return;

   if (emit_border_color) {
      int i;

      /* one border color per sampler; 0 for unbound slots */
      for (i = 0; i < num_samplers; i++) {
         border_color_state[i] = (samplers[i]) ?
            gen6_emit_SAMPLER_BORDER_COLOR_STATE(p->dev,
                  samplers[i], p->cp) : 0;
      }
   }

   /* should we take the minimum of num_samplers and num_views? */
   *sampler_state = gen6_emit_SAMPLER_STATE(p->dev,
         samplers, views,
         border_color_state,
         MIN2(num_samplers, num_views), p->cp);
}
1231
1232 static void
1233 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1234 const struct ilo_context *ilo,
1235 struct gen6_pipeline_session *session)
1236 {
1237 /* push constant buffer for VS */
1238 if (DIRTY(VS) || DIRTY(CBUF) || DIRTY(CLIP)) {
1239 const int cbuf0_size = (ilo->vs) ?
1240 ilo_shader_get_kernel_param(ilo->vs,
1241 ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1242 const int clip_state_size = (ilo->vs) ?
1243 ilo_shader_get_kernel_param(ilo->vs,
1244 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1245 const int total_size = cbuf0_size + clip_state_size;
1246
1247 if (total_size) {
1248 void *pcb;
1249
1250 p->state.vs.PUSH_CONSTANT_BUFFER =
1251 gen6_emit_push_constant_buffer(p->dev, total_size, &pcb, p->cp);
1252 p->state.vs.PUSH_CONSTANT_BUFFER_size = total_size;
1253
1254 if (cbuf0_size) {
1255 const struct ilo_cbuf_state *cbuf =
1256 &ilo->cbuf[PIPE_SHADER_VERTEX];
1257
1258 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1259 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1260 }
1261 else {
1262 memcpy(pcb, cbuf->cso[0].user_buffer,
1263 cbuf->cso[0].user_buffer_size);
1264 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1265 cbuf0_size - cbuf->cso[0].user_buffer_size);
1266 }
1267
1268 pcb += cbuf0_size;
1269 }
1270
1271 if (clip_state_size)
1272 memcpy(pcb, &ilo->clip, clip_state_size);
1273
1274 session->pcb_state_vs_changed = true;
1275 }
1276 else if (p->state.vs.PUSH_CONSTANT_BUFFER_size) {
1277 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1278 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1279
1280 session->pcb_state_vs_changed = true;
1281 }
1282 }
1283
1284 /* push constant buffer for FS */
1285 if (DIRTY(FS) || DIRTY(CBUF)) {
1286 const int cbuf0_size = (ilo->fs) ?
1287 ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1288
1289 if (cbuf0_size) {
1290 const struct ilo_cbuf_state *cbuf = &ilo->cbuf[PIPE_SHADER_FRAGMENT];
1291 void *pcb;
1292
1293 p->state.wm.PUSH_CONSTANT_BUFFER =
1294 gen6_emit_push_constant_buffer(p->dev, cbuf0_size, &pcb, p->cp);
1295 p->state.wm.PUSH_CONSTANT_BUFFER_size = cbuf0_size;
1296
1297 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1298 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1299 }
1300 else {
1301 memcpy(pcb, cbuf->cso[0].user_buffer,
1302 cbuf->cso[0].user_buffer_size);
1303 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1304 cbuf0_size - cbuf->cso[0].user_buffer_size);
1305 }
1306
1307 session->pcb_state_fs_changed = true;
1308 }
1309 else if (p->state.wm.PUSH_CONSTANT_BUFFER_size) {
1310 p->state.wm.PUSH_CONSTANT_BUFFER = 0;
1311 p->state.wm.PUSH_CONSTANT_BUFFER_size = 0;
1312
1313 session->pcb_state_fs_changed = true;
1314 }
1315 }
1316 }
1317
1318 #undef DIRTY
1319
/**
 * Emit all GEN6 draw commands for this session.  The emission order is
 * load-bearing (workarounds and pointer commands depend on it) — do not
 * reorder without checking the i965 command stream.
 */
static void
gen6_pipeline_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /*
    * We try to keep the order of the commands match, as closely as possible,
    * that of the classic i965 driver.  It allows us to compare the command
    * streams easily.
    */
   gen6_pipeline_common_select(p, ilo, session);
   gen6_pipeline_gs_svbi(p, ilo, session);
   gen6_pipeline_common_sip(p, ilo, session);
   gen6_pipeline_vf_statistics(p, ilo, session);
   gen6_pipeline_common_base_address(p, ilo, session);
   gen6_pipeline_common_pointers_1(p, ilo, session);
   gen6_pipeline_common_urb(p, ilo, session);
   gen6_pipeline_common_pointers_2(p, ilo, session);
   gen6_pipeline_wm_multisample(p, ilo, session);
   gen6_pipeline_vs(p, ilo, session);
   gen6_pipeline_gs(p, ilo, session);
   gen6_pipeline_clip(p, ilo, session);
   gen6_pipeline_sf(p, ilo, session);
   gen6_pipeline_wm(p, ilo, session);
   gen6_pipeline_common_pointers_3(p, ilo, session);
   gen6_pipeline_wm_depth(p, ilo, session);
   gen6_pipeline_wm_raster(p, ilo, session);
   gen6_pipeline_sf_rect(p, ilo, session);
   gen6_pipeline_vf(p, ilo, session);
   gen6_pipeline_vf_draw(p, ilo, session);
}
1351
/**
 * Upload all dynamic states (viewports, cc, scissors, PCBs, SURFACE_STATEs,
 * samplers, binding tables) needed by the draw commands.
 */
void
gen6_pipeline_states(struct ilo_3d_pipeline *p,
                     const struct ilo_context *ilo,
                     struct gen6_pipeline_session *session)
{
   int shader_type;

   gen6_pipeline_state_viewports(p, ilo, session);
   gen6_pipeline_state_cc(p, ilo, session);
   gen6_pipeline_state_scissors(p, ilo, session);
   gen6_pipeline_state_pcb(p, ilo, session);

   /*
    * upload all SURFACE_STATEs together so that we know there are minimal
    * paddings
    */
   gen6_pipeline_state_surfaces_rt(p, ilo, session);
   gen6_pipeline_state_surfaces_so(p, ilo, session);
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
      gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_samplers(p, ilo, shader_type, session);
      /* this must be called after all SURFACE_STATEs are uploaded */
      gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
   }
}
1381
/**
 * Initialize the per-draw session: record the dirty flags, the reduced
 * primitive, the remaining batch space, and which bo/HW-context changes
 * force states or commands to be re-emitted.
 */
void
gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   memset(session, 0, sizeof(*session));
   session->pipe_dirty = ilo->dirty;
   session->reduced_prim = u_reduced_prim(ilo->draw->mode);

   /* available space before the session */
   session->init_cp_space = ilo_cp_space(p->cp);

   session->hw_ctx_changed =
      (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);

   if (session->hw_ctx_changed) {
      /* these should be enough to make everything uploaded */
      session->batch_bo_changed = true;
      session->state_bo_changed = true;
      session->kernel_bo_changed = true;
      session->prim_changed = true;
      session->primitive_restart_changed = true;
   }
   else {
      /*
       * Any state that involves resources needs to be re-emitted when the
       * batch bo changed.  This is because we do not pin the resources and
       * their offsets (or existence) may change between batch buffers.
       *
       * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
       * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
       * a temporary workaround.
       */
      session->batch_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);

      session->state_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
      session->kernel_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
      session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
      session->primitive_restart_changed =
         (p->state.primitive_restart != ilo->draw->primitive_restart);
   }
}
1427
1428 void
1429 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1430 const struct ilo_context *ilo,
1431 struct gen6_pipeline_session *session)
1432 {
1433 /* force all states to be uploaded if the state bo changed */
1434 if (session->state_bo_changed)
1435 session->pipe_dirty = ILO_DIRTY_ALL;
1436 else
1437 session->pipe_dirty = ilo->dirty;
1438
1439 session->emit_draw_states(p, ilo, session);
1440
1441 /* force all commands to be uploaded if the HW context changed */
1442 if (session->hw_ctx_changed)
1443 session->pipe_dirty = ILO_DIRTY_ALL;
1444 else
1445 session->pipe_dirty = ilo->dirty;
1446
1447 session->emit_draw_commands(p, ilo, session);
1448 }
1449
/**
 * Finish the session: verify the size estimate covered what was emitted,
 * and remember the reduced primitive and primitive-restart flag for the
 * next draw's change detection.
 */
void
gen6_pipeline_end(struct ilo_3d_pipeline *p,
                  const struct ilo_context *ilo,
                  struct gen6_pipeline_session *session)
{
   /* sanity check size estimation */
   assert(session->init_cp_space - ilo_cp_space(p->cp) <=
         ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo));

   p->state.reduced_prim = session->reduced_prim;
   p->state.primitive_restart = ilo->draw->primitive_restart;
}
1462
/**
 * GEN6 entry point for emitting a draw: prepare a session, wire up the
 * GEN6 state/command emitters, then run the draw and finish the session.
 */
static void
ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
                               const struct ilo_context *ilo)
{
   struct gen6_pipeline_session session;

   gen6_pipeline_prepare(p, ilo, &session);

   session.emit_draw_states = gen6_pipeline_states;
   session.emit_draw_commands = gen6_pipeline_commands;

   gen6_pipeline_draw(p, ilo, &session);
   gen6_pipeline_end(p, ilo, &session);
}
1477
/**
 * Emit a full pipeline flush: instruction/render/depth cache flush plus
 * VF/texture cache invalidation, with a CS stall.
 */
void
ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
{
   /* GEN6 requires a post-sync workaround before flushes */
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_INSTRUCTION_FLUSH |
         PIPE_CONTROL_WRITE_FLUSH |
         PIPE_CONTROL_DEPTH_CACHE_FLUSH |
         PIPE_CONTROL_VF_CACHE_INVALIDATE |
         PIPE_CONTROL_TC_FLUSH |
         PIPE_CONTROL_NO_WRITE |
         PIPE_CONTROL_CS_STALL,
         0, 0, false, p->cp);
}
1494
/**
 * Write the GPU timestamp to \p bo at slot \p index (64-bit slots), for
 * timestamp queries.
 */
void
ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
                                          struct intel_bo *bo, int index)
{
   /* GEN6 requires a post-sync workaround before this write */
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, true);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_TIMESTAMP,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         true, p->cp);
}
1507
/**
 * Write the depth (PS_DEPTH) count to \p bo at slot \p index (64-bit
 * slots), for occlusion queries.  A depth stall is required for the count
 * to be coherent.
 */
void
ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
                                            struct intel_bo *bo, int index)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL |
         PIPE_CONTROL_WRITE_DEPTH_COUNT,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         true, p->cp);
}
1521
/**
 * For a RECTLIST blit, disable the VS-to-SF portion of the 3D pipeline
 * (NULL VS/GS/CLIP/SF programs), with the required VS-constant flush
 * workaround in between.
 */
static void
gen6_rectlist_vs_to_sf(struct ilo_3d_pipeline *p,
                       const struct ilo_blitter *blitter,
                       struct gen6_rectlist_session *session)
{
   gen6_emit_3DSTATE_CONSTANT_VS(p->dev, NULL, NULL, 0, p->cp);
   gen6_emit_3DSTATE_VS(p->dev, NULL, 0, p->cp);

   /* flush required after 3DSTATE_CONSTANT_VS */
   gen6_wa_pipe_control_vs_const_flush(p);

   gen6_emit_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
   gen6_emit_3DSTATE_GS(p->dev, NULL, NULL, 0, p->cp);

   gen6_emit_3DSTATE_CLIP(p->dev, NULL, NULL, false, 0, p->cp);
   gen6_emit_3DSTATE_SF(p->dev, NULL, NULL, p->cp);
}
1538
1539 static void
1540 gen6_rectlist_wm(struct ilo_3d_pipeline *p,
1541 const struct ilo_blitter *blitter,
1542 struct gen6_rectlist_session *session)
1543 {
1544 uint32_t hiz_op;
1545
1546 switch (blitter->op) {
1547 case ILO_BLITTER_RECTLIST_CLEAR_ZS:
1548 hiz_op = GEN6_WM_DEPTH_CLEAR;
1549 break;
1550 case ILO_BLITTER_RECTLIST_RESOLVE_Z:
1551 hiz_op = GEN6_WM_DEPTH_RESOLVE;
1552 break;
1553 case ILO_BLITTER_RECTLIST_RESOLVE_HIZ:
1554 hiz_op = GEN6_WM_HIERARCHICAL_DEPTH_RESOLVE;
1555 break;
1556 default:
1557 hiz_op = 0;
1558 break;
1559 }
1560
1561 gen6_emit_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
1562
1563 gen6_wa_pipe_control_wm_max_threads_stall(p);
1564 gen6_emit_3DSTATE_WM(p->dev, NULL, 0, NULL, false, false, hiz_op, p->cp);
1565 }
1566
/**
 * For a RECTLIST blit, program the depth/stencil buffers and the depth
 * clear value used by the HiZ operation.
 */
static void
gen6_rectlist_wm_depth(struct ilo_3d_pipeline *p,
                       const struct ilo_blitter *blitter,
                       struct gen6_rectlist_session *session)
{
   /* flush required before reprogramming the depth buffer */
   gen6_wa_pipe_control_wm_depth_flush(p);

   if (blitter->uses & ILO_BLITTER_USE_FB_DEPTH) {
      gen6_emit_3DSTATE_DEPTH_BUFFER(p->dev,
            &blitter->fb.dst.u.zs, p->cp);

      gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(p->dev,
            &blitter->fb.dst.u.zs, p->cp);
   }

   if (blitter->uses & ILO_BLITTER_USE_FB_STENCIL) {
      gen6_emit_3DSTATE_STENCIL_BUFFER(p->dev,
            &blitter->fb.dst.u.zs, p->cp);
   }

   gen6_emit_3DSTATE_CLEAR_PARAMS(p->dev,
         blitter->depth_clear_value, p->cp);
}
1590
1591 static void
1592 gen6_rectlist_wm_multisample(struct ilo_3d_pipeline *p,
1593 const struct ilo_blitter *blitter,
1594 struct gen6_rectlist_session *session)
1595 {
1596 const uint32_t *packed_sample_pos = (blitter->fb.num_samples > 1) ?
1597 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
1598
1599 gen6_wa_pipe_control_wm_multisample_flush(p);
1600
1601 gen6_emit_3DSTATE_MULTISAMPLE(p->dev, blitter->fb.num_samples,
1602 packed_sample_pos, true, p->cp);
1603
1604 gen6_emit_3DSTATE_SAMPLE_MASK(p->dev,
1605 (1 << blitter->fb.num_samples) - 1, p->cp);
1606 }
1607
/**
 * Emit the full command stream for a RECTLIST blit (HiZ clear/resolve).
 * Emission order is load-bearing: base addresses, VF state, URB (plus its
 * workaround flush), CC/viewport pointers, the disabled VS-to-SF stages,
 * WM, depth state, drawing rectangle, then the draw itself.
 */
static void
gen6_rectlist_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_blitter *blitter,
                       struct gen6_rectlist_session *session)
{
   gen6_wa_pipe_control_post_sync(p, false);

   gen6_rectlist_wm_multisample(p, blitter, session);

   gen6_emit_STATE_BASE_ADDRESS(p->dev,
         NULL,              /* General State Base */
         p->cp->bo,         /* Surface State Base */
         p->cp->bo,         /* Dynamic State Base */
         NULL,              /* Indirect Object Base */
         NULL,              /* Instruction Base */
         0, 0, 0, 0, p->cp);

   gen6_emit_3DSTATE_VERTEX_BUFFERS(p->dev,
         &blitter->ve, &blitter->vb, p->cp);

   gen6_emit_3DSTATE_VERTEX_ELEMENTS(p->dev,
         &blitter->ve, false, false, p->cp);

   gen6_emit_3DSTATE_URB(p->dev,
         p->dev->urb_size, 0, blitter->ve.count * 4 * sizeof(float), 0, p->cp);
   /* 3DSTATE_URB workaround: flush when the GS was active */
   if (p->state.gs.active) {
      ilo_3d_pipeline_emit_flush_gen6(p);
      p->state.gs.active = false;
   }

   if (blitter->uses &
         (ILO_BLITTER_USE_DSA | ILO_BLITTER_USE_CC)) {
      gen6_emit_3DSTATE_CC_STATE_POINTERS(p->dev, 0,
            session->DEPTH_STENCIL_STATE, session->COLOR_CALC_STATE, p->cp);
   }

   gen6_rectlist_vs_to_sf(p, blitter, session);
   gen6_rectlist_wm(p, blitter, session);

   if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
      gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
            0, 0, session->CC_VIEWPORT, p->cp);
   }

   gen6_rectlist_wm_depth(p, blitter, session);

   gen6_emit_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
         blitter->fb.width, blitter->fb.height, p->cp);

   gen6_emit_3DPRIMITIVE(p->dev, &blitter->draw, NULL, true, p->cp);
}
1660
/**
 * Upload the dynamic states a RECTLIST blit needs (DSA, color-calc,
 * viewport) and record their offsets in the session for the pointer
 * commands emitted later.
 */
static void
gen6_rectlist_states(struct ilo_3d_pipeline *p,
                     const struct ilo_blitter *blitter,
                     struct gen6_rectlist_session *session)
{
   if (blitter->uses & ILO_BLITTER_USE_DSA) {
      session->DEPTH_STENCIL_STATE =
         gen6_emit_DEPTH_STENCIL_STATE(p->dev, &blitter->dsa, p->cp);
   }

   if (blitter->uses & ILO_BLITTER_USE_CC) {
      session->COLOR_CALC_STATE =
         gen6_emit_COLOR_CALC_STATE(p->dev, &blitter->cc.stencil_ref,
               blitter->cc.alpha_ref, &blitter->cc.blend_color, p->cp);
   }

   if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
      session->CC_VIEWPORT =
         gen6_emit_CC_VIEWPORT(p->dev, &blitter->viewport, 1, p->cp);
   }
}
1682
/**
 * GEN6 entry point for emitting a RECTLIST blit: upload the states first,
 * then the commands that point at them.
 */
static void
ilo_3d_pipeline_emit_rectlist_gen6(struct ilo_3d_pipeline *p,
                                   const struct ilo_blitter *blitter)
{
   struct gen6_rectlist_session session;

   memset(&session, 0, sizeof(session));
   gen6_rectlist_states(p, blitter, &session);
   gen6_rectlist_commands(p, blitter, &session);
}
1693
/**
 * Estimate the worst-case batch space needed by the draw commands.
 *
 * NOTE(review): the result is cached in a function-local static computed
 * from the first device seen — assumes the estimate is device-invariant
 * and single-threaded initialization; confirm.
 */
static int
gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo)
{
   static int size;
   enum ilo_gpe_gen6_command cmd;

   /* already computed */
   if (size)
      return size;

   for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
      int count;

      switch (cmd) {
      case ILO_GPE_GEN6_PIPE_CONTROL:
         /* for the workaround */
         count = 2;
         /* another one after 3DSTATE_URB */
         count += 1;
         /* and another one after 3DSTATE_CONSTANT_VS */
         count += 1;
         break;
      case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
         /* there are 4 SVBIs */
         count = 4;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
         count = 33;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
         count = 34;
         break;
      case ILO_GPE_GEN6_MEDIA_VFE_STATE:
      case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
      case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
      case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
      case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
      case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
         /* media commands: not emitted on the draw path */
         count = 0;
         break;
      default:
         count = 1;
         break;
      }

      if (count)
         size += ilo_gpe_gen6_estimate_command_size(p->dev, cmd, count);
   }

   return size;
}
1746
/**
 * Estimate the worst-case batch space needed by the dynamic states: a
 * cached fixed part (viewports, cc, scissors, binding tables) plus a
 * per-draw part that scales with render targets, stream outputs, sampler
 * views, constant buffers, samplers, and PCB sizes.
 *
 * NOTE(review): the fixed part is cached in a function-local static
 * computed from the first device seen — assumes device-invariance and
 * single-threaded initialization; confirm.
 */
static int
gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
                              const struct ilo_context *ilo)
{
   static int static_size;
   int shader_type, count, size;

   if (!static_size) {
      struct {
         enum ilo_gpe_gen6_state state;
         int count;
      } static_states[] = {
         /* viewports */
         { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
         /* cc */
         { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
         { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
         { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
         /* scissors */
         { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
         /* binding table (vs, gs, fs) */
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
      };
      int i;

      for (i = 0; i < Elements(static_states); i++) {
         static_size += ilo_gpe_gen6_estimate_state_size(p->dev,
               static_states[i].state,
               static_states[i].count);
      }
   }

   size = static_size;

   /*
    * render targets (fs)
    * stream outputs (gs)
    * sampler views (vs, fs)
    * constant buffers (vs, fs)
    */
   count = ilo->fb.state.nr_cbufs;

   /* stream output surfaces come from the GS if present, else the VS */
   if (ilo->gs) {
      const struct pipe_stream_output_info *so_info =
         ilo_shader_get_kernel_so_info(ilo->gs);

      count += so_info->num_outputs;
   }
   else if (ilo->vs) {
      const struct pipe_stream_output_info *so_info =
         ilo_shader_get_kernel_so_info(ilo->vs);

      count += so_info->num_outputs;
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count += ilo->view[shader_type].count;
      count += util_bitcount(ilo->cbuf[shader_type].enabled_mask);
   }

   if (count) {
      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_SURFACE_STATE, count);
   }

   /* samplers (vs, fs) */
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count = ilo->sampler[shader_type].count;
      if (count) {
         size += ilo_gpe_gen6_estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
         size += ilo_gpe_gen6_estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_STATE, count);
      }
   }

   /* pcb (vs) */
   if (ilo->vs) {
      const int cbuf0_size =
         ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_PCB_CBUF0_SIZE);
      const int ucp_size =
         ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE);

      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size + ucp_size);
   }

   /* pcb (fs) */
   if (ilo->fs) {
      const int cbuf0_size =
         ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE);

      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size);
   }

   return size;
}
1849
1850 static int
1851 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1852 enum ilo_3d_pipeline_action action,
1853 const void *arg)
1854 {
1855 int size;
1856
1857 switch (action) {
1858 case ILO_3D_PIPELINE_DRAW:
1859 {
1860 const struct ilo_context *ilo = arg;
1861
1862 size = gen6_pipeline_estimate_commands(p, ilo) +
1863 gen6_pipeline_estimate_states(p, ilo);
1864 }
1865 break;
1866 case ILO_3D_PIPELINE_FLUSH:
1867 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1868 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1869 break;
1870 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1871 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1872 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1873 break;
1874 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1875 size = ilo_gpe_gen6_estimate_command_size(p->dev,
1876 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1877 break;
1878 case ILO_3D_PIPELINE_RECTLIST:
1879 size = 64 + 256; /* states + commands */
1880 break;
1881 default:
1882 assert(!"unknown 3D pipeline action");
1883 size = 0;
1884 break;
1885 }
1886
1887 return size;
1888 }
1889
/**
 * Hook up the GEN6 implementations of the 3D pipeline vtable.
 */
void
ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
{
   p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
   p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
   p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
   p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
   p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
   p->emit_rectlist = ilo_3d_pipeline_emit_rectlist_gen6;
}