/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
#include "util/u_dual_blend.h"
#include "util/u_prim.h"
#include "intel_reg.h"

#include "ilo_context.h"
#include "ilo_gpe_gen6.h"
#include "ilo_gpe_gen7.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d_pipeline.h"
#include "ilo_3d_pipeline_gen6.h"
43 * This should be called before any depth stall flush (including those
44 * produced by non-pipelined state commands) or cache flush on GEN6.
46 * \see intel_emit_post_sync_nonzero_flush()
49 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline
*p
,
50 bool caller_post_sync
)
52 assert(p
->dev
->gen
== ILO_GEN(6));
55 if (p
->state
.has_gen6_wa_pipe_control
)
58 p
->state
.has_gen6_wa_pipe_control
= true;
61 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
63 * "Pipe-control with CS-stall bit set must be sent BEFORE the
64 * pipe-control with a post-sync op and no write-cache flushes."
66 * The workaround below necessitates this workaround.
68 gen6_emit_PIPE_CONTROL(p
->dev
,
69 PIPE_CONTROL_CS_STALL
|
70 PIPE_CONTROL_STALL_AT_SCOREBOARD
,
71 NULL
, 0, false, p
->cp
);
73 /* the caller will emit the post-sync op */
78 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
80 * "Before any depth stall flush (including those produced by
81 * non-pipelined state commands), software needs to first send a
82 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
84 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
85 * PIPE_CONTROL with any non-zero post-sync-op is required."
87 gen6_emit_PIPE_CONTROL(p
->dev
,
88 PIPE_CONTROL_WRITE_IMMEDIATE
,
89 p
->workaround_bo
, 0, false, p
->cp
);
93 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline
*p
)
95 assert(p
->dev
->gen
== ILO_GEN(6));
97 gen6_wa_pipe_control_post_sync(p
, false);
100 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
102 * "Driver must guarentee that all the caches in the depth pipe are
103 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
104 * requires driver to send a PIPE_CONTROL with a CS stall along with a
105 * Depth Flush prior to this command."
107 gen6_emit_PIPE_CONTROL(p
->dev
,
108 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
109 PIPE_CONTROL_CS_STALL
,
114 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline
*p
)
116 assert(p
->dev
->gen
== ILO_GEN(6));
118 gen6_wa_pipe_control_post_sync(p
, false);
121 * According to intel_emit_depth_stall_flushes() of classic i965, we need
122 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
125 gen6_emit_PIPE_CONTROL(p
->dev
,
126 PIPE_CONTROL_DEPTH_STALL
,
127 NULL
, 0, false, p
->cp
);
129 gen6_emit_PIPE_CONTROL(p
->dev
,
130 PIPE_CONTROL_DEPTH_CACHE_FLUSH
,
131 NULL
, 0, false, p
->cp
);
133 gen6_emit_PIPE_CONTROL(p
->dev
,
134 PIPE_CONTROL_DEPTH_STALL
,
135 NULL
, 0, false, p
->cp
);
139 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline
*p
)
141 assert(p
->dev
->gen
== ILO_GEN(6));
143 /* the post-sync workaround should cover this already */
144 if (p
->state
.has_gen6_wa_pipe_control
)
148 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
150 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
151 * field set (DW1 Bit 1), must be issued prior to any change to the
152 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
154 gen6_emit_PIPE_CONTROL(p
->dev
,
155 PIPE_CONTROL_STALL_AT_SCOREBOARD
,
156 NULL
, 0, false, p
->cp
);
161 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline
*p
)
163 assert(p
->dev
->gen
== ILO_GEN(6));
165 gen6_wa_pipe_control_post_sync(p
, false);
168 * According to upload_vs_state() of classic i965, we need to emit
169 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
170 * buffered by VS FF, to the point that the FF dies.
172 gen6_emit_PIPE_CONTROL(p
->dev
,
173 PIPE_CONTROL_DEPTH_STALL
|
174 PIPE_CONTROL_INSTRUCTION_FLUSH
|
175 PIPE_CONTROL_STATE_CACHE_INVALIDATE
,
176 NULL
, 0, false, p
->cp
);
/* convenience: has the given ilo state been marked dirty for this session? */
#define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
182 gen6_pipeline_common_select(struct ilo_3d_pipeline
*p
,
183 const struct ilo_context
*ilo
,
184 struct gen6_pipeline_session
*session
)
186 /* PIPELINE_SELECT */
187 if (session
->hw_ctx_changed
) {
188 if (p
->dev
->gen
== ILO_GEN(6))
189 gen6_wa_pipe_control_post_sync(p
, false);
191 gen6_emit_PIPELINE_SELECT(p
->dev
, 0x0, p
->cp
);
196 gen6_pipeline_common_sip(struct ilo_3d_pipeline
*p
,
197 const struct ilo_context
*ilo
,
198 struct gen6_pipeline_session
*session
)
201 if (session
->hw_ctx_changed
) {
202 if (p
->dev
->gen
== ILO_GEN(6))
203 gen6_wa_pipe_control_post_sync(p
, false);
205 gen6_emit_STATE_SIP(p
->dev
, 0, p
->cp
);
210 gen6_pipeline_common_base_address(struct ilo_3d_pipeline
*p
,
211 const struct ilo_context
*ilo
,
212 struct gen6_pipeline_session
*session
)
214 /* STATE_BASE_ADDRESS */
215 if (session
->state_bo_changed
|| session
->kernel_bo_changed
||
216 session
->batch_bo_changed
) {
217 if (p
->dev
->gen
== ILO_GEN(6))
218 gen6_wa_pipe_control_post_sync(p
, false);
220 gen6_emit_STATE_BASE_ADDRESS(p
->dev
,
221 NULL
, p
->cp
->bo
, p
->cp
->bo
, NULL
, ilo
->hw3d
->kernel
.bo
,
225 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
227 * "The following commands must be reissued following any change to
228 * the base addresses:
230 * * 3DSTATE_BINDING_TABLE_POINTERS
231 * * 3DSTATE_SAMPLER_STATE_POINTERS
232 * * 3DSTATE_VIEWPORT_STATE_POINTERS
233 * * 3DSTATE_CC_POINTERS
234 * * MEDIA_STATE_POINTERS"
236 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
237 * reasonable to also reissue the command. Same to PCB.
239 session
->viewport_state_changed
= true;
241 session
->cc_state_blend_changed
= true;
242 session
->cc_state_dsa_changed
= true;
243 session
->cc_state_cc_changed
= true;
245 session
->scissor_state_changed
= true;
247 session
->binding_table_vs_changed
= true;
248 session
->binding_table_gs_changed
= true;
249 session
->binding_table_fs_changed
= true;
251 session
->sampler_state_vs_changed
= true;
252 session
->sampler_state_gs_changed
= true;
253 session
->sampler_state_fs_changed
= true;
255 session
->pcb_state_vs_changed
= true;
256 session
->pcb_state_gs_changed
= true;
257 session
->pcb_state_fs_changed
= true;
262 gen6_pipeline_common_urb(struct ilo_3d_pipeline
*p
,
263 const struct ilo_context
*ilo
,
264 struct gen6_pipeline_session
*session
)
267 if (DIRTY(VE
) || DIRTY(VS
) || DIRTY(GS
)) {
268 const bool gs_active
= (ilo
->gs
|| (ilo
->vs
&&
269 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_VS_GEN6_SO
)));
270 int vs_entry_size
, gs_entry_size
;
271 int vs_total_size
, gs_total_size
;
273 vs_entry_size
= (ilo
->vs
) ?
274 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_OUTPUT_COUNT
) : 0;
277 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
278 * share VUE handles. The VUE allocation size must be large enough to
279 * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
281 * I am not sure if the PRM explicitly states that VF and VS share VUE
282 * handles. But here is a citation that implies so:
284 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
286 * "Once a FF stage that spawn threads has sufficient input to
287 * initiate a thread, it must guarantee that it is safe to request
288 * the thread initiation. For all these FF stages, this check is
291 * - The availability of output URB entries:
292 * - VS: As the input URB entries are overwritten with the
293 * VS-generated output data, output URB availability isn't a
296 if (vs_entry_size
< ilo
->ve
->count
)
297 vs_entry_size
= ilo
->ve
->count
;
299 gs_entry_size
= (ilo
->gs
) ?
300 ilo_shader_get_kernel_param(ilo
->gs
, ILO_KERNEL_OUTPUT_COUNT
) :
301 (gs_active
) ? vs_entry_size
: 0;
304 vs_entry_size
*= sizeof(float) * 4;
305 gs_entry_size
*= sizeof(float) * 4;
306 vs_total_size
= ilo
->dev
->urb_size
;
310 gs_total_size
= vs_total_size
;
316 gen6_emit_3DSTATE_URB(p
->dev
, vs_total_size
, gs_total_size
,
317 vs_entry_size
, gs_entry_size
, p
->cp
);
320 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
322 * "Because of a urb corruption caused by allocating a previous
323 * gsunit's urb entry to vsunit software is required to send a
324 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
325 * size == 0) plus a dummy DRAW call before any case where VS will
326 * be taking over GS URB space."
328 if (p
->state
.gs
.active
&& !gs_active
)
329 ilo_3d_pipeline_emit_flush_gen6(p
);
331 p
->state
.gs
.active
= gs_active
;
336 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline
*p
,
337 const struct ilo_context
*ilo
,
338 struct gen6_pipeline_session
*session
)
340 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
341 if (session
->viewport_state_changed
) {
342 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p
->dev
,
343 p
->state
.CLIP_VIEWPORT
,
344 p
->state
.SF_VIEWPORT
,
345 p
->state
.CC_VIEWPORT
, p
->cp
);
350 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline
*p
,
351 const struct ilo_context
*ilo
,
352 struct gen6_pipeline_session
*session
)
354 /* 3DSTATE_CC_STATE_POINTERS */
355 if (session
->cc_state_blend_changed
||
356 session
->cc_state_dsa_changed
||
357 session
->cc_state_cc_changed
) {
358 gen6_emit_3DSTATE_CC_STATE_POINTERS(p
->dev
,
359 p
->state
.BLEND_STATE
,
360 p
->state
.DEPTH_STENCIL_STATE
,
361 p
->state
.COLOR_CALC_STATE
, p
->cp
);
364 /* 3DSTATE_SAMPLER_STATE_POINTERS */
365 if (session
->sampler_state_vs_changed
||
366 session
->sampler_state_gs_changed
||
367 session
->sampler_state_fs_changed
) {
368 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p
->dev
,
369 p
->state
.vs
.SAMPLER_STATE
,
371 p
->state
.wm
.SAMPLER_STATE
, p
->cp
);
376 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline
*p
,
377 const struct ilo_context
*ilo
,
378 struct gen6_pipeline_session
*session
)
380 /* 3DSTATE_SCISSOR_STATE_POINTERS */
381 if (session
->scissor_state_changed
) {
382 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p
->dev
,
383 p
->state
.SCISSOR_RECT
, p
->cp
);
386 /* 3DSTATE_BINDING_TABLE_POINTERS */
387 if (session
->binding_table_vs_changed
||
388 session
->binding_table_gs_changed
||
389 session
->binding_table_fs_changed
) {
390 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p
->dev
,
391 p
->state
.vs
.BINDING_TABLE_STATE
,
392 p
->state
.gs
.BINDING_TABLE_STATE
,
393 p
->state
.wm
.BINDING_TABLE_STATE
, p
->cp
);
398 gen6_pipeline_vf(struct ilo_3d_pipeline
*p
,
399 const struct ilo_context
*ilo
,
400 struct gen6_pipeline_session
*session
)
402 /* 3DSTATE_INDEX_BUFFER */
403 if (DIRTY(IB
) || session
->primitive_restart_changed
||
404 session
->batch_bo_changed
) {
405 gen6_emit_3DSTATE_INDEX_BUFFER(p
->dev
,
406 &ilo
->ib
, ilo
->draw
->primitive_restart
, p
->cp
);
409 /* 3DSTATE_VERTEX_BUFFERS */
410 if (DIRTY(VB
) || DIRTY(VE
) || session
->batch_bo_changed
)
411 gen6_emit_3DSTATE_VERTEX_BUFFERS(p
->dev
, ilo
->ve
, &ilo
->vb
, p
->cp
);
413 /* 3DSTATE_VERTEX_ELEMENTS */
414 if (DIRTY(VE
) || DIRTY(VS
)) {
415 const struct ilo_ve_state
*ve
= ilo
->ve
;
416 bool last_velement_edgeflag
= false;
417 bool prepend_generate_ids
= false;
420 if (ilo_shader_get_kernel_param(ilo
->vs
,
421 ILO_KERNEL_VS_INPUT_EDGEFLAG
)) {
422 /* we rely on the state tracker here */
423 assert(ilo_shader_get_kernel_param(ilo
->vs
,
424 ILO_KERNEL_INPUT_COUNT
) == ve
->count
);
426 last_velement_edgeflag
= true;
429 if (ilo_shader_get_kernel_param(ilo
->vs
,
430 ILO_KERNEL_VS_INPUT_INSTANCEID
) ||
431 ilo_shader_get_kernel_param(ilo
->vs
,
432 ILO_KERNEL_VS_INPUT_VERTEXID
))
433 prepend_generate_ids
= true;
436 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p
->dev
, ve
,
437 last_velement_edgeflag
, prepend_generate_ids
, p
->cp
);
442 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline
*p
,
443 const struct ilo_context
*ilo
,
444 struct gen6_pipeline_session
*session
)
446 /* 3DSTATE_VF_STATISTICS */
447 if (session
->hw_ctx_changed
)
448 gen6_emit_3DSTATE_VF_STATISTICS(p
->dev
, false, p
->cp
);
452 gen6_pipeline_vf_draw(struct ilo_3d_pipeline
*p
,
453 const struct ilo_context
*ilo
,
454 struct gen6_pipeline_session
*session
)
457 gen6_emit_3DPRIMITIVE(p
->dev
, ilo
->draw
, &ilo
->ib
, false, p
->cp
);
458 p
->state
.has_gen6_wa_pipe_control
= false;
462 gen6_pipeline_vs(struct ilo_3d_pipeline
*p
,
463 const struct ilo_context
*ilo
,
464 struct gen6_pipeline_session
*session
)
466 const bool emit_3dstate_vs
= (DIRTY(VS
) || DIRTY(SAMPLER_VS
) ||
467 session
->kernel_bo_changed
);
468 const bool emit_3dstate_constant_vs
= session
->pcb_state_vs_changed
;
471 * the classic i965 does this in upload_vs_state(), citing a spec that I
474 if (emit_3dstate_vs
&& p
->dev
->gen
== ILO_GEN(6))
475 gen6_wa_pipe_control_post_sync(p
, false);
477 /* 3DSTATE_CONSTANT_VS */
478 if (emit_3dstate_constant_vs
) {
479 gen6_emit_3DSTATE_CONSTANT_VS(p
->dev
,
480 &p
->state
.vs
.PUSH_CONSTANT_BUFFER
,
481 &p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
,
486 if (emit_3dstate_vs
) {
487 const int num_samplers
= ilo
->sampler
[PIPE_SHADER_VERTEX
].count
;
489 gen6_emit_3DSTATE_VS(p
->dev
, ilo
->vs
, num_samplers
, p
->cp
);
492 if (emit_3dstate_constant_vs
&& p
->dev
->gen
== ILO_GEN(6))
493 gen6_wa_pipe_control_vs_const_flush(p
);
497 gen6_pipeline_gs(struct ilo_3d_pipeline
*p
,
498 const struct ilo_context
*ilo
,
499 struct gen6_pipeline_session
*session
)
501 /* 3DSTATE_CONSTANT_GS */
502 if (session
->pcb_state_gs_changed
)
503 gen6_emit_3DSTATE_CONSTANT_GS(p
->dev
, NULL
, NULL
, 0, p
->cp
);
506 if (DIRTY(GS
) || DIRTY(VS
) ||
507 session
->prim_changed
|| session
->kernel_bo_changed
) {
508 const int verts_per_prim
= u_vertices_per_prim(session
->reduced_prim
);
510 gen6_emit_3DSTATE_GS(p
->dev
, ilo
->gs
, ilo
->vs
, verts_per_prim
, p
->cp
);
515 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline
*p
,
516 const struct ilo_context
*ilo
,
517 struct gen6_pipeline_session
*session
)
519 if (DIRTY(VS
) || DIRTY(GS
) || DIRTY(SO
)) {
520 const struct pipe_stream_output_info
*so_info
=
521 (ilo
->gs
) ? ilo_shader_get_kernel_so_info(ilo
->gs
) :
522 (ilo
->vs
) ? ilo_shader_get_kernel_so_info(ilo
->vs
) : NULL
;
523 unsigned max_svbi
= 0xffffffff;
526 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
527 const int output_buffer
= so_info
->output
[i
].output_buffer
;
528 const struct pipe_stream_output_target
*so
=
529 ilo
->so
.states
[output_buffer
];
530 const int struct_size
= so_info
->stride
[output_buffer
] * 4;
531 const int elem_size
= so_info
->output
[i
].num_components
* 4;
539 buf_size
= so
->buffer_size
- so_info
->output
[i
].dst_offset
* 4;
541 count
= buf_size
/ struct_size
;
542 if (buf_size
% struct_size
>= elem_size
)
545 if (count
< max_svbi
)
549 if (p
->state
.so_max_vertices
!= max_svbi
) {
550 p
->state
.so_max_vertices
= max_svbi
;
559 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline
*p
,
560 const struct ilo_context
*ilo
,
561 struct gen6_pipeline_session
*session
)
563 const bool emit
= gen6_pipeline_update_max_svbi(p
, ilo
, session
);
565 /* 3DSTATE_GS_SVB_INDEX */
567 if (p
->dev
->gen
== ILO_GEN(6))
568 gen6_wa_pipe_control_post_sync(p
, false);
570 gen6_emit_3DSTATE_GS_SVB_INDEX(p
->dev
,
571 0, p
->state
.so_num_vertices
, p
->state
.so_max_vertices
,
574 if (session
->hw_ctx_changed
) {
578 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
580 * "If a buffer is not enabled then the SVBI must be set to 0x0
581 * in order to not cause overflow in that SVBI."
583 * "If a buffer is not enabled then the MaxSVBI must be set to
584 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
586 for (i
= 1; i
< 4; i
++) {
587 gen6_emit_3DSTATE_GS_SVB_INDEX(p
->dev
,
588 i
, 0, 0xffffffff, false, p
->cp
);
595 gen6_pipeline_clip(struct ilo_3d_pipeline
*p
,
596 const struct ilo_context
*ilo
,
597 struct gen6_pipeline_session
*session
)
600 if (DIRTY(RASTERIZER
) || DIRTY(FS
) || DIRTY(VIEWPORT
) || DIRTY(FB
)) {
601 bool enable_guardband
= true;
605 * We do not do 2D clipping yet. Guard band test should only be enabled
606 * when the viewport is larger than the framebuffer.
608 for (i
= 0; i
< ilo
->viewport
.count
; i
++) {
609 const struct ilo_viewport_cso
*vp
= &ilo
->viewport
.cso
[i
];
611 if (vp
->min_x
> 0.0f
|| vp
->max_x
< ilo
->fb
.state
.width
||
612 vp
->min_y
> 0.0f
|| vp
->max_y
< ilo
->fb
.state
.height
) {
613 enable_guardband
= false;
618 gen6_emit_3DSTATE_CLIP(p
->dev
, ilo
->rasterizer
,
619 ilo
->fs
, enable_guardband
, 1, p
->cp
);
624 gen6_pipeline_sf(struct ilo_3d_pipeline
*p
,
625 const struct ilo_context
*ilo
,
626 struct gen6_pipeline_session
*session
)
629 if (DIRTY(RASTERIZER
) || DIRTY(VS
) || DIRTY(GS
) || DIRTY(FS
)) {
630 gen6_emit_3DSTATE_SF(p
->dev
, ilo
->rasterizer
, ilo
->fs
,
631 (ilo
->gs
) ? ilo
->gs
: ilo
->vs
, p
->cp
);
636 gen6_pipeline_sf_rect(struct ilo_3d_pipeline
*p
,
637 const struct ilo_context
*ilo
,
638 struct gen6_pipeline_session
*session
)
640 /* 3DSTATE_DRAWING_RECTANGLE */
642 if (p
->dev
->gen
== ILO_GEN(6))
643 gen6_wa_pipe_control_post_sync(p
, false);
645 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p
->dev
, 0, 0,
646 ilo
->fb
.state
.width
, ilo
->fb
.state
.height
, p
->cp
);
651 gen6_pipeline_wm(struct ilo_3d_pipeline
*p
,
652 const struct ilo_context
*ilo
,
653 struct gen6_pipeline_session
*session
)
655 /* 3DSTATE_CONSTANT_PS */
656 if (session
->pcb_state_fs_changed
)
657 gen6_emit_3DSTATE_CONSTANT_PS(p
->dev
, NULL
, NULL
, 0, p
->cp
);
660 if (DIRTY(FS
) || DIRTY(SAMPLER_FS
) || DIRTY(BLEND
) || DIRTY(DSA
) ||
661 DIRTY(RASTERIZER
) || session
->kernel_bo_changed
) {
662 const int num_samplers
= ilo
->sampler
[PIPE_SHADER_FRAGMENT
].count
;
663 const bool dual_blend
= ilo
->blend
->dual_blend
;
664 const bool cc_may_kill
= (ilo
->dsa
->dw_alpha
||
665 ilo
->blend
->alpha_to_coverage
);
667 if (p
->dev
->gen
== ILO_GEN(6) && session
->hw_ctx_changed
)
668 gen6_wa_pipe_control_wm_max_threads_stall(p
);
670 gen6_emit_3DSTATE_WM(p
->dev
, ilo
->fs
, num_samplers
,
671 ilo
->rasterizer
, dual_blend
, cc_may_kill
, p
->cp
);
676 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline
*p
,
677 const struct ilo_context
*ilo
,
678 struct gen6_pipeline_session
*session
)
680 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
681 if (DIRTY(SAMPLE_MASK
) || DIRTY(FB
)) {
682 const uint32_t *packed_sample_pos
;
684 packed_sample_pos
= (ilo
->fb
.num_samples
> 1) ?
685 &p
->packed_sample_position_4x
: &p
->packed_sample_position_1x
;
687 if (p
->dev
->gen
== ILO_GEN(6)) {
688 gen6_wa_pipe_control_post_sync(p
, false);
689 gen6_wa_pipe_control_wm_multisample_flush(p
);
692 gen6_emit_3DSTATE_MULTISAMPLE(p
->dev
,
693 ilo
->fb
.num_samples
, packed_sample_pos
,
694 ilo
->rasterizer
->state
.half_pixel_center
, p
->cp
);
696 gen6_emit_3DSTATE_SAMPLE_MASK(p
->dev
,
697 (ilo
->fb
.num_samples
> 1) ? ilo
->sample_mask
: 0x1, p
->cp
);
702 gen6_pipeline_wm_depth(struct ilo_3d_pipeline
*p
,
703 const struct ilo_context
*ilo
,
704 struct gen6_pipeline_session
*session
)
706 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
707 if (DIRTY(FB
) || session
->batch_bo_changed
) {
708 const struct ilo_zs_surface
*zs
;
710 if (ilo
->fb
.state
.zsbuf
) {
711 const struct ilo_surface_cso
*surface
=
712 (const struct ilo_surface_cso
*) ilo
->fb
.state
.zsbuf
;
714 assert(!surface
->is_rt
);
718 zs
= &ilo
->fb
.null_zs
;
721 if (p
->dev
->gen
== ILO_GEN(6)) {
722 gen6_wa_pipe_control_post_sync(p
, false);
723 gen6_wa_pipe_control_wm_depth_flush(p
);
726 gen6_emit_3DSTATE_DEPTH_BUFFER(p
->dev
, zs
, p
->cp
);
729 gen6_emit_3DSTATE_CLEAR_PARAMS(p
->dev
, 0, p
->cp
);
734 gen6_pipeline_wm_raster(struct ilo_3d_pipeline
*p
,
735 const struct ilo_context
*ilo
,
736 struct gen6_pipeline_session
*session
)
738 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
739 if ((DIRTY(RASTERIZER
) || DIRTY(POLY_STIPPLE
)) &&
740 ilo
->rasterizer
->state
.poly_stipple_enable
) {
741 if (p
->dev
->gen
== ILO_GEN(6))
742 gen6_wa_pipe_control_post_sync(p
, false);
744 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p
->dev
,
745 &ilo
->poly_stipple
, p
->cp
);
747 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p
->dev
, 0, 0, p
->cp
);
750 /* 3DSTATE_LINE_STIPPLE */
751 if (DIRTY(RASTERIZER
) && ilo
->rasterizer
->state
.line_stipple_enable
) {
752 if (p
->dev
->gen
== ILO_GEN(6))
753 gen6_wa_pipe_control_post_sync(p
, false);
755 gen6_emit_3DSTATE_LINE_STIPPLE(p
->dev
,
756 ilo
->rasterizer
->state
.line_stipple_pattern
,
757 ilo
->rasterizer
->state
.line_stipple_factor
+ 1, p
->cp
);
760 /* 3DSTATE_AA_LINE_PARAMETERS */
761 if (DIRTY(RASTERIZER
) && ilo
->rasterizer
->state
.line_smooth
) {
762 if (p
->dev
->gen
== ILO_GEN(6))
763 gen6_wa_pipe_control_post_sync(p
, false);
765 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p
->dev
, p
->cp
);
770 gen6_pipeline_state_viewports(struct ilo_3d_pipeline
*p
,
771 const struct ilo_context
*ilo
,
772 struct gen6_pipeline_session
*session
)
774 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
775 if (p
->dev
->gen
>= ILO_GEN(7) && DIRTY(VIEWPORT
)) {
776 p
->state
.SF_CLIP_VIEWPORT
= gen7_emit_SF_CLIP_VIEWPORT(p
->dev
,
777 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
779 p
->state
.CC_VIEWPORT
= gen6_emit_CC_VIEWPORT(p
->dev
,
780 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
782 session
->viewport_state_changed
= true;
784 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
785 else if (DIRTY(VIEWPORT
)) {
786 p
->state
.CLIP_VIEWPORT
= gen6_emit_CLIP_VIEWPORT(p
->dev
,
787 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
789 p
->state
.SF_VIEWPORT
= gen6_emit_SF_VIEWPORT(p
->dev
,
790 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
792 p
->state
.CC_VIEWPORT
= gen6_emit_CC_VIEWPORT(p
->dev
,
793 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
795 session
->viewport_state_changed
= true;
800 gen6_pipeline_state_cc(struct ilo_3d_pipeline
*p
,
801 const struct ilo_context
*ilo
,
802 struct gen6_pipeline_session
*session
)
805 if (DIRTY(BLEND
) || DIRTY(FB
) || DIRTY(DSA
)) {
806 p
->state
.BLEND_STATE
= gen6_emit_BLEND_STATE(p
->dev
,
807 ilo
->blend
, &ilo
->fb
, ilo
->dsa
, p
->cp
);
809 session
->cc_state_blend_changed
= true;
812 /* COLOR_CALC_STATE */
813 if (DIRTY(DSA
) || DIRTY(STENCIL_REF
) || DIRTY(BLEND_COLOR
)) {
814 p
->state
.COLOR_CALC_STATE
=
815 gen6_emit_COLOR_CALC_STATE(p
->dev
, &ilo
->stencil_ref
,
816 ilo
->dsa
->alpha_ref
, &ilo
->blend_color
, p
->cp
);
818 session
->cc_state_cc_changed
= true;
821 /* DEPTH_STENCIL_STATE */
823 p
->state
.DEPTH_STENCIL_STATE
=
824 gen6_emit_DEPTH_STENCIL_STATE(p
->dev
, ilo
->dsa
, p
->cp
);
826 session
->cc_state_dsa_changed
= true;
831 gen6_pipeline_state_scissors(struct ilo_3d_pipeline
*p
,
832 const struct ilo_context
*ilo
,
833 struct gen6_pipeline_session
*session
)
836 if (DIRTY(SCISSOR
) || DIRTY(VIEWPORT
)) {
837 /* there should be as many scissors as there are viewports */
838 p
->state
.SCISSOR_RECT
= gen6_emit_SCISSOR_RECT(p
->dev
,
839 &ilo
->scissor
, ilo
->viewport
.count
, p
->cp
);
841 session
->scissor_state_changed
= true;
846 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline
*p
,
847 const struct ilo_context
*ilo
,
848 struct gen6_pipeline_session
*session
)
850 /* SURFACE_STATEs for render targets */
852 const struct ilo_fb_state
*fb
= &ilo
->fb
;
853 const int offset
= ILO_WM_DRAW_SURFACE(0);
854 uint32_t *surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
857 for (i
= 0; i
< fb
->state
.nr_cbufs
; i
++) {
858 const struct ilo_surface_cso
*surface
=
859 (const struct ilo_surface_cso
*) fb
->state
.cbufs
[i
];
861 assert(surface
&& surface
->is_rt
);
863 gen6_emit_SURFACE_STATE(p
->dev
, &surface
->u
.rt
, true, p
->cp
);
867 * Upload at least one render target, as
868 * brw_update_renderbuffer_surfaces() does. I don't know why.
871 struct ilo_view_surface null_surface
;
873 ilo_gpe_init_view_surface_null(p
->dev
,
874 fb
->state
.width
, fb
->state
.height
,
875 1, 0, &null_surface
);
878 gen6_emit_SURFACE_STATE(p
->dev
, &null_surface
, true, p
->cp
);
883 memset(&surface_state
[i
], 0, (ILO_MAX_DRAW_BUFFERS
- i
) * 4);
885 if (i
&& session
->num_surfaces
[PIPE_SHADER_FRAGMENT
] < offset
+ i
)
886 session
->num_surfaces
[PIPE_SHADER_FRAGMENT
] = offset
+ i
;
888 session
->binding_table_fs_changed
= true;
893 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline
*p
,
894 const struct ilo_context
*ilo
,
895 struct gen6_pipeline_session
*session
)
897 const struct ilo_so_state
*so
= &ilo
->so
;
899 if (p
->dev
->gen
!= ILO_GEN(6))
902 /* SURFACE_STATEs for stream output targets */
903 if (DIRTY(VS
) || DIRTY(GS
) || DIRTY(SO
)) {
904 const struct pipe_stream_output_info
*so_info
=
905 (ilo
->gs
) ? ilo_shader_get_kernel_so_info(ilo
->gs
) :
906 (ilo
->vs
) ? ilo_shader_get_kernel_so_info(ilo
->vs
) : NULL
;
907 const int offset
= ILO_GS_SO_SURFACE(0);
908 uint32_t *surface_state
= &p
->state
.gs
.SURFACE_STATE
[offset
];
911 for (i
= 0; so_info
&& i
< so_info
->num_outputs
; i
++) {
912 const int target
= so_info
->output
[i
].output_buffer
;
913 const struct pipe_stream_output_target
*so_target
=
914 (target
< so
->count
) ? so
->states
[target
] : NULL
;
917 surface_state
[i
] = gen6_emit_so_SURFACE_STATE(p
->dev
,
918 so_target
, so_info
, i
, p
->cp
);
921 surface_state
[i
] = 0;
925 memset(&surface_state
[i
], 0, (ILO_MAX_SO_BINDINGS
- i
) * 4);
927 if (i
&& session
->num_surfaces
[PIPE_SHADER_GEOMETRY
] < offset
+ i
)
928 session
->num_surfaces
[PIPE_SHADER_GEOMETRY
] = offset
+ i
;
930 session
->binding_table_gs_changed
= true;
935 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline
*p
,
936 const struct ilo_context
*ilo
,
938 struct gen6_pipeline_session
*session
)
940 const struct ilo_view_state
*view
= &ilo
->view
[shader_type
];
941 uint32_t *surface_state
;
945 /* SURFACE_STATEs for sampler views */
946 switch (shader_type
) {
947 case PIPE_SHADER_VERTEX
:
948 if (DIRTY(VIEW_VS
)) {
949 offset
= ILO_VS_TEXTURE_SURFACE(0);
950 surface_state
= &p
->state
.vs
.SURFACE_STATE
[offset
];
952 session
->binding_table_vs_changed
= true;
958 case PIPE_SHADER_FRAGMENT
:
959 if (DIRTY(VIEW_FS
)) {
960 offset
= ILO_WM_TEXTURE_SURFACE(0);
961 surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
963 session
->binding_table_fs_changed
= true;
977 for (i
= 0; i
< view
->count
; i
++) {
978 if (view
->states
[i
]) {
979 const struct ilo_view_cso
*cso
=
980 (const struct ilo_view_cso
*) view
->states
[i
];
983 gen6_emit_SURFACE_STATE(p
->dev
, &cso
->surface
, false, p
->cp
);
986 surface_state
[i
] = 0;
990 memset(&surface_state
[i
], 0, (ILO_MAX_SAMPLER_VIEWS
- i
) * 4);
992 if (i
&& session
->num_surfaces
[shader_type
] < offset
+ i
)
993 session
->num_surfaces
[shader_type
] = offset
+ i
;
997 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline
*p
,
998 const struct ilo_context
*ilo
,
1000 struct gen6_pipeline_session
*session
)
1002 const struct ilo_cbuf_state
*cbuf
= &ilo
->cbuf
[shader_type
];
1003 uint32_t *surface_state
;
1004 int offset
, count
, i
;
1007 /* SURFACE_STATEs for constant buffers */
1008 switch (shader_type
) {
1009 case PIPE_SHADER_VERTEX
:
1011 offset
= ILO_VS_CONST_SURFACE(0);
1012 surface_state
= &p
->state
.vs
.SURFACE_STATE
[offset
];
1014 session
->binding_table_vs_changed
= true;
1020 case PIPE_SHADER_FRAGMENT
:
1022 offset
= ILO_WM_CONST_SURFACE(0);
1023 surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
1025 session
->binding_table_fs_changed
= true;
1039 count
= util_last_bit(cbuf
->enabled_mask
);
1040 for (i
= 0; i
< count
; i
++) {
1041 if (cbuf
->cso
[i
].resource
) {
1042 surface_state
[i
] = gen6_emit_SURFACE_STATE(p
->dev
,
1043 &cbuf
->cso
[i
].surface
, false, p
->cp
);
1046 surface_state
[i
] = 0;
1050 memset(&surface_state
[count
], 0, (ILO_MAX_CONST_BUFFERS
- count
) * 4);
1052 if (count
&& session
->num_surfaces
[shader_type
] < offset
+ count
)
1053 session
->num_surfaces
[shader_type
] = offset
+ count
;
1057 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline
*p
,
1058 const struct ilo_context
*ilo
,
1060 struct gen6_pipeline_session
*session
)
1062 uint32_t *binding_table_state
, *surface_state
;
1063 int *binding_table_state_size
, size
;
1066 /* BINDING_TABLE_STATE */
1067 switch (shader_type
) {
1068 case PIPE_SHADER_VERTEX
:
1069 surface_state
= p
->state
.vs
.SURFACE_STATE
;
1070 binding_table_state
= &p
->state
.vs
.BINDING_TABLE_STATE
;
1071 binding_table_state_size
= &p
->state
.vs
.BINDING_TABLE_STATE_size
;
1073 skip
= !session
->binding_table_vs_changed
;
1075 case PIPE_SHADER_GEOMETRY
:
1076 surface_state
= p
->state
.gs
.SURFACE_STATE
;
1077 binding_table_state
= &p
->state
.gs
.BINDING_TABLE_STATE
;
1078 binding_table_state_size
= &p
->state
.gs
.BINDING_TABLE_STATE_size
;
1080 skip
= !session
->binding_table_gs_changed
;
1082 case PIPE_SHADER_FRAGMENT
:
1083 surface_state
= p
->state
.wm
.SURFACE_STATE
;
1084 binding_table_state
= &p
->state
.wm
.BINDING_TABLE_STATE
;
1085 binding_table_state_size
= &p
->state
.wm
.BINDING_TABLE_STATE_size
;
1087 skip
= !session
->binding_table_fs_changed
;
1098 * If we have seemingly less SURFACE_STATEs than before, it could be that
1099 * we did not touch those reside at the tail in this upload. Loop over
1100 * them to figure out the real number of SURFACE_STATEs.
1102 for (size
= *binding_table_state_size
;
1103 size
> session
->num_surfaces
[shader_type
]; size
--) {
1104 if (surface_state
[size
- 1])
1107 if (size
< session
->num_surfaces
[shader_type
])
1108 size
= session
->num_surfaces
[shader_type
];
1110 *binding_table_state
= gen6_emit_BINDING_TABLE_STATE(p
->dev
,
1111 surface_state
, size
, p
->cp
);
1112 *binding_table_state_size
= size
;
/*
 * Emit SAMPLER_STATE, and SAMPLER_BORDER_COLOR_STATEs when needed, for the
 * given shader stage.
 *
 * NOTE(review): several lines are missing from this excerpt — the
 * shader_type parameter (callers pass four arguments), break statements,
 * and the default case.  Verify against the full file.
 */
1116 gen6_pipeline_state_samplers(struct ilo_3d_pipeline
*p
,
1117 const struct ilo_context
*ilo
,
1119 struct gen6_pipeline_session
*session
)
/* sampler CSOs and sampler views bound to this shader stage */
1121 const struct ilo_sampler_cso
* const *samplers
=
1122 ilo
->sampler
[shader_type
].cso
;
1123 const struct pipe_sampler_view
* const *views
=
1124 (const struct pipe_sampler_view
**) ilo
->view
[shader_type
].states
;
1125 const int num_samplers
= ilo
->sampler
[shader_type
].count
;
1126 const int num_views
= ilo
->view
[shader_type
].count
;
1127 uint32_t *sampler_state
, *border_color_state
;
1128 bool emit_border_color
= false;
1131 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1132 switch (shader_type
) {
/* VS: re-emit when either the samplers or the sampler views changed */
1133 case PIPE_SHADER_VERTEX
:
1134 if (DIRTY(SAMPLER_VS
) || DIRTY(VIEW_VS
)) {
1135 sampler_state
= &p
->state
.vs
.SAMPLER_STATE
;
1136 border_color_state
= p
->state
.vs
.SAMPLER_BORDER_COLOR_STATE
;
/* border colors need re-emitting only when the samplers changed */
1138 if (DIRTY(SAMPLER_VS
))
1139 emit_border_color
= true;
1141 session
->sampler_state_vs_changed
= true;
/* FS: same logic as the VS case, using the wm state slots */
1147 case PIPE_SHADER_FRAGMENT
:
1148 if (DIRTY(SAMPLER_FS
) || DIRTY(VIEW_FS
)) {
1149 sampler_state
= &p
->state
.wm
.SAMPLER_STATE
;
1150 border_color_state
= p
->state
.wm
.SAMPLER_BORDER_COLOR_STATE
;
1152 if (DIRTY(SAMPLER_FS
))
1153 emit_border_color
= true;
1155 session
->sampler_state_fs_changed
= true;
/* one SAMPLER_BORDER_COLOR_STATE per sampler; 0 for absent samplers */
1169 if (emit_border_color
) {
1172 for (i
= 0; i
< num_samplers
; i
++) {
1173 border_color_state
[i
] = (samplers
[i
]) ?
1174 gen6_emit_SAMPLER_BORDER_COLOR_STATE(p
->dev
,
1175 samplers
[i
], p
->cp
) : 0;
1179 /* should we take the minimum of num_samplers and num_views? */
1180 *sampler_state
= gen6_emit_SAMPLER_STATE(p
->dev
,
1183 MIN2(num_samplers
, num_views
), p
->cp
);
/*
 * Set up the push constant buffer for the VS.  On GEN6 only the clip state
 * (user clip planes) is pushed.
 *
 * NOTE(review): the declaration of the local `pcb` pointer and the
 * if/else braces are not visible in this excerpt; verify against the full
 * file.
 */
1187 gen6_pipeline_state_pcb(struct ilo_3d_pipeline
*p
,
1188 const struct ilo_context
*ilo
,
1189 struct gen6_pipeline_session
*session
)
1191 /* push constant buffer for VS */
1192 if (DIRTY(VS
) || DIRTY(CLIP
)) {
/* size of the UCP block in the VS kernel, or 0 when there is no VS */
1193 const int clip_state_size
= (ilo
->vs
) ?
1194 ilo_shader_get_kernel_param(ilo
->vs
,
1195 ILO_KERNEL_VS_PCB_UCP_SIZE
) : 0;
1197 if (clip_state_size
) {
/* allocate the buffer and copy the clip state into it */
1200 p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
= clip_state_size
;
1201 p
->state
.vs
.PUSH_CONSTANT_BUFFER
=
1202 gen6_emit_push_constant_buffer(p
->dev
,
1203 p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
, &pcb
, p
->cp
);
1205 memcpy(pcb
, &ilo
->clip
, clip_state_size
);
/* no clip state: mark the push constant buffer as empty */
1208 p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
= 0;
1209 p
->state
.vs
.PUSH_CONSTANT_BUFFER
= 0;
1212 session
->pcb_state_vs_changed
= true;
/**
 * Emit all hardware commands for a draw call.
 *
 * The commands are emitted in roughly the same order as the classic i965
 * driver so that the two command streams can be compared against each other.
 */
static void
gen6_pipeline_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* invariant / common state first */
   gen6_pipeline_common_select(p, ilo, session);
   gen6_pipeline_gs_svbi(p, ilo, session);
   gen6_pipeline_common_sip(p, ilo, session);
   gen6_pipeline_vf_statistics(p, ilo, session);
   gen6_pipeline_common_base_address(p, ilo, session);
   gen6_pipeline_common_pointers_1(p, ilo, session);
   gen6_pipeline_common_urb(p, ilo, session);
   gen6_pipeline_common_pointers_2(p, ilo, session);
   gen6_pipeline_wm_multisample(p, ilo, session);

   /* the 3D pipeline stages, front to back */
   gen6_pipeline_vs(p, ilo, session);
   gen6_pipeline_gs(p, ilo, session);
   gen6_pipeline_clip(p, ilo, session);
   gen6_pipeline_sf(p, ilo, session);
   gen6_pipeline_wm(p, ilo, session);
   gen6_pipeline_common_pointers_3(p, ilo, session);
   gen6_pipeline_wm_depth(p, ilo, session);
   gen6_pipeline_wm_raster(p, ilo, session);
   gen6_pipeline_sf_rect(p, ilo, session);

   /* vertex fetch and the draw itself */
   gen6_pipeline_vf(p, ilo, session);
   gen6_pipeline_vf_draw(p, ilo, session);
}
1251 gen6_pipeline_states(struct ilo_3d_pipeline
*p
,
1252 const struct ilo_context
*ilo
,
1253 struct gen6_pipeline_session
*session
)
1257 gen6_pipeline_state_viewports(p
, ilo
, session
);
1258 gen6_pipeline_state_cc(p
, ilo
, session
);
1259 gen6_pipeline_state_scissors(p
, ilo
, session
);
1260 gen6_pipeline_state_pcb(p
, ilo
, session
);
1263 * upload all SURAFCE_STATEs together so that we know there are minimal
1266 gen6_pipeline_state_surfaces_rt(p
, ilo
, session
);
1267 gen6_pipeline_state_surfaces_so(p
, ilo
, session
);
1268 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
1269 gen6_pipeline_state_surfaces_view(p
, ilo
, shader_type
, session
);
1270 gen6_pipeline_state_surfaces_const(p
, ilo
, shader_type
, session
);
1273 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
1274 gen6_pipeline_state_samplers(p
, ilo
, shader_type
, session
);
1275 /* this must be called after all SURFACE_STATEs are uploaded */
1276 gen6_pipeline_state_binding_tables(p
, ilo
, shader_type
, session
);
1281 gen6_pipeline_prepare(const struct ilo_3d_pipeline
*p
,
1282 const struct ilo_context
*ilo
,
1283 struct gen6_pipeline_session
*session
)
1285 memset(session
, 0, sizeof(*session
));
1286 session
->pipe_dirty
= ilo
->dirty
;
1287 session
->reduced_prim
= u_reduced_prim(ilo
->draw
->mode
);
1289 /* available space before the session */
1290 session
->init_cp_space
= ilo_cp_space(p
->cp
);
1292 session
->hw_ctx_changed
=
1293 (p
->invalidate_flags
& ILO_3D_PIPELINE_INVALIDATE_HW
);
1295 if (session
->hw_ctx_changed
) {
1296 /* these should be enough to make everything uploaded */
1297 session
->batch_bo_changed
= true;
1298 session
->state_bo_changed
= true;
1299 session
->kernel_bo_changed
= true;
1300 session
->prim_changed
= true;
1301 session
->primitive_restart_changed
= true;
1305 * Any state that involves resources needs to be re-emitted when the
1306 * batch bo changed. This is because we do not pin the resources and
1307 * their offsets (or existence) may change between batch buffers.
1309 * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
1310 * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
1311 * a temporary workaround.
1313 session
->batch_bo_changed
=
1314 (p
->invalidate_flags
& ILO_3D_PIPELINE_INVALIDATE_STATE_BO
);
1316 session
->state_bo_changed
=
1317 (p
->invalidate_flags
& ILO_3D_PIPELINE_INVALIDATE_STATE_BO
);
1318 session
->kernel_bo_changed
=
1319 (p
->invalidate_flags
& ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO
);
1320 session
->prim_changed
= (p
->state
.reduced_prim
!= session
->reduced_prim
);
1321 session
->primitive_restart_changed
=
1322 (p
->state
.primitive_restart
!= ilo
->draw
->primitive_restart
);
1327 gen6_pipeline_draw(struct ilo_3d_pipeline
*p
,
1328 const struct ilo_context
*ilo
,
1329 struct gen6_pipeline_session
*session
)
1331 /* force all states to be uploaded if the state bo changed */
1332 if (session
->state_bo_changed
)
1333 session
->pipe_dirty
= ILO_DIRTY_ALL
;
1335 session
->pipe_dirty
= ilo
->dirty
;
1337 session
->emit_draw_states(p
, ilo
, session
);
1339 /* force all commands to be uploaded if the HW context changed */
1340 if (session
->hw_ctx_changed
)
1341 session
->pipe_dirty
= ILO_DIRTY_ALL
;
1343 session
->pipe_dirty
= ilo
->dirty
;
1345 session
->emit_draw_commands(p
, ilo
, session
);
1349 gen6_pipeline_end(struct ilo_3d_pipeline
*p
,
1350 const struct ilo_context
*ilo
,
1351 struct gen6_pipeline_session
*session
)
1353 /* sanity check size estimation */
1354 assert(session
->init_cp_space
- ilo_cp_space(p
->cp
) <=
1355 ilo_3d_pipeline_estimate_size(p
, ILO_3D_PIPELINE_DRAW
, ilo
));
1357 p
->state
.reduced_prim
= session
->reduced_prim
;
1358 p
->state
.primitive_restart
= ilo
->draw
->primitive_restart
;
1362 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline
*p
,
1363 const struct ilo_context
*ilo
)
1365 struct gen6_pipeline_session session
;
1367 gen6_pipeline_prepare(p
, ilo
, &session
);
1369 session
.emit_draw_states
= gen6_pipeline_states
;
1370 session
.emit_draw_commands
= gen6_pipeline_commands
;
1372 gen6_pipeline_draw(p
, ilo
, &session
);
1373 gen6_pipeline_end(p
, ilo
, &session
);
1377 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline
*p
)
1379 if (p
->dev
->gen
== ILO_GEN(6))
1380 gen6_wa_pipe_control_post_sync(p
, false);
1382 gen6_emit_PIPE_CONTROL(p
->dev
,
1383 PIPE_CONTROL_INSTRUCTION_FLUSH
|
1384 PIPE_CONTROL_WRITE_FLUSH
|
1385 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
1386 PIPE_CONTROL_VF_CACHE_INVALIDATE
|
1387 PIPE_CONTROL_TC_FLUSH
|
1388 PIPE_CONTROL_NO_WRITE
|
1389 PIPE_CONTROL_CS_STALL
,
1390 0, 0, false, p
->cp
);
1394 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline
*p
,
1395 struct intel_bo
*bo
, int index
)
1397 if (p
->dev
->gen
== ILO_GEN(6))
1398 gen6_wa_pipe_control_post_sync(p
, true);
1400 gen6_emit_PIPE_CONTROL(p
->dev
,
1401 PIPE_CONTROL_WRITE_TIMESTAMP
,
1402 bo
, index
* sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE
,
1407 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline
*p
,
1408 struct intel_bo
*bo
, int index
)
1410 if (p
->dev
->gen
== ILO_GEN(6))
1411 gen6_wa_pipe_control_post_sync(p
, false);
1413 gen6_emit_PIPE_CONTROL(p
->dev
,
1414 PIPE_CONTROL_DEPTH_STALL
|
1415 PIPE_CONTROL_WRITE_DEPTH_COUNT
,
1416 bo
, index
* sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE
,
/*
 * Estimate the size, in bytes, of the hardware commands a draw may emit.
 *
 * NOTE(review): the per-command `count` assignments, the size declaration,
 * and the result caching are not visible in this excerpt; the case comments
 * below are reproduced from the original.
 */
1421 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline
*p
,
1422 const struct ilo_context
*ilo
)
1425 enum ilo_gpe_gen6_command cmd
;
/* accumulate an estimate for every GEN6 command type */
1430 for (cmd
= 0; cmd
< ILO_GPE_GEN6_COMMAND_COUNT
; cmd
++) {
1434 case ILO_GPE_GEN6_PIPE_CONTROL
:
1435 /* for the workaround */
1437 /* another one after 3DSTATE_URB */
1439 /* and another one after 3DSTATE_CONSTANT_VS */
1442 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX
:
1443 /* there are 4 SVBIs */
1446 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS
:
1449 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS
:
1452 case ILO_GPE_GEN6_MEDIA_VFE_STATE
:
1453 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD
:
1454 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD
:
1455 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE
:
1456 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH
:
1457 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER
:
1458 /* media commands */
/* add the estimated size of `count` instances of this command */
1467 size
+= ilo_gpe_gen6_estimate_command_size(p
->dev
, cmd
, count
);
/*
 * Estimate the size, in bytes, of the dynamic states a draw may upload.
 *
 * NOTE(review): the struct declaration head, the static-size caching test,
 * and closing braces are not visible in this excerpt.
 */
1474 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline
*p
,
1475 const struct ilo_context
*ilo
)
1477 static int static_size
;
1478 int shader_type
, count
, size
;
/* table of states whose counts do not depend on the context */
1482 enum ilo_gpe_gen6_state state
;
1484 } static_states
[] = {
1486 { ILO_GPE_GEN6_SF_VIEWPORT
, 1 },
1487 { ILO_GPE_GEN6_CLIP_VIEWPORT
, 1 },
1488 { ILO_GPE_GEN6_CC_VIEWPORT
, 1 },
1490 { ILO_GPE_GEN6_COLOR_CALC_STATE
, 1 },
1491 { ILO_GPE_GEN6_BLEND_STATE
, ILO_MAX_DRAW_BUFFERS
},
1492 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE
, 1 },
1494 { ILO_GPE_GEN6_SCISSOR_RECT
, 1 },
1495 /* binding table (vs, gs, fs) */
1496 { ILO_GPE_GEN6_BINDING_TABLE_STATE
, ILO_MAX_VS_SURFACES
},
1497 { ILO_GPE_GEN6_BINDING_TABLE_STATE
, ILO_MAX_GS_SURFACES
},
1498 { ILO_GPE_GEN6_BINDING_TABLE_STATE
, ILO_MAX_WM_SURFACES
},
/* sum up the size of the fixed-count states */
1502 for (i
= 0; i
< Elements(static_states
); i
++) {
1503 static_size
+= ilo_gpe_gen6_estimate_state_size(p
->dev
,
1504 static_states
[i
].state
,
1505 static_states
[i
].count
);
/* count context-dependent SURFACE_STATEs */
1512 * render targets (fs)
1513 * stream outputs (gs)
1514 * sampler views (vs, fs)
1515 * constant buffers (vs, fs)
1517 count
= ilo
->fb
.state
.nr_cbufs
;
/* stream output surfaces come from the GS when present, else the VS */
1520 const struct pipe_stream_output_info
*so_info
=
1521 ilo_shader_get_kernel_so_info(ilo
->gs
);
1523 count
+= so_info
->num_outputs
;
1526 const struct pipe_stream_output_info
*so_info
=
1527 ilo_shader_get_kernel_so_info(ilo
->vs
);
1529 count
+= so_info
->num_outputs
;
/* sampler views and enabled constant buffers of every stage */
1532 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
1533 count
+= ilo
->view
[shader_type
].count
;
1534 count
+= util_bitcount(ilo
->cbuf
[shader_type
].enabled_mask
);
1538 size
+= ilo_gpe_gen6_estimate_state_size(p
->dev
,
1539 ILO_GPE_GEN6_SURFACE_STATE
, count
);
1542 /* samplers (vs, fs) */
1543 for (shader_type
= 0; shader_type
< PIPE_SHADER_TYPES
; shader_type
++) {
1544 count
= ilo
->sampler
[shader_type
].count
;
1546 size
+= ilo_gpe_gen6_estimate_state_size(p
->dev
,
1547 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE
, count
);
1548 size
+= ilo_gpe_gen6_estimate_state_size(p
->dev
,
1549 ILO_GPE_GEN6_SAMPLER_STATE
, count
);
/* the VS push constant buffer holds the user clip planes, when any */
1555 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_VS_PCB_UCP_SIZE
)) {
1556 const int pcb_size
=
1557 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_VS_PCB_UCP_SIZE
);
1559 size
+= ilo_gpe_gen6_estimate_state_size(p
->dev
,
1560 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER
, pcb_size
);
/*
 * Return a size estimate, in bytes, for the given pipeline action (draw,
 * flush, or query write).
 *
 * NOTE(review): the `arg` parameter declaration, break statements, the
 * default label, and the final return are not visible in this excerpt.
 */
1567 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline
*p
,
1568 enum ilo_3d_pipeline_action action
,
/* a draw: sum the command and state estimates; arg is the context */
1574 case ILO_3D_PIPELINE_DRAW
:
1576 const struct ilo_context
*ilo
= arg
;
1578 size
= gen6_pipeline_estimate_commands(p
, ilo
) +
1579 gen6_pipeline_estimate_states(p
, ilo
);
/* a flush is at most three PIPE_CONTROLs (including the workaround) */
1582 case ILO_3D_PIPELINE_FLUSH
:
1583 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1584 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 3;
/* a timestamp write is at most two PIPE_CONTROLs */
1586 case ILO_3D_PIPELINE_WRITE_TIMESTAMP
:
1587 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1588 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 2;
/* a depth count write is at most three PIPE_CONTROLs */
1590 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT
:
1591 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1592 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 3;
1595 assert(!"unknown 3D pipeline action");
/*
 * Hook up the GEN6 implementations of the 3D pipeline entry points.
 */
1604 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline
*p
)
1606 p
->estimate_size
= ilo_3d_pipeline_estimate_size_gen6
;
1607 p
->emit_draw
= ilo_3d_pipeline_emit_draw_gen6
;
1608 p
->emit_flush
= ilo_3d_pipeline_emit_flush_gen6
;
1609 p
->emit_write_timestamp
= ilo_3d_pipeline_emit_write_timestamp_gen6
;
1610 p
->emit_write_depth_count
= ilo_3d_pipeline_emit_write_depth_count_gen6
;