/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
33 #include "ilo_context.h"
35 #include "ilo_gpe_gen6.h"
36 #include "ilo_gpe_gen7.h"
37 #include "ilo_shader.h"
38 #include "ilo_state.h"
39 #include "ilo_3d_pipeline.h"
40 #include "ilo_3d_pipeline_gen6.h"
43 * This should be called before any depth stall flush (including those
44 * produced by non-pipelined state commands) or cache flush on GEN6.
46 * \see intel_emit_post_sync_nonzero_flush()
49 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline
*p
,
50 bool caller_post_sync
)
52 assert(p
->dev
->gen
== ILO_GEN(6));
55 if (p
->state
.has_gen6_wa_pipe_control
)
58 p
->state
.has_gen6_wa_pipe_control
= true;
61 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
63 * "Pipe-control with CS-stall bit set must be sent BEFORE the
64 * pipe-control with a post-sync op and no write-cache flushes."
66 * The workaround below necessitates this workaround.
68 gen6_emit_PIPE_CONTROL(p
->dev
,
69 PIPE_CONTROL_CS_STALL
|
70 PIPE_CONTROL_STALL_AT_SCOREBOARD
,
71 NULL
, 0, false, p
->cp
);
73 /* the caller will emit the post-sync op */
78 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
80 * "Before any depth stall flush (including those produced by
81 * non-pipelined state commands), software needs to first send a
82 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
84 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
85 * PIPE_CONTROL with any non-zero post-sync-op is required."
87 gen6_emit_PIPE_CONTROL(p
->dev
,
88 PIPE_CONTROL_WRITE_IMMEDIATE
,
89 p
->workaround_bo
, 0, false, p
->cp
);
93 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline
*p
)
95 assert(p
->dev
->gen
== ILO_GEN(6));
97 gen6_wa_pipe_control_post_sync(p
, false);
100 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
102 * "Driver must guarentee that all the caches in the depth pipe are
103 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
104 * requires driver to send a PIPE_CONTROL with a CS stall along with a
105 * Depth Flush prior to this command."
107 gen6_emit_PIPE_CONTROL(p
->dev
,
108 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
109 PIPE_CONTROL_CS_STALL
,
114 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline
*p
)
116 assert(p
->dev
->gen
== ILO_GEN(6));
118 gen6_wa_pipe_control_post_sync(p
, false);
121 * According to intel_emit_depth_stall_flushes() of classic i965, we need
122 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
125 gen6_emit_PIPE_CONTROL(p
->dev
,
126 PIPE_CONTROL_DEPTH_STALL
,
127 NULL
, 0, false, p
->cp
);
129 gen6_emit_PIPE_CONTROL(p
->dev
,
130 PIPE_CONTROL_DEPTH_CACHE_FLUSH
,
131 NULL
, 0, false, p
->cp
);
133 gen6_emit_PIPE_CONTROL(p
->dev
,
134 PIPE_CONTROL_DEPTH_STALL
,
135 NULL
, 0, false, p
->cp
);
139 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline
*p
)
141 assert(p
->dev
->gen
== ILO_GEN(6));
143 /* the post-sync workaround should cover this already */
144 if (p
->state
.has_gen6_wa_pipe_control
)
148 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
150 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
151 * field set (DW1 Bit 1), must be issued prior to any change to the
152 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
154 gen6_emit_PIPE_CONTROL(p
->dev
,
155 PIPE_CONTROL_STALL_AT_SCOREBOARD
,
156 NULL
, 0, false, p
->cp
);
161 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline
*p
)
163 assert(p
->dev
->gen
== ILO_GEN(6));
165 gen6_wa_pipe_control_post_sync(p
, false);
168 * According to upload_vs_state() of classic i965, we need to emit
169 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
170 * buffered by VS FF, to the point that the FF dies.
172 gen6_emit_PIPE_CONTROL(p
->dev
,
173 PIPE_CONTROL_DEPTH_STALL
|
174 PIPE_CONTROL_INSTRUCTION_FLUSH
|
175 PIPE_CONTROL_STATE_CACHE_INVALIDATE
,
176 NULL
, 0, false, p
->cp
);
/* test whether a pipeline state bit is flagged dirty in this session */
#define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
182 gen6_pipeline_common_select(struct ilo_3d_pipeline
*p
,
183 const struct ilo_context
*ilo
,
184 struct gen6_pipeline_session
*session
)
186 /* PIPELINE_SELECT */
187 if (session
->hw_ctx_changed
) {
188 if (p
->dev
->gen
== ILO_GEN(6))
189 gen6_wa_pipe_control_post_sync(p
, false);
191 gen6_emit_PIPELINE_SELECT(p
->dev
, 0x0, p
->cp
);
196 gen6_pipeline_common_sip(struct ilo_3d_pipeline
*p
,
197 const struct ilo_context
*ilo
,
198 struct gen6_pipeline_session
*session
)
201 if (session
->hw_ctx_changed
) {
202 if (p
->dev
->gen
== ILO_GEN(6))
203 gen6_wa_pipe_control_post_sync(p
, false);
205 gen6_emit_STATE_SIP(p
->dev
, 0, p
->cp
);
210 gen6_pipeline_common_base_address(struct ilo_3d_pipeline
*p
,
211 const struct ilo_context
*ilo
,
212 struct gen6_pipeline_session
*session
)
214 /* STATE_BASE_ADDRESS */
215 if (session
->state_bo_changed
|| session
->kernel_bo_changed
||
216 session
->batch_bo_changed
) {
217 if (p
->dev
->gen
== ILO_GEN(6))
218 gen6_wa_pipe_control_post_sync(p
, false);
220 gen6_emit_STATE_BASE_ADDRESS(p
->dev
,
221 NULL
, p
->cp
->bo
, p
->cp
->bo
, NULL
, ilo
->hw3d
->kernel
.bo
,
225 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
227 * "The following commands must be reissued following any change to
228 * the base addresses:
230 * * 3DSTATE_BINDING_TABLE_POINTERS
231 * * 3DSTATE_SAMPLER_STATE_POINTERS
232 * * 3DSTATE_VIEWPORT_STATE_POINTERS
233 * * 3DSTATE_CC_POINTERS
234 * * MEDIA_STATE_POINTERS"
236 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
237 * reasonable to also reissue the command. Same to PCB.
239 session
->viewport_state_changed
= true;
241 session
->cc_state_blend_changed
= true;
242 session
->cc_state_dsa_changed
= true;
243 session
->cc_state_cc_changed
= true;
245 session
->scissor_state_changed
= true;
247 session
->binding_table_vs_changed
= true;
248 session
->binding_table_gs_changed
= true;
249 session
->binding_table_fs_changed
= true;
251 session
->sampler_state_vs_changed
= true;
252 session
->sampler_state_gs_changed
= true;
253 session
->sampler_state_fs_changed
= true;
255 session
->pcb_state_vs_changed
= true;
256 session
->pcb_state_gs_changed
= true;
257 session
->pcb_state_fs_changed
= true;
262 gen6_pipeline_common_urb(struct ilo_3d_pipeline
*p
,
263 const struct ilo_context
*ilo
,
264 struct gen6_pipeline_session
*session
)
267 if (DIRTY(VE
) || DIRTY(VS
) || DIRTY(GS
)) {
268 const bool gs_active
= (ilo
->gs
|| (ilo
->vs
&&
269 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_VS_GEN6_SO
)));
270 int vs_entry_size
, gs_entry_size
;
271 int vs_total_size
, gs_total_size
;
273 vs_entry_size
= (ilo
->vs
) ?
274 ilo_shader_get_kernel_param(ilo
->vs
, ILO_KERNEL_OUTPUT_COUNT
) : 0;
277 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
278 * share VUE handles. The VUE allocation size must be large enough to
279 * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
281 * I am not sure if the PRM explicitly states that VF and VS share VUE
282 * handles. But here is a citation that implies so:
284 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
286 * "Once a FF stage that spawn threads has sufficient input to
287 * initiate a thread, it must guarantee that it is safe to request
288 * the thread initiation. For all these FF stages, this check is
291 * - The availability of output URB entries:
292 * - VS: As the input URB entries are overwritten with the
293 * VS-generated output data, output URB availability isn't a
296 if (vs_entry_size
< ilo
->ve
->count
)
297 vs_entry_size
= ilo
->ve
->count
;
299 gs_entry_size
= (ilo
->gs
) ?
300 ilo_shader_get_kernel_param(ilo
->gs
, ILO_KERNEL_OUTPUT_COUNT
) :
301 (gs_active
) ? vs_entry_size
: 0;
304 vs_entry_size
*= sizeof(float) * 4;
305 gs_entry_size
*= sizeof(float) * 4;
306 vs_total_size
= ilo
->dev
->urb_size
;
310 gs_total_size
= vs_total_size
;
316 gen6_emit_3DSTATE_URB(p
->dev
, vs_total_size
, gs_total_size
,
317 vs_entry_size
, gs_entry_size
, p
->cp
);
320 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
322 * "Because of a urb corruption caused by allocating a previous
323 * gsunit's urb entry to vsunit software is required to send a
324 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
325 * size == 0) plus a dummy DRAW call before any case where VS will
326 * be taking over GS URB space."
328 if (p
->state
.gs
.active
&& !gs_active
)
329 ilo_3d_pipeline_emit_flush_gen6(p
);
331 p
->state
.gs
.active
= gs_active
;
336 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline
*p
,
337 const struct ilo_context
*ilo
,
338 struct gen6_pipeline_session
*session
)
340 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
341 if (session
->viewport_state_changed
) {
342 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(p
->dev
,
343 p
->state
.CLIP_VIEWPORT
,
344 p
->state
.SF_VIEWPORT
,
345 p
->state
.CC_VIEWPORT
, p
->cp
);
350 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline
*p
,
351 const struct ilo_context
*ilo
,
352 struct gen6_pipeline_session
*session
)
354 /* 3DSTATE_CC_STATE_POINTERS */
355 if (session
->cc_state_blend_changed
||
356 session
->cc_state_dsa_changed
||
357 session
->cc_state_cc_changed
) {
358 gen6_emit_3DSTATE_CC_STATE_POINTERS(p
->dev
,
359 p
->state
.BLEND_STATE
,
360 p
->state
.DEPTH_STENCIL_STATE
,
361 p
->state
.COLOR_CALC_STATE
, p
->cp
);
364 /* 3DSTATE_SAMPLER_STATE_POINTERS */
365 if (session
->sampler_state_vs_changed
||
366 session
->sampler_state_gs_changed
||
367 session
->sampler_state_fs_changed
) {
368 gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(p
->dev
,
369 p
->state
.vs
.SAMPLER_STATE
,
371 p
->state
.wm
.SAMPLER_STATE
, p
->cp
);
376 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline
*p
,
377 const struct ilo_context
*ilo
,
378 struct gen6_pipeline_session
*session
)
380 /* 3DSTATE_SCISSOR_STATE_POINTERS */
381 if (session
->scissor_state_changed
) {
382 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(p
->dev
,
383 p
->state
.SCISSOR_RECT
, p
->cp
);
386 /* 3DSTATE_BINDING_TABLE_POINTERS */
387 if (session
->binding_table_vs_changed
||
388 session
->binding_table_gs_changed
||
389 session
->binding_table_fs_changed
) {
390 gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(p
->dev
,
391 p
->state
.vs
.BINDING_TABLE_STATE
,
392 p
->state
.gs
.BINDING_TABLE_STATE
,
393 p
->state
.wm
.BINDING_TABLE_STATE
, p
->cp
);
398 gen6_pipeline_vf(struct ilo_3d_pipeline
*p
,
399 const struct ilo_context
*ilo
,
400 struct gen6_pipeline_session
*session
)
402 /* 3DSTATE_INDEX_BUFFER */
403 if (DIRTY(IB
) || session
->primitive_restart_changed
||
404 session
->batch_bo_changed
) {
405 gen6_emit_3DSTATE_INDEX_BUFFER(p
->dev
,
406 &ilo
->ib
, ilo
->draw
->primitive_restart
, p
->cp
);
409 /* 3DSTATE_VERTEX_BUFFERS */
410 if (DIRTY(VB
) || DIRTY(VE
) || session
->batch_bo_changed
)
411 gen6_emit_3DSTATE_VERTEX_BUFFERS(p
->dev
, ilo
->ve
, &ilo
->vb
, p
->cp
);
413 /* 3DSTATE_VERTEX_ELEMENTS */
414 if (DIRTY(VE
) || DIRTY(VS
)) {
415 const struct ilo_ve_state
*ve
= ilo
->ve
;
416 bool last_velement_edgeflag
= false;
417 bool prepend_generate_ids
= false;
420 if (ilo_shader_get_kernel_param(ilo
->vs
,
421 ILO_KERNEL_VS_INPUT_EDGEFLAG
)) {
422 /* we rely on the state tracker here */
423 assert(ilo_shader_get_kernel_param(ilo
->vs
,
424 ILO_KERNEL_INPUT_COUNT
) == ve
->count
);
426 last_velement_edgeflag
= true;
429 if (ilo_shader_get_kernel_param(ilo
->vs
,
430 ILO_KERNEL_VS_INPUT_INSTANCEID
) ||
431 ilo_shader_get_kernel_param(ilo
->vs
,
432 ILO_KERNEL_VS_INPUT_VERTEXID
))
433 prepend_generate_ids
= true;
436 gen6_emit_3DSTATE_VERTEX_ELEMENTS(p
->dev
, ve
,
437 last_velement_edgeflag
, prepend_generate_ids
, p
->cp
);
442 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline
*p
,
443 const struct ilo_context
*ilo
,
444 struct gen6_pipeline_session
*session
)
446 /* 3DSTATE_VF_STATISTICS */
447 if (session
->hw_ctx_changed
)
448 gen6_emit_3DSTATE_VF_STATISTICS(p
->dev
, false, p
->cp
);
452 gen6_pipeline_vf_draw(struct ilo_3d_pipeline
*p
,
453 const struct ilo_context
*ilo
,
454 struct gen6_pipeline_session
*session
)
457 gen6_emit_3DPRIMITIVE(p
->dev
, ilo
->draw
, &ilo
->ib
, false, p
->cp
);
458 p
->state
.has_gen6_wa_pipe_control
= false;
462 gen6_pipeline_vs(struct ilo_3d_pipeline
*p
,
463 const struct ilo_context
*ilo
,
464 struct gen6_pipeline_session
*session
)
466 const bool emit_3dstate_vs
= (DIRTY(VS
) || DIRTY(SAMPLER_VS
) ||
467 session
->kernel_bo_changed
);
468 const bool emit_3dstate_constant_vs
= session
->pcb_state_vs_changed
;
471 * the classic i965 does this in upload_vs_state(), citing a spec that I
474 if (emit_3dstate_vs
&& p
->dev
->gen
== ILO_GEN(6))
475 gen6_wa_pipe_control_post_sync(p
, false);
477 /* 3DSTATE_CONSTANT_VS */
478 if (emit_3dstate_constant_vs
) {
479 gen6_emit_3DSTATE_CONSTANT_VS(p
->dev
,
480 &p
->state
.vs
.PUSH_CONSTANT_BUFFER
,
481 &p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
,
486 if (emit_3dstate_vs
) {
487 const int num_samplers
= ilo
->sampler
[PIPE_SHADER_VERTEX
].count
;
489 gen6_emit_3DSTATE_VS(p
->dev
, ilo
->vs
, num_samplers
, p
->cp
);
492 if (emit_3dstate_constant_vs
&& p
->dev
->gen
== ILO_GEN(6))
493 gen6_wa_pipe_control_vs_const_flush(p
);
497 gen6_pipeline_gs(struct ilo_3d_pipeline
*p
,
498 const struct ilo_context
*ilo
,
499 struct gen6_pipeline_session
*session
)
501 /* 3DSTATE_CONSTANT_GS */
502 if (session
->pcb_state_gs_changed
)
503 gen6_emit_3DSTATE_CONSTANT_GS(p
->dev
, NULL
, NULL
, 0, p
->cp
);
506 if (DIRTY(GS
) || DIRTY(VS
) ||
507 session
->prim_changed
|| session
->kernel_bo_changed
) {
508 const int verts_per_prim
= u_vertices_per_prim(session
->reduced_prim
);
510 gen6_emit_3DSTATE_GS(p
->dev
, ilo
->gs
, ilo
->vs
, verts_per_prim
, p
->cp
);
515 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline
*p
,
516 const struct ilo_context
*ilo
,
517 struct gen6_pipeline_session
*session
)
519 if (DIRTY(VS
) || DIRTY(GS
) || DIRTY(SO
)) {
520 const struct pipe_stream_output_info
*so_info
=
521 (ilo
->gs
) ? ilo_shader_get_kernel_so_info(ilo
->gs
) :
522 (ilo
->vs
) ? ilo_shader_get_kernel_so_info(ilo
->vs
) : NULL
;
523 unsigned max_svbi
= 0xffffffff;
526 for (i
= 0; i
< so_info
->num_outputs
; i
++) {
527 const int output_buffer
= so_info
->output
[i
].output_buffer
;
528 const struct pipe_stream_output_target
*so
=
529 ilo
->so
.states
[output_buffer
];
530 const int struct_size
= so_info
->stride
[output_buffer
] * 4;
531 const int elem_size
= so_info
->output
[i
].num_components
* 4;
539 buf_size
= so
->buffer_size
- so_info
->output
[i
].dst_offset
* 4;
541 count
= buf_size
/ struct_size
;
542 if (buf_size
% struct_size
>= elem_size
)
545 if (count
< max_svbi
)
549 if (p
->state
.so_max_vertices
!= max_svbi
) {
550 p
->state
.so_max_vertices
= max_svbi
;
559 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline
*p
,
560 const struct ilo_context
*ilo
,
561 struct gen6_pipeline_session
*session
)
563 const bool emit
= gen6_pipeline_update_max_svbi(p
, ilo
, session
);
565 /* 3DSTATE_GS_SVB_INDEX */
567 if (p
->dev
->gen
== ILO_GEN(6))
568 gen6_wa_pipe_control_post_sync(p
, false);
570 gen6_emit_3DSTATE_GS_SVB_INDEX(p
->dev
,
571 0, p
->state
.so_num_vertices
, p
->state
.so_max_vertices
,
574 if (session
->hw_ctx_changed
) {
578 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
580 * "If a buffer is not enabled then the SVBI must be set to 0x0
581 * in order to not cause overflow in that SVBI."
583 * "If a buffer is not enabled then the MaxSVBI must be set to
584 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
586 for (i
= 1; i
< 4; i
++) {
587 gen6_emit_3DSTATE_GS_SVB_INDEX(p
->dev
,
588 i
, 0, 0xffffffff, false, p
->cp
);
595 gen6_pipeline_clip(struct ilo_3d_pipeline
*p
,
596 const struct ilo_context
*ilo
,
597 struct gen6_pipeline_session
*session
)
600 if (DIRTY(RASTERIZER
) || DIRTY(FS
) || DIRTY(VIEWPORT
) || DIRTY(FB
)) {
601 bool enable_guardband
= true;
605 * We do not do 2D clipping yet. Guard band test should only be enabled
606 * when the viewport is larger than the framebuffer.
608 for (i
= 0; i
< ilo
->viewport
.count
; i
++) {
609 const struct ilo_viewport_cso
*vp
= &ilo
->viewport
.cso
[i
];
611 if (vp
->min_x
> 0.0f
|| vp
->max_x
< ilo
->fb
.state
.width
||
612 vp
->min_y
> 0.0f
|| vp
->max_y
< ilo
->fb
.state
.height
) {
613 enable_guardband
= false;
618 gen6_emit_3DSTATE_CLIP(p
->dev
, ilo
->rasterizer
,
619 ilo
->fs
, enable_guardband
, 1, p
->cp
);
624 gen6_pipeline_sf(struct ilo_3d_pipeline
*p
,
625 const struct ilo_context
*ilo
,
626 struct gen6_pipeline_session
*session
)
629 if (DIRTY(RASTERIZER
) || DIRTY(FS
))
630 gen6_emit_3DSTATE_SF(p
->dev
, ilo
->rasterizer
, ilo
->fs
, p
->cp
);
634 gen6_pipeline_sf_rect(struct ilo_3d_pipeline
*p
,
635 const struct ilo_context
*ilo
,
636 struct gen6_pipeline_session
*session
)
638 /* 3DSTATE_DRAWING_RECTANGLE */
640 if (p
->dev
->gen
== ILO_GEN(6))
641 gen6_wa_pipe_control_post_sync(p
, false);
643 gen6_emit_3DSTATE_DRAWING_RECTANGLE(p
->dev
, 0, 0,
644 ilo
->fb
.state
.width
, ilo
->fb
.state
.height
, p
->cp
);
649 gen6_pipeline_wm(struct ilo_3d_pipeline
*p
,
650 const struct ilo_context
*ilo
,
651 struct gen6_pipeline_session
*session
)
653 /* 3DSTATE_CONSTANT_PS */
654 if (session
->pcb_state_fs_changed
) {
655 gen6_emit_3DSTATE_CONSTANT_PS(p
->dev
,
656 &p
->state
.wm
.PUSH_CONSTANT_BUFFER
,
657 &p
->state
.wm
.PUSH_CONSTANT_BUFFER_size
,
662 if (DIRTY(FS
) || DIRTY(SAMPLER_FS
) || DIRTY(BLEND
) || DIRTY(DSA
) ||
663 DIRTY(RASTERIZER
) || session
->kernel_bo_changed
) {
664 const int num_samplers
= ilo
->sampler
[PIPE_SHADER_FRAGMENT
].count
;
665 const bool dual_blend
= ilo
->blend
->dual_blend
;
666 const bool cc_may_kill
= (ilo
->dsa
->dw_alpha
||
667 ilo
->blend
->alpha_to_coverage
);
669 if (p
->dev
->gen
== ILO_GEN(6) && session
->hw_ctx_changed
)
670 gen6_wa_pipe_control_wm_max_threads_stall(p
);
672 gen6_emit_3DSTATE_WM(p
->dev
, ilo
->fs
, num_samplers
,
673 ilo
->rasterizer
, dual_blend
, cc_may_kill
, p
->cp
);
678 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline
*p
,
679 const struct ilo_context
*ilo
,
680 struct gen6_pipeline_session
*session
)
682 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
683 if (DIRTY(SAMPLE_MASK
) || DIRTY(FB
)) {
684 const uint32_t *packed_sample_pos
;
686 packed_sample_pos
= (ilo
->fb
.num_samples
> 1) ?
687 &p
->packed_sample_position_4x
: &p
->packed_sample_position_1x
;
689 if (p
->dev
->gen
== ILO_GEN(6)) {
690 gen6_wa_pipe_control_post_sync(p
, false);
691 gen6_wa_pipe_control_wm_multisample_flush(p
);
694 gen6_emit_3DSTATE_MULTISAMPLE(p
->dev
,
695 ilo
->fb
.num_samples
, packed_sample_pos
,
696 ilo
->rasterizer
->state
.half_pixel_center
, p
->cp
);
698 gen6_emit_3DSTATE_SAMPLE_MASK(p
->dev
,
699 (ilo
->fb
.num_samples
> 1) ? ilo
->sample_mask
: 0x1, p
->cp
);
704 gen6_pipeline_wm_depth(struct ilo_3d_pipeline
*p
,
705 const struct ilo_context
*ilo
,
706 struct gen6_pipeline_session
*session
)
708 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
709 if (DIRTY(FB
) || session
->batch_bo_changed
) {
710 const struct ilo_zs_surface
*zs
;
712 if (ilo
->fb
.state
.zsbuf
) {
713 const struct ilo_surface_cso
*surface
=
714 (const struct ilo_surface_cso
*) ilo
->fb
.state
.zsbuf
;
716 assert(!surface
->is_rt
);
720 zs
= &ilo
->fb
.null_zs
;
723 if (p
->dev
->gen
== ILO_GEN(6)) {
724 gen6_wa_pipe_control_post_sync(p
, false);
725 gen6_wa_pipe_control_wm_depth_flush(p
);
728 gen6_emit_3DSTATE_DEPTH_BUFFER(p
->dev
, zs
, p
->cp
);
731 gen6_emit_3DSTATE_CLEAR_PARAMS(p
->dev
, 0, p
->cp
);
736 gen6_pipeline_wm_raster(struct ilo_3d_pipeline
*p
,
737 const struct ilo_context
*ilo
,
738 struct gen6_pipeline_session
*session
)
740 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
741 if ((DIRTY(RASTERIZER
) || DIRTY(POLY_STIPPLE
)) &&
742 ilo
->rasterizer
->state
.poly_stipple_enable
) {
743 if (p
->dev
->gen
== ILO_GEN(6))
744 gen6_wa_pipe_control_post_sync(p
, false);
746 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(p
->dev
,
747 &ilo
->poly_stipple
, p
->cp
);
749 gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(p
->dev
, 0, 0, p
->cp
);
752 /* 3DSTATE_LINE_STIPPLE */
753 if (DIRTY(RASTERIZER
) && ilo
->rasterizer
->state
.line_stipple_enable
) {
754 if (p
->dev
->gen
== ILO_GEN(6))
755 gen6_wa_pipe_control_post_sync(p
, false);
757 gen6_emit_3DSTATE_LINE_STIPPLE(p
->dev
,
758 ilo
->rasterizer
->state
.line_stipple_pattern
,
759 ilo
->rasterizer
->state
.line_stipple_factor
+ 1, p
->cp
);
762 /* 3DSTATE_AA_LINE_PARAMETERS */
763 if (DIRTY(RASTERIZER
) && ilo
->rasterizer
->state
.line_smooth
) {
764 if (p
->dev
->gen
== ILO_GEN(6))
765 gen6_wa_pipe_control_post_sync(p
, false);
767 gen6_emit_3DSTATE_AA_LINE_PARAMETERS(p
->dev
, p
->cp
);
772 gen6_pipeline_state_viewports(struct ilo_3d_pipeline
*p
,
773 const struct ilo_context
*ilo
,
774 struct gen6_pipeline_session
*session
)
776 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
777 if (p
->dev
->gen
>= ILO_GEN(7) && DIRTY(VIEWPORT
)) {
778 p
->state
.SF_CLIP_VIEWPORT
= gen7_emit_SF_CLIP_VIEWPORT(p
->dev
,
779 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
781 p
->state
.CC_VIEWPORT
= gen6_emit_CC_VIEWPORT(p
->dev
,
782 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
784 session
->viewport_state_changed
= true;
786 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
787 else if (DIRTY(VIEWPORT
)) {
788 p
->state
.CLIP_VIEWPORT
= gen6_emit_CLIP_VIEWPORT(p
->dev
,
789 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
791 p
->state
.SF_VIEWPORT
= gen6_emit_SF_VIEWPORT(p
->dev
,
792 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
794 p
->state
.CC_VIEWPORT
= gen6_emit_CC_VIEWPORT(p
->dev
,
795 ilo
->viewport
.cso
, ilo
->viewport
.count
, p
->cp
);
797 session
->viewport_state_changed
= true;
802 gen6_pipeline_state_cc(struct ilo_3d_pipeline
*p
,
803 const struct ilo_context
*ilo
,
804 struct gen6_pipeline_session
*session
)
807 if (DIRTY(BLEND
) || DIRTY(FB
) || DIRTY(DSA
)) {
808 p
->state
.BLEND_STATE
= gen6_emit_BLEND_STATE(p
->dev
,
809 ilo
->blend
, &ilo
->fb
, ilo
->dsa
, p
->cp
);
811 session
->cc_state_blend_changed
= true;
814 /* COLOR_CALC_STATE */
815 if (DIRTY(DSA
) || DIRTY(STENCIL_REF
) || DIRTY(BLEND_COLOR
)) {
816 p
->state
.COLOR_CALC_STATE
=
817 gen6_emit_COLOR_CALC_STATE(p
->dev
, &ilo
->stencil_ref
,
818 ilo
->dsa
->alpha_ref
, &ilo
->blend_color
, p
->cp
);
820 session
->cc_state_cc_changed
= true;
823 /* DEPTH_STENCIL_STATE */
825 p
->state
.DEPTH_STENCIL_STATE
=
826 gen6_emit_DEPTH_STENCIL_STATE(p
->dev
, ilo
->dsa
, p
->cp
);
828 session
->cc_state_dsa_changed
= true;
833 gen6_pipeline_state_scissors(struct ilo_3d_pipeline
*p
,
834 const struct ilo_context
*ilo
,
835 struct gen6_pipeline_session
*session
)
838 if (DIRTY(SCISSOR
) || DIRTY(VIEWPORT
)) {
839 /* there should be as many scissors as there are viewports */
840 p
->state
.SCISSOR_RECT
= gen6_emit_SCISSOR_RECT(p
->dev
,
841 &ilo
->scissor
, ilo
->viewport
.count
, p
->cp
);
843 session
->scissor_state_changed
= true;
848 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline
*p
,
849 const struct ilo_context
*ilo
,
850 struct gen6_pipeline_session
*session
)
852 /* SURFACE_STATEs for render targets */
854 const struct ilo_fb_state
*fb
= &ilo
->fb
;
855 const int offset
= ILO_WM_DRAW_SURFACE(0);
856 uint32_t *surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
859 for (i
= 0; i
< fb
->state
.nr_cbufs
; i
++) {
860 const struct ilo_surface_cso
*surface
=
861 (const struct ilo_surface_cso
*) fb
->state
.cbufs
[i
];
863 assert(surface
&& surface
->is_rt
);
865 gen6_emit_SURFACE_STATE(p
->dev
, &surface
->u
.rt
, true, p
->cp
);
869 * Upload at least one render target, as
870 * brw_update_renderbuffer_surfaces() does. I don't know why.
873 struct ilo_view_surface null_surface
;
875 ilo_gpe_init_view_surface_null(p
->dev
,
876 fb
->state
.width
, fb
->state
.height
,
877 1, 0, &null_surface
);
880 gen6_emit_SURFACE_STATE(p
->dev
, &null_surface
, true, p
->cp
);
885 memset(&surface_state
[i
], 0, (ILO_MAX_DRAW_BUFFERS
- i
) * 4);
887 if (i
&& session
->num_surfaces
[PIPE_SHADER_FRAGMENT
] < offset
+ i
)
888 session
->num_surfaces
[PIPE_SHADER_FRAGMENT
] = offset
+ i
;
890 session
->binding_table_fs_changed
= true;
895 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline
*p
,
896 const struct ilo_context
*ilo
,
897 struct gen6_pipeline_session
*session
)
899 const struct ilo_so_state
*so
= &ilo
->so
;
901 if (p
->dev
->gen
!= ILO_GEN(6))
904 /* SURFACE_STATEs for stream output targets */
905 if (DIRTY(VS
) || DIRTY(GS
) || DIRTY(SO
)) {
906 const struct pipe_stream_output_info
*so_info
=
907 (ilo
->gs
) ? ilo_shader_get_kernel_so_info(ilo
->gs
) :
908 (ilo
->vs
) ? ilo_shader_get_kernel_so_info(ilo
->vs
) : NULL
;
909 const int offset
= ILO_GS_SO_SURFACE(0);
910 uint32_t *surface_state
= &p
->state
.gs
.SURFACE_STATE
[offset
];
913 for (i
= 0; so_info
&& i
< so_info
->num_outputs
; i
++) {
914 const int target
= so_info
->output
[i
].output_buffer
;
915 const struct pipe_stream_output_target
*so_target
=
916 (target
< so
->count
) ? so
->states
[target
] : NULL
;
919 surface_state
[i
] = gen6_emit_so_SURFACE_STATE(p
->dev
,
920 so_target
, so_info
, i
, p
->cp
);
923 surface_state
[i
] = 0;
927 memset(&surface_state
[i
], 0, (ILO_MAX_SO_BINDINGS
- i
) * 4);
929 if (i
&& session
->num_surfaces
[PIPE_SHADER_GEOMETRY
] < offset
+ i
)
930 session
->num_surfaces
[PIPE_SHADER_GEOMETRY
] = offset
+ i
;
932 session
->binding_table_gs_changed
= true;
937 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline
*p
,
938 const struct ilo_context
*ilo
,
940 struct gen6_pipeline_session
*session
)
942 const struct ilo_view_state
*view
= &ilo
->view
[shader_type
];
943 uint32_t *surface_state
;
947 /* SURFACE_STATEs for sampler views */
948 switch (shader_type
) {
949 case PIPE_SHADER_VERTEX
:
950 if (DIRTY(VIEW_VS
)) {
951 offset
= ILO_VS_TEXTURE_SURFACE(0);
952 surface_state
= &p
->state
.vs
.SURFACE_STATE
[offset
];
954 session
->binding_table_vs_changed
= true;
960 case PIPE_SHADER_FRAGMENT
:
961 if (DIRTY(VIEW_FS
)) {
962 offset
= ILO_WM_TEXTURE_SURFACE(0);
963 surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
965 session
->binding_table_fs_changed
= true;
979 for (i
= 0; i
< view
->count
; i
++) {
980 if (view
->states
[i
]) {
981 const struct ilo_view_cso
*cso
=
982 (const struct ilo_view_cso
*) view
->states
[i
];
985 gen6_emit_SURFACE_STATE(p
->dev
, &cso
->surface
, false, p
->cp
);
988 surface_state
[i
] = 0;
992 memset(&surface_state
[i
], 0, (ILO_MAX_SAMPLER_VIEWS
- i
) * 4);
994 if (i
&& session
->num_surfaces
[shader_type
] < offset
+ i
)
995 session
->num_surfaces
[shader_type
] = offset
+ i
;
999 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline
*p
,
1000 const struct ilo_context
*ilo
,
1002 struct gen6_pipeline_session
*session
)
1004 const struct ilo_cbuf_state
*cbuf
= &ilo
->cbuf
[shader_type
];
1005 uint32_t *surface_state
;
1006 bool *binding_table_changed
;
1007 int offset
, count
, i
;
1012 /* SURFACE_STATEs for constant buffers */
1013 switch (shader_type
) {
1014 case PIPE_SHADER_VERTEX
:
1015 offset
= ILO_VS_CONST_SURFACE(0);
1016 surface_state
= &p
->state
.vs
.SURFACE_STATE
[offset
];
1017 binding_table_changed
= &session
->binding_table_vs_changed
;
1019 case PIPE_SHADER_FRAGMENT
:
1020 offset
= ILO_WM_CONST_SURFACE(0);
1021 surface_state
= &p
->state
.wm
.SURFACE_STATE
[offset
];
1022 binding_table_changed
= &session
->binding_table_fs_changed
;
1029 /* constants are pushed via PCB */
1030 if (cbuf
->enabled_mask
== 0x1 && !cbuf
->cso
[0].resource
) {
1031 memset(surface_state
, 0, ILO_MAX_CONST_BUFFERS
* 4);
1035 count
= util_last_bit(cbuf
->enabled_mask
);
1036 for (i
= 0; i
< count
; i
++) {
1037 if (cbuf
->cso
[i
].resource
) {
1038 surface_state
[i
] = gen6_emit_SURFACE_STATE(p
->dev
,
1039 &cbuf
->cso
[i
].surface
, false, p
->cp
);
1042 surface_state
[i
] = 0;
1046 memset(&surface_state
[count
], 0, (ILO_MAX_CONST_BUFFERS
- count
) * 4);
1048 if (count
&& session
->num_surfaces
[shader_type
] < offset
+ count
)
1049 session
->num_surfaces
[shader_type
] = offset
+ count
;
1051 *binding_table_changed
= true;
1055 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline
*p
,
1056 const struct ilo_context
*ilo
,
1058 struct gen6_pipeline_session
*session
)
1060 uint32_t *binding_table_state
, *surface_state
;
1061 int *binding_table_state_size
, size
;
1064 /* BINDING_TABLE_STATE */
1065 switch (shader_type
) {
1066 case PIPE_SHADER_VERTEX
:
1067 surface_state
= p
->state
.vs
.SURFACE_STATE
;
1068 binding_table_state
= &p
->state
.vs
.BINDING_TABLE_STATE
;
1069 binding_table_state_size
= &p
->state
.vs
.BINDING_TABLE_STATE_size
;
1071 skip
= !session
->binding_table_vs_changed
;
1073 case PIPE_SHADER_GEOMETRY
:
1074 surface_state
= p
->state
.gs
.SURFACE_STATE
;
1075 binding_table_state
= &p
->state
.gs
.BINDING_TABLE_STATE
;
1076 binding_table_state_size
= &p
->state
.gs
.BINDING_TABLE_STATE_size
;
1078 skip
= !session
->binding_table_gs_changed
;
1080 case PIPE_SHADER_FRAGMENT
:
1081 surface_state
= p
->state
.wm
.SURFACE_STATE
;
1082 binding_table_state
= &p
->state
.wm
.BINDING_TABLE_STATE
;
1083 binding_table_state_size
= &p
->state
.wm
.BINDING_TABLE_STATE_size
;
1085 skip
= !session
->binding_table_fs_changed
;
1096 * If we have seemingly less SURFACE_STATEs than before, it could be that
1097 * we did not touch those reside at the tail in this upload. Loop over
1098 * them to figure out the real number of SURFACE_STATEs.
1100 for (size
= *binding_table_state_size
;
1101 size
> session
->num_surfaces
[shader_type
]; size
--) {
1102 if (surface_state
[size
- 1])
1105 if (size
< session
->num_surfaces
[shader_type
])
1106 size
= session
->num_surfaces
[shader_type
];
1108 *binding_table_state
= gen6_emit_BINDING_TABLE_STATE(p
->dev
,
1109 surface_state
, size
, p
->cp
);
1110 *binding_table_state_size
= size
;
/**
 * Emit SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE for one shader stage.
 *
 * Only the vertex and fragment stages are handled; other stages are
 * skipped.  Border colors are re-emitted only when the samplers themselves
 * (not merely the sampler views) are dirty.
 *
 * NOTE(review): this block was reconstructed from a mangled extraction.
 * The "int shader_type" parameter, the skip/default paths, and the middle
 * arguments of gen6_emit_SAMPLER_STATE() were inferred from the call site
 * in gen6_pipeline_states() and the local declarations -- verify against
 * upstream before relying on them.
 */
static void
gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
                             const struct ilo_context *ilo,
                             int shader_type,
                             struct gen6_pipeline_session *session)
{
   const struct ilo_sampler_cso * const *samplers =
      ilo->sampler[shader_type].cso;
   const struct pipe_sampler_view * const *views =
      (const struct pipe_sampler_view **) ilo->view[shader_type].states;
   const int num_samplers = ilo->sampler[shader_type].count;
   const int num_views = ilo->view[shader_type].count;
   uint32_t *sampler_state, *border_color_state;
   bool emit_border_color = false;
   bool skip = false;

   /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
         sampler_state = &p->state.vs.SAMPLER_STATE;
         border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;

         /* border colors depend only on the samplers, not the views */
         if (DIRTY(SAMPLER_VS))
            emit_border_color = true;

         session->sampler_state_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
         sampler_state = &p->state.wm.SAMPLER_STATE;
         border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;

         if (DIRTY(SAMPLER_FS))
            emit_border_color = true;

         session->sampler_state_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      skip = true;
      break;
   }

   if (skip)
      return;

   if (emit_border_color) {
      int i;

      /* a NULL sampler slot gets a zero (no state) entry */
      for (i = 0; i < num_samplers; i++) {
         border_color_state[i] = (samplers[i]) ?
            gen6_emit_SAMPLER_BORDER_COLOR_STATE(p->dev,
                  samplers[i], p->cp) : 0;
      }
   }

   /* should we take the minimum of num_samplers and num_views? */
   *sampler_state = gen6_emit_SAMPLER_STATE(p->dev,
         samplers, views, border_color_state,
         MIN2(num_samplers, num_views), p->cp);
}
1185 gen6_pipeline_state_pcb(struct ilo_3d_pipeline
*p
,
1186 const struct ilo_context
*ilo
,
1187 struct gen6_pipeline_session
*session
)
1189 /* push constant buffer for VS */
1190 if (DIRTY(VS
) || DIRTY(CBUF
) || DIRTY(CLIP
)) {
1191 const int cbuf0_size
= (ilo
->vs
) ?
1192 ilo_shader_get_kernel_param(ilo
->vs
,
1193 ILO_KERNEL_PCB_CBUF0_SIZE
) : 0;
1194 const int clip_state_size
= (ilo
->vs
) ?
1195 ilo_shader_get_kernel_param(ilo
->vs
,
1196 ILO_KERNEL_VS_PCB_UCP_SIZE
) : 0;
1197 const int total_size
= cbuf0_size
+ clip_state_size
;
1202 p
->state
.vs
.PUSH_CONSTANT_BUFFER
=
1203 gen6_emit_push_constant_buffer(p
->dev
, total_size
, &pcb
, p
->cp
);
1204 p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
= total_size
;
1207 const struct ilo_cbuf_state
*cbuf
=
1208 &ilo
->cbuf
[PIPE_SHADER_VERTEX
];
1210 if (cbuf0_size
<= cbuf
->cso
[0].user_buffer_size
) {
1211 memcpy(pcb
, cbuf
->cso
[0].user_buffer
, cbuf0_size
);
1214 memcpy(pcb
, cbuf
->cso
[0].user_buffer
,
1215 cbuf
->cso
[0].user_buffer_size
);
1216 memset(pcb
+ cbuf
->cso
[0].user_buffer_size
, 0,
1217 cbuf0_size
- cbuf
->cso
[0].user_buffer_size
);
1223 if (clip_state_size
)
1224 memcpy(pcb
, &ilo
->clip
, clip_state_size
);
1226 session
->pcb_state_vs_changed
= true;
1228 else if (p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
) {
1229 p
->state
.vs
.PUSH_CONSTANT_BUFFER
= 0;
1230 p
->state
.vs
.PUSH_CONSTANT_BUFFER_size
= 0;
1232 session
->pcb_state_vs_changed
= true;
1236 /* push constant buffer for FS */
1237 if (DIRTY(FS
) || DIRTY(CBUF
)) {
1238 const int cbuf0_size
= (ilo
->fs
) ?
1239 ilo_shader_get_kernel_param(ilo
->fs
, ILO_KERNEL_PCB_CBUF0_SIZE
) : 0;
1242 const struct ilo_cbuf_state
*cbuf
= &ilo
->cbuf
[PIPE_SHADER_FRAGMENT
];
1245 p
->state
.wm
.PUSH_CONSTANT_BUFFER
=
1246 gen6_emit_push_constant_buffer(p
->dev
, cbuf0_size
, &pcb
, p
->cp
);
1247 p
->state
.wm
.PUSH_CONSTANT_BUFFER_size
= cbuf0_size
;
1249 if (cbuf0_size
<= cbuf
->cso
[0].user_buffer_size
) {
1250 memcpy(pcb
, cbuf
->cso
[0].user_buffer
, cbuf0_size
);
1253 memcpy(pcb
, cbuf
->cso
[0].user_buffer
,
1254 cbuf
->cso
[0].user_buffer_size
);
1255 memset(pcb
+ cbuf
->cso
[0].user_buffer_size
, 0,
1256 cbuf0_size
- cbuf
->cso
[0].user_buffer_size
);
1259 session
->pcb_state_fs_changed
= true;
1261 else if (p
->state
.wm
.PUSH_CONSTANT_BUFFER_size
) {
1262 p
->state
.wm
.PUSH_CONSTANT_BUFFER
= 0;
1263 p
->state
.wm
.PUSH_CONSTANT_BUFFER_size
= 0;
1265 session
->pcb_state_fs_changed
= true;
/**
 * Emit all 3D pipeline commands for a draw, in a fixed order.  Installed
 * as session->emit_draw_commands by ilo_3d_pipeline_emit_draw_gen6().
 */
static void
gen6_pipeline_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /*
    * We try to keep the order of the commands match, as closely as possible,
    * that of the classic i965 driver.  It allows us to compare the command
    * streams easily.
    */
   gen6_pipeline_common_select(p, ilo, session);
   gen6_pipeline_gs_svbi(p, ilo, session);
   gen6_pipeline_common_sip(p, ilo, session);
   gen6_pipeline_vf_statistics(p, ilo, session);
   gen6_pipeline_common_base_address(p, ilo, session);
   gen6_pipeline_common_pointers_1(p, ilo, session);
   gen6_pipeline_common_urb(p, ilo, session);
   gen6_pipeline_common_pointers_2(p, ilo, session);
   gen6_pipeline_wm_multisample(p, ilo, session);
   gen6_pipeline_vs(p, ilo, session);
   gen6_pipeline_gs(p, ilo, session);
   gen6_pipeline_clip(p, ilo, session);
   gen6_pipeline_sf(p, ilo, session);
   gen6_pipeline_wm(p, ilo, session);
   gen6_pipeline_common_pointers_3(p, ilo, session);
   gen6_pipeline_wm_depth(p, ilo, session);
   gen6_pipeline_wm_raster(p, ilo, session);
   gen6_pipeline_sf_rect(p, ilo, session);
   gen6_pipeline_vf(p, ilo, session);
   /* the actual 3DPRIMITIVE comes last */
   gen6_pipeline_vf_draw(p, ilo, session);
}
/**
 * Emit all dynamic states for a draw.  Installed as
 * session->emit_draw_states by ilo_3d_pipeline_emit_draw_gen6().
 */
static void
gen6_pipeline_states(struct ilo_3d_pipeline *p,
                     const struct ilo_context *ilo,
                     struct gen6_pipeline_session *session)
{
   int shader_type;

   gen6_pipeline_state_viewports(p, ilo, session);
   gen6_pipeline_state_cc(p, ilo, session);
   gen6_pipeline_state_scissors(p, ilo, session);
   gen6_pipeline_state_pcb(p, ilo, session);

   /*
    * upload all SURFACE_STATEs together so that we know there are minimal
    * paddings
    */
   gen6_pipeline_state_surfaces_rt(p, ilo, session);
   gen6_pipeline_state_surfaces_so(p, ilo, session);
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
      gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_samplers(p, ilo, shader_type, session);
      /* this must be called after all SURFACE_STATEs are uploaded */
      gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
   }
}
/**
 * Initialize a pipeline session for a draw: snapshot the dirty flags, the
 * reduced primitive type, and which buffer objects changed since the last
 * emission, so the emitters know what must be re-uploaded.
 */
static void
gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   memset(session, 0, sizeof(*session));
   session->pipe_dirty = ilo->dirty;
   session->reduced_prim = u_reduced_prim(ilo->draw->mode);

   /* available space before the session */
   session->init_cp_space = ilo_cp_space(p->cp);

   session->hw_ctx_changed =
      (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);

   if (session->hw_ctx_changed) {
      /* these should be enough to make everything uploaded */
      session->batch_bo_changed = true;
      session->state_bo_changed = true;
      session->kernel_bo_changed = true;
      session->prim_changed = true;
      session->primitive_restart_changed = true;
   }
   else {
      /*
       * Any state that involves resources needs to be re-emitted when the
       * batch bo changed.  This is because we do not pin the resources and
       * their offsets (or existence) may change between batch buffers.
       *
       * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
       * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO
       * as a temporary workaround.
       */
      session->batch_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);

      session->state_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
      session->kernel_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
      /* compare against the state left by the previous draw */
      session->prim_changed =
         (p->state.reduced_prim != session->reduced_prim);
      session->primitive_restart_changed =
         (p->state.primitive_restart != ilo->draw->primitive_restart);
   }
}
1381 gen6_pipeline_draw(struct ilo_3d_pipeline
*p
,
1382 const struct ilo_context
*ilo
,
1383 struct gen6_pipeline_session
*session
)
1385 /* force all states to be uploaded if the state bo changed */
1386 if (session
->state_bo_changed
)
1387 session
->pipe_dirty
= ILO_DIRTY_ALL
;
1389 session
->pipe_dirty
= ilo
->dirty
;
1391 session
->emit_draw_states(p
, ilo
, session
);
1393 /* force all commands to be uploaded if the HW context changed */
1394 if (session
->hw_ctx_changed
)
1395 session
->pipe_dirty
= ILO_DIRTY_ALL
;
1397 session
->pipe_dirty
= ilo
->dirty
;
1399 session
->emit_draw_commands(p
, ilo
, session
);
/**
 * Finish a pipeline session: verify that the space consumed did not exceed
 * the estimate, and remember the primitive state for change detection in
 * the next session.
 */
static void
gen6_pipeline_end(struct ilo_3d_pipeline *p,
                  const struct ilo_context *ilo,
                  struct gen6_pipeline_session *session)
{
   /* sanity check size estimation */
   assert(session->init_cp_space - ilo_cp_space(p->cp) <=
         ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo));

   p->state.reduced_prim = session->reduced_prim;
   p->state.primitive_restart = ilo->draw->primitive_restart;
}
/**
 * Emit a draw: prepare a session, hook up the GEN6 state and command
 * emitters, then run and finish the session.
 *
 * \see ilo_3d_pipeline::emit_draw()
 */
static void
ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
                               const struct ilo_context *ilo)
{
   struct gen6_pipeline_session session;

   gen6_pipeline_prepare(p, ilo, &session);

   session.emit_draw_states = gen6_pipeline_states;
   session.emit_draw_commands = gen6_pipeline_commands;

   gen6_pipeline_draw(p, ilo, &session);
   gen6_pipeline_end(p, ilo, &session);
}
/**
 * Emit a full pipeline flush: flush all caches, invalidate the VF cache,
 * and stall the command streamer.  On GEN6 the post-sync workaround
 * PIPE_CONTROL is emitted first.
 *
 * \see ilo_3d_pipeline::emit_flush()
 */
static void
ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_INSTRUCTION_FLUSH |
         PIPE_CONTROL_WRITE_FLUSH |
         PIPE_CONTROL_DEPTH_CACHE_FLUSH |
         PIPE_CONTROL_VF_CACHE_INVALIDATE |
         PIPE_CONTROL_TC_FLUSH |
         PIPE_CONTROL_NO_WRITE |
         PIPE_CONTROL_CS_STALL,
         0, 0, false, p->cp);
}
/**
 * Write the 64-bit GPU timestamp to slot \p index of \p bo via a
 * PIPE_CONTROL.  On GEN6 the post-sync workaround is emitted first (with
 * caller_post_sync set, as this PIPE_CONTROL itself performs a post-sync
 * write).
 *
 * \see ilo_3d_pipeline::emit_write_timestamp()
 */
static void
ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
                                          struct intel_bo *bo, int index)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, true);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_TIMESTAMP,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         /* NOTE(review): trailing arguments reconstructed from the
          * matching write_depth_count emitter -- verify */
         true, p->cp);
}
/**
 * Write the depth count (PS_DEPTH_COUNT) to slot \p index of \p bo via a
 * PIPE_CONTROL with a depth stall.  On GEN6 the post-sync workaround is
 * emitted first.
 *
 * \see ilo_3d_pipeline::emit_write_depth_count()
 */
static void
ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
                                            struct intel_bo *bo, int index)
{
   if (p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   gen6_emit_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_DEPTH_STALL |
         PIPE_CONTROL_WRITE_DEPTH_COUNT,
         bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
         /* NOTE(review): trailing arguments reconstructed -- verify */
         true, p->cp);
}
/**
 * Estimate the command-buffer space a draw may consume, summing the
 * worst-case size of every GEN6 command the pipeline can emit.
 *
 * The result does not depend on \p ilo, so it is computed once and cached
 * in a function-local static.  NOTE(review): the cache is written without
 * synchronization -- not thread-safe; confirm callers are single-threaded.
 *
 * NOTE(review): reconstructed from a mangled extraction; the per-command
 * count values below were dropped by the extraction and are inferred --
 * verify every count against upstream before trusting the estimate.
 */
static int
gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo)
{
   static int size;
   enum ilo_gpe_gen6_command cmd;

   if (size)
      return size;

   for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
      int count;

      switch (cmd) {
      case ILO_GPE_GEN6_PIPE_CONTROL:
         /* for the workaround */
         count = 2;
         /* another one after 3DSTATE_URB */
         count += 1;
         /* and another one after 3DSTATE_CONSTANT_VS */
         count += 1;
         break;
      case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
         /* there are 4 SVBIs */
         count = 4;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
         count = 33;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
         count = 34;
         break;
      case ILO_GPE_GEN6_MEDIA_VFE_STATE:
      case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
      case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
      case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
      case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
      case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
         /* media commands */
         count = 0;
         break;
      default:
         count = 1;
         break;
      }

      if (count)
         size += ilo_gpe_gen6_estimate_command_size(p->dev, cmd, count);
   }

   return size;
}
/**
 * Estimate the state-buffer space a draw may consume: a fixed part
 * (viewports, cc, scissors, binding tables), plus parts that scale with
 * the bound surfaces, samplers, and push constant buffers.
 *
 * The fixed part depends only on the device, so it is computed once and
 * cached in a function-local static.  NOTE(review): the cache is written
 * without synchronization -- not thread-safe.
 *
 * NOTE(review): reconstructed from a mangled extraction; the guards
 * (if (!static_size), if (ilo->gs) / else if (ilo->vs), if (count)) were
 * inferred from the visible fragments -- verify against upstream.
 */
static int
gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
                              const struct ilo_context *ilo)
{
   static int static_size;
   int shader_type, count, size;

   if (!static_size) {
      struct {
         enum ilo_gpe_gen6_state state;
         int count;
      } static_states[] = {
         /* viewports */
         { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
         /* cc */
         { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
         { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
         { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
         /* scissors */
         { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
         /* binding table (vs, gs, fs) */
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
      };
      int i;

      for (i = 0; i < Elements(static_states); i++) {
         static_size += ilo_gpe_gen6_estimate_state_size(p->dev,
               static_states[i].state,
               static_states[i].count);
      }
   }

   size = static_size;

   /*
    * count the SURFACE_STATEs needed:
    *
    *  render targets (fs)
    *  stream outputs (gs)
    *  sampler views (vs, fs)
    *  constant buffers (vs, fs)
    */
   count = ilo->fb.state.nr_cbufs;

   if (ilo->gs) {
      const struct pipe_stream_output_info *so_info =
         ilo_shader_get_kernel_so_info(ilo->gs);

      count += so_info->num_outputs;
   }
   else if (ilo->vs) {
      const struct pipe_stream_output_info *so_info =
         ilo_shader_get_kernel_so_info(ilo->vs);

      count += so_info->num_outputs;
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count += ilo->view[shader_type].count;
      count += util_bitcount(ilo->cbuf[shader_type].enabled_mask);
   }

   if (count) {
      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_SURFACE_STATE, count);
   }

   /* samplers (vs, fs) */
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count = ilo->sampler[shader_type].count;
      if (count) {
         size += ilo_gpe_gen6_estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
         size += ilo_gpe_gen6_estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_STATE, count);
      }
   }

   /* pcb (vs): cbuf0 plus the user clip planes */
   if (ilo->vs) {
      const int cbuf0_size =
         ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_PCB_CBUF0_SIZE);
      const int ucp_size =
         ilo_shader_get_kernel_param(ilo->vs, ILO_KERNEL_VS_PCB_UCP_SIZE);

      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size + ucp_size);
   }

   /* pcb (fs): cbuf0 only */
   if (ilo->fs) {
      const int cbuf0_size =
         ilo_shader_get_kernel_param(ilo->fs, ILO_KERNEL_PCB_CBUF0_SIZE);

      size += ilo_gpe_gen6_estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, cbuf0_size);
   }

   return size;
}
1631 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline
*p
,
1632 enum ilo_3d_pipeline_action action
,
1638 case ILO_3D_PIPELINE_DRAW
:
1640 const struct ilo_context
*ilo
= arg
;
1642 size
= gen6_pipeline_estimate_commands(p
, ilo
) +
1643 gen6_pipeline_estimate_states(p
, ilo
);
1646 case ILO_3D_PIPELINE_FLUSH
:
1647 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1648 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 3;
1650 case ILO_3D_PIPELINE_WRITE_TIMESTAMP
:
1651 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1652 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 2;
1654 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT
:
1655 size
= ilo_gpe_gen6_estimate_command_size(p
->dev
,
1656 ILO_GPE_GEN6_PIPE_CONTROL
, 1) * 3;
1659 assert(!"unknown 3D pipeline action");
/**
 * Install the GEN6 implementations of the 3D pipeline hooks.
 */
void
ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
{
   p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
   p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
   p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
   p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
   p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
}