ilo: rename ilo_3d_pipeline*.[ch] to ilo_render*.[ch]
src/gallium/drivers/ilo/ilo_render_gen6.c (mesa.git)
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "genhw/genhw.h"
29 #include "util/u_dual_blend.h"
30 #include "util/u_prim.h"
31
32 #include "ilo_blitter.h"
33 #include "ilo_builder_3d.h"
34 #include "ilo_builder_mi.h"
35 #include "ilo_builder_render.h"
36 #include "ilo_query.h"
37 #include "ilo_shader.h"
38 #include "ilo_state.h"
39 #include "ilo_render.h"
40 #include "ilo_render_gen.h"
41
42 /**
43 * A wrapper for gen6_PIPE_CONTROL().
44 */
45 static inline void
46 gen6_pipe_control(struct ilo_3d_pipeline *p, uint32_t dw1)
47 {
48 struct intel_bo *bo = (dw1 & GEN6_PIPE_CONTROL_WRITE__MASK) ?
49 p->workaround_bo : NULL;
50
51 ILO_DEV_ASSERT(p->dev, 6, 6);
52
53 gen6_PIPE_CONTROL(p->builder, dw1, bo, 0, false);
54
55 p->state.current_pipe_control_dw1 |= dw1;
56
57 assert(!p->state.deferred_pipe_control_dw1);
58 }
59
60 /**
61 * This should be called before PIPE_CONTROL.
62 */
63 static void
64 gen6_wa_pre_pipe_control(struct ilo_3d_pipeline *p, uint32_t dw1)
65 {
66 /*
67 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
68 *
69 * "Pipe-control with CS-stall bit set must be sent BEFORE the
70 * pipe-control with a post-sync op and no write-cache flushes."
71 *
72 * This WA may also be triggered indirectly by the other two WAs on the
73 * same page:
74 *
75 * "Before any depth stall flush (including those produced by
76 * non-pipelined state commands), software needs to first send a
77 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
78 *
79 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
80 * PIPE_CONTROL with any non-zero post-sync-op is required."
81 */
82 const bool direct_wa_cond = (dw1 & GEN6_PIPE_CONTROL_WRITE__MASK) &&
83 !(dw1 & GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH);
84 const bool indirect_wa_cond = (dw1 & GEN6_PIPE_CONTROL_DEPTH_STALL) |
85 (dw1 & GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH);
86
87 ILO_DEV_ASSERT(p->dev, 6, 6);
88
89 if (!direct_wa_cond && !indirect_wa_cond)
90 return;
91
92 if (!(p->state.current_pipe_control_dw1 & GEN6_PIPE_CONTROL_CS_STALL)) {
93 /*
94 * From the Sandy Bridge PRM, volume 2 part 1, page 73:
95 *
96 * "1 of the following must also be set (when CS stall is set):
97 *
98 * - Depth Cache Flush Enable ([0] of DW1)
99 * - Stall at Pixel Scoreboard ([1] of DW1)
100 * - Depth Stall ([13] of DW1)
101 * - Post-Sync Operation ([13] of DW1)
102 * - Render Target Cache Flush Enable ([12] of DW1)
103 * - Notify Enable ([8] of DW1)"
104 *
105 * Because of the WAs above, we have to pick Stall at Pixel Scoreboard.
106 */
107 const uint32_t direct_wa = GEN6_PIPE_CONTROL_CS_STALL |
108 GEN6_PIPE_CONTROL_PIXEL_SCOREBOARD_STALL;
109
110 gen6_pipe_control(p, direct_wa);
111 }
112
113 if (indirect_wa_cond &&
114 !(p->state.current_pipe_control_dw1 & GEN6_PIPE_CONTROL_WRITE__MASK)) {
115 const uint32_t indirect_wa = GEN6_PIPE_CONTROL_WRITE_IMM;
116
117 gen6_pipe_control(p, indirect_wa);
118 }
119 }
120
121 /**
122 * This should be called before any non-pipelined state command.
123 */
124 static void
125 gen6_wa_pre_non_pipelined(struct ilo_3d_pipeline *p)
126 {
127 ILO_DEV_ASSERT(p->dev, 6, 6);
128
129 /* non-pipelined state commands produce depth stall */
130 gen6_wa_pre_pipe_control(p, GEN6_PIPE_CONTROL_DEPTH_STALL);
131 }
132
133 static void
134 gen6_wa_post_3dstate_constant_vs(struct ilo_3d_pipeline *p)
135 {
136 /*
137 * According to upload_vs_state() of the classic driver, we need to emit a
138 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS, otherwise the command keeps
139 * being buffered by the VS FF, to the point that the FF dies.
140 */
141 const uint32_t dw1 = GEN6_PIPE_CONTROL_DEPTH_STALL |
142 GEN6_PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
143 GEN6_PIPE_CONTROL_STATE_CACHE_INVALIDATE;
144
145 gen6_wa_pre_pipe_control(p, dw1);
146
147 if ((p->state.current_pipe_control_dw1 & dw1) != dw1)
148 gen6_pipe_control(p, dw1);
149 }
150
151 static void
152 gen6_wa_pre_3dstate_wm_max_threads(struct ilo_3d_pipeline *p)
153 {
154 /*
155 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
156 *
157 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
158 * field set (DW1 Bit 1), must be issued prior to any change to the
159 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
160 */
161 const uint32_t dw1 = GEN6_PIPE_CONTROL_PIXEL_SCOREBOARD_STALL;
162
163 ILO_DEV_ASSERT(p->dev, 6, 6);
164
165 gen6_wa_pre_pipe_control(p, dw1);
166
167 if ((p->state.current_pipe_control_dw1 & dw1) != dw1)
168 gen6_pipe_control(p, dw1);
169 }
170
171 static void
172 gen6_wa_pre_3dstate_multisample(struct ilo_3d_pipeline *p)
173 {
174 /*
175 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
176 *
177 * "Driver must guarentee that all the caches in the depth pipe are
178 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
179 * requires driver to send a PIPE_CONTROL with a CS stall along with a
180 * Depth Flush prior to this command."
181 */
182 const uint32_t dw1 = GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH |
183 GEN6_PIPE_CONTROL_CS_STALL;
184
185 ILO_DEV_ASSERT(p->dev, 6, 6);
186
187 gen6_wa_pre_pipe_control(p, dw1);
188
189 if ((p->state.current_pipe_control_dw1 & dw1) != dw1)
190 gen6_pipe_control(p, dw1);
191 }
192
193 static void
194 gen6_wa_pre_depth(struct ilo_3d_pipeline *p)
195 {
196 ILO_DEV_ASSERT(p->dev, 6, 6);
197
198 /*
199 * From the Ivy Bridge PRM, volume 2 part 1, page 315:
200 *
201 * "Restriction: Prior to changing Depth/Stencil Buffer state (i.e.,
202 * any combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
203 * 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
204 * issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
205 * set), followed by a pipelined depth cache flush (PIPE_CONTROL with
206 * Depth Flush Bit set, followed by another pipelined depth stall
207 * (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
208 * guarantee that the pipeline from WM onwards is already flushed
209 * (e.g., via a preceding MI_FLUSH)."
210 *
211 * According to the classic driver, it also applies for GEN6.
212 */
213 gen6_wa_pre_pipe_control(p, GEN6_PIPE_CONTROL_DEPTH_STALL |
214 GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH);
215
216 gen6_pipe_control(p, GEN6_PIPE_CONTROL_DEPTH_STALL);
217 gen6_pipe_control(p, GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH);
218 gen6_pipe_control(p, GEN6_PIPE_CONTROL_DEPTH_STALL);
219 }
220
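/* test whether the given state is marked dirty for this session */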
221 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
222
223 void
224 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
225 const struct ilo_state_vector *vec,
226 struct gen6_pipeline_session *session)
227 {
228 /* PIPELINE_SELECT */
229 if (session->hw_ctx_changed) {
230 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
231 gen6_wa_pre_non_pipelined(p);
232
233 gen6_PIPELINE_SELECT(p->builder, 0x0);
234 }
235 }
236
237 void
238 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
239 const struct ilo_state_vector *vec,
240 struct gen6_pipeline_session *session)
241 {
242 /* STATE_SIP */
243 if (session->hw_ctx_changed) {
244 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
245 gen6_wa_pre_non_pipelined(p);
246
247 gen6_STATE_SIP(p->builder, 0);
248 }
249 }
250
251 void
252 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
253 const struct ilo_state_vector *vec,
254 struct gen6_pipeline_session *session)
255 {
256 /* STATE_BASE_ADDRESS */
257 if (session->state_bo_changed || session->kernel_bo_changed ||
258 session->batch_bo_changed) {
259 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
260 gen6_wa_pre_non_pipelined(p);
261
262 gen6_state_base_address(p->builder, session->hw_ctx_changed);
263
264 /*
265 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
266 *
267 * "The following commands must be reissued following any change to
268 * the base addresses:
269 *
270 * * 3DSTATE_BINDING_TABLE_POINTERS
271 * * 3DSTATE_SAMPLER_STATE_POINTERS
272 * * 3DSTATE_VIEWPORT_STATE_POINTERS
273 * * 3DSTATE_CC_POINTERS
274 * * MEDIA_STATE_POINTERS"
275 *
276 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
277 * reasonable to also reissue the command. The same goes for the PCB.
278 */
279 session->viewport_state_changed = true;
280
281 session->cc_state_blend_changed = true;
282 session->cc_state_dsa_changed = true;
283 session->cc_state_cc_changed = true;
284
285 session->scissor_state_changed = true;
286
287 session->binding_table_vs_changed = true;
288 session->binding_table_gs_changed = true;
289 session->binding_table_fs_changed = true;
290
291 session->sampler_state_vs_changed = true;
292 session->sampler_state_gs_changed = true;
293 session->sampler_state_fs_changed = true;
294
295 session->pcb_state_vs_changed = true;
296 session->pcb_state_gs_changed = true;
297 session->pcb_state_fs_changed = true;
298 }
299 }
300
301 static void
302 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
303 const struct ilo_state_vector *vec,
304 struct gen6_pipeline_session *session)
305 {
306 /* 3DSTATE_URB */
307 if (DIRTY(VE) || DIRTY(VS) || DIRTY(GS)) {
308 const bool gs_active = (vec->gs || (vec->vs &&
309 ilo_shader_get_kernel_param(vec->vs, ILO_KERNEL_VS_GEN6_SO)));
310 int vs_entry_size, gs_entry_size;
311 int vs_total_size, gs_total_size;
312
313 vs_entry_size = (vec->vs) ?
314 ilo_shader_get_kernel_param(vec->vs, ILO_KERNEL_OUTPUT_COUNT) : 0;
315
316 /*
317 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
318 * share VUE handles. The VUE allocation size must be large enough to
319 * store either VF outputs (number of VERTEX_ELEMENTs) or VS outputs.
320 *
321 * I am not sure if the PRM explicitly states that VF and VS share VUE
322 * handles. But here is a citation that implies so:
323 *
324 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
325 *
326 * "Once a FF stage that spawn threads has sufficient input to
327 * initiate a thread, it must guarantee that it is safe to request
328 * the thread initiation. For all these FF stages, this check is
329 * based on :
330 *
331 * - The availability of output URB entries:
332 * - VS: As the input URB entries are overwritten with the
333 * VS-generated output data, output URB availability isn't a
334 * factor."
335 */
336 if (vs_entry_size < vec->ve->count)
337 vs_entry_size = vec->ve->count;
338
339 gs_entry_size = (vec->gs) ?
340 ilo_shader_get_kernel_param(vec->gs, ILO_KERNEL_OUTPUT_COUNT) :
341 (gs_active) ? vs_entry_size : 0;
342
343 /* in bytes */
344 vs_entry_size *= sizeof(float) * 4;
345 gs_entry_size *= sizeof(float) * 4;
346 vs_total_size = p->dev->urb_size;
347
348 if (gs_active) {
349 vs_total_size /= 2;
350 gs_total_size = vs_total_size;
351 }
352 else {
353 gs_total_size = 0;
354 }
355
356 gen6_3DSTATE_URB(p->builder, vs_total_size, gs_total_size,
357 vs_entry_size, gs_entry_size);
358
359 /*
360 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
361 *
362 * "Because of a urb corruption caused by allocating a previous
363 * gsunit's urb entry to vsunit software is required to send a
364 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
365 * size == 0) plus a dummy DRAW call before any case where VS will
366 * be taking over GS URB space."
367 */
368 if (p->state.gs.active && !gs_active)
369 ilo_3d_pipeline_emit_flush_gen6(p);
370
371 p->state.gs.active = gs_active;
372 }
373 }
374
375 static void
376 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
377 const struct ilo_state_vector *vec,
378 struct gen6_pipeline_session *session)
379 {
380 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
381 if (session->viewport_state_changed) {
382 gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->builder,
383 p->state.CLIP_VIEWPORT,
384 p->state.SF_VIEWPORT,
385 p->state.CC_VIEWPORT);
386 }
387 }
388
389 static void
390 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
391 const struct ilo_state_vector *vec,
392 struct gen6_pipeline_session *session)
393 {
394 /* 3DSTATE_CC_STATE_POINTERS */
395 if (session->cc_state_blend_changed ||
396 session->cc_state_dsa_changed ||
397 session->cc_state_cc_changed) {
398 gen6_3DSTATE_CC_STATE_POINTERS(p->builder,
399 p->state.BLEND_STATE,
400 p->state.DEPTH_STENCIL_STATE,
401 p->state.COLOR_CALC_STATE);
402 }
403
404 /* 3DSTATE_SAMPLER_STATE_POINTERS */
405 if (session->sampler_state_vs_changed ||
406 session->sampler_state_gs_changed ||
407 session->sampler_state_fs_changed) {
408 gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->builder,
409 p->state.vs.SAMPLER_STATE,
410 0,
411 p->state.wm.SAMPLER_STATE);
412 }
413 }
414
415 static void
416 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
417 const struct ilo_state_vector *vec,
418 struct gen6_pipeline_session *session)
419 {
420 /* 3DSTATE_SCISSOR_STATE_POINTERS */
421 if (session->scissor_state_changed) {
422 gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->builder,
423 p->state.SCISSOR_RECT);
424 }
425
426 /* 3DSTATE_BINDING_TABLE_POINTERS */
427 if (session->binding_table_vs_changed ||
428 session->binding_table_gs_changed ||
429 session->binding_table_fs_changed) {
430 gen6_3DSTATE_BINDING_TABLE_POINTERS(p->builder,
431 p->state.vs.BINDING_TABLE_STATE,
432 p->state.gs.BINDING_TABLE_STATE,
433 p->state.wm.BINDING_TABLE_STATE);
434 }
435 }
436
437 void
438 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
439 const struct ilo_state_vector *vec,
440 struct gen6_pipeline_session *session)
441 {
442 if (ilo_dev_gen(p->dev) >= ILO_GEN(7.5)) {
443 /* 3DSTATE_INDEX_BUFFER */
444 if (DIRTY(IB) || session->batch_bo_changed) {
445 gen6_3DSTATE_INDEX_BUFFER(p->builder,
446 &vec->ib, false);
447 }
448
449 /* 3DSTATE_VF */
450 if (session->primitive_restart_changed) {
451 gen7_3DSTATE_VF(p->builder, vec->draw->primitive_restart,
452 vec->draw->restart_index);
453 }
454 }
455 else {
456 /* 3DSTATE_INDEX_BUFFER */
457 if (DIRTY(IB) || session->primitive_restart_changed ||
458 session->batch_bo_changed) {
459 gen6_3DSTATE_INDEX_BUFFER(p->builder,
460 &vec->ib, vec->draw->primitive_restart);
461 }
462 }
463
464 /* 3DSTATE_VERTEX_BUFFERS */
465 if (DIRTY(VB) || DIRTY(VE) || session->batch_bo_changed)
466 gen6_3DSTATE_VERTEX_BUFFERS(p->builder, vec->ve, &vec->vb);
467
468 /* 3DSTATE_VERTEX_ELEMENTS */
469 if (DIRTY(VE) || DIRTY(VS)) {
470 const struct ilo_ve_state *ve = vec->ve;
471 bool last_velement_edgeflag = false;
472 bool prepend_generate_ids = false;
473
474 if (vec->vs) {
475 if (ilo_shader_get_kernel_param(vec->vs,
476 ILO_KERNEL_VS_INPUT_EDGEFLAG)) {
477 /* we rely on the state tracker here */
478 assert(ilo_shader_get_kernel_param(vec->vs,
479 ILO_KERNEL_INPUT_COUNT) == ve->count);
480
481 last_velement_edgeflag = true;
482 }
483
484 if (ilo_shader_get_kernel_param(vec->vs,
485 ILO_KERNEL_VS_INPUT_INSTANCEID) ||
486 ilo_shader_get_kernel_param(vec->vs,
487 ILO_KERNEL_VS_INPUT_VERTEXID))
488 prepend_generate_ids = true;
489 }
490
491 gen6_3DSTATE_VERTEX_ELEMENTS(p->builder, ve,
492 last_velement_edgeflag, prepend_generate_ids);
493 }
494 }
495
496 void
497 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
498 const struct ilo_state_vector *vec,
499 struct gen6_pipeline_session *session)
500 {
501 /* 3DSTATE_VF_STATISTICS */
502 if (session->hw_ctx_changed)
503 gen6_3DSTATE_VF_STATISTICS(p->builder, false);
504 }
505
506 static void
507 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
508 const struct ilo_state_vector *vec,
509 struct gen6_pipeline_session *session)
510 {
511 /* 3DPRIMITIVE */
512 gen6_3DPRIMITIVE(p->builder, vec->draw, &vec->ib);
513
514 p->state.current_pipe_control_dw1 = 0;
515 assert(!p->state.deferred_pipe_control_dw1);
516 }
517
518 void
519 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
520 const struct ilo_state_vector *vec,
521 struct gen6_pipeline_session *session)
522 {
523 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(SAMPLER_VS) ||
524 session->kernel_bo_changed);
525 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
526
527 /*
528 * the classic i965 does this in upload_vs_state(), citing a spec that I
529 * cannot find
530 */
531 if (emit_3dstate_vs && ilo_dev_gen(p->dev) == ILO_GEN(6))
532 gen6_wa_pre_non_pipelined(p);
533
534 /* 3DSTATE_CONSTANT_VS */
535 if (emit_3dstate_constant_vs) {
536 gen6_3DSTATE_CONSTANT_VS(p->builder,
537 &p->state.vs.PUSH_CONSTANT_BUFFER,
538 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
539 1);
540 }
541
542 /* 3DSTATE_VS */
543 if (emit_3dstate_vs) {
544 const int num_samplers = vec->sampler[PIPE_SHADER_VERTEX].count;
545
546 gen6_3DSTATE_VS(p->builder, vec->vs, num_samplers);
547 }
548
549 if (emit_3dstate_constant_vs && ilo_dev_gen(p->dev) == ILO_GEN(6))
550 gen6_wa_post_3dstate_constant_vs(p);
551 }
552
553 static void
554 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
555 const struct ilo_state_vector *vec,
556 struct gen6_pipeline_session *session)
557 {
558 /* 3DSTATE_CONSTANT_GS */
559 if (session->pcb_state_gs_changed)
560 gen6_3DSTATE_CONSTANT_GS(p->builder, NULL, NULL, 0);
561
562 /* 3DSTATE_GS */
563 if (DIRTY(GS) || DIRTY(VS) ||
564 session->prim_changed || session->kernel_bo_changed) {
565 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
566
567 gen6_3DSTATE_GS(p->builder, vec->gs, vec->vs, verts_per_prim);
568 }
569 }
570
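/*
 * Compute the maximum SVBI (streamed vertex buffer index) that the bound
 * stream output targets can hold, and return true when it differs from the
 * value emitted previously.
 */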
571 static bool
572 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
573 const struct ilo_state_vector *vec,
574 struct gen6_pipeline_session *session)
575 {
576 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
577 const struct pipe_stream_output_info *so_info =
578 (vec->gs) ? ilo_shader_get_kernel_so_info(vec->gs) :
579 (vec->vs) ? ilo_shader_get_kernel_so_info(vec->vs) : NULL;
580 unsigned max_svbi = 0xffffffff;
581 int i;
582
583 for (i = 0; i < so_info->num_outputs; i++) {
584 const int output_buffer = so_info->output[i].output_buffer;
585 const struct pipe_stream_output_target *so =
586 vec->so.states[output_buffer];
587 const int struct_size = so_info->stride[output_buffer] * 4;
588 const int elem_size = so_info->output[i].num_components * 4;
589 int buf_size, count;
590
591 if (!so) {
592 max_svbi = 0;
593 break;
594 }
595
596 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
597
598 count = buf_size / struct_size;
599 if (buf_size % struct_size >= elem_size)
600 count++;
601
602 if (count < max_svbi)
603 max_svbi = count;
604 }
605
606 if (p->state.so_max_vertices != max_svbi) {
607 p->state.so_max_vertices = max_svbi;
608 return true;
609 }
610 }
611
612 return false;
613 }
614
615 static void
616 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
617 const struct ilo_state_vector *vec,
618 struct gen6_pipeline_session *session)
619 {
620 const bool emit = gen6_pipeline_update_max_svbi(p, vec, session);
621
622 /* 3DSTATE_GS_SVB_INDEX */
623 if (emit) {
624 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
625 gen6_wa_pre_non_pipelined(p);
626
627 gen6_3DSTATE_GS_SVB_INDEX(p->builder,
628 0, 0, p->state.so_max_vertices,
629 false);
630
631 if (session->hw_ctx_changed) {
632 int i;
633
634 /*
635 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
636 *
637 * "If a buffer is not enabled then the SVBI must be set to 0x0
638 * in order to not cause overflow in that SVBI."
639 *
640 * "If a buffer is not enabled then the MaxSVBI must be set to
641 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
642 */
643 for (i = 1; i < 4; i++) {
644 gen6_3DSTATE_GS_SVB_INDEX(p->builder,
645 i, 0, 0xffffffff, false);
646 }
647 }
648 }
649 }
650
651 void
652 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
653 const struct ilo_state_vector *vec,
654 struct gen6_pipeline_session *session)
655 {
656 /* 3DSTATE_CLIP */
657 if (DIRTY(RASTERIZER) || DIRTY(FS) || DIRTY(VIEWPORT) || DIRTY(FB)) {
658 bool enable_guardband = true;
659 unsigned i;
660
661 /*
662 * We do not do 2D clipping yet. Guard band test should only be enabled
663 * when the viewport is larger than the framebuffer.
664 */
665 for (i = 0; i < vec->viewport.count; i++) {
666 const struct ilo_viewport_cso *vp = &vec->viewport.cso[i];
667
668 if (vp->min_x > 0.0f || vp->max_x < vec->fb.state.width ||
669 vp->min_y > 0.0f || vp->max_y < vec->fb.state.height) {
670 enable_guardband = false;
671 break;
672 }
673 }
674
675 gen6_3DSTATE_CLIP(p->builder, vec->rasterizer,
676 vec->fs, enable_guardband, 1);
677 }
678 }
679
680 static void
681 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
682 const struct ilo_state_vector *vec,
683 struct gen6_pipeline_session *session)
684 {
685 /* 3DSTATE_SF */
686 if (DIRTY(RASTERIZER) || DIRTY(FS))
687 gen6_3DSTATE_SF(p->builder, vec->rasterizer, vec->fs);
688 }
689
690 void
691 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
692 const struct ilo_state_vector *vec,
693 struct gen6_pipeline_session *session)
694 {
695 /* 3DSTATE_DRAWING_RECTANGLE */
696 if (DIRTY(FB)) {
697 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
698 gen6_wa_pre_non_pipelined(p);
699
700 gen6_3DSTATE_DRAWING_RECTANGLE(p->builder, 0, 0,
701 vec->fb.state.width, vec->fb.state.height);
702 }
703 }
704
705 static void
706 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
707 const struct ilo_state_vector *vec,
708 struct gen6_pipeline_session *session)
709 {
710 /* 3DSTATE_CONSTANT_PS */
711 if (session->pcb_state_fs_changed) {
712 gen6_3DSTATE_CONSTANT_PS(p->builder,
713 &p->state.wm.PUSH_CONSTANT_BUFFER,
714 &p->state.wm.PUSH_CONSTANT_BUFFER_size,
715 1);
716 }
717
718 /* 3DSTATE_WM */
719 if (DIRTY(FS) || DIRTY(SAMPLER_FS) || DIRTY(BLEND) || DIRTY(DSA) ||
720 DIRTY(RASTERIZER) || session->kernel_bo_changed) {
721 const int num_samplers = vec->sampler[PIPE_SHADER_FRAGMENT].count;
722 const bool dual_blend = vec->blend->dual_blend;
723 const bool cc_may_kill = (vec->dsa->dw_alpha ||
724 vec->blend->alpha_to_coverage);
725
726 if (ilo_dev_gen(p->dev) == ILO_GEN(6) && session->hw_ctx_changed)
727 gen6_wa_pre_3dstate_wm_max_threads(p);
728
729 gen6_3DSTATE_WM(p->builder, vec->fs, num_samplers,
730 vec->rasterizer, dual_blend, cc_may_kill, 0);
731 }
732 }
733
734 static void
735 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
736 const struct ilo_state_vector *vec,
737 struct gen6_pipeline_session *session)
738 {
739 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
740 if (DIRTY(SAMPLE_MASK) || DIRTY(FB)) {
741 const uint32_t *packed_sample_pos;
742
743 packed_sample_pos = (vec->fb.num_samples > 1) ?
744 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
745
746 if (ilo_dev_gen(p->dev) == ILO_GEN(6)) {
747 gen6_wa_pre_non_pipelined(p);
748 gen6_wa_pre_3dstate_multisample(p);
749 }
750
751 gen6_3DSTATE_MULTISAMPLE(p->builder,
752 vec->fb.num_samples, packed_sample_pos,
753 vec->rasterizer->state.half_pixel_center);
754
755 gen6_3DSTATE_SAMPLE_MASK(p->builder,
756 (vec->fb.num_samples > 1) ? vec->sample_mask : 0x1);
757 }
758 }
759
760 static void
761 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
762 const struct ilo_state_vector *vec,
763 struct gen6_pipeline_session *session)
764 {
765 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
766 if (DIRTY(FB) || session->batch_bo_changed) {
767 const struct ilo_zs_surface *zs;
768 uint32_t clear_params;
769
770 if (vec->fb.state.zsbuf) {
771 const struct ilo_surface_cso *surface =
772 (const struct ilo_surface_cso *) vec->fb.state.zsbuf;
773 const struct ilo_texture_slice *slice =
774 ilo_texture_get_slice(ilo_texture(surface->base.texture),
775 surface->base.u.tex.level, surface->base.u.tex.first_layer);
776
777 assert(!surface->is_rt);
778
779 zs = &surface->u.zs;
780 clear_params = slice->clear_value;
781 }
782 else {
783 zs = &vec->fb.null_zs;
784 clear_params = 0;
785 }
786
787 if (ilo_dev_gen(p->dev) == ILO_GEN(6)) {
788 gen6_wa_pre_non_pipelined(p);
789 gen6_wa_pre_depth(p);
790 }
791
792 gen6_3DSTATE_DEPTH_BUFFER(p->builder, zs);
793 gen6_3DSTATE_HIER_DEPTH_BUFFER(p->builder, zs);
794 gen6_3DSTATE_STENCIL_BUFFER(p->builder, zs);
795 gen6_3DSTATE_CLEAR_PARAMS(p->builder, clear_params);
796 }
797 }
798
799 void
800 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
801 const struct ilo_state_vector *vec,
802 struct gen6_pipeline_session *session)
803 {
804 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
805 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
806 vec->rasterizer->state.poly_stipple_enable) {
807 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
808 gen6_wa_pre_non_pipelined(p);
809
810 gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->builder,
811 &vec->poly_stipple);
812
813 gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->builder, 0, 0);
814 }
815
816 /* 3DSTATE_LINE_STIPPLE */
817 if (DIRTY(RASTERIZER) && vec->rasterizer->state.line_stipple_enable) {
818 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
819 gen6_wa_pre_non_pipelined(p);
820
821 gen6_3DSTATE_LINE_STIPPLE(p->builder,
822 vec->rasterizer->state.line_stipple_pattern,
823 vec->rasterizer->state.line_stipple_factor + 1);
824 }
825
826 /* 3DSTATE_AA_LINE_PARAMETERS */
827 if (DIRTY(RASTERIZER) && vec->rasterizer->state.line_smooth) {
828 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
829 gen6_wa_pre_non_pipelined(p);
830
831 gen6_3DSTATE_AA_LINE_PARAMETERS(p->builder);
832 }
833 }
834
835 static void
836 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
837 const struct ilo_state_vector *vec,
838 struct gen6_pipeline_session *session)
839 {
840 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
841 if (ilo_dev_gen(p->dev) >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
842 p->state.SF_CLIP_VIEWPORT = gen7_SF_CLIP_VIEWPORT(p->builder,
843 vec->viewport.cso, vec->viewport.count);
844
845 p->state.CC_VIEWPORT = gen6_CC_VIEWPORT(p->builder,
846 vec->viewport.cso, vec->viewport.count);
847
848 session->viewport_state_changed = true;
849 }
850 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
851 else if (DIRTY(VIEWPORT)) {
852 p->state.CLIP_VIEWPORT = gen6_CLIP_VIEWPORT(p->builder,
853 vec->viewport.cso, vec->viewport.count);
854
855 p->state.SF_VIEWPORT = gen6_SF_VIEWPORT(p->builder,
856 vec->viewport.cso, vec->viewport.count);
857
858 p->state.CC_VIEWPORT = gen6_CC_VIEWPORT(p->builder,
859 vec->viewport.cso, vec->viewport.count);
860
861 session->viewport_state_changed = true;
862 }
863 }
864
865 static void
866 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
867 const struct ilo_state_vector *vec,
868 struct gen6_pipeline_session *session)
869 {
870 /* BLEND_STATE */
871 if (DIRTY(BLEND) || DIRTY(FB) || DIRTY(DSA)) {
872 p->state.BLEND_STATE = gen6_BLEND_STATE(p->builder,
873 vec->blend, &vec->fb, vec->dsa);
874
875 session->cc_state_blend_changed = true;
876 }
877
878 /* COLOR_CALC_STATE */
879 if (DIRTY(DSA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
880 p->state.COLOR_CALC_STATE =
881 gen6_COLOR_CALC_STATE(p->builder, &vec->stencil_ref,
882 vec->dsa->alpha_ref, &vec->blend_color);
883
884 session->cc_state_cc_changed = true;
885 }
886
887 /* DEPTH_STENCIL_STATE */
888 if (DIRTY(DSA)) {
889 p->state.DEPTH_STENCIL_STATE =
890 gen6_DEPTH_STENCIL_STATE(p->builder, vec->dsa);
891
892 session->cc_state_dsa_changed = true;
893 }
894 }
895
896 static void
897 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
898 const struct ilo_state_vector *vec,
899 struct gen6_pipeline_session *session)
900 {
901 /* SCISSOR_RECT */
902 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
903 /* there should be as many scissors as there are viewports */
904 p->state.SCISSOR_RECT = gen6_SCISSOR_RECT(p->builder,
905 &vec->scissor, vec->viewport.count);
906
907 session->scissor_state_changed = true;
908 }
909 }
910
911 static void
912 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
913 const struct ilo_state_vector *vec,
914 struct gen6_pipeline_session *session)
915 {
916 /* SURFACE_STATEs for render targets */
917 if (DIRTY(FB)) {
918 const struct ilo_fb_state *fb = &vec->fb;
919 const int offset = ILO_WM_DRAW_SURFACE(0);
920 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
921 int i;
922
923 for (i = 0; i < fb->state.nr_cbufs; i++) {
924 const struct ilo_surface_cso *surface =
925 (const struct ilo_surface_cso *) fb->state.cbufs[i];
926
927 if (!surface) {
928 surface_state[i] =
929 gen6_SURFACE_STATE(p->builder, &fb->null_rt, true);
930 }
931 else {
932 assert(surface && surface->is_rt);
933 surface_state[i] =
934 gen6_SURFACE_STATE(p->builder, &surface->u.rt, true);
935 }
936 }
937
938 /*
939 * Upload at least one render target, as
940 * brw_update_renderbuffer_surfaces() does. I don't know why.
941 */
942 if (i == 0) {
943 surface_state[i] =
944 gen6_SURFACE_STATE(p->builder, &fb->null_rt, true);
945
946 i++;
947 }
948
949 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
950
951 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
952 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
953
954 session->binding_table_fs_changed = true;
955 }
956 }
957
958 static void
959 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
960 const struct ilo_state_vector *vec,
961 struct gen6_pipeline_session *session)
962 {
963 const struct ilo_so_state *so = &vec->so;
964
965 if (ilo_dev_gen(p->dev) != ILO_GEN(6))
966 return;
967
968 /* SURFACE_STATEs for stream output targets */
969 if (DIRTY(VS) || DIRTY(GS) || DIRTY(SO)) {
970 const struct pipe_stream_output_info *so_info =
971 (vec->gs) ? ilo_shader_get_kernel_so_info(vec->gs) :
972 (vec->vs) ? ilo_shader_get_kernel_so_info(vec->vs) : NULL;
973 const int offset = ILO_GS_SO_SURFACE(0);
974 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
975 int i;
976
977 for (i = 0; so_info && i < so_info->num_outputs; i++) {
978 const int target = so_info->output[i].output_buffer;
979 const struct pipe_stream_output_target *so_target =
980 (target < so->count) ? so->states[target] : NULL;
981
982 if (so_target) {
983 surface_state[i] = gen6_so_SURFACE_STATE(p->builder,
984 so_target, so_info, i);
985 }
986 else {
987 surface_state[i] = 0;
988 }
989 }
990
991 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
992
993 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
994 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
995
996 session->binding_table_gs_changed = true;
997 }
998 }
999
1000 static void
1001 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
1002 const struct ilo_state_vector *vec,
1003 int shader_type,
1004 struct gen6_pipeline_session *session)
1005 {
1006 const struct ilo_view_state *view = &vec->view[shader_type];
1007 uint32_t *surface_state;
1008 int offset, i;
1009 bool skip = false;
1010
1011 /* SURFACE_STATEs for sampler views */
1012 switch (shader_type) {
1013 case PIPE_SHADER_VERTEX:
1014 if (DIRTY(VIEW_VS)) {
1015 offset = ILO_VS_TEXTURE_SURFACE(0);
1016 surface_state = &p->state.vs.SURFACE_STATE[offset];
1017
1018 session->binding_table_vs_changed = true;
1019 }
1020 else {
1021 skip = true;
1022 }
1023 break;
1024 case PIPE_SHADER_FRAGMENT:
1025 if (DIRTY(VIEW_FS)) {
1026 offset = ILO_WM_TEXTURE_SURFACE(0);
1027 surface_state = &p->state.wm.SURFACE_STATE[offset];
1028
1029 session->binding_table_fs_changed = true;
1030 }
1031 else {
1032 skip = true;
1033 }
1034 break;
1035 default:
1036 skip = true;
1037 break;
1038 }
1039
1040 if (skip)
1041 return;
1042
1043 for (i = 0; i < view->count; i++) {
1044 if (view->states[i]) {
1045 const struct ilo_view_cso *cso =
1046 (const struct ilo_view_cso *) view->states[i];
1047
1048 surface_state[i] =
1049 gen6_SURFACE_STATE(p->builder, &cso->surface, false);
1050 }
1051 else {
1052 surface_state[i] = 0;
1053 }
1054 }
1055
1056 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
1057
1058 if (i && session->num_surfaces[shader_type] < offset + i)
1059 session->num_surfaces[shader_type] = offset + i;
1060 }
1061
1062 static void
1063 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1064 const struct ilo_state_vector *vec,
1065 int shader_type,
1066 struct gen6_pipeline_session *session)
1067 {
1068 const struct ilo_cbuf_state *cbuf = &vec->cbuf[shader_type];
1069 uint32_t *surface_state;
1070 bool *binding_table_changed;
1071 int offset, count, i;
1072
1073 if (!DIRTY(CBUF))
1074 return;
1075
1076 /* SURFACE_STATEs for constant buffers */
1077 switch (shader_type) {
1078 case PIPE_SHADER_VERTEX:
1079 offset = ILO_VS_CONST_SURFACE(0);
1080 surface_state = &p->state.vs.SURFACE_STATE[offset];
1081 binding_table_changed = &session->binding_table_vs_changed;
1082 break;
1083 case PIPE_SHADER_FRAGMENT:
1084 offset = ILO_WM_CONST_SURFACE(0);
1085 surface_state = &p->state.wm.SURFACE_STATE[offset];
1086 binding_table_changed = &session->binding_table_fs_changed;
1087 break;
1088 default:
1089 return;
1090 break;
1091 }
1092
1093 /* constants are pushed via PCB */
1094 if (cbuf->enabled_mask == 0x1 && !cbuf->cso[0].resource) {
1095 memset(surface_state, 0, ILO_MAX_CONST_BUFFERS * 4);
1096 return;
1097 }
1098
1099 count = util_last_bit(cbuf->enabled_mask);
1100 for (i = 0; i < count; i++) {
1101 if (cbuf->cso[i].resource) {
1102 surface_state[i] = gen6_SURFACE_STATE(p->builder,
1103 &cbuf->cso[i].surface, false);
1104 }
1105 else {
1106 surface_state[i] = 0;
1107 }
1108 }
1109
1110 memset(&surface_state[count], 0, (ILO_MAX_CONST_BUFFERS - count) * 4);
1111
1112 if (count && session->num_surfaces[shader_type] < offset + count)
1113 session->num_surfaces[shader_type] = offset + count;
1114
1115 *binding_table_changed = true;
1116 }
1117
1118 static void
1119 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1120 const struct ilo_state_vector *vec,
1121 int shader_type,
1122 struct gen6_pipeline_session *session)
1123 {
1124 uint32_t *binding_table_state, *surface_state;
1125 int *binding_table_state_size, size;
1126 bool skip = false;
1127
1128 /* BINDING_TABLE_STATE */
1129 switch (shader_type) {
1130 case PIPE_SHADER_VERTEX:
1131 surface_state = p->state.vs.SURFACE_STATE;
1132 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1133 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1134
1135 skip = !session->binding_table_vs_changed;
1136 break;
1137 case PIPE_SHADER_GEOMETRY:
1138 surface_state = p->state.gs.SURFACE_STATE;
1139 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1140 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1141
1142 skip = !session->binding_table_gs_changed;
1143 break;
1144 case PIPE_SHADER_FRAGMENT:
1145 surface_state = p->state.wm.SURFACE_STATE;
1146 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1147 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1148
1149 skip = !session->binding_table_fs_changed;
1150 break;
1151 default:
1152 skip = true;
1153 break;
1154 }
1155
1156 if (skip)
1157 return;
1158
1159 /*
1160 * If we seemingly have fewer SURFACE_STATEs than before, it could be that
1161 * we did not touch those residing at the tail in this upload. Loop over
1162 * them to figure out the real number of SURFACE_STATEs.
1163 */
1164 for (size = *binding_table_state_size;
1165 size > session->num_surfaces[shader_type]; size--) {
1166 if (surface_state[size - 1])
1167 break;
1168 }
1169 if (size < session->num_surfaces[shader_type])
1170 size = session->num_surfaces[shader_type];
1171
1172 *binding_table_state = gen6_BINDING_TABLE_STATE(p->builder,
1173 surface_state, size);
1174 *binding_table_state_size = size;
1175 }
1176
1177 static void
1178 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1179 const struct ilo_state_vector *vec,
1180 int shader_type,
1181 struct gen6_pipeline_session *session)
1182 {
1183 const struct ilo_sampler_cso * const *samplers =
1184 vec->sampler[shader_type].cso;
1185 const struct pipe_sampler_view * const *views =
1186 (const struct pipe_sampler_view **) vec->view[shader_type].states;
1187 const int num_samplers = vec->sampler[shader_type].count;
1188 const int num_views = vec->view[shader_type].count;
1189 uint32_t *sampler_state, *border_color_state;
1190 bool emit_border_color = false;
1191 bool skip = false;
1192
1193 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1194 switch (shader_type) {
1195 case PIPE_SHADER_VERTEX:
1196 if (DIRTY(SAMPLER_VS) || DIRTY(VIEW_VS)) {
1197 sampler_state = &p->state.vs.SAMPLER_STATE;
1198 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1199
1200 if (DIRTY(SAMPLER_VS))
1201 emit_border_color = true;
1202
1203 session->sampler_state_vs_changed = true;
1204 }
1205 else {
1206 skip = true;
1207 }
1208 break;
1209 case PIPE_SHADER_FRAGMENT:
1210 if (DIRTY(SAMPLER_FS) || DIRTY(VIEW_FS)) {
1211 sampler_state = &p->state.wm.SAMPLER_STATE;
1212 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1213
1214 if (DIRTY(SAMPLER_FS))
1215 emit_border_color = true;
1216
1217 session->sampler_state_fs_changed = true;
1218 }
1219 else {
1220 skip = true;
1221 }
1222 break;
1223 default:
1224 skip = true;
1225 break;
1226 }
1227
1228 if (skip)
1229 return;
1230
1231 if (emit_border_color) {
1232 int i;
1233
1234 for (i = 0; i < num_samplers; i++) {
1235 border_color_state[i] = (samplers[i]) ?
1236 gen6_SAMPLER_BORDER_COLOR_STATE(p->builder, samplers[i]) : 0;
1237 }
1238 }
1239
1240 /* should we take the minimum of num_samplers and num_views? */
1241 *sampler_state = gen6_SAMPLER_STATE(p->builder,
1242 samplers, views,
1243 border_color_state,
1244 MIN2(num_samplers, num_views));
1245 }
1246
1247 static void
1248 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1249 const struct ilo_state_vector *vec,
1250 struct gen6_pipeline_session *session)
1251 {
1252 /* push constant buffer for VS */
1253 if (DIRTY(VS) || DIRTY(CBUF) || DIRTY(CLIP)) {
1254 const int cbuf0_size = (vec->vs) ?
1255 ilo_shader_get_kernel_param(vec->vs,
1256 ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1257 const int clip_state_size = (vec->vs) ?
1258 ilo_shader_get_kernel_param(vec->vs,
1259 ILO_KERNEL_VS_PCB_UCP_SIZE) : 0;
1260 const int total_size = cbuf0_size + clip_state_size;
1261
1262 if (total_size) {
1263 void *pcb;
1264
1265 p->state.vs.PUSH_CONSTANT_BUFFER =
1266 gen6_push_constant_buffer(p->builder, total_size, &pcb);
1267 p->state.vs.PUSH_CONSTANT_BUFFER_size = total_size;
1268
1269 if (cbuf0_size) {
1270 const struct ilo_cbuf_state *cbuf =
1271 &vec->cbuf[PIPE_SHADER_VERTEX];
1272
1273 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1274 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1275 }
1276 else {
1277 memcpy(pcb, cbuf->cso[0].user_buffer,
1278 cbuf->cso[0].user_buffer_size);
1279 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1280 cbuf0_size - cbuf->cso[0].user_buffer_size);
1281 }
1282
1283 pcb += cbuf0_size;
1284 }
1285
1286 if (clip_state_size)
1287 memcpy(pcb, &vec->clip, clip_state_size);
1288
1289 session->pcb_state_vs_changed = true;
1290 }
1291 else if (p->state.vs.PUSH_CONSTANT_BUFFER_size) {
1292 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1293 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1294
1295 session->pcb_state_vs_changed = true;
1296 }
1297 }
1298
1299 /* push constant buffer for FS */
1300 if (DIRTY(FS) || DIRTY(CBUF)) {
1301 const int cbuf0_size = (vec->fs) ?
1302 ilo_shader_get_kernel_param(vec->fs, ILO_KERNEL_PCB_CBUF0_SIZE) : 0;
1303
1304 if (cbuf0_size) {
1305 const struct ilo_cbuf_state *cbuf = &vec->cbuf[PIPE_SHADER_FRAGMENT];
1306 void *pcb;
1307
1308 p->state.wm.PUSH_CONSTANT_BUFFER =
1309 gen6_push_constant_buffer(p->builder, cbuf0_size, &pcb);
1310 p->state.wm.PUSH_CONSTANT_BUFFER_size = cbuf0_size;
1311
1312 if (cbuf0_size <= cbuf->cso[0].user_buffer_size) {
1313 memcpy(pcb, cbuf->cso[0].user_buffer, cbuf0_size);
1314 }
1315 else {
1316 memcpy(pcb, cbuf->cso[0].user_buffer,
1317 cbuf->cso[0].user_buffer_size);
1318 memset(pcb + cbuf->cso[0].user_buffer_size, 0,
1319 cbuf0_size - cbuf->cso[0].user_buffer_size);
1320 }
1321
1322 session->pcb_state_fs_changed = true;
1323 }
1324 else if (p->state.wm.PUSH_CONSTANT_BUFFER_size) {
1325 p->state.wm.PUSH_CONSTANT_BUFFER = 0;
1326 p->state.wm.PUSH_CONSTANT_BUFFER_size = 0;
1327
1328 session->pcb_state_fs_changed = true;
1329 }
1330 }
1331 }
1332
1333 #undef DIRTY
1334
1335 static void
1336 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1337 const struct ilo_state_vector *vec,
1338 struct gen6_pipeline_session *session)
1339 {
1340 /*
1341 * We try to match the order of the commands, as closely as possible, to
1342 * that of the classic i965 driver. It allows us to compare the command
1343 * streams easily.
1344 */
1345 gen6_pipeline_common_select(p, vec, session);
1346 gen6_pipeline_gs_svbi(p, vec, session);
1347 gen6_pipeline_common_sip(p, vec, session);
1348 gen6_pipeline_vf_statistics(p, vec, session);
1349 gen6_pipeline_common_base_address(p, vec, session);
1350 gen6_pipeline_common_pointers_1(p, vec, session);
1351 gen6_pipeline_common_urb(p, vec, session);
1352 gen6_pipeline_common_pointers_2(p, vec, session);
1353 gen6_pipeline_wm_multisample(p, vec, session);
1354 gen6_pipeline_vs(p, vec, session);
1355 gen6_pipeline_gs(p, vec, session);
1356 gen6_pipeline_clip(p, vec, session);
1357 gen6_pipeline_sf(p, vec, session);
1358 gen6_pipeline_wm(p, vec, session);
1359 gen6_pipeline_common_pointers_3(p, vec, session);
1360 gen6_pipeline_wm_depth(p, vec, session);
1361 gen6_pipeline_wm_raster(p, vec, session);
1362 gen6_pipeline_sf_rect(p, vec, session);
1363 gen6_pipeline_vf(p, vec, session);
1364 gen6_pipeline_vf_draw(p, vec, session);
1365 }
1366
1367 void
1368 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1369 const struct ilo_state_vector *vec,
1370 struct gen6_pipeline_session *session)
1371 {
1372 int shader_type;
1373
1374 gen6_pipeline_state_viewports(p, vec, session);
1375 gen6_pipeline_state_cc(p, vec, session);
1376 gen6_pipeline_state_scissors(p, vec, session);
1377 gen6_pipeline_state_pcb(p, vec, session);
1378
1379 /*
1380 * upload all SURFACE_STATEs together so that we know there is minimal
1381 * padding
1382 */
1383 gen6_pipeline_state_surfaces_rt(p, vec, session);
1384 gen6_pipeline_state_surfaces_so(p, vec, session);
1385 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1386 gen6_pipeline_state_surfaces_view(p, vec, shader_type, session);
1387 gen6_pipeline_state_surfaces_const(p, vec, shader_type, session);
1388 }
1389
1390 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1391 gen6_pipeline_state_samplers(p, vec, shader_type, session);
1392 /* this must be called after all SURFACE_STATEs are uploaded */
1393 gen6_pipeline_state_binding_tables(p, vec, shader_type, session);
1394 }
1395 }
1396
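/*
 * Set up the session for a draw call: record the dirty state bits, the
 * reduced primitive type, and which of the batch bo, state bo, kernel bo,
 * and HW context have been invalidated since the last draw.
 */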
1397 void
1398 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1399 const struct ilo_state_vector *vec,
1400 struct gen6_pipeline_session *session)
1401 {
1402 memset(session, 0, sizeof(*session));
1403 session->pipe_dirty = vec->dirty;
1404 session->reduced_prim = u_reduced_prim(vec->draw->mode);
1405
1406 session->hw_ctx_changed =
1407 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1408
1409 if (session->hw_ctx_changed) {
1410 /* these should be enough to make everything uploaded */
1411 session->batch_bo_changed = true;
1412 session->state_bo_changed = true;
1413 session->kernel_bo_changed = true;
1414 session->prim_changed = true;
1415 session->primitive_restart_changed = true;
1416 } else {
1417 /*
1418 * Any state that involves resources needs to be re-emitted when the
1419 * batch bo changes. This is because we do not pin the resources, and
1420 * their offsets (or existence) may change between batch buffers.
1421 */
1422 session->batch_bo_changed =
1423 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_BATCH_BO);
1424
1425 session->state_bo_changed =
1426 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1427 session->kernel_bo_changed =
1428 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1429 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1430 session->primitive_restart_changed =
1431 (p->state.primitive_restart != vec->draw->primitive_restart);
1432 }
1433 }
1434
1435 void
1436 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1437 const struct ilo_state_vector *vec,
1438 struct gen6_pipeline_session *session)
1439 {
1440 /* force all states to be uploaded if the state bo changed */
1441 if (session->state_bo_changed)
1442 session->pipe_dirty = ILO_DIRTY_ALL;
1443 else
1444 session->pipe_dirty = vec->dirty;
1445
1446 session->emit_draw_states(p, vec, session);
1447
1448 /* force all commands to be uploaded if the HW context changed */
1449 if (session->hw_ctx_changed)
1450 session->pipe_dirty = ILO_DIRTY_ALL;
1451 else
1452 session->pipe_dirty = vec->dirty;
1453
1454 session->emit_draw_commands(p, vec, session);
1455 }
1456
1457 void
1458 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1459 const struct ilo_state_vector *vec,
1460 struct gen6_pipeline_session *session)
1461 {
1462 p->state.reduced_prim = session->reduced_prim;
1463 p->state.primitive_restart = vec->draw->primitive_restart;
1464 }
1465
1466 static void
1467 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1468 const struct ilo_state_vector *vec)
1469 {
1470 struct gen6_pipeline_session session;
1471
1472 gen6_pipeline_prepare(p, vec, &session);
1473
1474 session.emit_draw_states = gen6_pipeline_states;
1475 session.emit_draw_commands = gen6_pipeline_commands;
1476
1477 gen6_pipeline_draw(p, vec, &session);
1478 gen6_pipeline_end(p, vec, &session);
1479 }
1480
1481 void
1482 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1483 {
1484 const uint32_t dw1 = GEN6_PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
1485 GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH |
1486 GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1487 GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE |
1488 GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1489 GEN6_PIPE_CONTROL_CS_STALL;
1490
1491 ILO_DEV_ASSERT(p->dev, 6, 7.5);
1492
1493 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
1494 gen6_wa_pre_pipe_control(p, dw1);
1495
1496 gen6_PIPE_CONTROL(p->builder, dw1, NULL, 0, false);
1497
1498 p->state.current_pipe_control_dw1 |= dw1;
1499 p->state.deferred_pipe_control_dw1 &= ~dw1;
1500 }
1501
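/*
 * Emit the commands that write the current value of the query to q->bo at
 * the given offset: a PIPE_CONTROL post-sync write for occlusion and
 * timestamp queries, or MI_STORE_REGISTER_MEM snapshots of the relevant
 * statistics registers for the others.
 */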
1502 void
1503 ilo_3d_pipeline_emit_query_gen6(struct ilo_3d_pipeline *p,
1504 struct ilo_query *q, uint32_t offset)
1505 {
1506 const uint32_t pipeline_statistics_regs[] = {
1507 GEN6_REG_IA_VERTICES_COUNT,
1508 GEN6_REG_IA_PRIMITIVES_COUNT,
1509 GEN6_REG_VS_INVOCATION_COUNT,
1510 GEN6_REG_GS_INVOCATION_COUNT,
1511 GEN6_REG_GS_PRIMITIVES_COUNT,
1512 GEN6_REG_CL_INVOCATION_COUNT,
1513 GEN6_REG_CL_PRIMITIVES_COUNT,
1514 GEN6_REG_PS_INVOCATION_COUNT,
1515 (ilo_dev_gen(p->dev) >= ILO_GEN(7)) ? GEN7_REG_HS_INVOCATION_COUNT : 0,
1516 (ilo_dev_gen(p->dev) >= ILO_GEN(7)) ? GEN7_REG_DS_INVOCATION_COUNT : 0,
1517 0,
1518 };
1519 const uint32_t primitives_generated_reg =
1520 (ilo_dev_gen(p->dev) >= ILO_GEN(7) && q->index > 0) ?
1521 GEN7_REG_SO_PRIM_STORAGE_NEEDED(q->index) :
1522 GEN6_REG_CL_INVOCATION_COUNT;
1523 const uint32_t primitives_emitted_reg =
1524 (ilo_dev_gen(p->dev) >= ILO_GEN(7)) ?
1525 GEN7_REG_SO_NUM_PRIMS_WRITTEN(q->index) :
1526 GEN6_REG_SO_NUM_PRIMS_WRITTEN;
1527 const uint32_t *regs;
1528 int reg_count = 0, i;
1529 uint32_t pipe_control_dw1 = 0;
1530
1531 ILO_DEV_ASSERT(p->dev, 6, 7.5);
1532
1533 switch (q->type) {
1534 case PIPE_QUERY_OCCLUSION_COUNTER:
1535 pipe_control_dw1 = GEN6_PIPE_CONTROL_DEPTH_STALL |
1536 GEN6_PIPE_CONTROL_WRITE_PS_DEPTH_COUNT;
1537 break;
1538 case PIPE_QUERY_TIMESTAMP:
1539 case PIPE_QUERY_TIME_ELAPSED:
1540 pipe_control_dw1 = GEN6_PIPE_CONTROL_WRITE_TIMESTAMP;
1541 break;
1542 case PIPE_QUERY_PRIMITIVES_GENERATED:
1543 regs = &primitives_generated_reg;
1544 reg_count = 1;
1545 break;
1546 case PIPE_QUERY_PRIMITIVES_EMITTED:
1547 regs = &primitives_emitted_reg;
1548 reg_count = 1;
1549 break;
1550 case PIPE_QUERY_PIPELINE_STATISTICS:
1551 regs = pipeline_statistics_regs;
1552 reg_count = Elements(pipeline_statistics_regs);
1553 break;
1554 default:
1555 break;
1556 }
1557
1558 if (pipe_control_dw1) {
1559 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
1560 gen6_wa_pre_pipe_control(p, pipe_control_dw1);
1561
1562 gen6_PIPE_CONTROL(p->builder, pipe_control_dw1, q->bo, offset, true);
1563
1564 p->state.current_pipe_control_dw1 |= pipe_control_dw1;
1565 p->state.deferred_pipe_control_dw1 &= ~pipe_control_dw1;
1566 }
1567
1568 if (!reg_count)
1569 return;
1570
1571 p->emit_flush(p);
1572
1573 for (i = 0; i < reg_count; i++) {
1574 if (regs[i]) {
1575 /* store lower 32 bits */
1576 gen6_MI_STORE_REGISTER_MEM(p->builder, q->bo, offset, regs[i]);
1577 /* store higher 32 bits */
1578 gen6_MI_STORE_REGISTER_MEM(p->builder, q->bo,
1579 offset + 4, regs[i] + 4);
1580 } else {
1581 gen6_MI_STORE_DATA_IMM(p->builder, q->bo, offset, 0, true);
1582 }
1583
1584 offset += 8;
1585 }
1586 }
1587
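/*
 * Emit null/disabled VS, GS, CLIP, and SF state, as used by the RECTLIST
 * blitter path.
 */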
1588 static void
1589 gen6_rectlist_vs_to_sf(struct ilo_3d_pipeline *p,
1590 const struct ilo_blitter *blitter,
1591 struct gen6_rectlist_session *session)
1592 {
1593 gen6_3DSTATE_CONSTANT_VS(p->builder, NULL, NULL, 0);
1594 gen6_3DSTATE_VS(p->builder, NULL, 0);
1595
1596 gen6_wa_post_3dstate_constant_vs(p);
1597
1598 gen6_3DSTATE_CONSTANT_GS(p->builder, NULL, NULL, 0);
1599 gen6_3DSTATE_GS(p->builder, NULL, NULL, 0);
1600
1601 gen6_3DSTATE_CLIP(p->builder, NULL, NULL, false, 0);
1602 gen6_3DSTATE_SF(p->builder, NULL, NULL);
1603 }
1604
1605 static void
1606 gen6_rectlist_wm(struct ilo_3d_pipeline *p,
1607 const struct ilo_blitter *blitter,
1608 struct gen6_rectlist_session *session)
1609 {
1610 uint32_t hiz_op;
1611
1612 switch (blitter->op) {
1613 case ILO_BLITTER_RECTLIST_CLEAR_ZS:
1614 hiz_op = GEN6_WM_DW4_DEPTH_CLEAR;
1615 break;
1616 case ILO_BLITTER_RECTLIST_RESOLVE_Z:
1617 hiz_op = GEN6_WM_DW4_DEPTH_RESOLVE;
1618 break;
1619 case ILO_BLITTER_RECTLIST_RESOLVE_HIZ:
1620 hiz_op = GEN6_WM_DW4_HIZ_RESOLVE;
1621 break;
1622 default:
1623 hiz_op = 0;
1624 break;
1625 }
1626
1627 gen6_3DSTATE_CONSTANT_PS(p->builder, NULL, NULL, 0);
1628
1629 gen6_wa_pre_3dstate_wm_max_threads(p);
1630 gen6_3DSTATE_WM(p->builder, NULL, 0, NULL, false, false, hiz_op);
1631 }
1632
1633 static void
1634 gen6_rectlist_wm_depth(struct ilo_3d_pipeline *p,
1635 const struct ilo_blitter *blitter,
1636 struct gen6_rectlist_session *session)
1637 {
1638 gen6_wa_pre_depth(p);
1639
1640 if (blitter->uses & (ILO_BLITTER_USE_FB_DEPTH |
1641 ILO_BLITTER_USE_FB_STENCIL)) {
1642 gen6_3DSTATE_DEPTH_BUFFER(p->builder,
1643 &blitter->fb.dst.u.zs);
1644 }
1645
1646 if (blitter->uses & ILO_BLITTER_USE_FB_DEPTH) {
1647 gen6_3DSTATE_HIER_DEPTH_BUFFER(p->builder,
1648 &blitter->fb.dst.u.zs);
1649 }
1650
1651 if (blitter->uses & ILO_BLITTER_USE_FB_STENCIL) {
1652 gen6_3DSTATE_STENCIL_BUFFER(p->builder,
1653 &blitter->fb.dst.u.zs);
1654 }
1655
1656 gen6_3DSTATE_CLEAR_PARAMS(p->builder,
1657 blitter->depth_clear_value);
1658 }
1659
1660 static void
1661 gen6_rectlist_wm_multisample(struct ilo_3d_pipeline *p,
1662 const struct ilo_blitter *blitter,
1663 struct gen6_rectlist_session *session)
1664 {
1665 const uint32_t *packed_sample_pos = (blitter->fb.num_samples > 1) ?
1666 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
1667
1668 gen6_wa_pre_3dstate_multisample(p);
1669
1670 gen6_3DSTATE_MULTISAMPLE(p->builder, blitter->fb.num_samples,
1671 packed_sample_pos, true);
1672
1673 gen6_3DSTATE_SAMPLE_MASK(p->builder,
1674 (1 << blitter->fb.num_samples) - 1);
1675 }
1676
1677 static void
1678 gen6_rectlist_commands(struct ilo_3d_pipeline *p,
1679 const struct ilo_blitter *blitter,
1680 struct gen6_rectlist_session *session)
1681 {
1682 gen6_wa_pre_non_pipelined(p);
1683
1684 gen6_rectlist_wm_multisample(p, blitter, session);
1685
1686 gen6_state_base_address(p->builder, true);
1687
1688 gen6_3DSTATE_VERTEX_BUFFERS(p->builder,
1689 &blitter->ve, &blitter->vb);
1690
1691 gen6_3DSTATE_VERTEX_ELEMENTS(p->builder,
1692 &blitter->ve, false, false);
1693
1694 gen6_3DSTATE_URB(p->builder,
1695 p->dev->urb_size, 0, blitter->ve.count * 4 * sizeof(float), 0);
1696 /* 3DSTATE_URB workaround */
1697 if (p->state.gs.active) {
1698 ilo_3d_pipeline_emit_flush_gen6(p);
1699 p->state.gs.active = false;
1700 }
1701
1702 if (blitter->uses &
1703 (ILO_BLITTER_USE_DSA | ILO_BLITTER_USE_CC)) {
1704 gen6_3DSTATE_CC_STATE_POINTERS(p->builder, 0,
1705 session->DEPTH_STENCIL_STATE, session->COLOR_CALC_STATE);
1706 }
1707
1708 gen6_rectlist_vs_to_sf(p, blitter, session);
1709 gen6_rectlist_wm(p, blitter, session);
1710
1711 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1712 gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->builder,
1713 0, 0, session->CC_VIEWPORT);
1714 }
1715
1716 gen6_rectlist_wm_depth(p, blitter, session);
1717
1718 gen6_3DSTATE_DRAWING_RECTANGLE(p->builder, 0, 0,
1719 blitter->fb.width, blitter->fb.height);
1720
1721 gen6_3DPRIMITIVE(p->builder, &blitter->draw, NULL);
1722 }
1723
1724 static void
1725 gen6_rectlist_states(struct ilo_3d_pipeline *p,
1726 const struct ilo_blitter *blitter,
1727 struct gen6_rectlist_session *session)
1728 {
1729 if (blitter->uses & ILO_BLITTER_USE_DSA) {
1730 session->DEPTH_STENCIL_STATE =
1731 gen6_DEPTH_STENCIL_STATE(p->builder, &blitter->dsa);
1732 }
1733
1734 if (blitter->uses & ILO_BLITTER_USE_CC) {
1735 session->COLOR_CALC_STATE =
1736 gen6_COLOR_CALC_STATE(p->builder, &blitter->cc.stencil_ref,
1737 blitter->cc.alpha_ref, &blitter->cc.blend_color);
1738 }
1739
1740 if (blitter->uses & ILO_BLITTER_USE_VIEWPORT) {
1741 session->CC_VIEWPORT =
1742 gen6_CC_VIEWPORT(p->builder, &blitter->viewport, 1);
1743 }
1744 }
1745
1746 static void
1747 ilo_3d_pipeline_emit_rectlist_gen6(struct ilo_3d_pipeline *p,
1748 const struct ilo_blitter *blitter)
1749 {
1750 struct gen6_rectlist_session session;
1751
1752 memset(&session, 0, sizeof(session));
1753 gen6_rectlist_states(p, blitter, &session);
1754 gen6_rectlist_commands(p, blitter, &session);
1755 }
1756
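/*
 * Return a conservative estimate of the space needed by the commands
 * emitted for a single draw.
 */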
1757 static int
1758 gen6_pipeline_max_command_size(const struct ilo_3d_pipeline *p)
1759 {
1760 static int size;
1761
1762 if (!size) {
1763 size += GEN6_3DSTATE_CONSTANT_ANY__SIZE * 3;
1764 size += GEN6_3DSTATE_GS_SVB_INDEX__SIZE * 4;
1765 size += GEN6_PIPE_CONTROL__SIZE * 5;
1766
1767 size +=
1768 GEN6_STATE_BASE_ADDRESS__SIZE +
1769 GEN6_STATE_SIP__SIZE +
1770 GEN6_3DSTATE_VF_STATISTICS__SIZE +
1771 GEN6_PIPELINE_SELECT__SIZE +
1772 GEN6_3DSTATE_BINDING_TABLE_POINTERS__SIZE +
1773 GEN6_3DSTATE_SAMPLER_STATE_POINTERS__SIZE +
1774 GEN6_3DSTATE_URB__SIZE +
1775 GEN6_3DSTATE_VERTEX_BUFFERS__SIZE +
1776 GEN6_3DSTATE_VERTEX_ELEMENTS__SIZE +
1777 GEN6_3DSTATE_INDEX_BUFFER__SIZE +
1778 GEN6_3DSTATE_VIEWPORT_STATE_POINTERS__SIZE +
1779 GEN6_3DSTATE_CC_STATE_POINTERS__SIZE +
1780 GEN6_3DSTATE_SCISSOR_STATE_POINTERS__SIZE +
1781 GEN6_3DSTATE_VS__SIZE +
1782 GEN6_3DSTATE_GS__SIZE +
1783 GEN6_3DSTATE_CLIP__SIZE +
1784 GEN6_3DSTATE_SF__SIZE +
1785 GEN6_3DSTATE_WM__SIZE +
1786 GEN6_3DSTATE_SAMPLE_MASK__SIZE +
1787 GEN6_3DSTATE_DRAWING_RECTANGLE__SIZE +
1788 GEN6_3DSTATE_DEPTH_BUFFER__SIZE +
1789 GEN6_3DSTATE_POLY_STIPPLE_OFFSET__SIZE +
1790 GEN6_3DSTATE_POLY_STIPPLE_PATTERN__SIZE +
1791 GEN6_3DSTATE_LINE_STIPPLE__SIZE +
1792 GEN6_3DSTATE_AA_LINE_PARAMETERS__SIZE +
1793 GEN6_3DSTATE_MULTISAMPLE__SIZE +
1794 GEN6_3DSTATE_STENCIL_BUFFER__SIZE +
1795 GEN6_3DSTATE_HIER_DEPTH_BUFFER__SIZE +
1796 GEN6_3DSTATE_CLEAR_PARAMS__SIZE +
1797 GEN6_3DPRIMITIVE__SIZE;
1798 }
1799
1800 return size;
1801 }
1802
1803 int
1804 gen6_pipeline_estimate_state_size(const struct ilo_3d_pipeline *p,
1805 const struct ilo_state_vector *vec)
1806 {
1807 static int static_size;
1808 int sh_type, size;
1809
1810 if (!static_size) {
1811 /* 64 bytes, or 16 dwords */
1812 const int alignment = 64 / 4;
1813
1814 /* pad first */
1815 size = alignment - 1;
1816
1817 /* CC states */
1818 size += align(GEN6_BLEND_STATE__SIZE * ILO_MAX_DRAW_BUFFERS, alignment);
1819 size += align(GEN6_DEPTH_STENCIL_STATE__SIZE, alignment);
1820 size += align(GEN6_COLOR_CALC_STATE__SIZE, alignment);
1821
1822 /* viewport arrays */
1823 if (ilo_dev_gen(p->dev) >= ILO_GEN(7)) {
1824 size +=
1825 align(GEN7_SF_CLIP_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 16) +
1826 align(GEN6_CC_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1827 align(GEN6_SCISSOR_RECT__SIZE * ILO_MAX_VIEWPORTS, 8);
1828 }
1829 else {
1830 size +=
1831 align(GEN6_SF_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1832 align(GEN6_CLIP_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1833 align(GEN6_CC_VIEWPORT__SIZE * ILO_MAX_VIEWPORTS, 8) +
1834 align(GEN6_SCISSOR_RECT__SIZE * ILO_MAX_VIEWPORTS, 8);
1835 }
1836
1837 static_size = size;
1838 }
1839
1840 size = static_size;
1841
1842 for (sh_type = 0; sh_type < PIPE_SHADER_TYPES; sh_type++) {
1843 const int alignment = 32 / 4;
1844 int num_samplers, num_surfaces, pcb_size;
1845
1846 /* samplers */
1847 num_samplers = vec->sampler[sh_type].count;
1848
1849 /* sampler views and constant buffers */
1850 num_surfaces = vec->view[sh_type].count +
1851 util_bitcount(vec->cbuf[sh_type].enabled_mask);
1852
1853 pcb_size = 0;
1854
1855 switch (sh_type) {
1856 case PIPE_SHADER_VERTEX:
1857 if (vec->vs) {
1858 if (ilo_dev_gen(p->dev) == ILO_GEN(6)) {
1859 const struct pipe_stream_output_info *so_info =
1860 ilo_shader_get_kernel_so_info(vec->vs);
1861
1862 /* stream outputs */
1863 num_surfaces += so_info->num_outputs;
1864 }
1865
1866 pcb_size = ilo_shader_get_kernel_param(vec->vs,
1867 ILO_KERNEL_PCB_CBUF0_SIZE);
1868 pcb_size += ilo_shader_get_kernel_param(vec->vs,
1869 ILO_KERNEL_VS_PCB_UCP_SIZE);
1870 }
1871 break;
1872 case PIPE_SHADER_GEOMETRY:
1873 if (vec->gs && ilo_dev_gen(p->dev) == ILO_GEN(6)) {
1874 const struct pipe_stream_output_info *so_info =
1875 ilo_shader_get_kernel_so_info(vec->gs);
1876
1877 /* stream outputs */
1878 num_surfaces += so_info->num_outputs;
1879 }
1880 break;
1881 case PIPE_SHADER_FRAGMENT:
1882 /* render targets */
1883 num_surfaces += vec->fb.state.nr_cbufs;
1884
1885 if (vec->fs) {
1886 pcb_size = ilo_shader_get_kernel_param(vec->fs,
1887 ILO_KERNEL_PCB_CBUF0_SIZE);
1888 }
1889 break;
1890 default:
1891 break;
1892 }
1893
1894 /* SAMPLER_STATE array and SAMPLER_BORDER_COLORs */
1895 if (num_samplers) {
1896 size += align(GEN6_SAMPLER_STATE__SIZE * num_samplers, alignment) +
1897 align(GEN6_SAMPLER_BORDER_COLOR__SIZE, alignment) * num_samplers;
1898 }
1899
1900 /* BINDING_TABLE_STATE and SURFACE_STATEs */
1901 if (num_surfaces) {
1902 size += align(num_surfaces, alignment) +
1903 align(GEN6_SURFACE_STATE__SIZE, alignment) * num_surfaces;
1904 }
1905
1906 /* PCB */
1907 if (pcb_size)
1908 size += align(pcb_size, alignment);
1909 }
1910
1911 return size;
1912 }
1913
1914 int
1915 gen6_pipeline_estimate_query_size(const struct ilo_3d_pipeline *p,
1916 const struct ilo_query *q)
1917 {
1918 int size;
1919
1920 ILO_DEV_ASSERT(p->dev, 6, 7.5);
1921
1922 switch (q->type) {
1923 case PIPE_QUERY_OCCLUSION_COUNTER:
1924 size = GEN6_PIPE_CONTROL__SIZE;
1925 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
1926 size *= 3;
1927 break;
1928 case PIPE_QUERY_TIMESTAMP:
1929 case PIPE_QUERY_TIME_ELAPSED:
1930 size = GEN6_PIPE_CONTROL__SIZE;
1931 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
1932 size *= 2;
1933 break;
1934 case PIPE_QUERY_PRIMITIVES_GENERATED:
1935 case PIPE_QUERY_PRIMITIVES_EMITTED:
1936 size = GEN6_PIPE_CONTROL__SIZE;
1937 if (ilo_dev_gen(p->dev) == ILO_GEN(6))
1938 size *= 3;
1939
1940 size += GEN6_MI_STORE_REGISTER_MEM__SIZE * 2;
1941 break;
1942 case PIPE_QUERY_PIPELINE_STATISTICS:
1943 if (ilo_dev_gen(p->dev) >= ILO_GEN(7)) {
1944 const int num_regs = 10;
1945 const int num_pads = 1;
1946
1947 size = GEN6_PIPE_CONTROL__SIZE +
1948 GEN6_MI_STORE_REGISTER_MEM__SIZE * 2 * num_regs +
1949 GEN6_MI_STORE_DATA_IMM__SIZE * num_pads;
1950 } else {
1951 const int num_regs = 8;
1952 const int num_pads = 3;
1953
1954 size = GEN6_PIPE_CONTROL__SIZE * 3 +
1955 GEN6_MI_STORE_REGISTER_MEM__SIZE * 2 * num_regs +
1956 GEN6_MI_STORE_DATA_IMM__SIZE * num_pads;
1957 }
1958 break;
1959 default:
1960 size = 0;
1961 break;
1962 }
1963
1964 return size;
1965 }
1966
1967 static int
1968 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1969 enum ilo_3d_pipeline_action action,
1970 const void *arg)
1971 {
1972 int size;
1973
1974 switch (action) {
1975 case ILO_3D_PIPELINE_DRAW:
1976 {
1977 const struct ilo_state_vector *ilo = arg;
1978
1979 size = gen6_pipeline_max_command_size(p) +
1980 gen6_pipeline_estimate_state_size(p, ilo);
1981 }
1982 break;
1983 case ILO_3D_PIPELINE_FLUSH:
1984 size = GEN6_PIPE_CONTROL__SIZE * 3;
1985 break;
1986 case ILO_3D_PIPELINE_QUERY:
1987 size = gen6_pipeline_estimate_query_size(p,
1988 (const struct ilo_query *) arg);
1989 break;
1990 case ILO_3D_PIPELINE_RECTLIST:
1991 size = 64 + 256; /* states + commands */
1992 break;
1993 default:
1994 assert(!"unknown 3D pipeline action");
1995 size = 0;
1996 break;
1997 }
1998
1999 return size;
2000 }
2001
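/*
 * Set up the GEN6 entry points for size estimation and for emitting draws,
 * flushes, queries, and rectlists.
 */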
2002 void
2003 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
2004 {
2005 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
2006 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
2007 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
2008 p->emit_query = ilo_3d_pipeline_emit_query_gen6;
2009 p->emit_rectlist = ilo_3d_pipeline_emit_rectlist_gen6;
2010 }