ilo: support PIPE_CAP_USER_INDEX_BUFFERS
[mesa.git] / src / gallium / drivers / ilo / ilo_gpe_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2012-2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_half.h"
30 #include "brw_defines.h"
31 #include "intel_reg.h"
32
33 #include "ilo_context.h"
34 #include "ilo_cp.h"
35 #include "ilo_format.h"
36 #include "ilo_resource.h"
37 #include "ilo_shader.h"
38 #include "ilo_state.h"
39 #include "ilo_gpe_gen6.h"
40
41 /**
42 * Translate winsys tiling to hardware tiling.
43 */
44 int
45 ilo_gpe_gen6_translate_winsys_tiling(enum intel_tiling_mode tiling)
46 {
47 switch (tiling) {
48 case INTEL_TILING_NONE:
49 return 0;
50 case INTEL_TILING_X:
51 return BRW_SURFACE_TILED;
52 case INTEL_TILING_Y:
53 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
54 default:
55 assert(!"unknown tiling");
56 return 0;
57 }
58 }
59
/**
 * Translate a pipe primitive type to the matching hardware primitive type.
 *
 * \param prim a PIPE_PRIM_x value that has a hardware equivalent.
 * \return the matching _3DPRIM_x topology code.
 */
int
ilo_gpe_gen6_translate_pipe_prim(unsigned prim)
{
   /* sparse mapping table; prims without an entry stay zero */
   static const int prim_mapping[PIPE_PRIM_MAX] = {
      [PIPE_PRIM_POINTS]                     = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES]                      = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP]                  = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP]                 = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES]                  = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP]             = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN]               = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS]                      = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP]                 = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON]                    = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY]            = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY]       = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY]        = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]   = _3DPRIM_TRISTRIP_ADJ,
   };

   /* a zero entry means the pipe prim has no hardware equivalent */
   assert(prim_mapping[prim]);

   return prim_mapping[prim];
}
87
88 /**
89 * Translate a pipe texture target to the matching hardware surface type.
90 */
91 int
92 ilo_gpe_gen6_translate_texture(enum pipe_texture_target target)
93 {
94 switch (target) {
95 case PIPE_BUFFER:
96 return BRW_SURFACE_BUFFER;
97 case PIPE_TEXTURE_1D:
98 case PIPE_TEXTURE_1D_ARRAY:
99 return BRW_SURFACE_1D;
100 case PIPE_TEXTURE_2D:
101 case PIPE_TEXTURE_RECT:
102 case PIPE_TEXTURE_2D_ARRAY:
103 return BRW_SURFACE_2D;
104 case PIPE_TEXTURE_3D:
105 return BRW_SURFACE_3D;
106 case PIPE_TEXTURE_CUBE:
107 case PIPE_TEXTURE_CUBE_ARRAY:
108 return BRW_SURFACE_CUBE;
109 default:
110 assert(!"unknown texture target");
111 return BRW_SURFACE_BUFFER;
112 }
113 }
114
115 /**
116 * Translate a depth/stencil pipe format to the matching hardware
117 * format. Return -1 on errors.
118 */
119 static int
120 gen6_translate_depth_format(enum pipe_format format)
121 {
122 switch (format) {
123 case PIPE_FORMAT_Z16_UNORM:
124 return BRW_DEPTHFORMAT_D16_UNORM;
125 case PIPE_FORMAT_Z32_FLOAT:
126 return BRW_DEPTHFORMAT_D32_FLOAT;
127 case PIPE_FORMAT_Z24X8_UNORM:
128 return BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
129 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
130 return BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
131 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
132 return BRW_DEPTHFORMAT_D32_FLOAT_S8X24_UINT;
133 default:
134 return -1;
135 }
136 }
137
/**
 * Translate a pipe logicop to the matching hardware logicop.
 *
 * Asserts and returns BRW_LOGICOPFUNCTION_CLEAR on unknown input.
 */
static int
gen6_translate_pipe_logicop(unsigned logicop)
{
   switch (logicop) {
   case PIPE_LOGICOP_CLEAR:         return BRW_LOGICOPFUNCTION_CLEAR;
   case PIPE_LOGICOP_NOR:           return BRW_LOGICOPFUNCTION_NOR;
   case PIPE_LOGICOP_AND_INVERTED:  return BRW_LOGICOPFUNCTION_AND_INVERTED;
   case PIPE_LOGICOP_COPY_INVERTED: return BRW_LOGICOPFUNCTION_COPY_INVERTED;
   case PIPE_LOGICOP_AND_REVERSE:   return BRW_LOGICOPFUNCTION_AND_REVERSE;
   case PIPE_LOGICOP_INVERT:        return BRW_LOGICOPFUNCTION_INVERT;
   case PIPE_LOGICOP_XOR:           return BRW_LOGICOPFUNCTION_XOR;
   case PIPE_LOGICOP_NAND:          return BRW_LOGICOPFUNCTION_NAND;
   case PIPE_LOGICOP_AND:           return BRW_LOGICOPFUNCTION_AND;
   case PIPE_LOGICOP_EQUIV:         return BRW_LOGICOPFUNCTION_EQUIV;
   case PIPE_LOGICOP_NOOP:          return BRW_LOGICOPFUNCTION_NOOP;
   case PIPE_LOGICOP_OR_INVERTED:   return BRW_LOGICOPFUNCTION_OR_INVERTED;
   case PIPE_LOGICOP_COPY:          return BRW_LOGICOPFUNCTION_COPY;
   case PIPE_LOGICOP_OR_REVERSE:    return BRW_LOGICOPFUNCTION_OR_REVERSE;
   case PIPE_LOGICOP_OR:            return BRW_LOGICOPFUNCTION_OR;
   case PIPE_LOGICOP_SET:           return BRW_LOGICOPFUNCTION_SET;
   default:
      assert(!"unknown logicop function");
      return BRW_LOGICOPFUNCTION_CLEAR;
   }
}
166
167 /**
168 * Translate a pipe blend function to the matching hardware blend function.
169 */
170 static int
171 gen6_translate_pipe_blend(unsigned blend)
172 {
173 switch (blend) {
174 case PIPE_BLEND_ADD: return BRW_BLENDFUNCTION_ADD;
175 case PIPE_BLEND_SUBTRACT: return BRW_BLENDFUNCTION_SUBTRACT;
176 case PIPE_BLEND_REVERSE_SUBTRACT: return BRW_BLENDFUNCTION_REVERSE_SUBTRACT;
177 case PIPE_BLEND_MIN: return BRW_BLENDFUNCTION_MIN;
178 case PIPE_BLEND_MAX: return BRW_BLENDFUNCTION_MAX;
179 default:
180 assert(!"unknown blend function");
181 return BRW_BLENDFUNCTION_ADD;
182 };
183 }
184
/**
 * Translate a pipe blend factor to the matching hardware blend factor.
 *
 * Asserts and returns BRW_BLENDFACTOR_ONE on unknown input.
 */
static int
gen6_translate_pipe_blendfactor(unsigned blendfactor)
{
   switch (blendfactor) {
   case PIPE_BLENDFACTOR_ONE:                return BRW_BLENDFACTOR_ONE;
   case PIPE_BLENDFACTOR_SRC_COLOR:          return BRW_BLENDFACTOR_SRC_COLOR;
   case PIPE_BLENDFACTOR_SRC_ALPHA:          return BRW_BLENDFACTOR_SRC_ALPHA;
   case PIPE_BLENDFACTOR_DST_ALPHA:          return BRW_BLENDFACTOR_DST_ALPHA;
   case PIPE_BLENDFACTOR_DST_COLOR:          return BRW_BLENDFACTOR_DST_COLOR;
   case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return BRW_BLENDFACTOR_SRC_ALPHA_SATURATE;
   case PIPE_BLENDFACTOR_CONST_COLOR:        return BRW_BLENDFACTOR_CONST_COLOR;
   case PIPE_BLENDFACTOR_CONST_ALPHA:        return BRW_BLENDFACTOR_CONST_ALPHA;
   case PIPE_BLENDFACTOR_SRC1_COLOR:         return BRW_BLENDFACTOR_SRC1_COLOR;
   case PIPE_BLENDFACTOR_SRC1_ALPHA:         return BRW_BLENDFACTOR_SRC1_ALPHA;
   case PIPE_BLENDFACTOR_ZERO:               return BRW_BLENDFACTOR_ZERO;
   case PIPE_BLENDFACTOR_INV_SRC_COLOR:      return BRW_BLENDFACTOR_INV_SRC_COLOR;
   case PIPE_BLENDFACTOR_INV_SRC_ALPHA:      return BRW_BLENDFACTOR_INV_SRC_ALPHA;
   case PIPE_BLENDFACTOR_INV_DST_ALPHA:      return BRW_BLENDFACTOR_INV_DST_ALPHA;
   case PIPE_BLENDFACTOR_INV_DST_COLOR:      return BRW_BLENDFACTOR_INV_DST_COLOR;
   case PIPE_BLENDFACTOR_INV_CONST_COLOR:    return BRW_BLENDFACTOR_INV_CONST_COLOR;
   case PIPE_BLENDFACTOR_INV_CONST_ALPHA:    return BRW_BLENDFACTOR_INV_CONST_ALPHA;
   case PIPE_BLENDFACTOR_INV_SRC1_COLOR:     return BRW_BLENDFACTOR_INV_SRC1_COLOR;
   case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:     return BRW_BLENDFACTOR_INV_SRC1_ALPHA;
   default:
      assert(!"unknown blend factor");
      return BRW_BLENDFACTOR_ONE;
   }
}
216
/**
 * Translate a pipe stencil op to the matching hardware stencil op.
 *
 * Note the naming mismatch between the two sets of constants:
 * PIPE_STENCIL_OP_INCR/DECR are saturating and map to the hardware
 * INCRSAT/DECRSAT ops, while the pipe _WRAP variants map to the
 * hardware wrapping INCR/DECR ops.
 */
static int
gen6_translate_pipe_stencil_op(unsigned stencil_op)
{
   switch (stencil_op) {
   case PIPE_STENCIL_OP_KEEP:       return BRW_STENCILOP_KEEP;
   case PIPE_STENCIL_OP_ZERO:       return BRW_STENCILOP_ZERO;
   case PIPE_STENCIL_OP_REPLACE:    return BRW_STENCILOP_REPLACE;
   case PIPE_STENCIL_OP_INCR:       return BRW_STENCILOP_INCRSAT;
   case PIPE_STENCIL_OP_DECR:       return BRW_STENCILOP_DECRSAT;
   case PIPE_STENCIL_OP_INCR_WRAP:  return BRW_STENCILOP_INCR;
   case PIPE_STENCIL_OP_DECR_WRAP:  return BRW_STENCILOP_DECR;
   case PIPE_STENCIL_OP_INVERT:     return BRW_STENCILOP_INVERT;
   default:
      assert(!"unknown stencil op");
      return BRW_STENCILOP_KEEP;
   }
}
237
238 /**
239 * Translate a pipe texture mipfilter to the matching hardware mipfilter.
240 */
241 static int
242 gen6_translate_tex_mipfilter(unsigned filter)
243 {
244 switch (filter) {
245 case PIPE_TEX_MIPFILTER_NEAREST: return BRW_MIPFILTER_NEAREST;
246 case PIPE_TEX_MIPFILTER_LINEAR: return BRW_MIPFILTER_LINEAR;
247 case PIPE_TEX_MIPFILTER_NONE: return BRW_MIPFILTER_NONE;
248 default:
249 assert(!"unknown mipfilter");
250 return BRW_MIPFILTER_NONE;
251 }
252 }
253
254 /**
255 * Translate a pipe texture filter to the matching hardware mapfilter.
256 */
257 static int
258 gen6_translate_tex_filter(unsigned filter)
259 {
260 switch (filter) {
261 case PIPE_TEX_FILTER_NEAREST: return BRW_MAPFILTER_NEAREST;
262 case PIPE_TEX_FILTER_LINEAR: return BRW_MAPFILTER_LINEAR;
263 default:
264 assert(!"unknown sampler filter");
265 return BRW_MAPFILTER_NEAREST;
266 }
267 }
268
269 /**
270 * Translate a pipe texture coordinate wrapping mode to the matching hardware
271 * wrapping mode.
272 */
273 static int
274 gen6_translate_tex_wrap(unsigned wrap, bool clamp_to_edge)
275 {
276 /* clamp to edge or border? */
277 if (wrap == PIPE_TEX_WRAP_CLAMP) {
278 wrap = (clamp_to_edge) ?
279 PIPE_TEX_WRAP_CLAMP_TO_EDGE : PIPE_TEX_WRAP_CLAMP_TO_BORDER;
280 }
281
282 switch (wrap) {
283 case PIPE_TEX_WRAP_REPEAT: return BRW_TEXCOORDMODE_WRAP;
284 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return BRW_TEXCOORDMODE_CLAMP;
285 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return BRW_TEXCOORDMODE_CLAMP_BORDER;
286 case PIPE_TEX_WRAP_MIRROR_REPEAT: return BRW_TEXCOORDMODE_MIRROR;
287 case PIPE_TEX_WRAP_CLAMP:
288 case PIPE_TEX_WRAP_MIRROR_CLAMP:
289 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
290 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
291 default:
292 assert(!"unknown sampler wrap mode");
293 return BRW_TEXCOORDMODE_WRAP;
294 }
295 }
296
/**
 * Translate a pipe DSA test function to the matching hardware compare
 * function.
 *
 * Asserts and returns BRW_COMPAREFUNCTION_NEVER on unknown input.
 */
static int
gen6_translate_dsa_func(unsigned func)
{
   switch (func) {
   case PIPE_FUNC_NEVER:      return BRW_COMPAREFUNCTION_NEVER;
   case PIPE_FUNC_LESS:       return BRW_COMPAREFUNCTION_LESS;
   case PIPE_FUNC_EQUAL:      return BRW_COMPAREFUNCTION_EQUAL;
   case PIPE_FUNC_LEQUAL:     return BRW_COMPAREFUNCTION_LEQUAL;
   case PIPE_FUNC_GREATER:    return BRW_COMPAREFUNCTION_GREATER;
   case PIPE_FUNC_NOTEQUAL:   return BRW_COMPAREFUNCTION_NOTEQUAL;
   case PIPE_FUNC_GEQUAL:     return BRW_COMPAREFUNCTION_GEQUAL;
   case PIPE_FUNC_ALWAYS:     return BRW_COMPAREFUNCTION_ALWAYS;
   default:
      assert(!"unknown depth/stencil/alpha test function");
      return BRW_COMPAREFUNCTION_NEVER;
   }
}
318
/**
 * Translate a pipe shadow compare function to the matching hardware shadow
 * function.  The mapping is intentionally "inverted" — see below.
 */
static int
gen6_translate_shadow_func(unsigned func)
{
   /*
    * For PIPE_FUNC_x, the reference value is on the left-hand side of the
    * comparison, and 1.0 is returned when the comparison is true.
    *
    * For BRW_PREFILTER_x, the reference value is on the right-hand side of
    * the comparison, and 0.0 is returned when the comparison is true.
    *
    * Swapping the operands negates the relational operators, and inverting
    * the result swaps a function with its complement; both transformations
    * combined yield the mapping below (e.g. LESS -> LEQUAL).
    */
   switch (func) {
   case PIPE_FUNC_NEVER:      return BRW_PREFILTER_ALWAYS;
   case PIPE_FUNC_LESS:       return BRW_PREFILTER_LEQUAL;
   case PIPE_FUNC_EQUAL:      return BRW_PREFILTER_NOTEQUAL;
   case PIPE_FUNC_LEQUAL:     return BRW_PREFILTER_LESS;
   case PIPE_FUNC_GREATER:    return BRW_PREFILTER_GEQUAL;
   case PIPE_FUNC_NOTEQUAL:   return BRW_PREFILTER_EQUAL;
   case PIPE_FUNC_GEQUAL:     return BRW_PREFILTER_GREATER;
   case PIPE_FUNC_ALWAYS:     return BRW_PREFILTER_NEVER;
   default:
      assert(!"unknown shadow compare function");
      return BRW_PREFILTER_NEVER;
   }
}
347
348 /**
349 * Translate an index size to the matching hardware index format.
350 */
351 static int
352 gen6_translate_index_size(int size)
353 {
354 switch (size) {
355 case 4: return BRW_INDEX_DWORD;
356 case 2: return BRW_INDEX_WORD;
357 case 1: return BRW_INDEX_BYTE;
358 default:
359 assert(!"unknown index size");
360 return BRW_INDEX_BYTE;
361 }
362 }
363
/**
 * Emit STATE_BASE_ADDRESS.
 *
 * DW layout (10 DWords): header, five base addresses (general, surface
 * state, dynamic state, indirect object, instruction), then four upper
 * bounds (general, dynamic, indirect, instruction).  A NULL bo makes the
 * corresponding relocation emit a zero base address.
 *
 * The *_size parameters give the upper bounds and must be 4K-page
 * aligned; a size of zero disables the range check for that base.
 */
static void
gen6_emit_STATE_BASE_ADDRESS(const struct ilo_dev_info *dev,
                             struct intel_bo *general_state_bo,
                             struct intel_bo *surface_state_bo,
                             struct intel_bo *dynamic_state_bo,
                             struct intel_bo *indirect_object_bo,
                             struct intel_bo *instruction_bo,
                             uint32_t general_state_size,
                             uint32_t dynamic_state_size,
                             uint32_t indirect_object_size,
                             uint32_t instruction_size,
                             struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x0, 0x1, 0x01);
   const uint8_t cmd_len = 10;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /* 4K-page aligned */
   assert(((general_state_size | dynamic_state_size |
            indirect_object_size | instruction_size) & 0xfff) == 0);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));

   /* base addresses; bit 0 of each relocation is the modify-enable bit */
   ilo_cp_write_bo(cp, 1, general_state_bo,
                   INTEL_DOMAIN_RENDER,
                   0);
   ilo_cp_write_bo(cp, 1, surface_state_bo,
                   INTEL_DOMAIN_SAMPLER,
                   0);
   ilo_cp_write_bo(cp, 1, dynamic_state_bo,
                   INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION,
                   0);
   ilo_cp_write_bo(cp, 1, indirect_object_bo,
                   0,
                   0);
   ilo_cp_write_bo(cp, 1, instruction_bo,
                   INTEL_DOMAIN_INSTRUCTION,
                   0);

   /* upper bounds; size | 1 sets the modify-enable bit in the bound DWord */
   if (general_state_size) {
      ilo_cp_write_bo(cp, general_state_size | 1, general_state_bo,
                      INTEL_DOMAIN_RENDER,
                      0);
   }
   else {
      /* skip range check */
      ilo_cp_write(cp, 1);
   }

   if (dynamic_state_size) {
      ilo_cp_write_bo(cp, dynamic_state_size | 1, dynamic_state_bo,
                      INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION,
                      0);
   }
   else {
      /* skip range check */
      ilo_cp_write(cp, 0xfffff000 + 1);
   }

   if (indirect_object_size) {
      ilo_cp_write_bo(cp, indirect_object_size | 1, indirect_object_bo,
                      0,
                      0);
   }
   else {
      /* skip range check */
      ilo_cp_write(cp, 0xfffff000 + 1);
   }

   if (instruction_size) {
      ilo_cp_write_bo(cp, instruction_size | 1, instruction_bo,
                      INTEL_DOMAIN_INSTRUCTION,
                      0);
   }
   else {
      /* skip range check */
      ilo_cp_write(cp, 1);
   }

   ilo_cp_end(cp);
}
447
/**
 * Emit STATE_SIP, setting the System Instruction Pointer.
 *
 * Bug fix: the DWord-count field "(cmd_len - 2)" was previously OR'ed into
 * the reservation size passed to ilo_cp_begin() instead of the command
 * header DWord, so the command was emitted with a zero length field and a
 * bogus reservation size.  Every other emitter in this file uses
 * ilo_cp_begin(cp, cmd_len) followed by cmd | (cmd_len - 2).
 */
static void
gen6_emit_STATE_SIP(const struct ilo_dev_info *dev,
                    uint32_t sip,
                    struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x0, 0x1, 0x02);
   const uint8_t cmd_len = 2;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, sip);
   ilo_cp_end(cp);
}
463
/**
 * Emit 3DSTATE_VF_STATISTICS to enable or disable vertex-fetch
 * statistics gathering.  Single-DWord command; the enable flag lives
 * in bit 0 of the header.
 */
static void
gen6_emit_3DSTATE_VF_STATISTICS(const struct ilo_dev_info *dev,
                                bool enable,
                                struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x1, 0x0, 0x0b);
   const uint8_t cmd_len = 1;
   const uint32_t dw0 = cmd | (enable ? 1 : 0);

   ILO_GPE_VALID_GEN(dev, 6, 7);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, dw0);
   ilo_cp_end(cp);
}
478
/**
 * Emit PIPELINE_SELECT.  Single-DWord command; \p pipeline selects the
 * 3D (0x0) or media (0x1) pipeline via the low bits of the header.
 */
static void
gen6_emit_PIPELINE_SELECT(const struct ilo_dev_info *dev,
                          int pipeline,
                          struct ilo_cp *cp)
{
   const int cmd = ILO_GPE_CMD(0x1, 0x1, 0x04);
   const uint8_t cmd_len = 1;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /* 3D or media */
   assert(pipeline == 0x0 || pipeline == 0x1);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | pipeline);
   ilo_cp_end(cp);
}
496
/**
 * Emit MEDIA_VFE_STATE, configuring the media fixed-function engine.
 *
 * \param max_threads      maximum number of threads (hardware field is
 *                         encoded as max_threads - 1)
 * \param num_urb_entries  number of URB entries for the media pipeline
 * \param urb_entry_size   URB entry allocation size
 */
static void
gen6_emit_MEDIA_VFE_STATE(const struct ilo_dev_info *dev,
                          int max_threads, int num_urb_entries,
                          int urb_entry_size,
                          struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x2, 0x0, 0x00);
   const uint8_t cmd_len = 8;
   uint32_t dw2, dw4;

   /* GEN6 only; the command layout differs on later gens */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   dw2 = (max_threads - 1) << 16 |
         num_urb_entries << 8 |
         1 << 7 | /* Reset Gateway Timer */
         1 << 6;  /* Bypass Gateway Control */

   dw4 = urb_entry_size << 16 |  /* URB Entry Allocation Size */
         480;                    /* CURBE Allocation Size */

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, 0); /* scratch */
   ilo_cp_write(cp, dw2);
   ilo_cp_write(cp, 0); /* MBZ */
   ilo_cp_write(cp, dw4);
   ilo_cp_write(cp, 0); /* scoreboard */
   ilo_cp_write(cp, 0);
   ilo_cp_write(cp, 0);
   ilo_cp_end(cp);
}
528
/**
 * Emit MEDIA_CURBE_LOAD, loading constant URB entry data.
 *
 * \param buf  offset of the CURBE data; must be 32-byte aligned
 * \param size size of the data in bytes; rounded up to a multiple of 32
 */
static void
gen6_emit_MEDIA_CURBE_LOAD(const struct ilo_dev_info *dev,
                          uint32_t buf, int size,
                          struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x2, 0x0, 0x01);
   const uint8_t cmd_len = 4;

   /* GEN6 only */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   assert(buf % 32 == 0);
   /* gen6_emit_push_constant_buffer() allocates buffers in 256-bit units */
   size = align(size, 32);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, 0); /* MBZ */
   ilo_cp_write(cp, size);
   ilo_cp_write(cp, buf);
   ilo_cp_end(cp);
}
550
/**
 * Emit MEDIA_INTERFACE_DESCRIPTOR_LOAD.
 *
 * \param offset  offset of the interface descriptor array; must be
 *                32-byte aligned
 * \param num_ids number of interface descriptors to load
 */
static void
gen6_emit_MEDIA_INTERFACE_DESCRIPTOR_LOAD(const struct ilo_dev_info *dev,
                                          uint32_t offset, int num_ids,
                                          struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x2, 0x0, 0x02);
   const uint8_t cmd_len = 4;

   /* GEN6 only */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   assert(offset % 32 == 0);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, 0); /* MBZ */
   /* every ID has 8 DWords */
   ilo_cp_write(cp, num_ids * 8 * 4);
   ilo_cp_write(cp, offset);
   ilo_cp_end(cp);
}
571
/**
 * Emit MEDIA_GATEWAY_STATE.  DW1 packs the gateway id (bits 16+),
 * byte (bits 8-15) and thread count (bits 0-7).
 */
static void
gen6_emit_MEDIA_GATEWAY_STATE(const struct ilo_dev_info *dev,
                              int id, int byte, int thread_count,
                              struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x2, 0x0, 0x03);
   const uint8_t cmd_len = 2;
   const uint32_t dw1 = id << 16 | byte << 8 | thread_count;

   /* GEN6 only */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, dw1);
   ilo_cp_end(cp);
}
592
/**
 * Emit MEDIA_STATE_FLUSH.  DW1 packs the thread-count watermark
 * (bits 16+) and the barrier mask (low bits).
 */
static void
gen6_emit_MEDIA_STATE_FLUSH(const struct ilo_dev_info *dev,
                            int thread_count_water_mark,
                            int barrier_mask,
                            struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x2, 0x0, 0x04);
   const uint8_t cmd_len = 2;
   const uint32_t dw1 = thread_count_water_mark << 16 | barrier_mask;

   /* GEN6 only */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, dw1);
   ilo_cp_end(cp);
}
613
/**
 * Placeholder for MEDIA_OBJECT_WALKER; the driver never emits this
 * command and asserts if it is ever reached.
 */
static void
gen6_emit_MEDIA_OBJECT_WALKER(const struct ilo_dev_info *dev,
                              struct ilo_cp *cp)
{
   assert(!"MEDIA_OBJECT_WALKER unsupported");
}
620
/**
 * Emit 3DSTATE_BINDING_TABLE_POINTERS, pointing the VS, GS, and PS
 * stages at their binding tables.  All three modify-enable bits are
 * always set, so all three pointers are updated together.
 */
static void
gen6_emit_3DSTATE_BINDING_TABLE_POINTERS(const struct ilo_dev_info *dev,
                                         uint32_t vs_binding_table,
                                         uint32_t gs_binding_table,
                                         uint32_t ps_binding_table,
                                         struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x01);
   const uint8_t cmd_len = 4;

   /* GEN6 only; GEN7 uses per-stage commands instead */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) |
                    GEN6_BINDING_TABLE_MODIFY_VS |
                    GEN6_BINDING_TABLE_MODIFY_GS |
                    GEN6_BINDING_TABLE_MODIFY_PS);
   ilo_cp_write(cp, vs_binding_table);
   ilo_cp_write(cp, gs_binding_table);
   ilo_cp_write(cp, ps_binding_table);
   ilo_cp_end(cp);
}
643
/**
 * Emit 3DSTATE_SAMPLER_STATE_POINTERS, pointing the VS, GS, and PS
 * stages at their sampler state.  All three change bits are always
 * set, so all three pointers are updated together.
 */
static void
gen6_emit_3DSTATE_SAMPLER_STATE_POINTERS(const struct ilo_dev_info *dev,
                                         uint32_t vs_sampler_state,
                                         uint32_t gs_sampler_state,
                                         uint32_t ps_sampler_state,
                                         struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x02);
   const uint8_t cmd_len = 4;

   /* GEN6 only; GEN7 uses per-stage commands instead */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) |
                    VS_SAMPLER_STATE_CHANGE |
                    GS_SAMPLER_STATE_CHANGE |
                    PS_SAMPLER_STATE_CHANGE);
   ilo_cp_write(cp, vs_sampler_state);
   ilo_cp_write(cp, gs_sampler_state);
   ilo_cp_write(cp, ps_sampler_state);
   ilo_cp_end(cp);
}
666
/**
 * Emit 3DSTATE_URB, partitioning the URB between the VS and GS stages.
 *
 * \param vs_total_size total URB space for VS, in bytes
 * \param gs_total_size total URB space for GS, in bytes
 * \param vs_entry_size size of one VS entry, in bytes
 * \param gs_entry_size size of one GS entry, in bytes
 */
static void
gen6_emit_3DSTATE_URB(const struct ilo_dev_info *dev,
                      int vs_total_size, int gs_total_size,
                      int vs_entry_size, int gs_entry_size,
                      struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x05);
   const uint8_t cmd_len = 3;
   const int row_size = 128; /* 1024 bits */
   int vs_alloc_size, gs_alloc_size;
   int vs_num_entries, gs_num_entries;

   /* GEN6 only; GEN7 uses separate 3DSTATE_URB_xS commands */
   ILO_GPE_VALID_GEN(dev, 6, 6);

   /* in 1024-bit URB rows, rounded up */
   vs_alloc_size = (vs_entry_size + row_size - 1) / row_size;
   gs_alloc_size = (gs_entry_size + row_size - 1) / row_size;

   /* the valid range is [1, 5] */
   if (!vs_alloc_size)
      vs_alloc_size = 1;
   if (!gs_alloc_size)
      gs_alloc_size = 1;
   assert(vs_alloc_size <= 5 && gs_alloc_size <= 5);

   /* the valid range is [24, 256] in multiples of 4; "& ~3" rounds down */
   vs_num_entries = (vs_total_size / row_size / vs_alloc_size) & ~3;
   if (vs_num_entries > 256)
      vs_num_entries = 256;
   assert(vs_num_entries >= 24);

   /* the valid range is [0, 256] in multiples of 4 */
   gs_num_entries = (gs_total_size / row_size / gs_alloc_size) & ~3;
   if (gs_num_entries > 256)
      gs_num_entries = 256;

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   /* hardware encodes allocation sizes as (rows - 1) */
   ilo_cp_write(cp, (vs_alloc_size - 1) << GEN6_URB_VS_SIZE_SHIFT |
                    vs_num_entries << GEN6_URB_VS_ENTRIES_SHIFT);
   ilo_cp_write(cp, gs_num_entries << GEN6_URB_GS_ENTRIES_SHIFT |
                    (gs_alloc_size - 1) << GEN6_URB_GS_SIZE_SHIFT);
   ilo_cp_end(cp);
}
711
712 static void
713 gen6_emit_3DSTATE_VERTEX_BUFFERS(const struct ilo_dev_info *dev,
714 const struct pipe_vertex_buffer *vbuffers,
715 uint64_t vbuffer_mask,
716 const struct ilo_ve_state *ve,
717 struct ilo_cp *cp)
718 {
719 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x08);
720 uint8_t cmd_len;
721 unsigned hw_idx;
722
723 ILO_GPE_VALID_GEN(dev, 6, 7);
724
725 /*
726 * From the Sandy Bridge PRM, volume 2 part 1, page 82:
727 *
728 * "From 1 to 33 VBs can be specified..."
729 */
730 assert(vbuffer_mask <= (1UL << 33));
731
732 if (!vbuffer_mask)
733 return;
734
735 cmd_len = 1;
736
737 for (hw_idx = 0; hw_idx < ve->vb_count; hw_idx++) {
738 const unsigned pipe_idx = ve->vb_mapping[hw_idx];
739
740 if (vbuffer_mask & (1 << pipe_idx))
741 cmd_len += 4;
742 }
743
744 ilo_cp_begin(cp, cmd_len);
745 ilo_cp_write(cp, cmd | (cmd_len - 2));
746
747 for (hw_idx = 0; hw_idx < ve->vb_count; hw_idx++) {
748 const unsigned instance_divisor = ve->instance_divisors[hw_idx];
749 const unsigned pipe_idx = ve->vb_mapping[hw_idx];
750 const struct pipe_vertex_buffer *vb = &vbuffers[pipe_idx];
751 uint32_t dw;
752
753 if (!(vbuffer_mask & (1 << pipe_idx)))
754 continue;
755
756 dw = hw_idx << GEN6_VB0_INDEX_SHIFT;
757
758 if (instance_divisor)
759 dw |= GEN6_VB0_ACCESS_INSTANCEDATA;
760 else
761 dw |= GEN6_VB0_ACCESS_VERTEXDATA;
762
763 if (dev->gen >= ILO_GEN(7))
764 dw |= GEN7_VB0_ADDRESS_MODIFYENABLE;
765
766 /* use null vb if there is no buffer or the stride is out of range */
767 if (vb->buffer && vb->stride <= 2048) {
768 const struct ilo_buffer *buf = ilo_buffer(vb->buffer);
769 const uint32_t start_offset = vb->buffer_offset;
770 /*
771 * As noted in ilo_translate_format(), we treat some 3-component
772 * formats as 4-component formats to work around hardware
773 * limitations. Imagine the case where the vertex buffer holds a
774 * single PIPE_FORMAT_R16G16B16_FLOAT vertex, and buf->bo_size is 6.
775 * The hardware would not be able to fetch it because the vertex
776 * buffer is expected to hold a PIPE_FORMAT_R16G16B16A16_FLOAT vertex
777 * and that takes at least 8 bytes.
778 *
779 * For the workaround to work, we query the physical size, which is
780 * page aligned, to calculate end_offset so that the last vertex has
781 * a better chance to be fetched.
782 */
783 const uint32_t end_offset = intel_bo_get_size(buf->bo) - 1;
784
785 dw |= vb->stride << BRW_VB0_PITCH_SHIFT;
786
787 ilo_cp_write(cp, dw);
788 ilo_cp_write_bo(cp, start_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
789 ilo_cp_write_bo(cp, end_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
790 ilo_cp_write(cp, instance_divisor);
791 }
792 else {
793 dw |= 1 << 13;
794
795 ilo_cp_write(cp, dw);
796 ilo_cp_write(cp, 0);
797 ilo_cp_write(cp, 0);
798 ilo_cp_write(cp, instance_divisor);
799 }
800 }
801
802 ilo_cp_end(cp);
803 }
804
/**
 * Rewrite a vertex element CSO so it sources the edge flag: enable the
 * edge-flag bit, store only component 0, and force the source format to
 * a UINT format as the hardware requires.
 */
static void
ve_set_cso_edgeflag(const struct ilo_dev_info *dev,
                    struct ilo_ve_cso *cso)
{
   int format;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 94:
    *
    *    "- This bit (Edge Flag Enable) must only be ENABLED on the last
    *       valid VERTEX_ELEMENT structure.
    *
    *     - When set, Component 0 Control must be set to VFCOMP_STORE_SRC,
    *       and Component 1-3 Control must be set to VFCOMP_NOSTORE.
    *
    *     - The Source Element Format must be set to the UINT format.
    *
    *     - [DevSNB]: Edge Flags are not supported for QUADLIST
    *       primitives.  Software may elect to convert QUADLIST primitives
    *       to some set of corresponding edge-flag-supported primitive
    *       types (e.g., POLYGONs) prior to submission to the 3D pipeline."
    */

   cso->payload[0] |= GEN6_VE0_EDGE_FLAG_ENABLE;
   /* store src component 0 only; components 1-3 are not stored */
   cso->payload[1] =
         BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT |
         BRW_VE1_COMPONENT_NOSTORE << BRW_VE1_COMPONENT_1_SHIFT |
         BRW_VE1_COMPONENT_NOSTORE << BRW_VE1_COMPONENT_2_SHIFT |
         BRW_VE1_COMPONENT_NOSTORE << BRW_VE1_COMPONENT_3_SHIFT;

   /*
    * Edge flags have format BRW_SURFACEFORMAT_R8_UINT when defined via
    * glEdgeFlagPointer(), and format BRW_SURFACEFORMAT_R32_FLOAT when defined
    * via glEdgeFlag(), as can be seen in vbo_attrib_tmp.h.
    *
    * Since all the hardware cares about is whether the flags are zero or not,
    * we can treat them as BRW_SURFACEFORMAT_R32_UINT in the latter case.
    */
   format = (cso->payload[0] >> BRW_VE0_FORMAT_SHIFT) & 0x1ff;
   if (format == BRW_SURFACEFORMAT_R32_FLOAT) {
      /* R32_UINT is numerically one below R32_FLOAT, so subtracting one
       * format step converts in place */
      STATIC_ASSERT(BRW_SURFACEFORMAT_R32_UINT ==
                    BRW_SURFACEFORMAT_R32_FLOAT - 1);

      cso->payload[0] -= (1 << BRW_VE0_FORMAT_SHIFT);
   }
   else {
      assert(format == BRW_SURFACEFORMAT_R8_UINT);
   }
}
856
857 static void
858 ve_init_cso_with_components(const struct ilo_dev_info *dev,
859 int comp0, int comp1, int comp2, int comp3,
860 struct ilo_ve_cso *cso)
861 {
862 ILO_GPE_VALID_GEN(dev, 6, 7);
863
864 STATIC_ASSERT(Elements(cso->payload) >= 2);
865 cso->payload[0] = GEN6_VE0_VALID;
866 cso->payload[1] =
867 comp0 << BRW_VE1_COMPONENT_0_SHIFT |
868 comp1 << BRW_VE1_COMPONENT_1_SHIFT |
869 comp2 << BRW_VE1_COMPONENT_2_SHIFT |
870 comp3 << BRW_VE1_COMPONENT_3_SHIFT;
871 }
872
/**
 * Initialize a vertex element CSO from a pipe_vertex_element.
 *
 * \param vb_index hardware vertex buffer index this element sources from
 *        (may differ from state->vertex_buffer_index; see ilo_gpe_init_ve()).
 */
static void
ve_init_cso(const struct ilo_dev_info *dev,
            const struct pipe_vertex_element *state,
            unsigned vb_index,
            struct ilo_ve_cso *cso)
{
   int comp[4] = {
      BRW_VE1_COMPONENT_STORE_SRC,
      BRW_VE1_COMPONENT_STORE_SRC,
      BRW_VE1_COMPONENT_STORE_SRC,
      BRW_VE1_COMPONENT_STORE_SRC,
   };
   int format;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * Components beyond those present in the source format are filled with
    * zeros, except the fourth which is filled with one.  The fallthroughs
    * below are intentional: a 1-component format also overrides components
    * 2 and 3, and so on.
    */
   switch (util_format_get_nr_components(state->src_format)) {
   case 1: comp[1] = BRW_VE1_COMPONENT_STORE_0;      /* fall through */
   case 2: comp[2] = BRW_VE1_COMPONENT_STORE_0;      /* fall through */
   case 3: comp[3] = (util_format_is_pure_integer(state->src_format)) ?
                     BRW_VE1_COMPONENT_STORE_1_INT :
                     BRW_VE1_COMPONENT_STORE_1_FLT;
   }

   format = ilo_translate_vertex_format(state->src_format);

   STATIC_ASSERT(Elements(cso->payload) >= 2);
   cso->payload[0] =
      vb_index << GEN6_VE0_INDEX_SHIFT |
      GEN6_VE0_VALID |
      format << BRW_VE0_FORMAT_SHIFT |
      state->src_offset << BRW_VE0_SRC_OFFSET_SHIFT;

   cso->payload[1] =
      comp[0] << BRW_VE1_COMPONENT_0_SHIFT |
      comp[1] << BRW_VE1_COMPONENT_1_SHIFT |
      comp[2] << BRW_VE1_COMPONENT_2_SHIFT |
      comp[3] << BRW_VE1_COMPONENT_3_SHIFT;
}
912
/**
 * Initialize an ilo_ve_state from an array of pipe_vertex_elements.
 *
 * A hardware vertex buffer has a fixed instance divisor, so a pipe vertex
 * buffer referenced with different instance divisors is split into
 * multiple hardware vertex buffers; ve->vb_mapping and
 * ve->instance_divisors record the resulting mapping.
 */
void
ilo_gpe_init_ve(const struct ilo_dev_info *dev,
                unsigned num_states,
                const struct pipe_vertex_element *states,
                struct ilo_ve_state *ve)
{
   unsigned i;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   ve->count = num_states;
   ve->vb_count = 0;

   for (i = 0; i < num_states; i++) {
      const unsigned pipe_idx = states[i].vertex_buffer_index;
      const unsigned instance_divisor = states[i].instance_divisor;
      unsigned hw_idx;

      /*
       * map the pipe vb to the hardware vb, which has a fixed instance
       * divisor
       */
      for (hw_idx = 0; hw_idx < ve->vb_count; hw_idx++) {
         if (ve->vb_mapping[hw_idx] == pipe_idx &&
             ve->instance_divisors[hw_idx] == instance_divisor)
            break;
      }

      /* create one if there is no matching hardware vb */
      if (hw_idx >= ve->vb_count) {
         hw_idx = ve->vb_count++;

         ve->vb_mapping[hw_idx] = pipe_idx;
         ve->instance_divisors[hw_idx] = instance_divisor;
      }

      ve_init_cso(dev, &states[i], hw_idx, &ve->cso[i]);
   }
}
952
/**
 * Emit 3DSTATE_VERTEX_ELEMENTS.
 *
 * \param last_velement_edgeflag  rewrite the last element to source the
 *        edge flag (must only be done for the last valid element).
 * \param prepend_generated_ids   prepend an element that stores the
 *        generated VertexID/InstanceID.
 *
 * When there are no elements at all, a dummy element storing
 * (0, 0, 0, 1) is emitted, since the command requires at least one.
 */
static void
gen6_emit_3DSTATE_VERTEX_ELEMENTS(const struct ilo_dev_info *dev,
                                  const struct ilo_ve_state *ve,
                                  bool last_velement_edgeflag,
                                  bool prepend_generated_ids,
                                  struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x09);
   uint8_t cmd_len;
   unsigned i;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 93:
    *
    *     "Up to 34 (DevSNB+) vertex elements are supported."
    */
   assert(ve->count + prepend_generated_ids <= 34);

   if (!ve->count && !prepend_generated_ids) {
      struct ilo_ve_cso dummy;

      /* at least one element is required; use a (0, 0, 0, 1) constant */
      ve_init_cso_with_components(dev,
            BRW_VE1_COMPONENT_STORE_0,
            BRW_VE1_COMPONENT_STORE_0,
            BRW_VE1_COMPONENT_STORE_0,
            BRW_VE1_COMPONENT_STORE_1_FLT,
            &dummy);

      cmd_len = 3;
      ilo_cp_begin(cp, cmd_len);
      ilo_cp_write(cp, cmd | (cmd_len - 2));
      ilo_cp_write_multi(cp, dummy.payload, 2);
      ilo_cp_end(cp);

      return;
   }

   /* one header DWord plus two DWords per element */
   cmd_len = 2 * (ve->count + prepend_generated_ids) + 1;

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));

   if (prepend_generated_ids) {
      struct ilo_ve_cso gen_ids;

      ve_init_cso_with_components(dev,
            BRW_VE1_COMPONENT_STORE_VID,
            BRW_VE1_COMPONENT_STORE_IID,
            BRW_VE1_COMPONENT_NOSTORE,
            BRW_VE1_COMPONENT_NOSTORE,
            &gen_ids);

      ilo_cp_write_multi(cp, gen_ids.payload, 2);
   }

   if (last_velement_edgeflag) {
      struct ilo_ve_cso edgeflag;

      for (i = 0; i < ve->count - 1; i++)
         ilo_cp_write_multi(cp, ve->cso[i].payload, 2);

      /* rewrite a copy of the last element to carry the edge flag */
      edgeflag = ve->cso[i];
      ve_set_cso_edgeflag(dev, &edgeflag);
      ilo_cp_write_multi(cp, edgeflag.payload, 2);
   }
   else {
      for (i = 0; i < ve->count; i++)
         ilo_cp_write_multi(cp, ve->cso[i].payload, 2);
   }

   ilo_cp_end(cp);
}
1027
/**
 * Emit 3DSTATE_INDEX_BUFFER.  No-op when no index buffer is bound.
 *
 * \param enable_cut_index enable primitive-restart (cut index) handling.
 */
static void
gen6_emit_3DSTATE_INDEX_BUFFER(const struct ilo_dev_info *dev,
                               const struct ilo_ib_state *ib,
                               bool enable_cut_index,
                               struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0a);
   const uint8_t cmd_len = 3;
   const struct ilo_buffer *buf = ilo_buffer(ib->resource);
   uint32_t start_offset, end_offset;
   int format;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   if (!buf)
      return;

   format = gen6_translate_index_size(ib->state.index_size);

   /*
    * set start_offset to 0 here and adjust pipe_draw_info::start with
    * ib->draw_start_offset in 3DPRIMITIVE
    */
   start_offset = 0;
   end_offset = buf->bo_size;

   /* end_offset must also be aligned and is inclusive */
   end_offset -= (end_offset % ib->state.index_size);
   end_offset--;

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) |
                    ((enable_cut_index) ? BRW_CUT_INDEX_ENABLE : 0) |
                    format << 8);
   ilo_cp_write_bo(cp, start_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
   ilo_cp_write_bo(cp, end_offset, buf->bo, INTEL_DOMAIN_VERTEX, 0);
   ilo_cp_end(cp);
}
1066
1067 static void
1068 gen6_emit_3DSTATE_VIEWPORT_STATE_POINTERS(const struct ilo_dev_info *dev,
1069 uint32_t clip_viewport,
1070 uint32_t sf_viewport,
1071 uint32_t cc_viewport,
1072 struct ilo_cp *cp)
1073 {
1074 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0d);
1075 const uint8_t cmd_len = 4;
1076
1077 ILO_GPE_VALID_GEN(dev, 6, 6);
1078
1079 ilo_cp_begin(cp, cmd_len);
1080 ilo_cp_write(cp, cmd | (cmd_len - 2) |
1081 GEN6_CLIP_VIEWPORT_MODIFY |
1082 GEN6_SF_VIEWPORT_MODIFY |
1083 GEN6_CC_VIEWPORT_MODIFY);
1084 ilo_cp_write(cp, clip_viewport);
1085 ilo_cp_write(cp, sf_viewport);
1086 ilo_cp_write(cp, cc_viewport);
1087 ilo_cp_end(cp);
1088 }
1089
1090 static void
1091 gen6_emit_3DSTATE_CC_STATE_POINTERS(const struct ilo_dev_info *dev,
1092 uint32_t blend_state,
1093 uint32_t depth_stencil_state,
1094 uint32_t color_calc_state,
1095 struct ilo_cp *cp)
1096 {
1097 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0e);
1098 const uint8_t cmd_len = 4;
1099
1100 ILO_GPE_VALID_GEN(dev, 6, 6);
1101
1102 ilo_cp_begin(cp, cmd_len);
1103 ilo_cp_write(cp, cmd | (cmd_len - 2));
1104 ilo_cp_write(cp, blend_state | 1);
1105 ilo_cp_write(cp, depth_stencil_state | 1);
1106 ilo_cp_write(cp, color_calc_state | 1);
1107 ilo_cp_end(cp);
1108 }
1109
1110 static void
1111 gen6_emit_3DSTATE_SCISSOR_STATE_POINTERS(const struct ilo_dev_info *dev,
1112 uint32_t scissor_rect,
1113 struct ilo_cp *cp)
1114 {
1115 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0f);
1116 const uint8_t cmd_len = 2;
1117
1118 ILO_GPE_VALID_GEN(dev, 6, 7);
1119
1120 ilo_cp_begin(cp, cmd_len);
1121 ilo_cp_write(cp, cmd | (cmd_len - 2));
1122 ilo_cp_write(cp, scissor_rect);
1123 ilo_cp_end(cp);
1124 }
1125
/**
 * Initialize the CSO (DW2, DW4, DW5 of 3DSTATE_VS) for a vertex shader.
 *
 * The packed dwords are stored in \p cso and later combined with the
 * per-draw sampler count in gen6_emit_3DSTATE_VS().
 */
void
ilo_gpe_init_vs_cso(const struct ilo_dev_info *dev,
                    const struct ilo_shader_state *vs,
                    struct ilo_shader_cso *cso)
{
   int start_grf, vue_read_len, max_threads;
   uint32_t dw2, dw4, dw5;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   start_grf = ilo_shader_get_kernel_param(vs, ILO_KERNEL_URB_DATA_START_REG);
   vue_read_len = ilo_shader_get_kernel_param(vs, ILO_KERNEL_INPUT_COUNT);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 135:
    *
    *   "(Vertex URB Entry Read Length) Specifies the number of pairs of
    *    128-bit vertex elements to be passed into the payload for each
    *    vertex."
    *
    *   "It is UNDEFINED to set this field to 0 indicating no Vertex URB
    *    data to be read and passed to the thread."
    */
   vue_read_len = (vue_read_len + 1) / 2;
   if (!vue_read_len)
      vue_read_len = 1;

   switch (dev->gen) {
   case ILO_GEN(6):
      /*
       * From the Sandy Bridge PRM, volume 1 part 1, page 22:
       *
       *   "Device   # of EUs   #Threads/EU
       *    SNB GT2  12         5
       *    SNB GT1  6          4"
       */
      max_threads = (dev->gt == 2) ? 60 : 24;
      break;
   case ILO_GEN(7):
      /*
       * From the Ivy Bridge PRM, volume 1 part 1, page 18:
       *
       *   "Device            # of EUs   #Threads/EU
       *    Ivy Bridge (GT2)  16         8
       *    Ivy Bridge (GT1)  6          6"
       */
      max_threads = (dev->gt == 2) ? 128 : 36;
      break;
   case ILO_GEN(7.5):
      /* see brwCreateContext() */
      max_threads = (dev->gt == 2) ? 280 : 70;
      break;
   default:
      max_threads = 1;
      break;
   }

   /* IEEE float mode; the alternate (ALT) mode is never used */
   dw2 = (true) ? 0 : GEN6_VS_FLOATING_POINT_MODE_ALT;

   dw4 = start_grf << GEN6_VS_DISPATCH_START_GRF_SHIFT |
         vue_read_len << GEN6_VS_URB_READ_LENGTH_SHIFT |
         0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT;

   dw5 = GEN6_VS_STATISTICS_ENABLE |
         GEN6_VS_ENABLE;

   /* the max-threads field moved on Haswell */
   if (dev->gen >= ILO_GEN(7.5))
      dw5 |= (max_threads - 1) << HSW_VS_MAX_THREADS_SHIFT;
   else
      dw5 |= (max_threads - 1) << GEN6_VS_MAX_THREADS_SHIFT;

   STATIC_ASSERT(Elements(cso->payload) >= 3);
   cso->payload[0] = dw2;
   cso->payload[1] = dw4;
   cso->payload[2] = dw5;
}
1202
/**
 * Emit 3DSTATE_VS.
 *
 * When \p vs is NULL, the VS stage is disabled by emitting an all-zero
 * body.  Otherwise the precomputed CSO dwords are combined with the
 * sampler count and the kernel offset.
 */
static void
gen6_emit_3DSTATE_VS(const struct ilo_dev_info *dev,
                     const struct ilo_shader_state *vs,
                     int num_samplers,
                     struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x10);
   const uint8_t cmd_len = 6;
   const struct ilo_shader_cso *cso;
   uint32_t dw2, dw4, dw5;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   if (!vs) {
      /* VS disabled: all state dwords zero */
      ilo_cp_begin(cp, cmd_len);
      ilo_cp_write(cp, cmd | (cmd_len - 2));
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_end(cp);
      return;
   }

   cso = ilo_shader_get_kernel_cso(vs);
   dw2 = cso->payload[0];
   dw4 = cso->payload[1];
   dw5 = cso->payload[2];

   /* sampler count is programmed in groups of 4 */
   dw2 |= ((num_samplers + 3) / 4) << GEN6_VS_SAMPLER_COUNT_SHIFT;

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, ilo_shader_get_kernel_offset(vs));
   ilo_cp_write(cp, dw2);
   ilo_cp_write(cp, 0); /* scratch */
   ilo_cp_write(cp, dw4);
   ilo_cp_write(cp, dw5);
   ilo_cp_end(cp);
}
1244
/**
 * Initialize the CSO (DW2, DW4, DW5, DW6 of 3DSTATE_GS) for a GEN6
 * geometry shader.
 *
 * \p gs may also be a vertex shader: on GEN6 stream output is performed by
 * a GS kernel generated from the VS, in which case the start GRF and read
 * length come from the SO-specific kernel parameters.
 */
void
ilo_gpe_init_gs_cso_gen6(const struct ilo_dev_info *dev,
                         const struct ilo_shader_state *gs,
                         struct ilo_shader_cso *cso)
{
   int start_grf, vue_read_len, max_threads;
   uint32_t dw2, dw4, dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   if (ilo_shader_get_type(gs) == PIPE_SHADER_GEOMETRY) {
      start_grf = ilo_shader_get_kernel_param(gs,
            ILO_KERNEL_URB_DATA_START_REG);

      vue_read_len = ilo_shader_get_kernel_param(gs, ILO_KERNEL_INPUT_COUNT);
   }
   else {
      /* a VS acting as the SO-only GS kernel */
      start_grf = ilo_shader_get_kernel_param(gs,
            ILO_KERNEL_VS_GEN6_SO_START_REG);

      vue_read_len = ilo_shader_get_kernel_param(gs, ILO_KERNEL_OUTPUT_COUNT);
   }

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 153:
    *
    *   "Specifies the amount of URB data read and passed in the thread
    *    payload for each Vertex URB entry, in 256-bit register increments.
    *
    *    It is UNDEFINED to set this field (Vertex URB Entry Read Length) to
    *    0 indicating no Vertex URB data to be read and passed to the
    *    thread."
    */
   vue_read_len = (vue_read_len + 1) / 2;
   if (!vue_read_len)
      vue_read_len = 1;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 154:
    *
    *   "Maximum Number of Threads valid range is [0,27] when Rendering
    *    Enabled bit is set."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 173:
    *
    *   "Programming Note: If the GS stage is enabled, software must always
    *    allocate at least one GS URB Entry. This is true even if the GS
    *    thread never needs to output vertices to the pipeline, e.g., when
    *    only performing stream output. This is an artifact of the need to
    *    pass the GS thread an initial destination URB handle."
    *
    * As such, we always enable rendering, and limit the number of threads.
    */
   if (dev->gt == 2) {
      /* maximum is 60, but limited to 28 */
      max_threads = 28;
   }
   else {
      /* maximum is 24, but limited to 21 (see brwCreateContext()) */
      max_threads = 21;
   }

   /* enforce the [0,27] range noted above */
   if (max_threads > 28)
      max_threads = 28;

   dw2 = GEN6_GS_SPF_MODE;

   dw4 = vue_read_len << GEN6_GS_URB_READ_LENGTH_SHIFT |
         0 << GEN6_GS_URB_ENTRY_READ_OFFSET_SHIFT |
         start_grf << GEN6_GS_DISPATCH_START_GRF_SHIFT;

   dw5 = (max_threads - 1) << GEN6_GS_MAX_THREADS_SHIFT |
         GEN6_GS_STATISTICS_ENABLE |
         GEN6_GS_SO_STATISTICS_ENABLE |
         GEN6_GS_RENDERING_ENABLE;

   /*
    * we cannot make use of GEN6_GS_REORDER because it will reorder
    * triangle strips according to D3D rules (triangle 2N+1 uses vertices
    * (2N+1, 2N+3, 2N+2)), instead of GL rules (triangle 2N+1 uses vertices
    * (2N+2, 2N+1, 2N+3)).
    */
   dw6 = GEN6_GS_ENABLE;

   if (ilo_shader_get_kernel_param(gs, ILO_KERNEL_GS_DISCARD_ADJACENCY))
      dw6 |= GEN6_GS_DISCARD_ADJACENCY;

   if (ilo_shader_get_kernel_param(gs, ILO_KERNEL_VS_GEN6_SO)) {
      const uint32_t svbi_post_inc =
         ilo_shader_get_kernel_param(gs, ILO_KERNEL_GS_GEN6_SVBI_POST_INC);

      dw6 |= GEN6_GS_SVBI_PAYLOAD_ENABLE;
      if (svbi_post_inc) {
         dw6 |= GEN6_GS_SVBI_POSTINCREMENT_ENABLE |
                svbi_post_inc << GEN6_GS_SVBI_POSTINCREMENT_VALUE_SHIFT;
      }
   }

   STATIC_ASSERT(Elements(cso->payload) >= 4);
   cso->payload[0] = dw2;
   cso->payload[1] = dw4;
   cso->payload[2] = dw5;
   cso->payload[3] = dw6;
}
1349
/**
 * Emit 3DSTATE_GS on GEN6.
 *
 * Three cases, in order of preference:
 *  - a real GS: use its CSO and kernel offset;
 *  - no GS but the VS performs stream output: use a GS CSO generated from
 *    the VS, selecting the SO sub-kernel by vertices-per-primitive;
 *  - neither: disable the GS stage.
 */
static void
gen6_emit_3DSTATE_GS(const struct ilo_dev_info *dev,
                     const struct ilo_shader_state *gs,
                     const struct ilo_shader_state *vs,
                     int verts_per_prim,
                     struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x11);
   const uint8_t cmd_len = 7;
   uint32_t dw1, dw2, dw4, dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   if (gs) {
      const struct ilo_shader_cso *cso;

      dw1 = ilo_shader_get_kernel_offset(gs);

      cso = ilo_shader_get_kernel_cso(gs);
      dw2 = cso->payload[0];
      dw4 = cso->payload[1];
      dw5 = cso->payload[2];
      dw6 = cso->payload[3];
   }
   else if (vs && ilo_shader_get_kernel_param(vs, ILO_KERNEL_VS_GEN6_SO)) {
      struct ilo_shader_cso cso;
      enum ilo_kernel_param param;

      /* pick the SO sub-kernel matching the primitive's vertex count */
      switch (verts_per_prim) {
      case 1:
         param = ILO_KERNEL_VS_GEN6_SO_POINT_OFFSET;
         break;
      case 2:
         param = ILO_KERNEL_VS_GEN6_SO_LINE_OFFSET;
         break;
      default:
         param = ILO_KERNEL_VS_GEN6_SO_TRI_OFFSET;
         break;
      }

      dw1 = ilo_shader_get_kernel_offset(vs) +
         ilo_shader_get_kernel_param(vs, param);

      /* cannot use VS's CSO */
      ilo_gpe_init_gs_cso_gen6(dev, vs, &cso);
      dw2 = cso.payload[0];
      dw4 = cso.payload[1];
      dw5 = cso.payload[2];
      dw6 = cso.payload[3];
   }
   else {
      /* GS disabled */
      dw1 = 0;
      dw2 = 0;
      dw4 = 1 << GEN6_GS_URB_READ_LENGTH_SHIFT;
      dw5 = GEN6_GS_STATISTICS_ENABLE;
      dw6 = 0;
   }

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, dw1);
   ilo_cp_write(cp, dw2);
   ilo_cp_write(cp, 0);
   ilo_cp_write(cp, dw4);
   ilo_cp_write(cp, dw5);
   ilo_cp_write(cp, dw6);
   ilo_cp_end(cp);
}
1418
/**
 * Translate pipe_rasterizer_state into the three 3DSTATE_CLIP payload
 * dwords (DW1-DW3), and decide whether guard band clipping may be enabled.
 */
void
ilo_gpe_init_rasterizer_clip(const struct ilo_dev_info *dev,
                             const struct pipe_rasterizer_state *state,
                             struct ilo_rasterizer_clip *clip)
{
   uint32_t dw1, dw2, dw3;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   dw1 = GEN6_CLIP_STATISTICS_ENABLE;

   if (dev->gen >= ILO_GEN(7)) {
      /*
       * From the Ivy Bridge PRM, volume 2 part 1, page 219:
       *
       *   "Workaround : Due to Hardware issue "EarlyCull" needs to be
       *    enabled only for the cases where the incoming primitive topology
       *    into the clipper guaranteed to be Trilist."
       *
       * What does this mean?
       */
      dw1 |= 0 << 19 |
             GEN7_CLIP_EARLY_CULL;

      if (state->front_ccw)
         dw1 |= GEN7_CLIP_WINDING_CCW;

      switch (state->cull_face) {
      case PIPE_FACE_NONE:
         dw1 |= GEN7_CLIP_CULLMODE_NONE;
         break;
      case PIPE_FACE_FRONT:
         dw1 |= GEN7_CLIP_CULLMODE_FRONT;
         break;
      case PIPE_FACE_BACK:
         dw1 |= GEN7_CLIP_CULLMODE_BACK;
         break;
      case PIPE_FACE_FRONT_AND_BACK:
         dw1 |= GEN7_CLIP_CULLMODE_BOTH;
         break;
      }
   }

   dw2 = GEN6_CLIP_ENABLE |
         GEN6_CLIP_XY_TEST |
         state->clip_plane_enable << GEN6_USER_CLIP_CLIP_DISTANCES_SHIFT |
         GEN6_CLIP_MODE_NORMAL;

   /* [0, w] vs [-w, w] depth clip range */
   if (state->clip_halfz)
      dw2 |= GEN6_CLIP_API_D3D;
   else
      dw2 |= GEN6_CLIP_API_OGL;

   if (state->depth_clip)
      dw2 |= GEN6_CLIP_Z_TEST;

   /* provoking vertex indices for tri/line/trifan */
   if (state->flatshade_first) {
      dw2 |= 0 << GEN6_CLIP_TRI_PROVOKE_SHIFT |
             0 << GEN6_CLIP_LINE_PROVOKE_SHIFT |
             1 << GEN6_CLIP_TRIFAN_PROVOKE_SHIFT;
   }
   else {
      dw2 |= 2 << GEN6_CLIP_TRI_PROVOKE_SHIFT |
             1 << GEN6_CLIP_LINE_PROVOKE_SHIFT |
             2 << GEN6_CLIP_TRIFAN_PROVOKE_SHIFT;
   }

   dw3 = 0x1 << GEN6_CLIP_MIN_POINT_WIDTH_SHIFT |
         0x7ff << GEN6_CLIP_MAX_POINT_WIDTH_SHIFT;

   clip->payload[0] = dw1;
   clip->payload[1] = dw2;
   clip->payload[2] = dw3;

   clip->can_enable_guardband = true;

   /*
    * There are several reasons that guard band test should be disabled
    *
    *  - GL wide points (to avoid partially visible object)
    *  - GL wide or AA lines (to avoid partially visible object)
    */
   if (state->point_size_per_vertex || state->point_size > 1.0f)
      clip->can_enable_guardband = false;
   if (state->line_smooth || state->line_width > 1.0f)
      clip->can_enable_guardband = false;
}
1506
1507 static void
1508 gen6_emit_3DSTATE_CLIP(const struct ilo_dev_info *dev,
1509 const struct ilo_rasterizer_state *rasterizer,
1510 const struct ilo_shader_state *fs,
1511 bool enable_guardband,
1512 int num_viewports,
1513 struct ilo_cp *cp)
1514 {
1515 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x12);
1516 const uint8_t cmd_len = 4;
1517 uint32_t dw1, dw2, dw3;
1518
1519 if (rasterizer) {
1520 int interps;
1521
1522 dw1 = rasterizer->clip.payload[0];
1523 dw2 = rasterizer->clip.payload[1];
1524 dw3 = rasterizer->clip.payload[2];
1525
1526 if (enable_guardband && rasterizer->clip.can_enable_guardband)
1527 dw2 |= GEN6_CLIP_GB_TEST;
1528
1529 interps = (fs) ? ilo_shader_get_kernel_param(fs,
1530 ILO_KERNEL_FS_BARYCENTRIC_INTERPOLATIONS) : 0;
1531
1532 if (interps & (1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC |
1533 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC |
1534 1 << BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC))
1535 dw2 |= GEN6_CLIP_NON_PERSPECTIVE_BARYCENTRIC_ENABLE;
1536
1537 dw3 |= GEN6_CLIP_FORCE_ZERO_RTAINDEX |
1538 (num_viewports - 1);
1539 }
1540 else {
1541 dw1 = 0;
1542 dw2 = 0;
1543 dw3 = 0;
1544 }
1545
1546 ilo_cp_begin(cp, cmd_len);
1547 ilo_cp_write(cp, cmd | (cmd_len - 2));
1548 ilo_cp_write(cp, dw1);
1549 ilo_cp_write(cp, dw2);
1550 ilo_cp_write(cp, dw3);
1551 ilo_cp_end(cp);
1552 }
1553
/**
 * Translate pipe_rasterizer_state into the six 3DSTATE_SF rasterization
 * payload dwords (DW2-DW7), plus the bits to OR in when multisampling
 * (sf->dw_msaa).
 */
void
ilo_gpe_init_rasterizer_sf(const struct ilo_dev_info *dev,
                           const struct pipe_rasterizer_state *state,
                           struct ilo_rasterizer_sf *sf)
{
   float offset_const, offset_scale, offset_clamp;
   int line_width, point_width;
   uint32_t dw1, dw2, dw3;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * Scale the constant term.  The minimum representable value used by the
    * HW is not large enough to be the minimum resolvable difference.
    */
   offset_const = state->offset_units * 2.0f;

   offset_scale = state->offset_scale;
   offset_clamp = state->offset_clamp;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 248:
    *
    *   "This bit (Statistics Enable) should be set whenever clipping is
    *    enabled and the Statistics Enable bit is set in CLIP_STATE. It
    *    should be cleared if clipping is disabled or Statistics Enable in
    *    CLIP_STATE is clear."
    */
   dw1 = GEN6_SF_STATISTICS_ENABLE |
         GEN6_SF_VIEWPORT_TRANSFORM_ENABLE;

   /* XXX GEN6 path seems to work fine for GEN7 */
   if (false && dev->gen >= ILO_GEN(7)) {
      /*
       * From the Ivy Bridge PRM, volume 2 part 1, page 258:
       *
       *   "This bit (Legacy Global Depth Bias Enable, Global Depth Offset
       *    Enable Solid , Global Depth Offset Enable Wireframe, and Global
       *    Depth Offset Enable Point) should be set whenever non zero depth
       *    bias (Slope, Bias) values are used. Setting this bit may have
       *    some degradation of performance for some workloads."
       */
      if (state->offset_tri || state->offset_line || state->offset_point) {
         /* XXX need to scale offset_const according to the depth format */
         dw1 |= GEN6_SF_LEGACY_GLOBAL_DEPTH_BIAS;

         dw1 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_SOLID |
                GEN6_SF_GLOBAL_DEPTH_OFFSET_WIREFRAME |
                GEN6_SF_GLOBAL_DEPTH_OFFSET_POINT;
      }
      else {
         offset_const = 0.0f;
         offset_scale = 0.0f;
         offset_clamp = 0.0f;
      }
   }
   else {
      if (state->offset_tri)
         dw1 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_SOLID;
      if (state->offset_line)
         dw1 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_WIREFRAME;
      if (state->offset_point)
         dw1 |= GEN6_SF_GLOBAL_DEPTH_OFFSET_POINT;
   }

   switch (state->fill_front) {
   case PIPE_POLYGON_MODE_FILL:
      dw1 |= GEN6_SF_FRONT_SOLID;
      break;
   case PIPE_POLYGON_MODE_LINE:
      dw1 |= GEN6_SF_FRONT_WIREFRAME;
      break;
   case PIPE_POLYGON_MODE_POINT:
      dw1 |= GEN6_SF_FRONT_POINT;
      break;
   }

   switch (state->fill_back) {
   case PIPE_POLYGON_MODE_FILL:
      dw1 |= GEN6_SF_BACK_SOLID;
      break;
   case PIPE_POLYGON_MODE_LINE:
      dw1 |= GEN6_SF_BACK_WIREFRAME;
      break;
   case PIPE_POLYGON_MODE_POINT:
      dw1 |= GEN6_SF_BACK_POINT;
      break;
   }

   if (state->front_ccw)
      dw1 |= GEN6_SF_WINDING_CCW;

   dw2 = 0;

   if (state->line_smooth) {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 251:
       *
       *   "This field (Anti-aliasing Enable) must be disabled if any of the
       *    render targets have integer (UINT or SINT) surface format."
       *
       * From the Sandy Bridge PRM, volume 2 part 1, page 317:
       *
       *   "This field (Hierarchical Depth Buffer Enable) must be disabled
       *    if Anti-aliasing Enable in 3DSTATE_SF is enabled."
       *
       * TODO We do not check those yet.
       */
      dw2 |= GEN6_SF_LINE_AA_ENABLE |
             GEN6_SF_LINE_END_CAP_WIDTH_1_0;
   }

   switch (state->cull_face) {
   case PIPE_FACE_NONE:
      dw2 |= GEN6_SF_CULL_NONE;
      break;
   case PIPE_FACE_FRONT:
      dw2 |= GEN6_SF_CULL_FRONT;
      break;
   case PIPE_FACE_BACK:
      dw2 |= GEN6_SF_CULL_BACK;
      break;
   case PIPE_FACE_FRONT_AND_BACK:
      dw2 |= GEN6_SF_CULL_BOTH;
      break;
   }

   /*
    * Smooth lines should intersect ceil(line_width) or (ceil(line_width) + 1)
    * pixels in the minor direction.  We have to make the lines slightly
    * thicker, 0.5 pixel on both sides, so that they intersect that many
    * pixels are considered into the lines.
    *
    * Line width is in U3.7.
    */
   line_width = (int) ((state->line_width +
               (float) state->line_smooth) * 128.0f + 0.5f);
   line_width = CLAMP(line_width, 0, 1023);

   if (line_width == 128 && !state->line_smooth) {
      /* use GIQ rules */
      line_width = 0;
   }

   dw2 |= line_width << GEN6_SF_LINE_WIDTH_SHIFT;

   if (state->scissor)
      dw2 |= GEN6_SF_SCISSOR_ENABLE;

   dw3 = GEN6_SF_LINE_AA_MODE_TRUE |
         GEN6_SF_VERTEX_SUBPIXEL_8BITS;

   /* Last Pixel Enable */
   if (state->line_last_pixel)
      dw3 |= 1 << 31;

   /* provoking vertex indices for tri/line/trifan */
   if (state->flatshade_first) {
      dw3 |= 0 << GEN6_SF_TRI_PROVOKE_SHIFT |
             0 << GEN6_SF_LINE_PROVOKE_SHIFT |
             1 << GEN6_SF_TRIFAN_PROVOKE_SHIFT;
   }
   else {
      dw3 |= 2 << GEN6_SF_TRI_PROVOKE_SHIFT |
             1 << GEN6_SF_LINE_PROVOKE_SHIFT |
             2 << GEN6_SF_TRIFAN_PROVOKE_SHIFT;
   }

   if (!state->point_size_per_vertex)
      dw3 |= GEN6_SF_USE_STATE_POINT_WIDTH;

   /* in U8.3 */
   point_width = (int) (state->point_size * 8.0f + 0.5f);
   point_width = CLAMP(point_width, 1, 2047);

   dw3 |= point_width;

   STATIC_ASSERT(Elements(sf->payload) >= 6);
   sf->payload[0] = dw1;
   sf->payload[1] = dw2;
   sf->payload[2] = dw3;
   sf->payload[3] = fui(offset_const);
   sf->payload[4] = fui(offset_scale);
   sf->payload[5] = fui(offset_clamp);

   if (state->multisample) {
      sf->dw_msaa = GEN6_SF_MSRAST_ON_PATTERN;

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 251:
       *
       *   "Software must not program a value of 0.0 when running in
       *    MSRASTMODE_ON_xxx modes - zero-width lines are not available
       *    when multisampling rasterization is enabled."
       */
      if (!line_width) {
         line_width = 128; /* 1.0f */

         sf->dw_msaa |= line_width << GEN6_SF_LINE_WIDTH_SHIFT;
      }
   }
   else {
      sf->dw_msaa = 0;
   }
}
1757
1758 /**
1759 * Fill in DW2 to DW7 of 3DSTATE_SF.
1760 */
1761 void
1762 ilo_gpe_gen6_fill_3dstate_sf_raster(const struct ilo_dev_info *dev,
1763 const struct ilo_rasterizer_state *rasterizer,
1764 int num_samples,
1765 enum pipe_format depth_format,
1766 uint32_t *payload, unsigned payload_len)
1767 {
1768 const struct ilo_rasterizer_sf *sf = &rasterizer->sf;
1769
1770 assert(payload_len == Elements(sf->payload));
1771
1772 if (sf) {
1773 memcpy(payload, sf->payload, sizeof(sf->payload));
1774
1775 if (num_samples > 1)
1776 payload[1] |= sf->dw_msaa;
1777
1778 if (dev->gen >= ILO_GEN(7)) {
1779 int format;
1780
1781 /* separate stencil */
1782 switch (depth_format) {
1783 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1784 depth_format = PIPE_FORMAT_Z24X8_UNORM;
1785 break;
1786 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
1787 depth_format = PIPE_FORMAT_Z32_FLOAT;;
1788 break;
1789 case PIPE_FORMAT_S8_UINT:
1790 depth_format = PIPE_FORMAT_NONE;
1791 break;
1792 default:
1793 break;
1794 }
1795
1796 format = gen6_translate_depth_format(depth_format);
1797 /* FLOAT surface is assumed when there is no depth buffer */
1798 if (format < 0)
1799 format = BRW_DEPTHFORMAT_D32_FLOAT;
1800
1801 payload[0] |= format << GEN7_SF_DEPTH_BUFFER_SURFACE_FORMAT_SHIFT;
1802 }
1803 }
1804 else {
1805 payload[0] = 0;
1806 payload[1] = (num_samples > 1) ? GEN6_SF_MSRAST_ON_PATTERN : 0;
1807 payload[2] = 0;
1808 payload[3] = 0;
1809 payload[4] = 0;
1810 payload[5] = 0;
1811 }
1812 }
1813
1814 /**
1815 * Fill in DW1 and DW8 to DW19 of 3DSTATE_SF.
1816 */
1817 void
1818 ilo_gpe_gen6_fill_3dstate_sf_sbe(const struct ilo_dev_info *dev,
1819 const struct ilo_rasterizer_state *rasterizer,
1820 const struct ilo_shader_state *fs,
1821 const struct ilo_shader_state *last_sh,
1822 uint32_t *dw, int num_dwords)
1823 {
1824 int output_count, vue_offset, vue_len;
1825 const struct ilo_kernel_routing *routing;
1826
1827 ILO_GPE_VALID_GEN(dev, 6, 7);
1828 assert(num_dwords == 13);
1829
1830 if (!fs) {
1831 memset(dw, 0, sizeof(dw[0]) * num_dwords);
1832
1833 if (dev->gen >= ILO_GEN(7))
1834 dw[0] = 1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT;
1835 else
1836 dw[0] = 1 << GEN6_SF_URB_ENTRY_READ_LENGTH_SHIFT;
1837
1838 return;
1839 }
1840
1841 output_count = ilo_shader_get_kernel_param(fs, ILO_KERNEL_INPUT_COUNT);
1842 assert(output_count <= 32);
1843
1844 routing = ilo_shader_get_kernel_routing(fs);
1845
1846 vue_offset = routing->source_skip;
1847 assert(vue_offset % 2 == 0);
1848 vue_offset /= 2;
1849
1850 vue_len = (routing->source_len + 1) / 2;
1851 if (!vue_len)
1852 vue_len = 1;
1853
1854 if (dev->gen >= ILO_GEN(7)) {
1855 dw[0] = output_count << GEN7_SBE_NUM_OUTPUTS_SHIFT |
1856 vue_len << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT |
1857 vue_offset << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT;
1858 if (routing->swizzle_enable)
1859 dw[0] |= GEN7_SBE_SWIZZLE_ENABLE;
1860 }
1861 else {
1862 dw[0] = output_count << GEN6_SF_NUM_OUTPUTS_SHIFT |
1863 vue_len << GEN6_SF_URB_ENTRY_READ_LENGTH_SHIFT |
1864 vue_offset << GEN6_SF_URB_ENTRY_READ_OFFSET_SHIFT;
1865 if (routing->swizzle_enable)
1866 dw[0] |= GEN6_SF_SWIZZLE_ENABLE;
1867 }
1868
1869 switch (rasterizer->state.sprite_coord_mode) {
1870 case PIPE_SPRITE_COORD_UPPER_LEFT:
1871 dw[0] |= GEN6_SF_POINT_SPRITE_UPPERLEFT;
1872 break;
1873 case PIPE_SPRITE_COORD_LOWER_LEFT:
1874 dw[0] |= GEN6_SF_POINT_SPRITE_LOWERLEFT;
1875 break;
1876 }
1877
1878 STATIC_ASSERT(Elements(routing->swizzles) >= 16);
1879 memcpy(&dw[1], routing->swizzles, 2 * 16);
1880
1881 /*
1882 * From the Ivy Bridge PRM, volume 2 part 1, page 268:
1883 *
1884 * "This field (Point Sprite Texture Coordinate Enable) must be
1885 * programmed to 0 when non-point primitives are rendered."
1886 *
1887 * TODO We do not check that yet.
1888 */
1889 dw[9] = routing->point_sprite_enable;
1890
1891 dw[10] = routing->const_interp_enable;
1892
1893 /* WrapShortest enables */
1894 dw[11] = 0;
1895 dw[12] = 0;
1896 }
1897
1898 static void
1899 gen6_emit_3DSTATE_SF(const struct ilo_dev_info *dev,
1900 const struct ilo_rasterizer_state *rasterizer,
1901 const struct ilo_shader_state *fs,
1902 const struct ilo_shader_state *last_sh,
1903 struct ilo_cp *cp)
1904 {
1905 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x13);
1906 const uint8_t cmd_len = 20;
1907 uint32_t payload_raster[6], payload_sbe[13];
1908
1909 ILO_GPE_VALID_GEN(dev, 6, 6);
1910
1911 ilo_gpe_gen6_fill_3dstate_sf_raster(dev, rasterizer,
1912 1, PIPE_FORMAT_NONE, payload_raster, Elements(payload_raster));
1913 ilo_gpe_gen6_fill_3dstate_sf_sbe(dev, rasterizer,
1914 fs, last_sh, payload_sbe, Elements(payload_sbe));
1915
1916 ilo_cp_begin(cp, cmd_len);
1917 ilo_cp_write(cp, cmd | (cmd_len - 2));
1918 ilo_cp_write(cp, payload_sbe[0]);
1919 ilo_cp_write_multi(cp, payload_raster, 6);
1920 ilo_cp_write_multi(cp, &payload_sbe[1], 12);
1921 ilo_cp_end(cp);
1922 }
1923
/**
 * Translate pipe_rasterizer_state into the fixed-function parts of
 * 3DSTATE_WM (DW5 and DW6) for GEN6, plus the multisample bits to be ORed
 * in at emit time.
 */
void
ilo_gpe_init_rasterizer_wm_gen6(const struct ilo_dev_info *dev,
                                const struct pipe_rasterizer_state *state,
                                struct ilo_rasterizer_wm *wm)
{
   uint32_t dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   /* only the FF unit states are set, as in GEN7 */

   dw5 = GEN6_WM_LINE_AA_WIDTH_2_0;

   /* same value as in 3DSTATE_SF */
   if (state->line_smooth)
      dw5 |= GEN6_WM_LINE_END_CAP_AA_WIDTH_1_0;

   if (state->poly_stipple_enable)
      dw5 |= GEN6_WM_POLYGON_STIPPLE_ENABLE;
   if (state->line_stipple_enable)
      dw5 |= GEN6_WM_LINE_STIPPLE_ENABLE;

   dw6 = GEN6_WM_POSITION_ZW_PIXEL |
         GEN6_WM_MSRAST_OFF_PIXEL |
         GEN6_WM_MSDISPMODE_PERSAMPLE;

   if (state->bottom_edge_rule)
      dw6 |= GEN6_WM_POINT_RASTRULE_UPPER_RIGHT;

   /*
    * assertion that makes sure
    *
    *   dw6 |= wm->dw_msaa_rast | wm->dw_msaa_disp;
    *
    * is valid
    */
   STATIC_ASSERT(GEN6_WM_MSRAST_OFF_PIXEL == 0 &&
                 GEN6_WM_MSDISPMODE_PERSAMPLE == 0);

   wm->dw_msaa_rast =
      (state->multisample) ? GEN6_WM_MSRAST_ON_PATTERN : 0;
   wm->dw_msaa_disp = GEN6_WM_MSDISPMODE_PERPIXEL;

   STATIC_ASSERT(Elements(wm->payload) >= 2);
   wm->payload[0] = dw5;
   wm->payload[1] = dw6;
}
1971
/**
 * Initialize the CSO (DW2, DW4, DW5, DW6 of 3DSTATE_WM) for a GEN6
 * fragment shader.
 */
void
ilo_gpe_init_fs_cso_gen6(const struct ilo_dev_info *dev,
                         const struct ilo_shader_state *fs,
                         struct ilo_shader_cso *cso)
{
   int start_grf, input_count, interps, max_threads;
   uint32_t dw2, dw4, dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   start_grf = ilo_shader_get_kernel_param(fs, ILO_KERNEL_URB_DATA_START_REG);
   input_count = ilo_shader_get_kernel_param(fs, ILO_KERNEL_INPUT_COUNT);
   interps = ilo_shader_get_kernel_param(fs,
         ILO_KERNEL_FS_BARYCENTRIC_INTERPOLATIONS);

   /* see brwCreateContext() */
   max_threads = (dev->gt == 2) ? 80 : 40;

   /* IEEE float mode; the alternate (ALT) mode is never used */
   dw2 = (true) ? 0 : GEN6_WM_FLOATING_POINT_MODE_ALT;

   /* only the SIMD8 dispatch (slot 0) is used; see below */
   dw4 = start_grf << GEN6_WM_DISPATCH_START_GRF_SHIFT_0 |
         0 << GEN6_WM_DISPATCH_START_GRF_SHIFT_1 |
         0 << GEN6_WM_DISPATCH_START_GRF_SHIFT_2;

   dw5 = (max_threads - 1) << GEN6_WM_MAX_THREADS_SHIFT;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 275:
    *
    *   "This bit (Pixel Shader Kill Pixel), if ENABLED, indicates that the
    *    PS kernel or color calculator has the ability to kill (discard)
    *    pixels or samples, other than due to depth or stencil testing.
    *    This bit is required to be ENABLED in the following situations:
    *
    *    The API pixel shader program contains "killpix" or "discard"
    *    instructions, or other code in the pixel shader kernel that can
    *    cause the final pixel mask to differ from the pixel mask received
    *    on dispatch.
    *
    *    A sampler with chroma key enabled with kill pixel mode is used by
    *    the pixel shader.
    *
    *    Any render target has Alpha Test Enable or AlphaToCoverage Enable
    *    enabled.
    *
    *    The pixel shader kernel generates and outputs oMask.
    *
    *    Note: As ClipDistance clipping is fully supported in hardware and
    *    therefore not via PS instructions, there should be no need to
    *    ENABLE this bit due to ClipDistance clipping."
    */
   if (ilo_shader_get_kernel_param(fs, ILO_KERNEL_FS_USE_KILL))
      dw5 |= GEN6_WM_KILL_ENABLE;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 275:
    *
    *   "If a NULL Depth Buffer is selected, the Pixel Shader Computed Depth
    *    field must be set to disabled."
    *
    * TODO This is not checked yet.
    */
   if (ilo_shader_get_kernel_param(fs, ILO_KERNEL_FS_OUTPUT_Z))
      dw5 |= GEN6_WM_COMPUTED_DEPTH;

   if (ilo_shader_get_kernel_param(fs, ILO_KERNEL_FS_INPUT_Z))
      dw5 |= GEN6_WM_USES_SOURCE_DEPTH;

   if (ilo_shader_get_kernel_param(fs, ILO_KERNEL_FS_INPUT_W))
      dw5 |= GEN6_WM_USES_SOURCE_W;

   /*
    * TODO set this bit only when
    *
    *  a) fs writes colors and color is not masked, or
    *  b) fs writes depth, or
    *  c) fs or cc kills
    */
   if (true)
      dw5 |= GEN6_WM_DISPATCH_ENABLE;

   /* only SIMD8 dispatch is supported here */
   assert(!ilo_shader_get_kernel_param(fs, ILO_KERNEL_FS_DISPATCH_16_OFFSET));
   dw5 |= GEN6_WM_8_DISPATCH_ENABLE;

   dw6 = input_count << GEN6_WM_NUM_SF_OUTPUTS_SHIFT |
         GEN6_WM_POSOFFSET_NONE |
         interps << GEN6_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT;

   STATIC_ASSERT(Elements(cso->payload) >= 4);
   cso->payload[0] = dw2;
   cso->payload[1] = dw4;
   cso->payload[2] = dw5;
   cso->payload[3] = dw6;
}
2066
/**
 * Emit 3DSTATE_WM on GEN6.
 *
 * When \p fs is NULL the PS is disabled, but the max-thread field is still
 * programmed to a valid value.  Otherwise the FS CSO dwords are combined
 * with the sampler count, the rasterizer's FF-unit bits, and the kill/
 * dual-blend flags.
 */
static void
gen6_emit_3DSTATE_WM(const struct ilo_dev_info *dev,
                     const struct ilo_shader_state *fs,
                     int num_samplers,
                     const struct ilo_rasterizer_state *rasterizer,
                     bool dual_blend, bool cc_may_kill,
                     struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x14);
   const uint8_t cmd_len = 9;
   const int num_samples = 1;
   const struct ilo_shader_cso *fs_cso;
   uint32_t dw2, dw4, dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   if (!fs) {
      /* see brwCreateContext() */
      const int max_threads = (dev->gt == 2) ? 80 : 40;

      ilo_cp_begin(cp, cmd_len);
      ilo_cp_write(cp, cmd | (cmd_len - 2));
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      /* honor the valid range even if dispatching is disabled */
      ilo_cp_write(cp, (max_threads - 1) << GEN6_WM_MAX_THREADS_SHIFT);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_write(cp, 0);
      ilo_cp_end(cp);

      return;
   }

   fs_cso = ilo_shader_get_kernel_cso(fs);
   dw2 = fs_cso->payload[0];
   dw4 = fs_cso->payload[1];
   dw5 = fs_cso->payload[2];
   dw6 = fs_cso->payload[3];

   /* sampler count is programmed in groups of 4 */
   dw2 |= (num_samplers + 3) / 4 << GEN6_WM_SAMPLER_COUNT_SHIFT;

   if (true) {
      dw4 |= GEN6_WM_STATISTICS_ENABLE;
   }
   else {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 248:
       *
       *   "This bit (Statistics Enable) must be disabled if either of these
       *    bits is set: Depth Buffer Clear , Hierarchical Depth Buffer
       *    Resolve Enable or Depth Buffer Resolve Enable."
       */
      dw4 |= GEN6_WM_DEPTH_CLEAR;
      dw4 |= GEN6_WM_DEPTH_RESOLVE;
      dw4 |= GEN6_WM_HIERARCHICAL_DEPTH_RESOLVE;
   }

   /* alpha test / alpha-to-coverage can also kill pixels */
   if (cc_may_kill) {
      dw5 |= GEN6_WM_KILL_ENABLE |
             GEN6_WM_DISPATCH_ENABLE;
   }

   if (dual_blend)
      dw5 |= GEN6_WM_DUAL_SOURCE_BLEND_ENABLE;

   dw5 |= rasterizer->wm.payload[0];

   dw6 |= rasterizer->wm.payload[1];

   if (num_samples > 1) {
      dw6 |= rasterizer->wm.dw_msaa_rast |
             rasterizer->wm.dw_msaa_disp;
   }

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, ilo_shader_get_kernel_offset(fs));
   ilo_cp_write(cp, dw2);
   ilo_cp_write(cp, 0); /* scratch */
   ilo_cp_write(cp, dw4);
   ilo_cp_write(cp, dw5);
   ilo_cp_write(cp, dw6);
   ilo_cp_write(cp, 0); /* kernel 1 */
   ilo_cp_write(cp, 0); /* kernel 2 */
   ilo_cp_end(cp);
}
2156
/**
 * Pack push-constant buffer addresses and read lengths into the four body
 * dwords of a 3DSTATE_CONSTANT_* command.
 *
 * Returns a bitmask with bit i set for each slot that has a buffer.
 */
static unsigned
gen6_fill_3dstate_constant(const struct ilo_dev_info *dev,
                           const uint32_t *bufs, const int *sizes,
                           int num_bufs, int max_read_length,
                           uint32_t *dw, int num_dwords)
{
   unsigned enable_mask = 0x0;
   int sum, slot;

   assert(num_dwords == 4);

   sum = 0;
   for (slot = 0; slot < 4; slot++) {
      int len;

      if (slot >= num_bufs || !sizes[slot]) {
         dw[slot] = 0;
         continue;
      }

      /* read length is in 256-bit (32-byte) units, minus one */
      len = (sizes[slot] + 31) / 32 - 1;

      assert(bufs[slot] % 32 == 0);
      assert(len < 32);

      enable_mask |= 1 << slot;
      dw[slot] = bufs[slot] | len;

      sum += len + 1;
   }

   assert(sum <= max_read_length);

   return enable_mask;
}
2191
/**
 * Emit 3DSTATE_CONSTANT_VS with up to four push-constant buffers.
 */
static void
gen6_emit_3DSTATE_CONSTANT_VS(const struct ilo_dev_info *dev,
                              const uint32_t *bufs, const int *sizes,
                              int num_bufs,
                              struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x15);
   const uint8_t cmd_len = 5;
   uint32_t body[4], enable_mask;
   int i;

   ILO_GPE_VALID_GEN(dev, 6, 6);
   assert(num_bufs <= 4);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 138:
    *
    *     "The sum of all four read length fields (each incremented to
    *      represent the actual read length) must be less than or equal
    *      to 32"
    */
   enable_mask = gen6_fill_3dstate_constant(dev,
         bufs, sizes, num_bufs, 32, body, Elements(body));

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) | enable_mask << 12);
   for (i = 0; i < 4; i++)
      ilo_cp_write(cp, body[i]);
   ilo_cp_end(cp);
}
2222
/**
 * Emit 3DSTATE_CONSTANT_GS with up to four push-constant buffers.
 */
static void
gen6_emit_3DSTATE_CONSTANT_GS(const struct ilo_dev_info *dev,
                              const uint32_t *bufs, const int *sizes,
                              int num_bufs,
                              struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x16);
   const uint8_t cmd_len = 5;
   uint32_t body[4], enable_mask;
   int i;

   ILO_GPE_VALID_GEN(dev, 6, 6);
   assert(num_bufs <= 4);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 161:
    *
    *     "The sum of all four read length fields (each incremented to
    *      represent the actual read length) must be less than or equal
    *      to 64"
    */
   enable_mask = gen6_fill_3dstate_constant(dev,
         bufs, sizes, num_bufs, 64, body, Elements(body));

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) | enable_mask << 12);
   for (i = 0; i < 4; i++)
      ilo_cp_write(cp, body[i]);
   ilo_cp_end(cp);
}
2253
/**
 * Emit 3DSTATE_CONSTANT_PS with up to four push-constant buffers.
 */
static void
gen6_emit_3DSTATE_CONSTANT_PS(const struct ilo_dev_info *dev,
                              const uint32_t *bufs, const int *sizes,
                              int num_bufs,
                              struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x17);
   const uint8_t cmd_len = 5;
   uint32_t body[4], enable_mask;
   int i;

   ILO_GPE_VALID_GEN(dev, 6, 6);
   assert(num_bufs <= 4);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 287:
    *
    *     "The sum of all four read length fields (each incremented to
    *      represent the actual read length) must be less than or equal
    *      to 64"
    */
   enable_mask = gen6_fill_3dstate_constant(dev,
         bufs, sizes, num_bufs, 64, body, Elements(body));

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2) | enable_mask << 12);
   for (i = 0; i < 4; i++)
      ilo_cp_write(cp, body[i]);
   ilo_cp_end(cp);
}
2284
/**
 * Emit 3DSTATE_SAMPLE_MASK.  Only the low four mask bits are used on GEN6.
 */
static void
gen6_emit_3DSTATE_SAMPLE_MASK(const struct ilo_dev_info *dev,
                              unsigned sample_mask,
                              struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x18);
   const uint8_t cmd_len = 2;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   /* drop bits outside the 4-sample range supported here */
   ilo_cp_write(cp, sample_mask & 0xf);
   ilo_cp_end(cp);
}
2303
2304 static void
2305 gen6_emit_3DSTATE_DRAWING_RECTANGLE(const struct ilo_dev_info *dev,
2306 unsigned x, unsigned y,
2307 unsigned width, unsigned height,
2308 struct ilo_cp *cp)
2309 {
2310 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x00);
2311 const uint8_t cmd_len = 4;
2312 unsigned xmax = x + width - 1;
2313 unsigned ymax = y + height - 1;
2314 int rect_limit;
2315
2316 ILO_GPE_VALID_GEN(dev, 6, 7);
2317
2318 if (dev->gen >= ILO_GEN(7)) {
2319 rect_limit = 16383;
2320 }
2321 else {
2322 /*
2323 * From the Sandy Bridge PRM, volume 2 part 1, page 230:
2324 *
2325 * "[DevSNB] Errata: This field (Clipped Drawing Rectangle Y Min)
2326 * must be an even number"
2327 */
2328 assert(y % 2 == 0);
2329
2330 rect_limit = 8191;
2331 }
2332
2333 if (x > rect_limit) x = rect_limit;
2334 if (y > rect_limit) y = rect_limit;
2335 if (xmax > rect_limit) xmax = rect_limit;
2336 if (ymax > rect_limit) ymax = rect_limit;
2337
2338 ilo_cp_begin(cp, cmd_len);
2339 ilo_cp_write(cp, cmd | (cmd_len - 2));
2340 ilo_cp_write(cp, y << 16 | x);
2341 ilo_cp_write(cp, ymax << 16 | xmax);
2342
2343 /*
2344 * There is no need to set the origin. It is intended to support front
2345 * buffer rendering.
2346 */
2347 ilo_cp_write(cp, 0);
2348
2349 ilo_cp_end(cp);
2350 }
2351
/**
 * Collected parameters for a depth/stencil surface, consumed by
 * ilo_gpe_init_zs_surface() to build the 3DSTATE_DEPTH_BUFFER,
 * 3DSTATE_STENCIL_BUFFER, and 3DSTATE_HIER_DEPTH_BUFFER payloads.
 */
struct ilo_zs_surface_info {
   int surface_type;    /* BRW_SURFACE_* */
   int format;          /* BRW_DEPTHFORMAT_* */

   /* one buffer description each for depth, separate stencil, and HiZ */
   struct {
      struct intel_bo *bo;
      unsigned stride;
      enum intel_tiling_mode tiling;
      uint32_t offset;
   } zs, stencil, hiz;

   unsigned width, height, depth;
   unsigned lod, first_layer, num_layers;
   /* X/Y offsets of the selected slice within the bo (see zs_init_info()) */
   uint32_t x_offset, y_offset;
};
2367
2368 static void
2369 zs_init_info_null(const struct ilo_dev_info *dev,
2370 struct ilo_zs_surface_info *info)
2371 {
2372 ILO_GPE_VALID_GEN(dev, 6, 7);
2373
2374 memset(info, 0, sizeof(*info));
2375
2376 info->surface_type = BRW_SURFACE_NULL;
2377 info->format = BRW_DEPTHFORMAT_D32_FLOAT;
2378 info->width = 1;
2379 info->height = 1;
2380 info->depth = 1;
2381 info->num_layers = 1;
2382 }
2383
/**
 * Fill \p info with the parameters needed to set up the depth/stencil
 * buffers for one (level, layer) of \p tex viewed as \p format.
 *
 * On an unsupported format, \p info is reset to a null surface instead.
 * Since layered rendering is not supported here, the view is rebased so
 * that LOD 0 / layer 0 points at the selected slice via X/Y offsets.
 */
static void
zs_init_info(const struct ilo_dev_info *dev,
             const struct ilo_texture *tex,
             enum pipe_format format,
             unsigned level,
             unsigned first_layer, unsigned num_layers,
             struct ilo_zs_surface_info *info)
{
   const bool rebase_layer = true;
   /* HiZ is never used here; hiz_bo stays NULL throughout */
   struct intel_bo * const hiz_bo = NULL;
   bool separate_stencil;
   /* per-buffer slice offsets: [0] depth, [1] stencil, [2] hiz */
   uint32_t x_offset[3], y_offset[3];

   ILO_GPE_VALID_GEN(dev, 6, 7);

   memset(info, 0, sizeof(*info));

   info->surface_type = ilo_gpe_gen6_translate_texture(tex->base.target);

   if (info->surface_type == BRW_SURFACE_CUBE) {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
       *
       *     "For Other Surfaces (Cube Surfaces):
       *      This field (Minimum Array Element) is ignored."
       *
       *     "For Other Surfaces (Cube Surfaces):
       *      This field (Render Target View Extent) is ignored."
       *
       * As such, we cannot set first_layer and num_layers on cube surfaces.
       * To work around that, treat it as a 2D surface.
       */
      info->surface_type = BRW_SURFACE_2D;
   }

   if (dev->gen >= ILO_GEN(7)) {
      separate_stencil = true;
   }
   else {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 317:
       *
       *     "This field (Separate Stencil Buffer Enable) must be set to the
       *      same value (enabled or disabled) as Hierarchical Depth Buffer
       *      Enable."
       */
      separate_stencil = (hiz_bo != NULL);
   }

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 317:
    *
    *     "If this field (Hierarchical Depth Buffer Enable) is enabled, the
    *      Surface Format of the depth buffer cannot be
    *      D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
    *      requires the separate stencil buffer."
    *
    * From the Ironlake PRM, volume 2 part 1, page 330:
    *
    *     "If this field (Separate Stencil Buffer Enable) is disabled, the
    *      Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
    *
    * There is no similar restriction for GEN6.  But when D24_UNORM_X8_UINT
    * is indeed used, the depth values output by the fragment shaders will
    * be different when read back.
    *
    * As for GEN7+, separate_stencil is always true.
    */
   switch (format) {
   case PIPE_FORMAT_Z16_UNORM:
      info->format = BRW_DEPTHFORMAT_D16_UNORM;
      break;
   case PIPE_FORMAT_Z32_FLOAT:
      info->format = BRW_DEPTHFORMAT_D32_FLOAT;
      break;
   case PIPE_FORMAT_Z24X8_UNORM:
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      info->format = (separate_stencil) ?
         BRW_DEPTHFORMAT_D24_UNORM_X8_UINT :
         BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
      break;
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      info->format = (separate_stencil) ?
         BRW_DEPTHFORMAT_D32_FLOAT :
         BRW_DEPTHFORMAT_D32_FLOAT_S8X24_UINT;
      break;
   case PIPE_FORMAT_S8_UINT:
      if (separate_stencil) {
         /* stencil-only view: pick an arbitrary valid depth format */
         info->format = BRW_DEPTHFORMAT_D32_FLOAT;
         break;
      }
      /* fall through */
   default:
      assert(!"unsupported depth/stencil format");
      zs_init_info_null(dev, info);
      return;
      break; /* not reached */
   }

   if (format != PIPE_FORMAT_S8_UINT) {
      info->zs.bo = tex->bo;
      info->zs.stride = tex->bo_stride;
      info->zs.tiling = tex->tiling;

      if (rebase_layer) {
         info->zs.offset = ilo_texture_get_slice_offset(tex,
               level, first_layer, &x_offset[0], &y_offset[0]);
      }
   }

   if (tex->separate_s8 || format == PIPE_FORMAT_S8_UINT) {
      const struct ilo_texture *s8_tex =
         (tex->separate_s8) ? tex->separate_s8 : tex;

      info->stencil.bo = s8_tex->bo;

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 329:
       *
       *     "The pitch must be set to 2x the value computed based on width,
       *      as the stencil buffer is stored with two rows interleaved."
       *
       * According to the classic driver, we need to do the same for GEN7+
       * even though the Ivy Bridge PRM does not say anything about it.
       */
      info->stencil.stride = s8_tex->bo_stride * 2;

      info->stencil.tiling = s8_tex->tiling;

      if (rebase_layer) {
         info->stencil.offset = ilo_texture_get_slice_offset(s8_tex,
               level, first_layer, &x_offset[1], &y_offset[1]);
      }
   }

   if (hiz_bo) {
      info->hiz.bo = hiz_bo;
      info->hiz.stride = 0;
      info->hiz.tiling = 0;
      info->hiz.offset = 0;
      x_offset[2] = 0;
      y_offset[2] = 0;
   }

   info->width = tex->base.width0;
   info->height = tex->base.height0;
   info->depth = (tex->base.target == PIPE_TEXTURE_3D) ?
      tex->base.depth0 : num_layers;

   info->lod = level;
   info->first_layer = first_layer;
   info->num_layers = num_layers;

   if (rebase_layer) {
      /* the size of the layer */
      info->width = u_minify(info->width, level);
      info->height = u_minify(info->height, level);
      if (info->surface_type == BRW_SURFACE_3D)
         info->depth = u_minify(info->depth, level);
      else
         info->depth = 1;

      /* no layered rendering */
      assert(num_layers == 1);

      info->lod = 0;
      info->first_layer = 0;
      info->num_layers = 1;

      /* all three share the same X/Y offsets */
      if (info->zs.bo) {
         if (info->stencil.bo) {
            assert(x_offset[0] == x_offset[1]);
            assert(y_offset[0] == y_offset[1]);
         }

         info->x_offset = x_offset[0];
         info->y_offset = y_offset[0];
      }
      else {
         assert(info->stencil.bo);

         info->x_offset = x_offset[1];
         info->y_offset = y_offset[1];
      }

      if (info->hiz.bo) {
         assert(info->x_offset == x_offset[2]);
         assert(info->y_offset == y_offset[2]);
      }

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 326:
       *
       *     "The 3 LSBs of both offsets (Depth Coordinate Offset Y and
       *      Depth Coordinate Offset X) must be zero to ensure correct
       *      alignment"
       *
       * XXX Skip the check for gen6, which seems to be fine.  We need to
       * make sure that does not happen eventually.
       */
      if (dev->gen >= ILO_GEN(7)) {
         assert((info->x_offset & 7) == 0 && (info->y_offset & 7) == 0);
         info->x_offset &= ~7;
         info->y_offset &= ~7;
      }

      /* grow the extent so the offset slice stays within bounds */
      info->width += info->x_offset;
      info->height += info->y_offset;

      /* we have to treat them as 2D surfaces */
      if (info->surface_type == BRW_SURFACE_CUBE) {
         assert(tex->base.width0 == tex->base.height0);
         /* we will set slice_offset to point to the single face */
         info->surface_type = BRW_SURFACE_2D;
      }
      else if (info->surface_type == BRW_SURFACE_1D && info->height > 1) {
         assert(tex->base.height0 == 1);
         info->surface_type = BRW_SURFACE_2D;
      }
   }
}
2605
/**
 * Construct the depth/stencil surface CSO for (\p tex, \p format, \p level,
 * \p first_layer .. +\p num_layers).
 *
 * Packs DW1..DW6 of 3DSTATE_DEPTH_BUFFER into zs->payload[0..5], the
 * stencil-buffer dwords into payload[6..7], and the HiZ dwords into
 * payload[8..9] (see the matching gen6_emit_3DSTATE_* functions).  The bo
 * references stored in \p zs are NOT reference-counted.  A NULL \p tex
 * yields a null surface.
 */
void
ilo_gpe_init_zs_surface(const struct ilo_dev_info *dev,
                        const struct ilo_texture *tex,
                        enum pipe_format format,
                        unsigned level,
                        unsigned first_layer, unsigned num_layers,
                        struct ilo_zs_surface *zs)
{
   /* hardware limits differ between GEN6 and GEN7 */
   const int max_2d_size = (dev->gen >= ILO_GEN(7)) ? 16384 : 8192;
   const int max_array_size = (dev->gen >= ILO_GEN(7)) ? 2048 : 512;
   struct ilo_zs_surface_info info;
   uint32_t dw1, dw2, dw3, dw4, dw5, dw6;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   if (tex)
      zs_init_info(dev, tex, format, level, first_layer, num_layers, &info);
   else
      zs_init_info_null(dev, &info);

   /* sanity-check the dimensions against the per-type hardware limits */
   switch (info.surface_type) {
   case BRW_SURFACE_NULL:
      break;
   case BRW_SURFACE_1D:
      assert(info.width <= max_2d_size && info.height == 1 &&
             info.depth <= max_array_size);
      assert(info.first_layer < max_array_size - 1 &&
             info.num_layers <= max_array_size);
      break;
   case BRW_SURFACE_2D:
      assert(info.width <= max_2d_size && info.height <= max_2d_size &&
             info.depth <= max_array_size);
      assert(info.first_layer < max_array_size - 1 &&
             info.num_layers <= max_array_size);
      break;
   case BRW_SURFACE_3D:
      assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
      assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
      assert(info.x_offset == 0 && info.y_offset == 0);
      break;
   case BRW_SURFACE_CUBE:
      assert(info.width <= max_2d_size && info.height <= max_2d_size &&
             info.depth == 1);
      assert(info.first_layer == 0 && info.num_layers == 1);
      assert(info.width == info.height);
      assert(info.x_offset == 0 && info.y_offset == 0);
      break;
   default:
      assert(!"unexpected depth surface type");
      break;
   }

   dw1 = info.surface_type << 29 |
         info.format << 18;

   if (info.zs.bo) {
      /* required for GEN6+ */
      assert(info.zs.tiling == INTEL_TILING_Y);
      assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
            info.zs.stride % 128 == 0);
      assert(info.width <= info.zs.stride);

      /* surface pitch, minus one */
      dw1 |= (info.zs.stride - 1);
      dw2 = info.zs.offset;
   }
   else {
      dw2 = 0;
   }

   if (dev->gen >= ILO_GEN(7)) {
      /* GEN7 has explicit depth/stencil/hiz write-enable bits in DW1 */
      if (info.zs.bo)
         dw1 |= 1 << 28;

      if (info.stencil.bo)
         dw1 |= 1 << 27;

      if (info.hiz.bo)
         dw1 |= 1 << 22;

      dw3 = (info.height - 1) << 18 |
            (info.width - 1) << 4 |
            info.lod;

      dw4 = (info.depth - 1) << 21 |
            info.first_layer << 10;

      dw5 = info.y_offset << 16 | info.x_offset;

      dw6 = (info.num_layers - 1) << 21;
   }
   else {
      /* always Y-tiled */
      dw1 |= 1 << 27 |
             1 << 26;

      if (info.hiz.bo) {
         dw1 |= 1 << 22 |
                1 << 21;
      }

      dw3 = (info.height - 1) << 19 |
            (info.width - 1) << 6 |
            info.lod << 2 |
            BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1;

      dw4 = (info.depth - 1) << 21 |
            info.first_layer << 10 |
            (info.num_layers - 1) << 1;

      dw5 = info.y_offset << 16 | info.x_offset;

      dw6 = 0;
   }

   STATIC_ASSERT(Elements(zs->payload) >= 10);

   zs->payload[0] = dw1;
   zs->payload[1] = dw2;
   zs->payload[2] = dw3;
   zs->payload[3] = dw4;
   zs->payload[4] = dw5;
   zs->payload[5] = dw6;

   /* do not increment reference count */
   zs->bo = info.zs.bo;

   /* separate stencil */
   if (info.stencil.bo) {
      assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
             info.stencil.stride % 128 == 0);

      zs->payload[6] = info.stencil.stride - 1;
      zs->payload[7] = info.stencil.offset;

      /* do not increment reference count */
      zs->separate_s8_bo = info.stencil.bo;
   }
   else {
      zs->payload[6] = 0;
      zs->payload[7] = 0;
      zs->separate_s8_bo = NULL;
   }

   /* hiz */
   if (info.hiz.bo) {
      zs->payload[8] = info.hiz.stride - 1;
      zs->payload[9] = info.hiz.offset;

      /* do not increment reference count */
      zs->hiz_bo = info.hiz.bo;
   }
   else {
      zs->payload[8] = 0;
      zs->payload[9] = 0;
      zs->hiz_bo = NULL;
   }
}
2763
2764 static void
2765 gen6_emit_3DSTATE_DEPTH_BUFFER(const struct ilo_dev_info *dev,
2766 const struct ilo_zs_surface *zs,
2767 struct ilo_cp *cp)
2768 {
2769 const uint32_t cmd = (dev->gen >= ILO_GEN(7)) ?
2770 ILO_GPE_CMD(0x3, 0x0, 0x05) : ILO_GPE_CMD(0x3, 0x1, 0x05);
2771 const uint8_t cmd_len = 7;
2772
2773 ILO_GPE_VALID_GEN(dev, 6, 7);
2774
2775 ilo_cp_begin(cp, cmd_len);
2776 ilo_cp_write(cp, cmd | (cmd_len - 2));
2777 ilo_cp_write(cp, zs->payload[0]);
2778 ilo_cp_write_bo(cp, zs->payload[1], zs->bo,
2779 INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
2780 ilo_cp_write(cp, zs->payload[2]);
2781 ilo_cp_write(cp, zs->payload[3]);
2782 ilo_cp_write(cp, zs->payload[4]);
2783 ilo_cp_write(cp, zs->payload[5]);
2784 ilo_cp_end(cp);
2785 }
2786
/**
 * Emit 3DSTATE_POLY_STIPPLE_OFFSET.  Both offsets must be in [0, 31].
 */
static void
gen6_emit_3DSTATE_POLY_STIPPLE_OFFSET(const struct ilo_dev_info *dev,
                                      int x_offset, int y_offset,
                                      struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x06);
   const uint8_t cmd_len = 2;

   ILO_GPE_VALID_GEN(dev, 6, 7);
   assert(x_offset >= 0 && x_offset <= 31);
   assert(y_offset >= 0 && y_offset <= 31);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   /* X in bits 12:8, Y in bits 4:0 */
   ilo_cp_write(cp, x_offset << 8 | y_offset);
   ilo_cp_end(cp);
}
2804
2805 static void
2806 gen6_emit_3DSTATE_POLY_STIPPLE_PATTERN(const struct ilo_dev_info *dev,
2807 const struct pipe_poly_stipple *pattern,
2808 struct ilo_cp *cp)
2809 {
2810 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x07);
2811 const uint8_t cmd_len = 33;
2812 int i;
2813
2814 ILO_GPE_VALID_GEN(dev, 6, 7);
2815 assert(Elements(pattern->stipple) == 32);
2816
2817 ilo_cp_begin(cp, cmd_len);
2818 ilo_cp_write(cp, cmd | (cmd_len - 2));
2819 for (i = 0; i < 32; i++)
2820 ilo_cp_write(cp, pattern->stipple[i]);
2821 ilo_cp_end(cp);
2822 }
2823
2824 static void
2825 gen6_emit_3DSTATE_LINE_STIPPLE(const struct ilo_dev_info *dev,
2826 unsigned pattern, unsigned factor,
2827 struct ilo_cp *cp)
2828 {
2829 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x08);
2830 const uint8_t cmd_len = 3;
2831 unsigned inverse;
2832
2833 ILO_GPE_VALID_GEN(dev, 6, 7);
2834 assert((pattern & 0xffff) == pattern);
2835 assert(factor >= 1 && factor <= 256);
2836
2837 ilo_cp_begin(cp, cmd_len);
2838 ilo_cp_write(cp, cmd | (cmd_len - 2));
2839 ilo_cp_write(cp, pattern);
2840
2841 if (dev->gen >= ILO_GEN(7)) {
2842 /* in U1.16 */
2843 inverse = (unsigned) (65536.0f / factor);
2844 ilo_cp_write(cp, inverse << 15 | factor);
2845 }
2846 else {
2847 /* in U1.13 */
2848 inverse = (unsigned) (8192.0f / factor);
2849 ilo_cp_write(cp, inverse << 16 | factor);
2850 }
2851
2852 ilo_cp_end(cp);
2853 }
2854
/**
 * Emit 3DSTATE_AA_LINE_PARAMETERS with all bias/slope values at zero.
 */
static void
gen6_emit_3DSTATE_AA_LINE_PARAMETERS(const struct ilo_dev_info *dev,
                                     struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x0a);
   const uint8_t cmd_len = 3;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   /* cap and bias fields are all left at zero */
   ilo_cp_write(cp, 0);
   ilo_cp_write(cp, 0);
   ilo_cp_end(cp);
}
2870
2871 static void
2872 gen6_emit_3DSTATE_GS_SVB_INDEX(const struct ilo_dev_info *dev,
2873 int index, unsigned svbi,
2874 unsigned max_svbi,
2875 bool load_vertex_count,
2876 struct ilo_cp *cp)
2877 {
2878 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x0b);
2879 const uint8_t cmd_len = 4;
2880 uint32_t dw1;
2881
2882 ILO_GPE_VALID_GEN(dev, 6, 6);
2883 assert(index >= 0 && index < 4);
2884
2885 dw1 = index << SVB_INDEX_SHIFT;
2886 if (load_vertex_count)
2887 dw1 |= SVB_LOAD_INTERNAL_VERTEX_COUNT;
2888
2889 ilo_cp_begin(cp, cmd_len);
2890 ilo_cp_write(cp, cmd | (cmd_len - 2));
2891 ilo_cp_write(cp, dw1);
2892 ilo_cp_write(cp, svbi);
2893 ilo_cp_write(cp, max_svbi);
2894 ilo_cp_end(cp);
2895 }
2896
2897 static void
2898 gen6_emit_3DSTATE_MULTISAMPLE(const struct ilo_dev_info *dev,
2899 int num_samples,
2900 const uint32_t *packed_sample_pos,
2901 bool pixel_location_center,
2902 struct ilo_cp *cp)
2903 {
2904 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x0d);
2905 const uint8_t cmd_len = (dev->gen >= ILO_GEN(7)) ? 4 : 3;
2906 uint32_t dw1, dw2, dw3;
2907
2908 ILO_GPE_VALID_GEN(dev, 6, 7);
2909
2910 dw1 = (pixel_location_center) ?
2911 MS_PIXEL_LOCATION_CENTER : MS_PIXEL_LOCATION_UPPER_LEFT;
2912
2913 switch (num_samples) {
2914 case 0:
2915 case 1:
2916 dw1 |= MS_NUMSAMPLES_1;
2917 dw2 = 0;
2918 dw3 = 0;
2919 break;
2920 case 4:
2921 dw1 |= MS_NUMSAMPLES_4;
2922 dw2 = packed_sample_pos[0];
2923 dw3 = 0;
2924 break;
2925 case 8:
2926 assert(dev->gen >= ILO_GEN(7));
2927 dw1 |= MS_NUMSAMPLES_8;
2928 dw2 = packed_sample_pos[0];
2929 dw3 = packed_sample_pos[1];
2930 break;
2931 default:
2932 assert(!"unsupported sample count");
2933 dw1 |= MS_NUMSAMPLES_1;
2934 dw2 = 0;
2935 dw3 = 0;
2936 break;
2937 }
2938
2939 ilo_cp_begin(cp, cmd_len);
2940 ilo_cp_write(cp, cmd | (cmd_len - 2));
2941 ilo_cp_write(cp, dw1);
2942 ilo_cp_write(cp, dw2);
2943 if (dev->gen >= ILO_GEN(7))
2944 ilo_cp_write(cp, dw3);
2945 ilo_cp_end(cp);
2946 }
2947
2948 static void
2949 gen6_emit_3DSTATE_STENCIL_BUFFER(const struct ilo_dev_info *dev,
2950 const struct ilo_zs_surface *zs,
2951 struct ilo_cp *cp)
2952 {
2953 const uint32_t cmd = (dev->gen >= ILO_GEN(7)) ?
2954 ILO_GPE_CMD(0x3, 0x0, 0x06) :
2955 ILO_GPE_CMD(0x3, 0x1, 0x0e);
2956 const uint8_t cmd_len = 3;
2957
2958 ILO_GPE_VALID_GEN(dev, 6, 7);
2959
2960 ilo_cp_begin(cp, cmd_len);
2961 ilo_cp_write(cp, cmd | (cmd_len - 2));
2962 /* see ilo_gpe_init_zs_surface() */
2963 ilo_cp_write(cp, zs->payload[6]);
2964 ilo_cp_write_bo(cp, zs->payload[7], zs->separate_s8_bo,
2965 INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
2966 ilo_cp_end(cp);
2967 }
2968
2969 static void
2970 gen6_emit_3DSTATE_HIER_DEPTH_BUFFER(const struct ilo_dev_info *dev,
2971 const struct ilo_zs_surface *zs,
2972 struct ilo_cp *cp)
2973 {
2974 const uint32_t cmd = (dev->gen >= ILO_GEN(7)) ?
2975 ILO_GPE_CMD(0x3, 0x0, 0x07) :
2976 ILO_GPE_CMD(0x3, 0x1, 0x0f);
2977 const uint8_t cmd_len = 3;
2978
2979 ILO_GPE_VALID_GEN(dev, 6, 7);
2980
2981 ilo_cp_begin(cp, cmd_len);
2982 ilo_cp_write(cp, cmd | (cmd_len - 2));
2983 /* see ilo_gpe_init_zs_surface() */
2984 ilo_cp_write(cp, zs->payload[8]);
2985 ilo_cp_write_bo(cp, zs->payload[9], zs->hiz_bo,
2986 INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
2987 ilo_cp_end(cp);
2988 }
2989
2990 static void
2991 gen6_emit_3DSTATE_CLEAR_PARAMS(const struct ilo_dev_info *dev,
2992 uint32_t clear_val,
2993 struct ilo_cp *cp)
2994 {
2995 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x10);
2996 const uint8_t cmd_len = 2;
2997
2998 ILO_GPE_VALID_GEN(dev, 6, 6);
2999
3000 ilo_cp_begin(cp, cmd_len);
3001 ilo_cp_write(cp, cmd | (cmd_len - 2) |
3002 GEN5_DEPTH_CLEAR_VALID);
3003 ilo_cp_write(cp, clear_val);
3004 ilo_cp_end(cp);
3005 }
3006
/**
 * Emit a PIPE_CONTROL with flags \p dw1, optionally writing a post-sync
 * value (a dword, or a qword when \p write_qword) to \p bo at
 * \p bo_offset.
 *
 * The asserts validate the PRM-documented restrictions on combining
 * CS Stall and Depth Stall with other flags.
 */
static void
gen6_emit_PIPE_CONTROL(const struct ilo_dev_info *dev,
                       uint32_t dw1,
                       struct intel_bo *bo, uint32_t bo_offset,
                       bool write_qword,
                       struct ilo_cp *cp)
{
   const uint32_t cmd = ILO_GPE_CMD(0x3, 0x2, 0x00);
   const uint8_t cmd_len = (write_qword) ? 5 : 4;
   const uint32_t read_domains = INTEL_DOMAIN_INSTRUCTION;
   const uint32_t write_domain = INTEL_DOMAIN_INSTRUCTION;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   if (dw1 & PIPE_CONTROL_CS_STALL) {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 73:
       *
       *     "1 of the following must also be set (when CS stall is set):
       *
       *       * Depth Cache Flush Enable ([0] of DW1)
       *       * Stall at Pixel Scoreboard ([1] of DW1)
       *       * Depth Stall ([13] of DW1)
       *       * Post-Sync Operation ([13] of DW1)
       *       * Render Target Cache Flush Enable ([12] of DW1)
       *       * Notify Enable ([8] of DW1)"
       *
       * From the Ivy Bridge PRM, volume 2 part 1, page 61:
       *
       *     "One of the following must also be set (when CS stall is set):
       *
       *       * Render Target Cache Flush Enable ([12] of DW1)
       *       * Depth Cache Flush Enable ([0] of DW1)
       *       * Stall at Pixel Scoreboard ([1] of DW1)
       *       * Depth Stall ([13] of DW1)
       *       * Post-Sync Operation ([13] of DW1)"
       */
      uint32_t bit_test = PIPE_CONTROL_WRITE_FLUSH |
                          PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                          PIPE_CONTROL_STALL_AT_SCOREBOARD |
                          PIPE_CONTROL_DEPTH_STALL;

      /* post-sync op */
      bit_test |= PIPE_CONTROL_WRITE_IMMEDIATE |
                  PIPE_CONTROL_WRITE_DEPTH_COUNT |
                  PIPE_CONTROL_WRITE_TIMESTAMP;

      /* Notify Enable only qualifies on GEN6 */
      if (dev->gen == ILO_GEN(6))
         bit_test |= PIPE_CONTROL_INTERRUPT_ENABLE;

      assert(dw1 & bit_test);
   }

   if (dw1 & PIPE_CONTROL_DEPTH_STALL) {
      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 73:
       *
       *     "Following bits must be clear (when Depth Stall is set):
       *
       *       * Render Target Cache Flush Enable ([12] of DW1)
       *       * Depth Cache Flush Enable ([0] of DW1)"
       */
      assert(!(dw1 & (PIPE_CONTROL_WRITE_FLUSH |
                      PIPE_CONTROL_DEPTH_CACHE_FLUSH)));
   }

   ilo_cp_begin(cp, cmd_len);
   ilo_cp_write(cp, cmd | (cmd_len - 2));
   ilo_cp_write(cp, dw1);
   ilo_cp_write_bo(cp, bo_offset, bo, read_domains, write_domain);
   ilo_cp_write(cp, 0);
   if (write_qword)
      ilo_cp_write(cp, 0);
   ilo_cp_end(cp);
}
3082
3083 static void
3084 gen6_emit_3DPRIMITIVE(const struct ilo_dev_info *dev,
3085 const struct pipe_draw_info *info,
3086 const struct ilo_ib_state *ib,
3087 bool rectlist,
3088 struct ilo_cp *cp)
3089 {
3090 const uint32_t cmd = ILO_GPE_CMD(0x3, 0x3, 0x00);
3091 const uint8_t cmd_len = 6;
3092 const int prim = (rectlist) ?
3093 _3DPRIM_RECTLIST : ilo_gpe_gen6_translate_pipe_prim(info->mode);
3094 const int vb_access = (info->indexed) ?
3095 GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
3096 GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
3097 const uint32_t vb_start = info->start +
3098 ((info->indexed) ? ib->draw_start_offset : 0);
3099
3100 ILO_GPE_VALID_GEN(dev, 6, 6);
3101
3102 ilo_cp_begin(cp, cmd_len);
3103 ilo_cp_write(cp, cmd | (cmd_len - 2) |
3104 prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
3105 vb_access);
3106 ilo_cp_write(cp, info->count);
3107 ilo_cp_write(cp, vb_start);
3108 ilo_cp_write(cp, info->instance_count);
3109 ilo_cp_write(cp, info->start_instance);
3110 ilo_cp_write(cp, info->index_bias);
3111 ilo_cp_end(cp);
3112 }
3113
/**
 * Build INTERFACE_DESCRIPTOR_DATA for \p num_ids compute kernels and
 * return the offset of the stolen state space.
 */
static uint32_t
gen6_emit_INTERFACE_DESCRIPTOR_DATA(const struct ilo_dev_info *dev,
                                    const struct ilo_shader_state **cs,
                                    uint32_t *sampler_state,
                                    int *num_samplers,
                                    uint32_t *binding_table_state,
                                    int *num_surfaces,
                                    int num_ids,
                                    struct ilo_cp *cp)
{
   /*
    * From the Sandy Bridge PRM, volume 2 part 2, page 34:
    *
    *     "(Interface Descriptor Total Length) This field must have the same
    *      alignment as the Interface Descriptor Data Start Address.
    *
    *      It must be DQWord (32-byte) aligned..."
    *
    * From the Sandy Bridge PRM, volume 2 part 2, page 35:
    *
    *     "(Interface Descriptor Data Start Address) Specifies the 32-byte
    *      aligned address of the Interface Descriptor data."
    */
   const int state_align = 32 / 4;
   const int state_len = (32 / 4) * num_ids;
   uint32_t state_offset, *dw;
   int i;

   ILO_GPE_VALID_GEN(dev, 6, 6);

   dw = ilo_cp_steal_ptr(cp, "INTERFACE_DESCRIPTOR_DATA",
         state_len, state_align, &state_offset);

   for (i = 0; i < num_ids; i++) {
      /* each descriptor is eight dwords */
      uint32_t *desc = &dw[i * 8];

      desc[0] = ilo_shader_get_kernel_offset(cs[i]);
      desc[1] = 1 << 18; /* SPF */
      desc[2] = sampler_state[i] |
                (num_samplers[i] + 3) / 4 << 2;
      desc[3] = binding_table_state[i] |
                num_surfaces[i];
      desc[4] = 0 << 16 |  /* CURBE Read Length */
                0;         /* CURBE Read Offset */
      desc[5] = 0; /* Barrier ID */
      desc[6] = 0;
      desc[7] = 0;
   }

   return state_offset;
}
3165
3166 static void
3167 viewport_get_guardband(const struct ilo_dev_info *dev,
3168 int center_x, int center_y,
3169 int *min_gbx, int *max_gbx,
3170 int *min_gby, int *max_gby)
3171 {
3172 /*
3173 * From the Sandy Bridge PRM, volume 2 part 1, page 234:
3174 *
3175 * "Per-Device Guardband Extents
3176 *
3177 * - Supported X,Y ScreenSpace "Guardband" Extent: [-16K,16K-1]
3178 * - Maximum Post-Clamp Delta (X or Y): 16K"
3179 *
3180 * "In addition, in order to be correctly rendered, objects must have a
3181 * screenspace bounding box not exceeding 8K in the X or Y direction.
3182 * This additional restriction must also be comprehended by software,
3183 * i.e., enforced by use of clipping."
3184 *
3185 * From the Ivy Bridge PRM, volume 2 part 1, page 248:
3186 *
3187 * "Per-Device Guardband Extents
3188 *
3189 * - Supported X,Y ScreenSpace "Guardband" Extent: [-32K,32K-1]
3190 * - Maximum Post-Clamp Delta (X or Y): N/A"
3191 *
3192 * "In addition, in order to be correctly rendered, objects must have a
3193 * screenspace bounding box not exceeding 8K in the X or Y direction.
3194 * This additional restriction must also be comprehended by software,
3195 * i.e., enforced by use of clipping."
3196 *
3197 * Combined, the bounding box of any object can not exceed 8K in both
3198 * width and height.
3199 *
3200 * Below we set the guardband as a squre of length 8K, centered at where
3201 * the viewport is. This makes sure all objects passing the GB test are
3202 * valid to the renderer, and those failing the XY clipping have a
3203 * better chance of passing the GB test.
3204 */
3205 const int max_extent = (dev->gen >= ILO_GEN(7)) ? 32768 : 16384;
3206 const int half_len = 8192 / 2;
3207
3208 /* make sure the guardband is within the valid range */
3209 if (center_x - half_len < -max_extent)
3210 center_x = -max_extent + half_len;
3211 else if (center_x + half_len > max_extent - 1)
3212 center_x = max_extent - half_len;
3213
3214 if (center_y - half_len < -max_extent)
3215 center_y = -max_extent + half_len;
3216 else if (center_y + half_len > max_extent - 1)
3217 center_y = max_extent - half_len;
3218
3219 *min_gbx = (float) (center_x - half_len);
3220 *max_gbx = (float) (center_x + half_len);
3221 *min_gby = (float) (center_y - half_len);
3222 *max_gby = (float) (center_y + half_len);
3223 }
3224
3225 void
3226 ilo_gpe_set_viewport_cso(const struct ilo_dev_info *dev,
3227 const struct pipe_viewport_state *state,
3228 struct ilo_viewport_cso *vp)
3229 {
3230 const float scale_x = fabs(state->scale[0]);
3231 const float scale_y = fabs(state->scale[1]);
3232 const float scale_z = fabs(state->scale[2]);
3233 int min_gbx, max_gbx, min_gby, max_gby;
3234
3235 ILO_GPE_VALID_GEN(dev, 6, 7);
3236
3237 viewport_get_guardband(dev,
3238 (int) state->translate[0],
3239 (int) state->translate[1],
3240 &min_gbx, &max_gbx, &min_gby, &max_gby);
3241
3242 /* matrix form */
3243 vp->m00 = state->scale[0];
3244 vp->m11 = state->scale[1];
3245 vp->m22 = state->scale[2];
3246 vp->m30 = state->translate[0];
3247 vp->m31 = state->translate[1];
3248 vp->m32 = state->translate[2];
3249
3250 /* guardband in NDC space */
3251 vp->min_gbx = ((float) min_gbx - state->translate[0]) / scale_x;
3252 vp->max_gbx = ((float) max_gbx - state->translate[0]) / scale_x;
3253 vp->min_gby = ((float) min_gby - state->translate[1]) / scale_y;
3254 vp->max_gby = ((float) max_gby - state->translate[1]) / scale_y;
3255
3256 /* viewport in screen space */
3257 vp->min_x = scale_x * -1.0f + state->translate[0];
3258 vp->max_x = scale_x * 1.0f + state->translate[0];
3259 vp->min_y = scale_y * -1.0f + state->translate[1];
3260 vp->max_y = scale_y * 1.0f + state->translate[1];
3261 vp->min_z = scale_z * -1.0f + state->translate[2];
3262 vp->max_z = scale_z * 1.0f + state->translate[2];
3263 }
3264
3265 static uint32_t
3266 gen6_emit_SF_VIEWPORT(const struct ilo_dev_info *dev,
3267 const struct ilo_viewport_cso *viewports,
3268 unsigned num_viewports,
3269 struct ilo_cp *cp)
3270 {
3271 const int state_align = 32 / 4;
3272 const int state_len = 8 * num_viewports;
3273 uint32_t state_offset, *dw;
3274 unsigned i;
3275
3276 ILO_GPE_VALID_GEN(dev, 6, 6);
3277
3278 /*
3279 * From the Sandy Bridge PRM, volume 2 part 1, page 262:
3280 *
3281 * "The viewport-specific state used by the SF unit (SF_VIEWPORT) is
3282 * stored as an array of up to 16 elements..."
3283 */
3284 assert(num_viewports && num_viewports <= 16);
3285
3286 dw = ilo_cp_steal_ptr(cp, "SF_VIEWPORT",
3287 state_len, state_align, &state_offset);
3288
3289 for (i = 0; i < num_viewports; i++) {
3290 const struct ilo_viewport_cso *vp = &viewports[i];
3291
3292 dw[0] = fui(vp->m00);
3293 dw[1] = fui(vp->m11);
3294 dw[2] = fui(vp->m22);
3295 dw[3] = fui(vp->m30);
3296 dw[4] = fui(vp->m31);
3297 dw[5] = fui(vp->m32);
3298 dw[6] = 0;
3299 dw[7] = 0;
3300
3301 dw += 8;
3302 }
3303
3304 return state_offset;
3305 }
3306
3307 static uint32_t
3308 gen6_emit_CLIP_VIEWPORT(const struct ilo_dev_info *dev,
3309 const struct ilo_viewport_cso *viewports,
3310 unsigned num_viewports,
3311 struct ilo_cp *cp)
3312 {
3313 const int state_align = 32 / 4;
3314 const int state_len = 4 * num_viewports;
3315 uint32_t state_offset, *dw;
3316 unsigned i;
3317
3318 ILO_GPE_VALID_GEN(dev, 6, 6);
3319
3320 /*
3321 * From the Sandy Bridge PRM, volume 2 part 1, page 193:
3322 *
3323 * "The viewport-related state is stored as an array of up to 16
3324 * elements..."
3325 */
3326 assert(num_viewports && num_viewports <= 16);
3327
3328 dw = ilo_cp_steal_ptr(cp, "CLIP_VIEWPORT",
3329 state_len, state_align, &state_offset);
3330
3331 for (i = 0; i < num_viewports; i++) {
3332 const struct ilo_viewport_cso *vp = &viewports[i];
3333
3334 dw[0] = fui(vp->min_gbx);
3335 dw[1] = fui(vp->max_gbx);
3336 dw[2] = fui(vp->min_gby);
3337 dw[3] = fui(vp->max_gby);
3338
3339 dw += 4;
3340 }
3341
3342 return state_offset;
3343 }
3344
3345 static uint32_t
3346 gen6_emit_CC_VIEWPORT(const struct ilo_dev_info *dev,
3347 const struct ilo_viewport_cso *viewports,
3348 unsigned num_viewports,
3349 struct ilo_cp *cp)
3350 {
3351 const int state_align = 32 / 4;
3352 const int state_len = 2 * num_viewports;
3353 uint32_t state_offset, *dw;
3354 unsigned i;
3355
3356 ILO_GPE_VALID_GEN(dev, 6, 7);
3357
3358 /*
3359 * From the Sandy Bridge PRM, volume 2 part 1, page 385:
3360 *
3361 * "The viewport state is stored as an array of up to 16 elements..."
3362 */
3363 assert(num_viewports && num_viewports <= 16);
3364
3365 dw = ilo_cp_steal_ptr(cp, "CC_VIEWPORT",
3366 state_len, state_align, &state_offset);
3367
3368 for (i = 0; i < num_viewports; i++) {
3369 const struct ilo_viewport_cso *vp = &viewports[i];
3370
3371 dw[0] = fui(vp->min_z);
3372 dw[1] = fui(vp->max_z);
3373
3374 dw += 2;
3375 }
3376
3377 return state_offset;
3378 }
3379
3380 static uint32_t
3381 gen6_emit_COLOR_CALC_STATE(const struct ilo_dev_info *dev,
3382 const struct pipe_stencil_ref *stencil_ref,
3383 float alpha_ref,
3384 const struct pipe_blend_color *blend_color,
3385 struct ilo_cp *cp)
3386 {
3387 const int state_align = 64 / 4;
3388 const int state_len = 6;
3389 uint32_t state_offset, *dw;
3390
3391 ILO_GPE_VALID_GEN(dev, 6, 7);
3392
3393 dw = ilo_cp_steal_ptr(cp, "COLOR_CALC_STATE",
3394 state_len, state_align, &state_offset);
3395
3396 dw[0] = stencil_ref->ref_value[0] << 24 |
3397 stencil_ref->ref_value[1] << 16 |
3398 BRW_ALPHATEST_FORMAT_UNORM8;
3399 dw[1] = float_to_ubyte(alpha_ref);
3400 dw[2] = fui(blend_color->color[0]);
3401 dw[3] = fui(blend_color->color[1]);
3402 dw[4] = fui(blend_color->color[2]);
3403 dw[5] = fui(blend_color->color[3]);
3404
3405 return state_offset;
3406 }
3407
3408 static int
3409 gen6_blend_factor_dst_alpha_forced_one(int factor)
3410 {
3411 switch (factor) {
3412 case BRW_BLENDFACTOR_DST_ALPHA:
3413 return BRW_BLENDFACTOR_ONE;
3414 case BRW_BLENDFACTOR_INV_DST_ALPHA:
3415 case BRW_BLENDFACTOR_SRC_ALPHA_SATURATE:
3416 return BRW_BLENDFACTOR_ZERO;
3417 default:
3418 return factor;
3419 }
3420 }
3421
3422 static uint32_t
3423 blend_get_rt_blend_enable(const struct ilo_dev_info *dev,
3424 const struct pipe_rt_blend_state *rt,
3425 bool dst_alpha_forced_one)
3426 {
3427 int rgb_src, rgb_dst, a_src, a_dst;
3428 uint32_t dw;
3429
3430 if (!rt->blend_enable)
3431 return 0;
3432
3433 rgb_src = gen6_translate_pipe_blendfactor(rt->rgb_src_factor);
3434 rgb_dst = gen6_translate_pipe_blendfactor(rt->rgb_dst_factor);
3435 a_src = gen6_translate_pipe_blendfactor(rt->alpha_src_factor);
3436 a_dst = gen6_translate_pipe_blendfactor(rt->alpha_dst_factor);
3437
3438 if (dst_alpha_forced_one) {
3439 rgb_src = gen6_blend_factor_dst_alpha_forced_one(rgb_src);
3440 rgb_dst = gen6_blend_factor_dst_alpha_forced_one(rgb_dst);
3441 a_src = gen6_blend_factor_dst_alpha_forced_one(a_src);
3442 a_dst = gen6_blend_factor_dst_alpha_forced_one(a_dst);
3443 }
3444
3445 dw = 1 << 31 |
3446 gen6_translate_pipe_blend(rt->alpha_func) << 26 |
3447 a_src << 20 |
3448 a_dst << 15 |
3449 gen6_translate_pipe_blend(rt->rgb_func) << 11 |
3450 rgb_src << 5 |
3451 rgb_dst;
3452
3453 if (rt->rgb_func != rt->alpha_func ||
3454 rgb_src != a_src || rgb_dst != a_dst)
3455 dw |= 1 << 30;
3456
3457 return dw;
3458 }
3459
/**
 * Translate a pipe_blend_state into an ilo_blend_state.  The
 * per-render-target dwords precomputed here are later combined into
 * BLEND_STATE elements by gen6_emit_BLEND_STATE().
 */
void
ilo_gpe_init_blend(const struct ilo_dev_info *dev,
                   const struct pipe_blend_state *state,
                   struct ilo_blend_state *blend)
{
   unsigned num_cso, i;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   if (state->independent_blend_enable) {
      num_cso = Elements(blend->cso);
   }
   else {
      /* only cso[0] is set up below; zero the rest */
      memset(blend->cso, 0, sizeof(blend->cso));
      num_cso = 1;
   }

   blend->independent_blend_enable = state->independent_blend_enable;
   blend->alpha_to_coverage = state->alpha_to_coverage;
   blend->dual_blend = false;

   for (i = 0; i < num_cso; i++) {
      const struct pipe_rt_blend_state *rt = &state->rt[i];
      struct ilo_blend_cso *cso = &blend->cso[i];
      bool dual_blend;

      /* payload[0]/payload[1] become BLEND_STATE DW0/DW1 */
      cso->payload[0] = 0;
      cso->payload[1] = BRW_RENDERTARGET_CLAMPRANGE_FORMAT << 2 |
                        0x3;

      /* NOTE(review): bits 27:24 appear to be the per-channel write
       * disables (A, R, G, B in that order) -- confirm against the PRM */
      if (!(rt->colormask & PIPE_MASK_A))
         cso->payload[1] |= 1 << 27;
      if (!(rt->colormask & PIPE_MASK_R))
         cso->payload[1] |= 1 << 26;
      if (!(rt->colormask & PIPE_MASK_G))
         cso->payload[1] |= 1 << 25;
      if (!(rt->colormask & PIPE_MASK_B))
         cso->payload[1] |= 1 << 24;

      /* dithering */
      if (state->dither)
         cso->payload[1] |= 1 << 12;

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 365:
       *
       *     "Color Buffer Blending and Logic Ops must not be enabled
       *      simultaneously, or behavior is UNDEFINED."
       *
       * Since state->logicop_enable takes precedence over rt->blend_enable,
       * no special care is needed.
       */
      if (state->logicop_enable) {
         cso->dw_logicop = 1 << 22 |
            gen6_translate_pipe_logicop(state->logicop_func) << 18;

         cso->dw_blend = 0;
         cso->dw_blend_dst_alpha_forced_one = 0;

         dual_blend = false;
      }
      else {
         cso->dw_logicop = 0;

         /* two variants: the regular one, and one with dst alpha folded to
          * 1.0f for color buffer formats whose HW format has an alpha
          * channel the pipe format lacks (see gen6_emit_BLEND_STATE()) */
         cso->dw_blend = blend_get_rt_blend_enable(dev, rt, false);
         cso->dw_blend_dst_alpha_forced_one =
            blend_get_rt_blend_enable(dev, rt, true);

         dual_blend = (rt->blend_enable &&
                       util_blend_state_is_dual(state, i));
      }

      cso->dw_alpha_mod = 0;

      if (state->alpha_to_coverage) {
         cso->dw_alpha_mod |= 1 << 31;

         /* GEN7 wants an extra bit set for alpha-to-coverage */
         if (dev->gen >= ILO_GEN(7))
            cso->dw_alpha_mod |= 1 << 29;
      }

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 378:
       *
       *     "If Dual Source Blending is enabled, this bit (AlphaToOne
       *      Enable) must be disabled."
       */
      if (state->alpha_to_one && !dual_blend)
         cso->dw_alpha_mod |= 1 << 30;

      if (dual_blend)
         blend->dual_blend = true;
   }
}
3553
/**
 * Emit the BLEND_STATE array, one element per bound color buffer, and
 * return its offset in the state buffer.  Returns 0 when there is nothing
 * to emit (no color buffers and alpha test disabled).
 *
 * The dwords precomputed by ilo_gpe_init_blend() are combined here with
 * per-render-target format information.
 */
static uint32_t
gen6_emit_BLEND_STATE(const struct ilo_dev_info *dev,
                      const struct ilo_blend_state *blend,
                      const struct ilo_fb_state *fb,
                      const struct pipe_alpha_state *alpha,
                      struct ilo_cp *cp)
{
   const int state_align = 64 / 4;
   int state_len;
   uint32_t state_offset, *dw;
   unsigned num_targets, i;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 376:
    *
    *     "The blend state is stored as an array of up to 8 elements..."
    */
   num_targets = fb->state.nr_cbufs;
   assert(num_targets <= 8);

   if (!num_targets) {
      if (!alpha->enabled)
         return 0;
      /* to be able to reference alpha func */
      num_targets = 1;
   }

   state_len = 2 * num_targets;

   dw = ilo_cp_steal_ptr(cp, "BLEND_STATE",
         state_len, state_align, &state_offset);

   for (i = 0; i < num_targets; i++) {
      /* without independent blend, every target uses cso[0] */
      const unsigned idx = (blend->independent_blend_enable) ? i : 0;
      const struct ilo_blend_cso *cso = &blend->cso[idx];
      const int num_samples = fb->num_samples;
      const struct util_format_description *format_desc =
         (idx < fb->state.nr_cbufs) ?
         util_format_description(fb->state.cbufs[idx]->format) : NULL;
      bool rt_is_unorm, rt_is_pure_integer, rt_dst_alpha_forced_one;

      /* classify the render target format */
      rt_is_unorm = true;
      rt_is_pure_integer = false;
      rt_dst_alpha_forced_one = false;

      if (format_desc) {
         int ch;

         switch (format_desc->format) {
         case PIPE_FORMAT_B8G8R8X8_UNORM:
            /* force alpha to one when the HW format has alpha */
            assert(ilo_translate_render_format(PIPE_FORMAT_B8G8R8X8_UNORM)
                  == BRW_SURFACEFORMAT_B8G8R8A8_UNORM);
            rt_dst_alpha_forced_one = true;
            break;
         default:
            break;
         }

         for (ch = 0; ch < 4; ch++) {
            if (format_desc->channel[ch].type == UTIL_FORMAT_TYPE_VOID)
               continue;

            if (format_desc->channel[ch].pure_integer) {
               rt_is_unorm = false;
               rt_is_pure_integer = true;
               break;
            }

            if (!format_desc->channel[ch].normalized ||
                format_desc->channel[ch].type != UTIL_FORMAT_TYPE_UNSIGNED)
               rt_is_unorm = false;
         }
      }

      dw[0] = cso->payload[0];
      dw[1] = cso->payload[1];

      /* blending is skipped for pure-integer formats */
      if (!rt_is_pure_integer) {
         if (rt_dst_alpha_forced_one)
            dw[0] |= cso->dw_blend_dst_alpha_forced_one;
         else
            dw[0] |= cso->dw_blend;
      }

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 365:
       *
       *     "Logic Ops are only supported on *_UNORM surfaces (excluding
       *      _SRGB variants), otherwise Logic Ops must be DISABLED."
       *
       * Since logicop is ignored for non-UNORM color buffers, no special
       * care is needed.
       */
      if (rt_is_unorm)
         dw[1] |= cso->dw_logicop;

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 356:
       *
       *     "When NumSamples = 1, AlphaToCoverage and AlphaToCoverage
       *      Dither both must be disabled."
       *
       * There is no such limitation on GEN7, or for AlphaToOne.  But GL
       * requires that anyway.
       */
      if (num_samples > 1)
         dw[1] |= cso->dw_alpha_mod;

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 382:
       *
       *     "Alpha Test can only be enabled if Pixel Shader outputs a
       *      float alpha value."
       */
      if (alpha->enabled && !rt_is_pure_integer) {
         dw[1] |= 1 << 16 |
                  gen6_translate_dsa_func(alpha->func) << 13;
      }

      dw += 2;
   }

   return state_offset;
}
3681
/**
 * Translate a pipe_depth_stencil_alpha_state into the three
 * DEPTH_STENCIL_STATE dwords, stored in dsa->payload, and stash the alpha
 * state for later use by the BLEND_STATE emission.
 */
void
ilo_gpe_init_dsa(const struct ilo_dev_info *dev,
                 const struct pipe_depth_stencil_alpha_state *state,
                 struct ilo_dsa_state *dsa)
{
   const struct pipe_depth_state *depth = &state->depth;
   const struct pipe_stencil_state *stencil0 = &state->stencil[0];
   const struct pipe_stencil_state *stencil1 = &state->stencil[1];
   uint32_t *dw;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /* copy alpha state for later use */
   dsa->alpha = state->alpha;

   STATIC_ASSERT(Elements(dsa->payload) >= 3);
   dw = dsa->payload;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 359:
    *
    *     "If the Depth Buffer is either undefined or does not have a surface
    *      format of D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT and separate
    *      stencil buffer is disabled, Stencil Test Enable must be DISABLED"
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 370:
    *
    *     "This field (Stencil Test Enable) cannot be enabled if
    *      Surface Format in 3DSTATE_DEPTH_BUFFER is set to D16_UNORM."
    *
    * TODO We do not check these yet.
    */
   if (stencil0->enabled) {
      /* DW0: front-face stencil enable (bit 31), func, and the three ops */
      dw[0] = 1 << 31 |
              gen6_translate_dsa_func(stencil0->func) << 28 |
              gen6_translate_pipe_stencil_op(stencil0->fail_op) << 25 |
              gen6_translate_pipe_stencil_op(stencil0->zfail_op) << 22 |
              gen6_translate_pipe_stencil_op(stencil0->zpass_op) << 19;
      /* bit 18 is the stencil write enable, shared by both faces */
      if (stencil0->writemask)
         dw[0] |= 1 << 18;

      /* DW1: front-face test and write masks */
      dw[1] = stencil0->valuemask << 24 |
              stencil0->writemask << 16;

      if (stencil1->enabled) {
         /* back-face stencil enable (bit 15), func, and ops */
         dw[0] |= 1 << 15 |
                  gen6_translate_dsa_func(stencil1->func) << 12 |
                  gen6_translate_pipe_stencil_op(stencil1->fail_op) << 9 |
                  gen6_translate_pipe_stencil_op(stencil1->zfail_op) << 6 |
                  gen6_translate_pipe_stencil_op(stencil1->zpass_op) << 3;
         /* same shared write-enable bit as the front face */
         if (stencil1->writemask)
            dw[0] |= 1 << 18;

         /* back-face test and write masks */
         dw[1] |= stencil1->valuemask << 8 |
                  stencil1->writemask;
      }
   }
   else {
      dw[0] = 0;
      dw[1] = 0;
   }

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 360:
    *
    *     "Enabling the Depth Test function without defining a Depth Buffer is
    *      UNDEFINED."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 375:
    *
    *     "A Depth Buffer must be defined before enabling writes to it, or
    *      operation is UNDEFINED."
    *
    * TODO We do not check these yet.
    */
   dw[2] = depth->enabled << 31 |
           depth->writemask << 26;
   /* program COMPAREFUNCTION_ALWAYS when the depth test is disabled */
   if (depth->enabled)
      dw[2] |= gen6_translate_dsa_func(depth->func) << 27;
   else
      dw[2] |= BRW_COMPAREFUNCTION_ALWAYS << 27;
}
3764
3765 static uint32_t
3766 gen6_emit_DEPTH_STENCIL_STATE(const struct ilo_dev_info *dev,
3767 const struct ilo_dsa_state *dsa,
3768 struct ilo_cp *cp)
3769 {
3770 const int state_align = 64 / 4;
3771 const int state_len = 3;
3772 uint32_t state_offset, *dw;
3773
3774
3775 ILO_GPE_VALID_GEN(dev, 6, 7);
3776
3777 dw = ilo_cp_steal_ptr(cp, "DEPTH_STENCIL_STATE",
3778 state_len, state_align, &state_offset);
3779
3780 dw[0] = dsa->payload[0];
3781 dw[1] = dsa->payload[1];
3782 dw[2] = dsa->payload[2];
3783
3784 return state_offset;
3785 }
3786
3787 void
3788 ilo_gpe_set_scissor(const struct ilo_dev_info *dev,
3789 unsigned start_slot,
3790 unsigned num_states,
3791 const struct pipe_scissor_state *states,
3792 struct ilo_scissor_state *scissor)
3793 {
3794 unsigned i;
3795
3796 ILO_GPE_VALID_GEN(dev, 6, 7);
3797
3798 for (i = 0; i < num_states; i++) {
3799 uint16_t min_x, min_y, max_x, max_y;
3800
3801 /* both max and min are inclusive in SCISSOR_RECT */
3802 if (states[i].minx < states[i].maxx &&
3803 states[i].miny < states[i].maxy) {
3804 min_x = states[i].minx;
3805 min_y = states[i].miny;
3806 max_x = states[i].maxx - 1;
3807 max_y = states[i].maxy - 1;
3808 }
3809 else {
3810 /* we have to make min greater than max */
3811 min_x = 1;
3812 min_y = 1;
3813 max_x = 0;
3814 max_y = 0;
3815 }
3816
3817 scissor->payload[start_slot * 2 + 0] = min_y << 16 | min_x;
3818 scissor->payload[start_slot * 2 + 1] = max_y << 16 | max_x;
3819 start_slot++;
3820 }
3821 }
3822
3823 void
3824 ilo_gpe_set_scissor_null(const struct ilo_dev_info *dev,
3825 struct ilo_scissor_state *scissor)
3826 {
3827 unsigned i;
3828
3829 for (i = 0; i < Elements(scissor->payload); i += 2) {
3830 scissor->payload[i + 0] = 1 << 16 | 1;
3831 scissor->payload[i + 1] = 0;
3832 }
3833 }
3834
3835 static uint32_t
3836 gen6_emit_SCISSOR_RECT(const struct ilo_dev_info *dev,
3837 const struct ilo_scissor_state *scissor,
3838 unsigned num_viewports,
3839 struct ilo_cp *cp)
3840 {
3841 const int state_align = 32 / 4;
3842 const int state_len = 2 * num_viewports;
3843 uint32_t state_offset, *dw;
3844
3845 ILO_GPE_VALID_GEN(dev, 6, 7);
3846
3847 /*
3848 * From the Sandy Bridge PRM, volume 2 part 1, page 263:
3849 *
3850 * "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
3851 * stored as an array of up to 16 elements..."
3852 */
3853 assert(num_viewports && num_viewports <= 16);
3854
3855 dw = ilo_cp_steal_ptr(cp, "SCISSOR_RECT",
3856 state_len, state_align, &state_offset);
3857
3858 memcpy(dw, scissor->payload, state_len * 4);
3859
3860 return state_offset;
3861 }
3862
/**
 * Emit a binding table -- an array of SURFACE_STATE offsets -- and return
 * its offset in the state buffer, or 0 when the table is empty.
 */
static uint32_t
gen6_emit_BINDING_TABLE_STATE(const struct ilo_dev_info *dev,
                              uint32_t *surface_states,
                              int num_surface_states,
                              struct ilo_cp *cp)
{
   const int state_align = 32 / 4;
   const int state_len = num_surface_states;
   uint32_t state_offset, *dw;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   /*
    * From the Sandy Bridge PRM, volume 4 part 1, page 69:
    *
    *     "It is stored as an array of up to 256 elements..."
    */
   assert(num_surface_states <= 256);

   if (!num_surface_states)
      return 0;

   dw = ilo_cp_steal_ptr(cp, "BINDING_TABLE_STATE",
         state_len, state_align, &state_offset);
   memcpy(dw, surface_states, state_len * sizeof(*dw));

   return state_offset;
}
3892
3893 void
3894 ilo_gpe_init_view_surface_null_gen6(const struct ilo_dev_info *dev,
3895 unsigned width, unsigned height,
3896 unsigned depth, unsigned level,
3897 struct ilo_view_surface *surf)
3898 {
3899 uint32_t *dw;
3900
3901 ILO_GPE_VALID_GEN(dev, 6, 6);
3902
3903 /*
3904 * From the Sandy Bridge PRM, volume 4 part 1, page 71:
3905 *
3906 * "A null surface will be used in instances where an actual surface is
3907 * not bound. When a write message is generated to a null surface, no
3908 * actual surface is written to. When a read message (including any
3909 * sampling engine message) is generated to a null surface, the result
3910 * is all zeros. Note that a null surface type is allowed to be used
3911 * with all messages, even if it is not specificially indicated as
3912 * supported. All of the remaining fields in surface state are ignored
3913 * for null surfaces, with the following exceptions:
3914 *
3915 * * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
3916 * depth buffer's corresponding state for all render target
3917 * surfaces, including null.
3918 * * Surface Format must be R8G8B8A8_UNORM."
3919 *
3920 * From the Sandy Bridge PRM, volume 4 part 1, page 82:
3921 *
3922 * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
3923 * true"
3924 */
3925
3926 STATIC_ASSERT(Elements(surf->payload) >= 6);
3927 dw = surf->payload;
3928
3929 dw[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
3930 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT;
3931
3932 dw[1] = 0;
3933
3934 dw[2] = (height - 1) << BRW_SURFACE_HEIGHT_SHIFT |
3935 (width - 1) << BRW_SURFACE_WIDTH_SHIFT |
3936 level << BRW_SURFACE_LOD_SHIFT;
3937
3938 dw[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
3939 BRW_SURFACE_TILED;
3940
3941 dw[4] = 0;
3942 dw[5] = 0;
3943
3944 surf->bo = NULL;
3945 }
3946
3947 void
3948 ilo_gpe_init_view_surface_for_buffer_gen6(const struct ilo_dev_info *dev,
3949 const struct ilo_buffer *buf,
3950 unsigned offset, unsigned size,
3951 unsigned struct_size,
3952 enum pipe_format elem_format,
3953 bool is_rt, bool render_cache_rw,
3954 struct ilo_view_surface *surf)
3955 {
3956 const int elem_size = util_format_get_blocksize(elem_format);
3957 int width, height, depth, pitch;
3958 int surface_format, num_entries;
3959 uint32_t *dw;
3960
3961 ILO_GPE_VALID_GEN(dev, 6, 6);
3962
3963 /*
3964 * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
3965 * structure in a buffer.
3966 */
3967
3968 surface_format = ilo_translate_color_format(elem_format);
3969
3970 num_entries = size / struct_size;
3971 /* see if there is enough space to fit another element */
3972 if (size % struct_size >= elem_size)
3973 num_entries++;
3974
3975 /*
3976 * From the Sandy Bridge PRM, volume 4 part 1, page 76:
3977 *
3978 * "For SURFTYPE_BUFFER render targets, this field (Surface Base
3979 * Address) specifies the base address of first element of the
3980 * surface. The surface is interpreted as a simple array of that
3981 * single element type. The address must be naturally-aligned to the
3982 * element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
3983 * must be 16-byte aligned).
3984 *
3985 * For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
3986 * the base address of the first element of the surface, computed in
3987 * software by adding the surface base address to the byte offset of
3988 * the element in the buffer."
3989 */
3990 if (is_rt)
3991 assert(offset % elem_size == 0);
3992
3993 /*
3994 * From the Sandy Bridge PRM, volume 4 part 1, page 77:
3995 *
3996 * "For buffer surfaces, the number of entries in the buffer ranges
3997 * from 1 to 2^27."
3998 */
3999 assert(num_entries >= 1 && num_entries <= 1 << 27);
4000
4001 /*
4002 * From the Sandy Bridge PRM, volume 4 part 1, page 81:
4003 *
4004 * "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
4005 * indicates the size of the structure."
4006 */
4007 pitch = struct_size;
4008
4009 pitch--;
4010 num_entries--;
4011 /* bits [6:0] */
4012 width = (num_entries & 0x0000007f);
4013 /* bits [19:7] */
4014 height = (num_entries & 0x000fff80) >> 7;
4015 /* bits [26:20] */
4016 depth = (num_entries & 0x07f00000) >> 20;
4017
4018 STATIC_ASSERT(Elements(surf->payload) >= 6);
4019 dw = surf->payload;
4020
4021 dw[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
4022 surface_format << BRW_SURFACE_FORMAT_SHIFT;
4023 if (render_cache_rw)
4024 dw[0] |= BRW_SURFACE_RC_READ_WRITE;
4025
4026 dw[1] = offset;
4027
4028 dw[2] = height << BRW_SURFACE_HEIGHT_SHIFT |
4029 width << BRW_SURFACE_WIDTH_SHIFT;
4030
4031 dw[3] = depth << BRW_SURFACE_DEPTH_SHIFT |
4032 pitch << BRW_SURFACE_PITCH_SHIFT;
4033
4034 dw[4] = 0;
4035 dw[5] = 0;
4036
4037 /* do not increment reference count */
4038 surf->bo = buf->bo;
4039 }
4040
4041 void
4042 ilo_gpe_init_view_surface_for_texture_gen6(const struct ilo_dev_info *dev,
4043 const struct ilo_texture *tex,
4044 enum pipe_format format,
4045 unsigned first_level,
4046 unsigned num_levels,
4047 unsigned first_layer,
4048 unsigned num_layers,
4049 bool is_rt, bool render_cache_rw,
4050 struct ilo_view_surface *surf)
4051 {
4052 int surface_type, surface_format;
4053 int width, height, depth, pitch, lod;
4054 unsigned layer_offset, x_offset, y_offset;
4055 uint32_t *dw;
4056
4057 ILO_GPE_VALID_GEN(dev, 6, 6);
4058
4059 surface_type = ilo_gpe_gen6_translate_texture(tex->base.target);
4060 assert(surface_type != BRW_SURFACE_BUFFER);
4061
4062 if (format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT && tex->separate_s8)
4063 format = PIPE_FORMAT_Z32_FLOAT;
4064
4065 if (is_rt)
4066 surface_format = ilo_translate_render_format(format);
4067 else
4068 surface_format = ilo_translate_texture_format(format);
4069 assert(surface_format >= 0);
4070
4071 width = tex->base.width0;
4072 height = tex->base.height0;
4073 depth = (tex->base.target == PIPE_TEXTURE_3D) ?
4074 tex->base.depth0 : num_layers;
4075 pitch = tex->bo_stride;
4076
4077 if (surface_type == BRW_SURFACE_CUBE) {
4078 /*
4079 * From the Sandy Bridge PRM, volume 4 part 1, page 81:
4080 *
4081 * "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
4082 * range of this field (Depth) is [0,84], indicating the number of
4083 * cube array elements (equal to the number of underlying 2D array
4084 * elements divided by 6). For other surfaces, this field must be
4085 * zero."
4086 *
4087 * When is_rt is true, we treat the texture as a 2D one to avoid the
4088 * restriction.
4089 */
4090 if (is_rt) {
4091 surface_type = BRW_SURFACE_2D;
4092 }
4093 else {
4094 assert(num_layers % 6 == 0);
4095 depth = num_layers / 6;
4096 }
4097 }
4098
4099 /* sanity check the size */
4100 assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
4101 switch (surface_type) {
4102 case BRW_SURFACE_1D:
4103 assert(width <= 8192 && height == 1 && depth <= 512);
4104 assert(first_layer < 512 && num_layers <= 512);
4105 break;
4106 case BRW_SURFACE_2D:
4107 assert(width <= 8192 && height <= 8192 && depth <= 512);
4108 assert(first_layer < 512 && num_layers <= 512);
4109 break;
4110 case BRW_SURFACE_3D:
4111 assert(width <= 2048 && height <= 2048 && depth <= 2048);
4112 assert(first_layer < 2048 && num_layers <= 512);
4113 if (!is_rt)
4114 assert(first_layer == 0);
4115 break;
4116 case BRW_SURFACE_CUBE:
4117 assert(width <= 8192 && height <= 8192 && depth <= 85);
4118 assert(width == height);
4119 assert(first_layer < 512 && num_layers <= 512);
4120 if (is_rt)
4121 assert(first_layer == 0);
4122 break;
4123 default:
4124 assert(!"unexpected surface type");
4125 break;
4126 }
4127
4128 /* non-full array spacing is supported only on GEN7+ */
4129 assert(tex->array_spacing_full);
4130 /* non-interleaved samples are supported only on GEN7+ */
4131 if (tex->base.nr_samples > 1)
4132 assert(tex->interleaved);
4133
4134 if (is_rt) {
4135 /*
4136 * Compute the offset to the layer manually.
4137 *
4138 * For rendering, the hardware requires LOD to be the same for all
4139 * render targets and the depth buffer. We need to compute the offset
4140 * to the layer manually and always set LOD to 0.
4141 */
4142 if (true) {
4143 /* we lose the capability for layered rendering */
4144 assert(num_layers == 1);
4145
4146 layer_offset = ilo_texture_get_slice_offset(tex,
4147 first_level, first_layer, &x_offset, &y_offset);
4148
4149 assert(x_offset % 4 == 0);
4150 assert(y_offset % 2 == 0);
4151 x_offset /= 4;
4152 y_offset /= 2;
4153
4154 /* derive the size for the LOD */
4155 width = u_minify(width, first_level);
4156 height = u_minify(height, first_level);
4157 if (surface_type == BRW_SURFACE_3D)
4158 depth = u_minify(depth, first_level);
4159 else
4160 depth = 1;
4161
4162 first_level = 0;
4163 first_layer = 0;
4164 lod = 0;
4165 }
4166 else {
4167 layer_offset = 0;
4168 x_offset = 0;
4169 y_offset = 0;
4170 }
4171
4172 assert(num_levels == 1);
4173 lod = first_level;
4174 }
4175 else {
4176 layer_offset = 0;
4177 x_offset = 0;
4178 y_offset = 0;
4179
4180 lod = num_levels - 1;
4181 }
4182
4183 /*
4184 * From the Sandy Bridge PRM, volume 4 part 1, page 76:
4185 *
4186 * "Linear render target surface base addresses must be element-size
4187 * aligned, for non-YUV surface formats, or a multiple of 2
4188 * element-sizes for YUV surface formats. Other linear surfaces have
4189 * no alignment requirements (byte alignment is sufficient.)"
4190 *
4191 * From the Sandy Bridge PRM, volume 4 part 1, page 81:
4192 *
4193 * "For linear render target surfaces, the pitch must be a multiple
4194 * of the element size for non-YUV surface formats. Pitch must be a
4195 * multiple of 2 * element size for YUV surface formats."
4196 *
4197 * From the Sandy Bridge PRM, volume 4 part 1, page 86:
4198 *
4199 * "For linear surfaces, this field (X Offset) must be zero"
4200 */
4201 if (tex->tiling == INTEL_TILING_NONE) {
4202 if (is_rt) {
4203 const int elem_size = util_format_get_blocksize(format);
4204 assert(layer_offset % elem_size == 0);
4205 assert(pitch % elem_size == 0);
4206 }
4207
4208 assert(!x_offset);
4209 }
4210
4211 STATIC_ASSERT(Elements(surf->payload) >= 6);
4212 dw = surf->payload;
4213
4214 dw[0] = surface_type << BRW_SURFACE_TYPE_SHIFT |
4215 surface_format << BRW_SURFACE_FORMAT_SHIFT |
4216 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT;
4217
4218 if (surface_type == BRW_SURFACE_CUBE && !is_rt) {
4219 dw[0] |= 1 << 9 |
4220 BRW_SURFACE_CUBEFACE_ENABLES;
4221 }
4222
4223 if (render_cache_rw)
4224 dw[0] |= BRW_SURFACE_RC_READ_WRITE;
4225
4226 dw[1] = layer_offset;
4227
4228 dw[2] = (height - 1) << BRW_SURFACE_HEIGHT_SHIFT |
4229 (width - 1) << BRW_SURFACE_WIDTH_SHIFT |
4230 lod << BRW_SURFACE_LOD_SHIFT;
4231
4232 dw[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
4233 (pitch - 1) << BRW_SURFACE_PITCH_SHIFT |
4234 ilo_gpe_gen6_translate_winsys_tiling(tex->tiling);
4235
4236 dw[4] = first_level << BRW_SURFACE_MIN_LOD_SHIFT |
4237 first_layer << 17 |
4238 (num_layers - 1) << 8 |
4239 ((tex->base.nr_samples > 1) ? BRW_SURFACE_MULTISAMPLECOUNT_4 :
4240 BRW_SURFACE_MULTISAMPLECOUNT_1);
4241
4242 dw[5] = x_offset << BRW_SURFACE_X_OFFSET_SHIFT |
4243 y_offset << BRW_SURFACE_Y_OFFSET_SHIFT;
4244 if (tex->valign_4)
4245 dw[5] |= BRW_SURFACE_VERTICAL_ALIGN_ENABLE;
4246
4247 /* do not increment reference count */
4248 surf->bo = tex->bo;
4249 }
4250
4251 static uint32_t
4252 gen6_emit_SURFACE_STATE(const struct ilo_dev_info *dev,
4253 const struct ilo_view_surface *surf,
4254 bool for_render,
4255 struct ilo_cp *cp)
4256 {
4257 const int state_align = 32 / 4;
4258 const int state_len = (dev->gen >= ILO_GEN(7)) ? 8 : 6;
4259 uint32_t state_offset;
4260 uint32_t read_domains, write_domain;
4261
4262 ILO_GPE_VALID_GEN(dev, 6, 7);
4263
4264 if (for_render) {
4265 read_domains = INTEL_DOMAIN_RENDER;
4266 write_domain = INTEL_DOMAIN_RENDER;
4267 }
4268 else {
4269 read_domains = INTEL_DOMAIN_SAMPLER;
4270 write_domain = 0;
4271 }
4272
4273 ilo_cp_steal(cp, "SURFACE_STATE", state_len, state_align, &state_offset);
4274
4275 STATIC_ASSERT(Elements(surf->payload) >= 8);
4276
4277 ilo_cp_write(cp, surf->payload[0]);
4278 ilo_cp_write_bo(cp, surf->payload[1],
4279 surf->bo, read_domains, write_domain);
4280 ilo_cp_write(cp, surf->payload[2]);
4281 ilo_cp_write(cp, surf->payload[3]);
4282 ilo_cp_write(cp, surf->payload[4]);
4283 ilo_cp_write(cp, surf->payload[5]);
4284
4285 if (dev->gen >= ILO_GEN(7)) {
4286 ilo_cp_write(cp, surf->payload[6]);
4287 ilo_cp_write(cp, surf->payload[7]);
4288 }
4289
4290 ilo_cp_end(cp);
4291
4292 return state_offset;
4293 }
4294
4295 static uint32_t
4296 gen6_emit_so_SURFACE_STATE(const struct ilo_dev_info *dev,
4297 const struct pipe_stream_output_target *so,
4298 const struct pipe_stream_output_info *so_info,
4299 int so_index,
4300 struct ilo_cp *cp)
4301 {
4302 struct ilo_buffer *buf = ilo_buffer(so->buffer);
4303 unsigned bo_offset, struct_size;
4304 enum pipe_format elem_format;
4305 struct ilo_view_surface surf;
4306
4307 ILO_GPE_VALID_GEN(dev, 6, 6);
4308
4309 bo_offset = so->buffer_offset + so_info->output[so_index].dst_offset * 4;
4310 struct_size = so_info->stride[so_info->output[so_index].output_buffer] * 4;
4311
4312 switch (so_info->output[so_index].num_components) {
4313 case 1:
4314 elem_format = PIPE_FORMAT_R32_FLOAT;
4315 break;
4316 case 2:
4317 elem_format = PIPE_FORMAT_R32G32_FLOAT;
4318 break;
4319 case 3:
4320 elem_format = PIPE_FORMAT_R32G32B32_FLOAT;
4321 break;
4322 case 4:
4323 elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
4324 break;
4325 default:
4326 assert(!"unexpected SO components length");
4327 elem_format = PIPE_FORMAT_R32_FLOAT;
4328 break;
4329 }
4330
4331 ilo_gpe_init_view_surface_for_buffer_gen6(dev, buf, bo_offset, so->buffer_size,
4332 struct_size, elem_format, false, true, &surf);
4333
4334 return gen6_emit_SURFACE_STATE(dev, &surf, false, cp);
4335 }
4336
4337 static void
4338 sampler_init_border_color_gen6(const struct ilo_dev_info *dev,
4339 const union pipe_color_union *color,
4340 uint32_t *dw, int num_dwords)
4341 {
4342 float rgba[4] = {
4343 color->f[0], color->f[1], color->f[2], color->f[3],
4344 };
4345
4346 ILO_GPE_VALID_GEN(dev, 6, 6);
4347
4348 assert(num_dwords >= 12);
4349
4350 /*
4351 * This state is not documented in the Sandy Bridge PRM, but in the
4352 * Ironlake PRM. SNORM8 seems to be in DW11 instead of DW1.
4353 */
4354
4355 /* IEEE_FP */
4356 dw[1] = fui(rgba[0]);
4357 dw[2] = fui(rgba[1]);
4358 dw[3] = fui(rgba[2]);
4359 dw[4] = fui(rgba[3]);
4360
4361 /* FLOAT_16 */
4362 dw[5] = util_float_to_half(rgba[0]) |
4363 util_float_to_half(rgba[1]) << 16;
4364 dw[6] = util_float_to_half(rgba[2]) |
4365 util_float_to_half(rgba[3]) << 16;
4366
4367 /* clamp to [-1.0f, 1.0f] */
4368 rgba[0] = CLAMP(rgba[0], -1.0f, 1.0f);
4369 rgba[1] = CLAMP(rgba[1], -1.0f, 1.0f);
4370 rgba[2] = CLAMP(rgba[2], -1.0f, 1.0f);
4371 rgba[3] = CLAMP(rgba[3], -1.0f, 1.0f);
4372
4373 /* SNORM16 */
4374 dw[9] = (int16_t) util_iround(rgba[0] * 32767.0f) |
4375 (int16_t) util_iround(rgba[1] * 32767.0f) << 16;
4376 dw[10] = (int16_t) util_iround(rgba[2] * 32767.0f) |
4377 (int16_t) util_iround(rgba[3] * 32767.0f) << 16;
4378
4379 /* SNORM8 */
4380 dw[11] = (int8_t) util_iround(rgba[0] * 127.0f) |
4381 (int8_t) util_iround(rgba[1] * 127.0f) << 8 |
4382 (int8_t) util_iround(rgba[2] * 127.0f) << 16 |
4383 (int8_t) util_iround(rgba[3] * 127.0f) << 24;
4384
4385 /* clamp to [0.0f, 1.0f] */
4386 rgba[0] = CLAMP(rgba[0], 0.0f, 1.0f);
4387 rgba[1] = CLAMP(rgba[1], 0.0f, 1.0f);
4388 rgba[2] = CLAMP(rgba[2], 0.0f, 1.0f);
4389 rgba[3] = CLAMP(rgba[3], 0.0f, 1.0f);
4390
4391 /* UNORM8 */
4392 dw[0] = (uint8_t) util_iround(rgba[0] * 255.0f) |
4393 (uint8_t) util_iround(rgba[1] * 255.0f) << 8 |
4394 (uint8_t) util_iround(rgba[2] * 255.0f) << 16 |
4395 (uint8_t) util_iround(rgba[3] * 255.0f) << 24;
4396
4397 /* UNORM16 */
4398 dw[7] = (uint16_t) util_iround(rgba[0] * 65535.0f) |
4399 (uint16_t) util_iround(rgba[1] * 65535.0f) << 16;
4400 dw[8] = (uint16_t) util_iround(rgba[2] * 65535.0f) |
4401 (uint16_t) util_iround(rgba[3] * 65535.0f) << 16;
4402 }
4403
/**
 * Initialize a sampler CSO from a Gallium sampler state.
 *
 * Translates \p state into SAMPLER_STATE payload dwords for GEN6/GEN7.
 * payload[0..2] hold DW0, DW1, and DW3; DW2 (the border color pointer) is
 * filled in at emit time.  The filter and wrap fields depend on the texture
 * target of the paired sampler view, so they are kept in the separate
 * dw_filter*/dw_wrap* variants and or'ed in by gen6_emit_SAMPLER_STATE().
 */
void
ilo_gpe_init_sampler_cso(const struct ilo_dev_info *dev,
                         const struct pipe_sampler_state *state,
                         struct ilo_sampler_cso *sampler)
{
   int mip_filter, min_filter, mag_filter, max_aniso;
   int lod_bias, max_lod, min_lod;
   int wrap_s, wrap_t, wrap_r, wrap_cube;
   bool clamp_is_to_edge;
   uint32_t dw0, dw1, dw3;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   memset(sampler, 0, sizeof(*sampler));

   mip_filter = gen6_translate_tex_mipfilter(state->min_mip_filter);
   min_filter = gen6_translate_tex_filter(state->min_img_filter);
   mag_filter = gen6_translate_tex_filter(state->mag_img_filter);

   sampler->anisotropic = state->max_anisotropy;

   /* hardware encodes the max anisotropy ratio as (ratio / 2) - 1 */
   if (state->max_anisotropy >= 2 && state->max_anisotropy <= 16)
      max_aniso = state->max_anisotropy / 2 - 1;
   else if (state->max_anisotropy > 16)
      max_aniso = BRW_ANISORATIO_16;
   else
      max_aniso = BRW_ANISORATIO_2;

   /*
    *
    * Here is how the hardware calculate per-pixel LOD, from my reading of the
    * PRMs:
    *
    *  1) LOD is set to log2(ratio of texels to pixels) if not specified in
    *     other ways.  The number of texels is measured using level
    *     SurfMinLod.
    *  2) Bias is added to LOD.
    *  3) LOD is clamped to [MinLod, MaxLod], and the clamped value is
    *     compared with Base to determine whether magnification or
    *     minification is needed.  (if preclamp is disabled, LOD is compared
    *     with Base before clamping)
    *  4) If magnification is needed, or no mipmapping is requested, LOD is
    *     set to floor(MinLod).
    *  5) LOD is clamped to [0, MIPCnt], and SurfMinLod is added to LOD.
    *
    * With Gallium interface, Base is always zero and
    * pipe_sampler_view::u.tex.first_level specifies SurfMinLod.
    */
   /* LOD fields are fixed-point; the format differs between GEN6 and GEN7 */
   if (dev->gen >= ILO_GEN(7)) {
      const float scale = 256.0f;

      /* [-16.0, 16.0) in S4.8 */
      lod_bias = (int)
         (CLAMP(state->lod_bias, -16.0f, 15.9f) * scale);
      lod_bias &= 0x1fff;

      /* [0.0, 14.0] in U4.8 */
      max_lod = (int) (CLAMP(state->max_lod, 0.0f, 14.0f) * scale);
      min_lod = (int) (CLAMP(state->min_lod, 0.0f, 14.0f) * scale);
   }
   else {
      const float scale = 64.0f;

      /* [-16.0, 16.0) in S4.6 */
      lod_bias = (int)
         (CLAMP(state->lod_bias, -16.0f, 15.9f) * scale);
      lod_bias &= 0x7ff;

      /* [0.0, 13.0] in U4.6 */
      max_lod = (int) (CLAMP(state->max_lod, 0.0f, 13.0f) * scale);
      min_lod = (int) (CLAMP(state->min_lod, 0.0f, 13.0f) * scale);
   }

   /*
    * We want LOD to be clamped to determine magnification/minification, and
    * get set to zero when it is magnification or when mipmapping is disabled.
    * The hardware would set LOD to floor(MinLod) and that is a problem when
    * MinLod is greater than or equal to 1.0f.
    *
    * With Base being zero, it is always minification when MinLod is non-zero.
    * To achieve our goal, we just need to set MinLod to zero and set
    * MagFilter to MinFilter when mipmapping is disabled.
    */
   if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE && min_lod) {
      min_lod = 0;
      mag_filter = min_filter;
   }

   /*
    * For nearest filtering, PIPE_TEX_WRAP_CLAMP means
    * PIPE_TEX_WRAP_CLAMP_TO_EDGE;  for linear filtering, PIPE_TEX_WRAP_CLAMP
    * means PIPE_TEX_WRAP_CLAMP_TO_BORDER while additionally clamping the
    * texture coordinates to [0.0, 1.0].
    *
    * The clamping will be taken care of in the shaders.  There are two
    * filters here, but let the minification one has a say.
    */
   clamp_is_to_edge = (state->min_img_filter == PIPE_TEX_FILTER_NEAREST);
   if (!clamp_is_to_edge) {
      /* tell the shader to saturate the coordinates on these axes */
      sampler->saturate_s = (state->wrap_s == PIPE_TEX_WRAP_CLAMP);
      sampler->saturate_t = (state->wrap_t == PIPE_TEX_WRAP_CLAMP);
      sampler->saturate_r = (state->wrap_r == PIPE_TEX_WRAP_CLAMP);
   }

   /* determine wrap s/t/r */
   wrap_s = gen6_translate_tex_wrap(state->wrap_s, clamp_is_to_edge);
   wrap_t = gen6_translate_tex_wrap(state->wrap_t, clamp_is_to_edge);
   wrap_r = gen6_translate_tex_wrap(state->wrap_r, clamp_is_to_edge);

   /*
    * From the Sandy Bridge PRM, volume 4 part 1, page 107:
    *
    *     "When using cube map texture coordinates, only TEXCOORDMODE_CLAMP
    *      and TEXCOORDMODE_CUBE settings are valid, and each TC component
    *      must have the same Address Control mode."
    *
    * From the Ivy Bridge PRM, volume 4 part 1, page 96:
    *
    *     "This field (Cube Surface Control Mode) must be set to
    *      CUBECTRLMODE_PROGRAMMED"
    *
    * Therefore, we cannot use "Cube Surface Control Mode" for semless cube
    * map filtering.
    */
   if (state->seamless_cube_map &&
       (state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
        state->mag_img_filter != PIPE_TEX_FILTER_NEAREST)) {
      wrap_cube = BRW_TEXCOORDMODE_CUBE;
   }
   else {
      wrap_cube = BRW_TEXCOORDMODE_CLAMP;
   }

   if (!state->normalized_coords) {
      /*
       * From the Ivy Bridge PRM, volume 4 part 1, page 98:
       *
       *     "The following state must be set as indicated if this field
       *      (Non-normalized Coordinate Enable) is enabled:
       *
       *      - TCX/Y/Z Address Control Mode must be TEXCOORDMODE_CLAMP,
       *        TEXCOORDMODE_HALF_BORDER, or TEXCOORDMODE_CLAMP_BORDER.
       *      - Surface Type must be SURFTYPE_2D or SURFTYPE_3D.
       *      - Mag Mode Filter must be MAPFILTER_NEAREST or
       *        MAPFILTER_LINEAR.
       *      - Min Mode Filter must be MAPFILTER_NEAREST or
       *        MAPFILTER_LINEAR.
       *      - Mip Mode Filter must be MIPFILTER_NONE.
       *      - Min LOD must be 0.
       *      - Max LOD must be 0.
       *      - MIP Count must be 0.
       *      - Surface Min LOD must be 0.
       *      - Texture LOD Bias must be 0."
       */
      assert(wrap_s == BRW_TEXCOORDMODE_CLAMP ||
             wrap_s == BRW_TEXCOORDMODE_CLAMP_BORDER);
      assert(wrap_t == BRW_TEXCOORDMODE_CLAMP ||
             wrap_t == BRW_TEXCOORDMODE_CLAMP_BORDER);
      assert(wrap_r == BRW_TEXCOORDMODE_CLAMP ||
             wrap_r == BRW_TEXCOORDMODE_CLAMP_BORDER);

      assert(mag_filter == BRW_MAPFILTER_NEAREST ||
             mag_filter == BRW_MAPFILTER_LINEAR);
      assert(min_filter == BRW_MAPFILTER_NEAREST ||
             min_filter == BRW_MAPFILTER_LINEAR);

      /* work around a bug in util_blitter */
      mip_filter = BRW_MIPFILTER_NONE;

      assert(mip_filter == BRW_MIPFILTER_NONE);
   }

   /* pack SAMPLER_STATE DW0/DW1/DW3; the bit layouts differ per generation */
   if (dev->gen >= ILO_GEN(7)) {
      dw0 = 1 << 28 |
            mip_filter << 20 |
            lod_bias << 1;

      sampler->dw_filter = mag_filter << 17 |
                           min_filter << 14;

      sampler->dw_filter_aniso = BRW_MAPFILTER_ANISOTROPIC << 17 |
                                 BRW_MAPFILTER_ANISOTROPIC << 14 |
                                 1;

      dw1 = min_lod << 20 |
            max_lod << 8;

      if (state->compare_mode != PIPE_TEX_COMPARE_NONE)
         dw1 |= gen6_translate_shadow_func(state->compare_func) << 1;

      dw3 = max_aniso << 19;

      /* round the coordinates for linear filtering */
      if (min_filter != BRW_MAPFILTER_NEAREST) {
         dw3 |= (BRW_ADDRESS_ROUNDING_ENABLE_U_MIN |
                 BRW_ADDRESS_ROUNDING_ENABLE_V_MIN |
                 BRW_ADDRESS_ROUNDING_ENABLE_R_MIN) << 13;
      }
      if (mag_filter != BRW_MAPFILTER_NEAREST) {
         dw3 |= (BRW_ADDRESS_ROUNDING_ENABLE_U_MAG |
                 BRW_ADDRESS_ROUNDING_ENABLE_V_MAG |
                 BRW_ADDRESS_ROUNDING_ENABLE_R_MAG) << 13;
      }

      if (!state->normalized_coords)
         dw3 |= 1 << 10;

      sampler->dw_wrap = wrap_s << 6 |
                         wrap_t << 3 |
                         wrap_r;

      /*
       * As noted in the classic i965 driver, the HW may still reference
       * wrap_t and wrap_r for 1D textures.  We need to set them to a safe
       * mode
       */
      sampler->dw_wrap_1d = wrap_s << 6 |
                            BRW_TEXCOORDMODE_WRAP << 3 |
                            BRW_TEXCOORDMODE_WRAP;

      sampler->dw_wrap_cube = wrap_cube << 6 |
                              wrap_cube << 3 |
                              wrap_cube;

      STATIC_ASSERT(Elements(sampler->payload) >= 7);

      sampler->payload[0] = dw0;
      sampler->payload[1] = dw1;
      sampler->payload[2] = dw3;

      /* GEN7 stores the raw border color after DW0/DW1/DW3 */
      memcpy(&sampler->payload[3],
            state->border_color.ui, sizeof(state->border_color.ui));
   }
   else {
      dw0 = 1 << 28 |
            mip_filter << 20 |
            lod_bias << 3;

      if (state->compare_mode != PIPE_TEX_COMPARE_NONE)
         dw0 |= gen6_translate_shadow_func(state->compare_func);

      sampler->dw_filter = (min_filter != mag_filter) << 27 |
                           mag_filter << 17 |
                           min_filter << 14;

      sampler->dw_filter_aniso = BRW_MAPFILTER_ANISOTROPIC << 17 |
                                 BRW_MAPFILTER_ANISOTROPIC << 14;

      dw1 = min_lod << 22 |
            max_lod << 12;

      sampler->dw_wrap = wrap_s << 6 |
                         wrap_t << 3 |
                         wrap_r;

      sampler->dw_wrap_1d = wrap_s << 6 |
                            BRW_TEXCOORDMODE_WRAP << 3 |
                            BRW_TEXCOORDMODE_WRAP;

      sampler->dw_wrap_cube = wrap_cube << 6 |
                              wrap_cube << 3 |
                              wrap_cube;

      dw3 = max_aniso << 19;

      /* round the coordinates for linear filtering */
      if (min_filter != BRW_MAPFILTER_NEAREST) {
         dw3 |= (BRW_ADDRESS_ROUNDING_ENABLE_U_MIN |
                 BRW_ADDRESS_ROUNDING_ENABLE_V_MIN |
                 BRW_ADDRESS_ROUNDING_ENABLE_R_MIN) << 13;
      }
      if (mag_filter != BRW_MAPFILTER_NEAREST) {
         dw3 |= (BRW_ADDRESS_ROUNDING_ENABLE_U_MAG |
                 BRW_ADDRESS_ROUNDING_ENABLE_V_MAG |
                 BRW_ADDRESS_ROUNDING_ENABLE_R_MAG) << 13;
      }

      if (!state->normalized_coords)
         dw3 |= 1;

      STATIC_ASSERT(Elements(sampler->payload) >= 15);

      sampler->payload[0] = dw0;
      sampler->payload[1] = dw1;
      sampler->payload[2] = dw3;

      /* GEN6 needs the border color prepacked in all formats (12 dwords) */
      sampler_init_border_color_gen6(dev,
            &state->border_color, &sampler->payload[3], 12);
   }
}
4694
4695 static uint32_t
4696 gen6_emit_SAMPLER_STATE(const struct ilo_dev_info *dev,
4697 const struct ilo_sampler_cso * const *samplers,
4698 const struct pipe_sampler_view * const *views,
4699 const uint32_t *sampler_border_colors,
4700 int num_samplers,
4701 struct ilo_cp *cp)
4702 {
4703 const int state_align = 32 / 4;
4704 const int state_len = 4 * num_samplers;
4705 uint32_t state_offset, *dw;
4706 int i;
4707
4708 ILO_GPE_VALID_GEN(dev, 6, 7);
4709
4710 /*
4711 * From the Sandy Bridge PRM, volume 4 part 1, page 101:
4712 *
4713 * "The sampler state is stored as an array of up to 16 elements..."
4714 */
4715 assert(num_samplers <= 16);
4716
4717 if (!num_samplers)
4718 return 0;
4719
4720 dw = ilo_cp_steal_ptr(cp, "SAMPLER_STATE",
4721 state_len, state_align, &state_offset);
4722
4723 for (i = 0; i < num_samplers; i++) {
4724 const struct ilo_sampler_cso *sampler = samplers[i];
4725 const struct pipe_sampler_view *view = views[i];
4726 const uint32_t border_color = sampler_border_colors[i];
4727 uint32_t dw_filter, dw_wrap;
4728
4729 /* there may be holes */
4730 if (!sampler || !view) {
4731 /* disabled sampler */
4732 dw[0] = 1 << 31;
4733 dw[1] = 0;
4734 dw[2] = 0;
4735 dw[3] = 0;
4736 dw += 4;
4737
4738 continue;
4739 }
4740
4741 /* determine filter and wrap modes */
4742 switch (view->texture->target) {
4743 case PIPE_TEXTURE_1D:
4744 dw_filter = (sampler->anisotropic) ?
4745 sampler->dw_filter_aniso : sampler->dw_filter;
4746 dw_wrap = sampler->dw_wrap_1d;
4747 break;
4748 case PIPE_TEXTURE_3D:
4749 /*
4750 * From the Sandy Bridge PRM, volume 4 part 1, page 103:
4751 *
4752 * "Only MAPFILTER_NEAREST and MAPFILTER_LINEAR are supported for
4753 * surfaces of type SURFTYPE_3D."
4754 */
4755 dw_filter = sampler->dw_filter;
4756 dw_wrap = sampler->dw_wrap;
4757 break;
4758 case PIPE_TEXTURE_CUBE:
4759 dw_filter = (sampler->anisotropic) ?
4760 sampler->dw_filter_aniso : sampler->dw_filter;
4761 dw_wrap = sampler->dw_wrap_cube;
4762 break;
4763 default:
4764 dw_filter = (sampler->anisotropic) ?
4765 sampler->dw_filter_aniso : sampler->dw_filter;
4766 dw_wrap = sampler->dw_wrap;
4767 break;
4768 }
4769
4770 dw[0] = sampler->payload[0];
4771 dw[1] = sampler->payload[1];
4772 assert(!(border_color & 0x1f));
4773 dw[2] = border_color;
4774 dw[3] = sampler->payload[2];
4775
4776 dw[0] |= dw_filter;
4777
4778 if (dev->gen >= ILO_GEN(7)) {
4779 dw[3] |= dw_wrap;
4780 }
4781 else {
4782 /*
4783 * From the Sandy Bridge PRM, volume 4 part 1, page 21:
4784 *
4785 * "[DevSNB] Errata: Incorrect behavior is observed in cases
4786 * where the min and mag mode filters are different and
4787 * SurfMinLOD is nonzero. The determination of MagMode uses the
4788 * following equation instead of the one in the above
4789 * pseudocode: MagMode = (LOD + SurfMinLOD - Base <= 0)"
4790 *
4791 * As a way to work around that, we set Base to
4792 * view->u.tex.first_level.
4793 */
4794 dw[0] |= view->u.tex.first_level << 22;
4795
4796 dw[1] |= dw_wrap;
4797 }
4798
4799 dw += 4;
4800 }
4801
4802 return state_offset;
4803 }
4804
4805 static uint32_t
4806 gen6_emit_SAMPLER_BORDER_COLOR_STATE(const struct ilo_dev_info *dev,
4807 const struct ilo_sampler_cso *sampler,
4808 struct ilo_cp *cp)
4809 {
4810 const int state_align = 32 / 4;
4811 const int state_len = (dev->gen >= ILO_GEN(7)) ? 4 : 12;
4812 uint32_t state_offset, *dw;
4813
4814 ILO_GPE_VALID_GEN(dev, 6, 7);
4815
4816 dw = ilo_cp_steal_ptr(cp, "SAMPLER_BORDER_COLOR_STATE",
4817 state_len, state_align, &state_offset);
4818
4819 /* see ilo_gpe_init_sampler_cso() */
4820 memcpy(dw, &sampler->payload[3], state_len * 4);
4821
4822 return state_offset;
4823 }
4824
/**
 * Allocate state space for a push constant buffer of \p size bytes.
 *
 * When \p pcb is non-NULL, it receives a pointer to the buffer so the
 * caller can fill it.  Returns the offset of the allocated space.
 */
static uint32_t
gen6_emit_push_constant_buffer(const struct ilo_dev_info *dev,
                               int size, void **pcb,
                               struct ilo_cp *cp)
{
   /*
    * For all VS, GS, FS, and CS push constant buffers, they must be aligned
    * to 32 bytes, and their sizes are specified in 256-bit units.
    */
   const int state_align = 32 / 4;
   const int state_len = align(size, 32) / 4;
   const int padded_size = state_len * 4;
   uint32_t state_offset;
   char *buf;

   ILO_GPE_VALID_GEN(dev, 6, 7);

   buf = ilo_cp_steal_ptr(cp, "PUSH_CONSTANT_BUFFER",
         state_len, state_align, &state_offset);

   /* zero out the tail added by the 32-byte alignment */
   if (size < padded_size)
      memset(buf + size, 0, padded_size - size);

   if (pcb)
      *pcb = buf;

   return state_offset;
}
4853
4854 static int
4855 gen6_estimate_command_size(const struct ilo_dev_info *dev,
4856 enum ilo_gpe_gen6_command cmd,
4857 int arg)
4858 {
4859 static const struct {
4860 int header;
4861 int body;
4862 } gen6_command_size_table[ILO_GPE_GEN6_COMMAND_COUNT] = {
4863 [ILO_GPE_GEN6_STATE_BASE_ADDRESS] = { 0, 10 },
4864 [ILO_GPE_GEN6_STATE_SIP] = { 0, 2 },
4865 [ILO_GPE_GEN6_3DSTATE_VF_STATISTICS] = { 0, 1 },
4866 [ILO_GPE_GEN6_PIPELINE_SELECT] = { 0, 1 },
4867 [ILO_GPE_GEN6_MEDIA_VFE_STATE] = { 0, 8 },
4868 [ILO_GPE_GEN6_MEDIA_CURBE_LOAD] = { 0, 4 },
4869 [ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD] = { 0, 4 },
4870 [ILO_GPE_GEN6_MEDIA_GATEWAY_STATE] = { 0, 2 },
4871 [ILO_GPE_GEN6_MEDIA_STATE_FLUSH] = { 0, 2 },
4872 [ILO_GPE_GEN6_MEDIA_OBJECT_WALKER] = { 17, 1 },
4873 [ILO_GPE_GEN6_3DSTATE_BINDING_TABLE_POINTERS] = { 0, 4 },
4874 [ILO_GPE_GEN6_3DSTATE_SAMPLER_STATE_POINTERS] = { 0, 4 },
4875 [ILO_GPE_GEN6_3DSTATE_URB] = { 0, 3 },
4876 [ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS] = { 1, 4 },
4877 [ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS] = { 1, 2 },
4878 [ILO_GPE_GEN6_3DSTATE_INDEX_BUFFER] = { 0, 3 },
4879 [ILO_GPE_GEN6_3DSTATE_VIEWPORT_STATE_POINTERS] = { 0, 4 },
4880 [ILO_GPE_GEN6_3DSTATE_CC_STATE_POINTERS] = { 0, 4 },
4881 [ILO_GPE_GEN6_3DSTATE_SCISSOR_STATE_POINTERS] = { 0, 2 },
4882 [ILO_GPE_GEN6_3DSTATE_VS] = { 0, 6 },
4883 [ILO_GPE_GEN6_3DSTATE_GS] = { 0, 7 },
4884 [ILO_GPE_GEN6_3DSTATE_CLIP] = { 0, 4 },
4885 [ILO_GPE_GEN6_3DSTATE_SF] = { 0, 20 },
4886 [ILO_GPE_GEN6_3DSTATE_WM] = { 0, 9 },
4887 [ILO_GPE_GEN6_3DSTATE_CONSTANT_VS] = { 0, 5 },
4888 [ILO_GPE_GEN6_3DSTATE_CONSTANT_GS] = { 0, 5 },
4889 [ILO_GPE_GEN6_3DSTATE_CONSTANT_PS] = { 0, 5 },
4890 [ILO_GPE_GEN6_3DSTATE_SAMPLE_MASK] = { 0, 2 },
4891 [ILO_GPE_GEN6_3DSTATE_DRAWING_RECTANGLE] = { 0, 4 },
4892 [ILO_GPE_GEN6_3DSTATE_DEPTH_BUFFER] = { 0, 7 },
4893 [ILO_GPE_GEN6_3DSTATE_POLY_STIPPLE_OFFSET] = { 0, 2 },
4894 [ILO_GPE_GEN6_3DSTATE_POLY_STIPPLE_PATTERN] = { 0, 33 },
4895 [ILO_GPE_GEN6_3DSTATE_LINE_STIPPLE] = { 0, 3 },
4896 [ILO_GPE_GEN6_3DSTATE_AA_LINE_PARAMETERS] = { 0, 3 },
4897 [ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX] = { 0, 4 },
4898 [ILO_GPE_GEN6_3DSTATE_MULTISAMPLE] = { 0, 3 },
4899 [ILO_GPE_GEN6_3DSTATE_STENCIL_BUFFER] = { 0, 3 },
4900 [ILO_GPE_GEN6_3DSTATE_HIER_DEPTH_BUFFER] = { 0, 3 },
4901 [ILO_GPE_GEN6_3DSTATE_CLEAR_PARAMS] = { 0, 2 },
4902 [ILO_GPE_GEN6_PIPE_CONTROL] = { 0, 5 },
4903 [ILO_GPE_GEN6_3DPRIMITIVE] = { 0, 6 },
4904 };
4905 const int header = gen6_command_size_table[cmd].header;
4906 const int body = gen6_command_size_table[arg].body;
4907 const int count = arg;
4908
4909 ILO_GPE_VALID_GEN(dev, 6, 6);
4910 assert(cmd < ILO_GPE_GEN6_COMMAND_COUNT);
4911
4912 return (likely(count)) ? header + body * count : 0;
4913 }
4914
4915 static int
4916 gen6_estimate_state_size(const struct ilo_dev_info *dev,
4917 enum ilo_gpe_gen6_state state,
4918 int arg)
4919 {
4920 static const struct {
4921 int alignment;
4922 int body;
4923 bool is_array;
4924 } gen6_state_size_table[ILO_GPE_GEN6_STATE_COUNT] = {
4925 [ILO_GPE_GEN6_INTERFACE_DESCRIPTOR_DATA] = { 8, 8, true },
4926 [ILO_GPE_GEN6_SF_VIEWPORT] = { 8, 8, true },
4927 [ILO_GPE_GEN6_CLIP_VIEWPORT] = { 8, 4, true },
4928 [ILO_GPE_GEN6_CC_VIEWPORT] = { 8, 2, true },
4929 [ILO_GPE_GEN6_COLOR_CALC_STATE] = { 16, 6, false },
4930 [ILO_GPE_GEN6_BLEND_STATE] = { 16, 2, true },
4931 [ILO_GPE_GEN6_DEPTH_STENCIL_STATE] = { 16, 3, false },
4932 [ILO_GPE_GEN6_SCISSOR_RECT] = { 8, 2, true },
4933 [ILO_GPE_GEN6_BINDING_TABLE_STATE] = { 8, 1, true },
4934 [ILO_GPE_GEN6_SURFACE_STATE] = { 8, 6, false },
4935 [ILO_GPE_GEN6_SAMPLER_STATE] = { 8, 4, true },
4936 [ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE] = { 8, 12, false },
4937 [ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER] = { 8, 1, true },
4938 };
4939 const int alignment = gen6_state_size_table[state].alignment;
4940 const int body = gen6_state_size_table[state].body;
4941 const bool is_array = gen6_state_size_table[state].is_array;
4942 const int count = arg;
4943 int estimate;
4944
4945 ILO_GPE_VALID_GEN(dev, 6, 6);
4946 assert(state < ILO_GPE_GEN6_STATE_COUNT);
4947
4948 if (likely(count)) {
4949 if (is_array) {
4950 estimate = (alignment - 1) + body * count;
4951 }
4952 else {
4953 estimate = (alignment - 1) + body;
4954 /* all states are aligned */
4955 if (count > 1)
4956 estimate += util_align_npot(body, alignment) * (count - 1);
4957 }
4958 }
4959 else {
4960 estimate = 0;
4961 }
4962
4963 return estimate;
4964 }
4965
/*
 * The GEN6 GPE dispatch table: size estimators plus one emit function per
 * command and per indirect state, wired up via the GEN6_SET() macro from
 * the gen6_emit_*() functions above.
 */
static const struct ilo_gpe_gen6 gen6_gpe = {
   .estimate_command_size = gen6_estimate_command_size,
   .estimate_state_size = gen6_estimate_state_size,

#define GEN6_SET(name) .emit_ ## name = gen6_emit_ ## name
   GEN6_SET(STATE_BASE_ADDRESS),
   GEN6_SET(STATE_SIP),
   GEN6_SET(3DSTATE_VF_STATISTICS),
   GEN6_SET(PIPELINE_SELECT),
   GEN6_SET(MEDIA_VFE_STATE),
   GEN6_SET(MEDIA_CURBE_LOAD),
   GEN6_SET(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
   GEN6_SET(MEDIA_GATEWAY_STATE),
   GEN6_SET(MEDIA_STATE_FLUSH),
   GEN6_SET(MEDIA_OBJECT_WALKER),
   GEN6_SET(3DSTATE_BINDING_TABLE_POINTERS),
   GEN6_SET(3DSTATE_SAMPLER_STATE_POINTERS),
   GEN6_SET(3DSTATE_URB),
   GEN6_SET(3DSTATE_VERTEX_BUFFERS),
   GEN6_SET(3DSTATE_VERTEX_ELEMENTS),
   GEN6_SET(3DSTATE_INDEX_BUFFER),
   GEN6_SET(3DSTATE_VIEWPORT_STATE_POINTERS),
   GEN6_SET(3DSTATE_CC_STATE_POINTERS),
   GEN6_SET(3DSTATE_SCISSOR_STATE_POINTERS),
   GEN6_SET(3DSTATE_VS),
   GEN6_SET(3DSTATE_GS),
   GEN6_SET(3DSTATE_CLIP),
   GEN6_SET(3DSTATE_SF),
   GEN6_SET(3DSTATE_WM),
   GEN6_SET(3DSTATE_CONSTANT_VS),
   GEN6_SET(3DSTATE_CONSTANT_GS),
   GEN6_SET(3DSTATE_CONSTANT_PS),
   GEN6_SET(3DSTATE_SAMPLE_MASK),
   GEN6_SET(3DSTATE_DRAWING_RECTANGLE),
   GEN6_SET(3DSTATE_DEPTH_BUFFER),
   GEN6_SET(3DSTATE_POLY_STIPPLE_OFFSET),
   GEN6_SET(3DSTATE_POLY_STIPPLE_PATTERN),
   GEN6_SET(3DSTATE_LINE_STIPPLE),
   GEN6_SET(3DSTATE_AA_LINE_PARAMETERS),
   GEN6_SET(3DSTATE_GS_SVB_INDEX),
   GEN6_SET(3DSTATE_MULTISAMPLE),
   GEN6_SET(3DSTATE_STENCIL_BUFFER),
   GEN6_SET(3DSTATE_HIER_DEPTH_BUFFER),
   GEN6_SET(3DSTATE_CLEAR_PARAMS),
   GEN6_SET(PIPE_CONTROL),
   GEN6_SET(3DPRIMITIVE),
   GEN6_SET(INTERFACE_DESCRIPTOR_DATA),
   GEN6_SET(SF_VIEWPORT),
   GEN6_SET(CLIP_VIEWPORT),
   GEN6_SET(CC_VIEWPORT),
   GEN6_SET(COLOR_CALC_STATE),
   GEN6_SET(BLEND_STATE),
   GEN6_SET(DEPTH_STENCIL_STATE),
   GEN6_SET(SCISSOR_RECT),
   GEN6_SET(BINDING_TABLE_STATE),
   GEN6_SET(SURFACE_STATE),
   GEN6_SET(so_SURFACE_STATE),
   GEN6_SET(SAMPLER_STATE),
   GEN6_SET(SAMPLER_BORDER_COLOR_STATE),
   GEN6_SET(push_constant_buffer),
#undef GEN6_SET
};
5028
/**
 * Return the GEN6 GPE dispatch table.  The table is immutable, so a single
 * shared instance is returned to all callers.
 */
const struct ilo_gpe_gen6 *
ilo_gpe_gen6_get(void)
{
   return &gen6_gpe;
}