c883fe3f259035794b7350a6eabe9b256b46de01
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_generator.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs_generator.cpp
25 *
26 * This file supports generating code from the FS LIR to the actual
27 * native instructions.
28 */
29
30 #include "brw_eu.h"
31 #include "brw_fs.h"
32 #include "brw_cfg.h"
33 #include "brw_program.h"
34
35 static enum brw_reg_file
36 brw_file_from_reg(fs_reg *reg)
37 {
38 switch (reg->file) {
39 case ARF:
40 return BRW_ARCHITECTURE_REGISTER_FILE;
41 case FIXED_GRF:
42 case VGRF:
43 return BRW_GENERAL_REGISTER_FILE;
44 case MRF:
45 return BRW_MESSAGE_REGISTER_FILE;
46 case IMM:
47 return BRW_IMMEDIATE_VALUE;
48 case BAD_FILE:
49 case ATTR:
50 case UNIFORM:
51 unreachable("not reached");
52 }
53 return BRW_ARCHITECTURE_REGISTER_FILE;
54 }
55
56 static struct brw_reg
57 brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen)
58 {
59 struct brw_reg brw_reg;
60
61 switch (reg->file) {
62 case MRF:
63 assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(gen));
64 /* Fallthrough */
65 case VGRF:
66 if (reg->stride == 0) {
67 brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
68 } else if (inst->exec_size < 8) {
69 brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
70 brw_reg = stride(brw_reg, inst->exec_size * reg->stride,
71 inst->exec_size, reg->stride);
72 } else {
73 /* From the Haswell PRM:
74 *
75 * VertStride must be used to cross GRF register boundaries. This
76 * rule implies that elements within a 'Width' cannot cross GRF
77 * boundaries.
78 *
79 * So, for registers with width > 8, we have to use a width of 8
80 * and trust the compression state to sort out the exec size.
81 */
82 brw_reg = brw_vec8_reg(brw_file_from_reg(reg), reg->nr, 0);
83 brw_reg = stride(brw_reg, 8 * reg->stride, 8, reg->stride);
84 }
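      /* A rough worked example (an illustration, not from the PRM): a SIMD16
       * float VGRF with stride 1 yields the region <8;8,1>:F, and the
       * compression state emits the second half from the next GRF; with
       * stride 2 the region would be <16;8,2>, skipping every other element.
       */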
85
86 brw_reg = retype(brw_reg, reg->type);
87 brw_reg = byte_offset(brw_reg, reg->subreg_offset);
88 brw_reg.abs = reg->abs;
89 brw_reg.negate = reg->negate;
90 break;
91 case ARF:
92 case FIXED_GRF:
93 case IMM:
94 brw_reg = reg->as_brw_reg();
95 break;
96 case BAD_FILE:
97 /* Probably unused. */
98 brw_reg = brw_null_reg();
99 break;
100 case ATTR:
101 case UNIFORM:
102 unreachable("not reached");
103 }
104
105 return brw_reg;
106 }
107
108 fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
109 void *mem_ctx,
110 const void *key,
111 struct brw_stage_prog_data *prog_data,
112 unsigned promoted_constants,
113 bool runtime_check_aads_emit,
114 gl_shader_stage stage)
115
116 : compiler(compiler), log_data(log_data),
117 devinfo(compiler->devinfo), key(key),
118 prog_data(prog_data),
119 promoted_constants(promoted_constants),
120 runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
121 stage(stage), mem_ctx(mem_ctx)
122 {
123 p = rzalloc(mem_ctx, struct brw_codegen);
124 brw_init_codegen(devinfo, p, mem_ctx);
125 }
126
127 fs_generator::~fs_generator()
128 {
129 }
130
131 class ip_record : public exec_node {
132 public:
133 DECLARE_RALLOC_CXX_OPERATORS(ip_record)
134
135 ip_record(int ip)
136 {
137 this->ip = ip;
138 }
139
140 int ip;
141 };
142
143 bool
144 fs_generator::patch_discard_jumps_to_fb_writes()
145 {
146 if (devinfo->gen < 6 || this->discard_halt_patches.is_empty())
147 return false;
148
149 int scale = brw_jump_scale(p->devinfo);
150
151 /* There is a somewhat strange undocumented requirement of using
152 * HALT, according to the simulator. If some channel has HALTed to
153 * a particular UIP, then by the end of the program, every channel
154 * must have HALTed to that UIP. Furthermore, the tracking is a
155 * stack, so you can't do the final halt of a UIP after starting
156 * halting to a new UIP.
157 *
158 * Symptoms of not emitting this instruction on actual hardware
159 * included GPU hangs and sparkly rendering on the piglit discard
160 * tests.
161 */
162 brw_inst *last_halt = gen6_HALT(p);
163 brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
164 brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);
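   /* A sketch of the arithmetic: brw_jump_scale() returns the per-gen jump
    * unit (16 on Gen8+, where jumps count bytes; 2 on Gen5-7, where they
    * count 64-bit chunks; 1 on Gen4), so a UIP/JIP of 1 * scale means "the
    * next instruction".
    */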
165
166 int ip = p->nr_insn;
167
168 foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
169 brw_inst *patch = &p->store[patch_ip->ip];
170
171 assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
172 /* HALT takes a half-instruction distance from the pre-incremented IP. */
173 brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
174 }
175
176 this->discard_halt_patches.make_empty();
177 return true;
178 }
179
180 void
181 fs_generator::fire_fb_write(fs_inst *inst,
182 struct brw_reg payload,
183 struct brw_reg implied_header,
184 GLuint nr)
185 {
186 uint32_t msg_control;
187
188 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
189
190 if (devinfo->gen < 6) {
191 brw_push_insn_state(p);
192 brw_set_default_exec_size(p, BRW_EXECUTE_8);
193 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
194 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
195 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
196 brw_MOV(p, offset(payload, 1), brw_vec8_grf(1, 0));
197 brw_pop_insn_state(p);
198 }
199
200 if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
201 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
202 else if (prog_data->dual_src_blend) {
203 if (!inst->force_sechalf)
204 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
205 else
206 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
207 } else if (inst->exec_size == 16)
208 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
209 else
210 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
211
212 uint32_t surf_index =
213 prog_data->binding_table.render_target_start + inst->target;
214
215 bool last_render_target = inst->eot ||
216 (prog_data->dual_src_blend && dispatch_width == 16);
217
218
219 brw_fb_WRITE(p,
220 dispatch_width,
221 payload,
222 implied_header,
223 msg_control,
224 surf_index,
225 nr,
226 0,
227 inst->eot,
228 last_render_target,
229 inst->header_size != 0);
230
231 brw_mark_surface_used(&prog_data->base, surf_index);
232 }
233
234 void
235 fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
236 {
237 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
238 const brw_wm_prog_key * const key = (brw_wm_prog_key * const) this->key;
239 struct brw_reg implied_header;
240
241 if (devinfo->gen < 8 && !devinfo->is_haswell) {
242 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
243 }
244
245 if (inst->base_mrf >= 0)
246 payload = brw_message_reg(inst->base_mrf);
247
 248   /* The header is two registers: g0 and g1. g0 is filled by the implied
 249    * move; here we set up g1.
 250    */
251 if (inst->header_size != 0) {
252 brw_push_insn_state(p);
253 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
254 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
255 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
256 brw_set_default_flag_reg(p, 0, 0);
257
258 /* On HSW, the GPU will use the predicate on SENDC, unless the header is
259 * present.
260 */
261 if (prog_data->uses_kill) {
262 struct brw_reg pixel_mask;
263
264 if (devinfo->gen >= 6)
265 pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
266 else
267 pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
268
269 brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
270 }
271
272 if (devinfo->gen >= 6) {
273 brw_push_insn_state(p);
274 brw_set_default_exec_size(p, BRW_EXECUTE_16);
275 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
276 brw_MOV(p,
277 retype(payload, BRW_REGISTER_TYPE_UD),
278 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
279 brw_pop_insn_state(p);
280
281 if (inst->target > 0 && key->replicate_alpha) {
282 /* Set "Source0 Alpha Present to RenderTarget" bit in message
283 * header.
284 */
285 brw_OR(p,
286 vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
287 vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
288 brw_imm_ud(0x1 << 11));
289 }
290
291 if (inst->target > 0) {
292 /* Set the render target index for choosing BLEND_STATE. */
293 brw_MOV(p, retype(vec1(suboffset(payload, 2)),
294 BRW_REGISTER_TYPE_UD),
295 brw_imm_ud(inst->target));
296 }
297
 298          /* Set the "computed stencil" bit in the message header. */
299 if (prog_data->computed_stencil) {
300 brw_OR(p,
301 vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
302 vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
303 brw_imm_ud(0x1 << 14));
304 }
305
306 implied_header = brw_null_reg();
307 } else {
308 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
309 }
310
311 brw_pop_insn_state(p);
312 } else {
313 implied_header = brw_null_reg();
314 }
315
316 if (!runtime_check_aads_emit) {
317 fire_fb_write(inst, payload, implied_header, inst->mlen);
318 } else {
319 /* This can only happen in gen < 6 */
320 assert(devinfo->gen < 6);
321
322 struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
323
324 /* Check runtime bit to detect if we have to send AA data or not */
325 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
326 brw_AND(p,
327 v1_null_ud,
328 retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
329 brw_imm_ud(1<<26));
330 brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
331
332 int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
333 brw_inst_set_exec_size(p->devinfo, brw_last_inst, BRW_EXECUTE_1);
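      /* The forward-jump pattern, roughly: brw_JMPI() returns a pointer into
       * p->store, so jmp is the index of the JMPI instruction, and
       * brw_land_fwd_jump() later patches its offset to skip over the non-AA
       * write emitted just below.
       */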
334 {
335 /* Don't send AA data */
336 fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);
337 }
338 brw_land_fwd_jump(p, jmp);
339 fire_fb_write(inst, payload, implied_header, inst->mlen);
340 }
341 }
342
343 void
344 fs_generator::generate_mov_indirect(fs_inst *inst,
345 struct brw_reg dst,
346 struct brw_reg reg,
347 struct brw_reg indirect_byte_offset)
348 {
349 assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
350 assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);
351
352 unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;
353
354 /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
355 struct brw_reg addr = vec8(brw_address_reg(0));
356
357 /* The destination stride of an instruction (in bytes) must be greater
358 * than or equal to the size of the rest of the instruction. Since the
359 * address register is of type UW, we can't use a D-type instruction.
 360    * In order to get around this, we re-type to UW and use a stride.
361 */
362 indirect_byte_offset =
363 retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
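   /* For example (a sketch): if the offsets live in g2 as dwords, the
    * retype-and-spread reads g2 with a 2-word stride, i.e. the low word of
    * each dword, which is all we need for byte offsets within the file.
    */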
364
365 /* Prior to Broadwell, there are only 8 address registers. */
366 assert(inst->exec_size == 8 || devinfo->gen >= 8);
367
368 brw_MOV(p, addr, indirect_byte_offset);
369 brw_inst_set_mask_control(devinfo, brw_last_inst, BRW_MASK_DISABLE);
370 brw_MOV(p, dst, retype(brw_VxH_indirect(0, imm_byte_offset), dst.type));
371 }
372
373 void
374 fs_generator::generate_urb_read(fs_inst *inst,
375 struct brw_reg dst,
376 struct brw_reg header)
377 {
378 assert(header.file == BRW_GENERAL_REGISTER_FILE);
379 assert(header.type == BRW_REGISTER_TYPE_UD);
380
381 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
382 brw_set_dest(p, send, dst);
383 brw_set_src0(p, send, header);
384 brw_set_src1(p, send, brw_imm_ud(0u));
385
386 brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
387 brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ);
388
389 if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
390 brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);
391
392 brw_inst_set_mlen(p->devinfo, send, inst->mlen);
393 brw_inst_set_rlen(p->devinfo, send, inst->regs_written);
394 brw_inst_set_header_present(p->devinfo, send, true);
395 brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
396 }
397
398 void
399 fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
400 {
401 brw_inst *insn;
402
403 insn = brw_next_insn(p, BRW_OPCODE_SEND);
404
405 brw_set_dest(p, insn, brw_null_reg());
406 brw_set_src0(p, insn, payload);
407 brw_set_src1(p, insn, brw_imm_d(0));
408
409 brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
410 brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE);
411
412 if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
413 inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
414 brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);
415
416 if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
417 inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
418 brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);
419
420 brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
421 brw_inst_set_rlen(p->devinfo, insn, 0);
422 brw_inst_set_eot(p->devinfo, insn, inst->eot);
423 brw_inst_set_header_present(p->devinfo, insn, true);
424 brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
425 }
426
427 void
428 fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
429 {
430 struct brw_inst *insn;
431
432 insn = brw_next_insn(p, BRW_OPCODE_SEND);
433
434 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
435 brw_set_src0(p, insn, payload);
436 brw_set_src1(p, insn, brw_imm_d(0));
437
438 /* Terminate a compute shader by sending a message to the thread spawner.
439 */
440 brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
441 brw_inst_set_mlen(devinfo, insn, 1);
442 brw_inst_set_rlen(devinfo, insn, 0);
443 brw_inst_set_eot(devinfo, insn, inst->eot);
444 brw_inst_set_header_present(devinfo, insn, false);
445
446 brw_inst_set_ts_opcode(devinfo, insn, 0); /* Dereference resource */
447 brw_inst_set_ts_request_type(devinfo, insn, 0); /* Root thread */
448
449 /* Note that even though the thread has a URB resource associated with it,
450 * we set the "do not dereference URB" bit, because the URB resource is
 451    * managed by the fixed-function unit, which will free it automatically.
452 */
453 brw_inst_set_ts_resource_select(devinfo, insn, 1); /* Do not dereference URB */
454
455 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
456 }
457
458 void
459 fs_generator::generate_stencil_ref_packing(fs_inst *inst,
460 struct brw_reg dst,
461 struct brw_reg src)
462 {
463 assert(dispatch_width == 8);
464 assert(devinfo->gen >= 9);
465
 466    /* Stencil value updates are provided in 8 slots of 1 byte per slot.
 467    * Presumably, in order to save memory bandwidth, the stencil reference
 468    * values written from the FS need to be packed into 2 dwords: each value
 469    * is limited to 1 byte and this is a SIMD8 send, so stencil slots 0-3 go
 470    * in dw0 and slots 4-7 in dw1.
471 *
472 * The spec is confusing here because in the payload definition of MDP_RTW_S8
473 * (Message Data Payload for Render Target Writes with Stencil 8b) the
474 * stencil value seems to be dw4.0-dw4.7. However, if you look at the type of
475 * dw4 it is type MDPR_STENCIL (Message Data Payload Register) which is the
476 * packed values specified above and diagrammed below:
477 *
478 * 31 0
479 * --------------------------------
480 * DW | |
481 * 2-7 | IGNORED |
482 * | |
483 * --------------------------------
484 * DW1 | STC | STC | STC | STC |
485 * | slot7 | slot6 | slot5 | slot4|
486 * --------------------------------
487 * DW0 | STC | STC | STC | STC |
488 * | slot3 | slot2 | slot1 | slot0|
489 * --------------------------------
490 */
491
492 src.vstride = BRW_VERTICAL_STRIDE_4;
493 src.width = BRW_WIDTH_1;
494 src.hstride = BRW_HORIZONTAL_STRIDE_0;
495 assert(src.type == BRW_REGISTER_TYPE_UB);
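   /* What the region above does, roughly: src.0<4;1,0>:UB reads one byte
    * every 4 bytes (the low byte of each channel's dword), and the UB-typed
    * MOV writes those 8 bytes out consecutively, producing the packed
    * DW0/DW1 layout diagrammed above.
    */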
496 brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UB), src);
497 }
498
499 void
500 fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
501 {
502 brw_barrier(p, src);
503 brw_WAIT(p);
504 }
505
506 void
507 fs_generator::generate_blorp_fb_write(fs_inst *inst)
508 {
509 brw_fb_WRITE(p,
510 16 /* dispatch_width */,
511 brw_message_reg(inst->base_mrf),
512 brw_reg_from_fs_reg(inst, &inst->src[0], devinfo->gen),
513 BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
514 inst->target,
515 inst->mlen,
516 0,
517 true,
518 true,
519 inst->header_size != 0);
520 }
521
522 void
523 fs_generator::generate_linterp(fs_inst *inst,
524 struct brw_reg dst, struct brw_reg *src)
525 {
526 /* PLN reads:
527 * / in SIMD16 \
528 * -----------------------------------
529 * | src1+0 | src1+1 | src1+2 | src1+3 |
530 * |-----------------------------------|
531 * |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
532 * -----------------------------------
533 *
534 * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
535 *
536 * -----------------------------------
537 * | src1+0 | src1+1 | src1+2 | src1+3 |
538 * |-----------------------------------|
539 * |(x0, x1)|(y0, y1)| | | in SIMD8
540 * |-----------------------------------|
541 * |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
542 * -----------------------------------
543 *
544 * See also: emit_interpolation_setup_gen4().
545 */
546 struct brw_reg delta_x = src[0];
547 struct brw_reg delta_y = offset(src[0], dispatch_width / 8);
548 struct brw_reg interp = src[1];
549
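   /* Roughly, both paths evaluate the same plane equation:
    * dst = interp[0] * delta_x + interp[1] * delta_y + interp[3].
    * PLN does it in one instruction, while LINE accumulates
    * interp[0] * delta_x + interp[3] and MAC then adds interp[1] * delta_y.
    */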
550 if (devinfo->has_pln &&
551 (devinfo->gen >= 7 || (delta_x.nr & 1) == 0)) {
552 brw_PLN(p, dst, interp, delta_x);
553 } else {
554 brw_LINE(p, brw_null_reg(), interp, delta_x);
555 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
556 }
557 }
558
559 void
560 fs_generator::generate_math_gen6(fs_inst *inst,
561 struct brw_reg dst,
562 struct brw_reg src0,
563 struct brw_reg src1)
564 {
565 int op = brw_math_function(inst->opcode);
566 bool binop = src1.file != BRW_ARCHITECTURE_REGISTER_FILE;
567
568 if (dispatch_width == 8) {
569 gen6_math(p, dst, op, src0, src1);
570 } else if (dispatch_width == 16) {
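      /* A sketch of the unrolling: firsthalf()/sechalf() select the first
       * and second GRF of each 16-wide operand, and the Q1/Q2 compression
       * state routes each SIMD8 op to the matching channel group; a unary
       * op passes the null reg as src1 for the second half.
       */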
571 brw_push_insn_state(p);
572 brw_set_default_exec_size(p, BRW_EXECUTE_8);
573 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
574 gen6_math(p, firsthalf(dst), op, firsthalf(src0), firsthalf(src1));
575 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
576 gen6_math(p, sechalf(dst), op, sechalf(src0),
577 binop ? sechalf(src1) : brw_null_reg());
578 brw_pop_insn_state(p);
579 }
580 }
581
582 void
583 fs_generator::generate_math_gen4(fs_inst *inst,
584 struct brw_reg dst,
585 struct brw_reg src)
586 {
587 int op = brw_math_function(inst->opcode);
588
589 assert(inst->mlen >= 1);
590
591 if (dispatch_width == 8) {
592 gen4_math(p, dst,
593 op,
594 inst->base_mrf, src,
595 BRW_MATH_PRECISION_FULL);
596 } else if (dispatch_width == 16) {
597 brw_set_default_exec_size(p, BRW_EXECUTE_8);
598 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
599 gen4_math(p, firsthalf(dst),
600 op,
601 inst->base_mrf, firsthalf(src),
602 BRW_MATH_PRECISION_FULL);
603 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
604 gen4_math(p, sechalf(dst),
605 op,
606 inst->base_mrf + 1, sechalf(src),
607 BRW_MATH_PRECISION_FULL);
608
609 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
610 }
611 }
612
613 void
614 fs_generator::generate_math_g45(fs_inst *inst,
615 struct brw_reg dst,
616 struct brw_reg src)
617 {
618 if (inst->opcode == SHADER_OPCODE_POW ||
619 inst->opcode == SHADER_OPCODE_INT_QUOTIENT ||
620 inst->opcode == SHADER_OPCODE_INT_REMAINDER) {
621 generate_math_gen4(inst, dst, src);
622 return;
623 }
624
625 int op = brw_math_function(inst->opcode);
626
627 assert(inst->mlen >= 1);
628
629 gen4_math(p, dst,
630 op,
631 inst->base_mrf, src,
632 BRW_MATH_PRECISION_FULL);
633 }
634
635 void
636 fs_generator::generate_get_buffer_size(fs_inst *inst,
637 struct brw_reg dst,
638 struct brw_reg src,
639 struct brw_reg surf_index)
640 {
641 assert(devinfo->gen >= 7);
642 assert(surf_index.file == BRW_IMMEDIATE_VALUE);
643
644 uint32_t simd_mode;
645 int rlen = 4;
646
647 switch (inst->exec_size) {
648 case 8:
649 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
650 break;
651 case 16:
652 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
653 break;
654 default:
655 unreachable("Invalid width for texture instruction");
656 }
657
658 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
659 rlen = 8;
660 dst = vec16(dst);
661 }
662
663 brw_SAMPLE(p,
664 retype(dst, BRW_REGISTER_TYPE_UW),
665 inst->base_mrf,
666 src,
667 surf_index.ud,
668 0,
669 GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
670 rlen, /* response length */
671 inst->mlen,
672 inst->header_size > 0,
673 simd_mode,
674 BRW_SAMPLER_RETURN_FORMAT_SINT32);
675
676 brw_mark_surface_used(prog_data, surf_index.ud);
677 }
678
679 void
680 fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
681 struct brw_reg surface_index,
682 struct brw_reg sampler_index)
683 {
684 int msg_type = -1;
685 int rlen = 4;
686 uint32_t simd_mode;
687 uint32_t return_format;
688 bool is_combined_send = inst->eot;
689
690 switch (dst.type) {
691 case BRW_REGISTER_TYPE_D:
692 return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
693 break;
694 case BRW_REGISTER_TYPE_UD:
695 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
696 break;
697 default:
698 return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
699 break;
700 }
701
702 /* Stomp the resinfo output type to UINT32. On gens 4-5, the output type
703 * is set as part of the message descriptor. On gen4, the PRM seems to
704 * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
705 * later gens UINT32 is required. Once you hit Sandy Bridge, the bit is
706 * gone from the message descriptor entirely and you just get UINT32 all
707 * the time regasrdless. Since we can really only do non-UINT32 on gen4,
708 * just stomp it to UINT32 all the time.
709 */
710 if (inst->opcode == SHADER_OPCODE_TXS)
711 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
712
713 switch (inst->exec_size) {
714 case 8:
715 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
716 break;
717 case 16:
718 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
719 break;
720 default:
721 unreachable("Invalid width for texture instruction");
722 }
723
724 if (devinfo->gen >= 5) {
725 switch (inst->opcode) {
726 case SHADER_OPCODE_TEX:
727 if (inst->shadow_compare) {
728 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
729 } else {
730 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
731 }
732 break;
733 case FS_OPCODE_TXB:
734 if (inst->shadow_compare) {
735 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
736 } else {
737 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
738 }
739 break;
740 case SHADER_OPCODE_TXL:
741 if (inst->shadow_compare) {
742 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
743 } else {
744 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
745 }
746 break;
747 case SHADER_OPCODE_TXS:
748 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
749 break;
750 case SHADER_OPCODE_TXD:
751 if (inst->shadow_compare) {
752 /* Gen7.5+. Otherwise, lowered by brw_lower_texture_gradients(). */
753 assert(devinfo->gen >= 8 || devinfo->is_haswell);
754 msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
755 } else {
756 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
757 }
758 break;
759 case SHADER_OPCODE_TXF:
760 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
761 break;
762 case SHADER_OPCODE_TXF_CMS_W:
763 assert(devinfo->gen >= 9);
764 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
765 break;
766 case SHADER_OPCODE_TXF_CMS:
767 if (devinfo->gen >= 7)
768 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
769 else
770 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
771 break;
772 case SHADER_OPCODE_TXF_UMS:
773 assert(devinfo->gen >= 7);
774 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
775 break;
776 case SHADER_OPCODE_TXF_MCS:
777 assert(devinfo->gen >= 7);
778 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
779 break;
780 case SHADER_OPCODE_LOD:
781 msg_type = GEN5_SAMPLER_MESSAGE_LOD;
782 break;
783 case SHADER_OPCODE_TG4:
784 if (inst->shadow_compare) {
785 assert(devinfo->gen >= 7);
786 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
787 } else {
788 assert(devinfo->gen >= 6);
789 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
790 }
791 break;
792 case SHADER_OPCODE_TG4_OFFSET:
793 assert(devinfo->gen >= 7);
794 if (inst->shadow_compare) {
795 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
796 } else {
797 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
798 }
799 break;
800 case SHADER_OPCODE_SAMPLEINFO:
801 msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
802 break;
803 default:
804 unreachable("not reached");
805 }
806 } else {
807 switch (inst->opcode) {
808 case SHADER_OPCODE_TEX:
 809       /* Note that G45 and older determine shadow compare and dispatch width
810 * from message length for most messages.
811 */
812 if (inst->exec_size == 8) {
813 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
814 if (inst->shadow_compare) {
815 assert(inst->mlen == 6);
816 } else {
817 assert(inst->mlen <= 4);
818 }
819 } else {
820 if (inst->shadow_compare) {
821 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
822 assert(inst->mlen == 9);
823 } else {
824 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
825 assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
826 }
827 }
828 break;
829 case FS_OPCODE_TXB:
830 if (inst->shadow_compare) {
831 assert(inst->exec_size == 8);
832 assert(inst->mlen == 6);
833 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
834 } else {
835 assert(inst->mlen == 9);
836 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
837 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
838 }
839 break;
840 case SHADER_OPCODE_TXL:
841 if (inst->shadow_compare) {
842 assert(inst->exec_size == 8);
843 assert(inst->mlen == 6);
844 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
845 } else {
846 assert(inst->mlen == 9);
847 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
848 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
849 }
850 break;
851 case SHADER_OPCODE_TXD:
852 /* There is no sample_d_c message; comparisons are done manually */
853 assert(inst->exec_size == 8);
854 assert(inst->mlen == 7 || inst->mlen == 10);
855 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
856 break;
857 case SHADER_OPCODE_TXF:
858 assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
859 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
860 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
861 break;
862 case SHADER_OPCODE_TXS:
863 assert(inst->mlen == 3);
864 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
865 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
866 break;
867 default:
868 unreachable("not reached");
869 }
870 }
871 assert(msg_type != -1);
872
873 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
874 rlen = 8;
875 dst = vec16(dst);
876 }
877
878 if (is_combined_send) {
879 assert(devinfo->gen >= 9 || devinfo->is_cherryview);
880 rlen = 0;
881 }
882
883 assert(devinfo->gen < 7 || inst->header_size == 0 ||
884 src.file == BRW_GENERAL_REGISTER_FILE);
885
886 assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
887
888 /* Load the message header if present. If there's a texture offset,
889 * we need to set it up explicitly and load the offset bitfield.
890 * Otherwise, we can use an implied move from g0 to the first message reg.
891 */
892 if (inst->header_size != 0) {
893 if (devinfo->gen < 6 && !inst->offset) {
894 /* Set up an implied move from g0 to the MRF. */
895 src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
896 } else {
897 struct brw_reg header_reg;
898
899 if (devinfo->gen >= 7) {
900 header_reg = src;
901 } else {
902 assert(inst->base_mrf != -1);
903 header_reg = brw_message_reg(inst->base_mrf);
904 }
905
906 brw_push_insn_state(p);
907 brw_set_default_exec_size(p, BRW_EXECUTE_8);
908 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
909 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
910 /* Explicitly set up the message header by copying g0 to the MRF. */
911 brw_MOV(p, header_reg, brw_vec8_grf(0, 0));
912
913 if (inst->offset) {
914 /* Set the offset bits in DWord 2. */
915 brw_MOV(p, get_element_ud(header_reg, 2),
916 brw_imm_ud(inst->offset));
917 } else if (stage != MESA_SHADER_VERTEX &&
918 stage != MESA_SHADER_FRAGMENT) {
919 /* The vertex and fragment stages have g0.2 set to 0, so
920 * header0.2 is 0 when g0 is copied. Other stages may not, so we
921 * must set it to 0 to avoid setting undesirable bits in the
922 * message.
923 */
924 brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
925 }
926
927 brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
928 brw_pop_insn_state(p);
929 }
930 }
931
932 uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
933 inst->opcode == SHADER_OPCODE_TG4_OFFSET)
934 ? prog_data->binding_table.gather_texture_start
935 : prog_data->binding_table.texture_start;
936
937 if (surface_index.file == BRW_IMMEDIATE_VALUE &&
938 sampler_index.file == BRW_IMMEDIATE_VALUE) {
939 uint32_t surface = surface_index.ud;
940 uint32_t sampler = sampler_index.ud;
941
942 brw_SAMPLE(p,
943 retype(dst, BRW_REGISTER_TYPE_UW),
944 inst->base_mrf,
945 src,
946 surface + base_binding_table_index,
947 sampler % 16,
948 msg_type,
949 rlen,
950 inst->mlen,
951 inst->header_size != 0,
952 simd_mode,
953 return_format);
954
955 brw_mark_surface_used(prog_data, surface + base_binding_table_index);
956 } else {
957 /* Non-const sampler index */
958
959 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
960 struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
961 struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));
962
963 brw_push_insn_state(p);
964 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
965 brw_set_default_access_mode(p, BRW_ALIGN_1);
966
967 if (memcmp(&surface_reg, &sampler_reg, sizeof(surface_reg)) == 0) {
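         /* Multiplying by 0x101 equals (x << 8) | x for any x that fits in
          * a byte, so a single MUL builds the combined sampler/surface
          * index when the two are identical.
          */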
968 brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
969 } else {
970 brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
971 brw_OR(p, addr, addr, surface_reg);
972 }
973 if (base_binding_table_index)
974 brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
975 brw_AND(p, addr, addr, brw_imm_ud(0xfff));
976
977 brw_pop_insn_state(p);
978
979 /* dst = send(offset, a0.0 | <descriptor>) */
980 brw_inst *insn = brw_send_indirect_message(
981 p, BRW_SFID_SAMPLER, dst, src, addr);
982 brw_set_sampler_message(p, insn,
983 0 /* surface */,
984 0 /* sampler */,
985 msg_type,
986 rlen,
987 inst->mlen /* mlen */,
988 inst->header_size != 0 /* header */,
989 simd_mode,
990 return_format);
991
 992       /* The visitor knows more than we do about the required surface limit,
 993        * so it has already done the marking.
994 */
995 }
996
997 if (is_combined_send) {
998 brw_inst_set_eot(p->devinfo, brw_last_inst, true);
999 brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
1000 }
1001 }
1002
1003
1004 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
1005 * looking like:
1006 *
1007 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
1008 *
1009 * Ideally, we want to produce:
1010 *
1011 * DDX DDY
1012 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
1013 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
1014 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
1015 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
1016 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
1017 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
1018 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
1019 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
1020 *
1021 * and add another set of two more subspans if in 16-pixel dispatch mode.
1022 *
1023 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
1024 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
1025 * pair. But the ideal approximation may impose a huge performance cost on
 1026  * sample_d.  On at least Haswell, the sample_d instruction does some
1027 * optimizations if the same LOD is used for all pixels in the subspan.
1028 *
1029 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
1030 * appropriate swizzling.
1031 */
1032 void
1033 fs_generator::generate_ddx(enum opcode opcode,
1034 struct brw_reg dst, struct brw_reg src)
1035 {
1036 unsigned vstride, width;
1037
1038 if (opcode == FS_OPCODE_DDX_FINE) {
1039 /* produce accurate derivatives */
1040 vstride = BRW_VERTICAL_STRIDE_2;
1041 width = BRW_WIDTH_2;
1042 } else {
1043 /* replicate the derivative at the top-left pixel to other pixels */
1044 vstride = BRW_VERTICAL_STRIDE_4;
1045 width = BRW_WIDTH_4;
1046 }
1047
1048 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
1049 src.negate, src.abs,
1050 BRW_REGISTER_TYPE_F,
1051 vstride,
1052 width,
1053 BRW_HORIZONTAL_STRIDE_0,
1054 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1055 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
1056 src.negate, src.abs,
1057 BRW_REGISTER_TYPE_F,
1058 vstride,
1059 width,
1060 BRW_HORIZONTAL_STRIDE_0,
1061 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
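   /* Worked example for one subspan (tl, tr, bl, br) = (x0, x1, x2, x3):
    * FINE uses <2;2,0> with src0 at subnr 1, so src0 = x1 x1 x3 x3 and
    * src1 = x0 x0 x2 x2, giving dst = (x1-x0, x1-x0, x3-x2, x3-x2); coarse
    * uses <4;4,0>, replicating (x1 - x0) across the whole subspan.
    */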
1062 brw_ADD(p, dst, src0, negate(src1));
1063 }
1064
1065 /* The negate_value boolean is used to negate the derivative computation for
1066 * FBOs, since they place the origin at the upper left instead of the lower
1067 * left.
1068 */
1069 void
1070 fs_generator::generate_ddy(enum opcode opcode,
1071 struct brw_reg dst, struct brw_reg src,
1072 bool negate_value)
1073 {
1074 if (opcode == FS_OPCODE_DDY_FINE) {
1075 /* From the Ivy Bridge PRM, volume 4 part 3, section 3.3.9 (Register
1076 * Region Restrictions):
1077 *
1078 * In Align16 access mode, SIMD16 is not allowed for DW operations
1079 * and SIMD8 is not allowed for DF operations.
1080 *
1081 * In this context, "DW operations" means "operations acting on 32-bit
1082 * values", so it includes operations on floats.
1083 *
1084 * Gen4 has a similar restriction. From the i965 PRM, section 11.5.3
1085 * (Instruction Compression -> Rules and Restrictions):
1086 *
1087 * A compressed instruction must be in Align1 access mode. Align16
1088 * mode instructions cannot be compressed.
1089 *
1090 * Similar text exists in the g45 PRM.
1091 *
1092 * On these platforms, if we're building a SIMD16 shader, we need to
1093 * manually unroll to a pair of SIMD8 instructions.
1094 */
1095 bool unroll_to_simd8 =
1096 (dispatch_width == 16 &&
1097 (devinfo->gen == 4 || (devinfo->gen == 7 && !devinfo->is_haswell)));
1098
1099 /* produce accurate derivatives */
1100 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
1101 src.negate, src.abs,
1102 BRW_REGISTER_TYPE_F,
1103 BRW_VERTICAL_STRIDE_4,
1104 BRW_WIDTH_4,
1105 BRW_HORIZONTAL_STRIDE_1,
1106 BRW_SWIZZLE_XYXY, WRITEMASK_XYZW);
1107 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
1108 src.negate, src.abs,
1109 BRW_REGISTER_TYPE_F,
1110 BRW_VERTICAL_STRIDE_4,
1111 BRW_WIDTH_4,
1112 BRW_HORIZONTAL_STRIDE_1,
1113 BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
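      /* In Align16, the XYXY swizzle on src0 yields (tl, tr, tl, tr) and
       * ZWZW on src1 yields (bl, br, bl, br) per subspan, so the ADDs below
       * compute (tl - bl, tr - br) pairs, flipped when negate_value is set.
       */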
1114 brw_push_insn_state(p);
1115 brw_set_default_access_mode(p, BRW_ALIGN_16);
1116 if (unroll_to_simd8) {
1117 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1118 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1119 if (negate_value) {
1120 brw_ADD(p, firsthalf(dst), firsthalf(src1), negate(firsthalf(src0)));
1121 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1122 brw_ADD(p, sechalf(dst), sechalf(src1), negate(sechalf(src0)));
1123 } else {
1124 brw_ADD(p, firsthalf(dst), firsthalf(src0), negate(firsthalf(src1)));
1125 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1126 brw_ADD(p, sechalf(dst), sechalf(src0), negate(sechalf(src1)));
1127 }
1128 } else {
1129 if (negate_value)
1130 brw_ADD(p, dst, src1, negate(src0));
1131 else
1132 brw_ADD(p, dst, src0, negate(src1));
1133 }
1134 brw_pop_insn_state(p);
1135 } else {
1136 /* replicate the derivative at the top-left pixel to other pixels */
1137 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
1138 src.negate, src.abs,
1139 BRW_REGISTER_TYPE_F,
1140 BRW_VERTICAL_STRIDE_4,
1141 BRW_WIDTH_4,
1142 BRW_HORIZONTAL_STRIDE_0,
1143 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1144 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
1145 src.negate, src.abs,
1146 BRW_REGISTER_TYPE_F,
1147 BRW_VERTICAL_STRIDE_4,
1148 BRW_WIDTH_4,
1149 BRW_HORIZONTAL_STRIDE_0,
1150 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1151 if (negate_value)
1152 brw_ADD(p, dst, src1, negate(src0));
1153 else
1154 brw_ADD(p, dst, src0, negate(src1));
1155 }
1156 }
1157
1158 void
1159 fs_generator::generate_discard_jump(fs_inst *inst)
1160 {
1161 assert(devinfo->gen >= 6);
1162
1163 /* This HALT will be patched up at FB write time to point UIP at the end of
1164 * the program, and at brw_uip_jip() JIP will be set to the end of the
1165 * current block (or the program).
1166 */
1167 this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
1168
1169 brw_push_insn_state(p);
1170 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1171 gen6_HALT(p);
1172 brw_pop_insn_state(p);
1173 }
1174
1175 void
1176 fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
1177 {
1178 assert(inst->mlen != 0);
1179
1180 brw_MOV(p,
1181 brw_uvec_mrf(inst->exec_size, (inst->base_mrf + 1), 0),
1182 retype(src, BRW_REGISTER_TYPE_UD));
1183 brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
1184 inst->exec_size / 8, inst->offset);
1185 }
1186
1187 void
1188 fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
1189 {
1190 assert(inst->mlen != 0);
1191
1192 brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
1193 inst->exec_size / 8, inst->offset);
1194 }
1195
1196 void
1197 fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
1198 {
1199 gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
1200 }
1201
1202 void
1203 fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
1204 struct brw_reg dst,
1205 struct brw_reg index,
1206 struct brw_reg offset)
1207 {
1208 assert(inst->mlen != 0);
1209
1210 assert(index.file == BRW_IMMEDIATE_VALUE &&
1211 index.type == BRW_REGISTER_TYPE_UD);
1212 uint32_t surf_index = index.ud;
1213
1214 assert(offset.file == BRW_IMMEDIATE_VALUE &&
1215 offset.type == BRW_REGISTER_TYPE_UD);
1216 uint32_t read_offset = offset.ud;
1217
1218 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
1219 read_offset, surf_index);
1220 }
1221
1222 void
1223 fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
1224 struct brw_reg dst,
1225 struct brw_reg index,
1226 struct brw_reg offset)
1227 {
1228 assert(index.type == BRW_REGISTER_TYPE_UD);
1229
1230 assert(offset.file == BRW_GENERAL_REGISTER_FILE);
1231 /* Reference just the dword we need, to avoid angering validate_reg(). */
1232 offset = brw_vec1_grf(offset.nr, 0);
1233
1234 /* We use the SIMD4x2 mode because we want to end up with 4 components in
1235 * the destination loaded consecutively from the same offset (which appears
1236 * in the first component, and the rest are ignored).
1237 */
1238 dst.width = BRW_WIDTH_4;
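   /* E.g. (a sketch): to load a vec4 uniform at byte offset 48, the first
    * channel of the payload holds 48 and the LD returns components 0-3
    * consecutively in the destination; the other payload channels are
    * ignored.
    */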
1239
1240 struct brw_reg src = offset;
1241 bool header_present = false;
1242
1243 if (devinfo->gen >= 9) {
1244 /* Skylake requires a message header in order to use SIMD4x2 mode. */
1245 src = retype(brw_vec4_grf(offset.nr, 0), BRW_REGISTER_TYPE_UD);
1246 header_present = true;
1247
1248 brw_push_insn_state(p);
1249 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1250 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1251 brw_MOV(p, vec8(src), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1252 brw_set_default_access_mode(p, BRW_ALIGN_1);
1253
1254 brw_MOV(p, get_element_ud(src, 2),
1255 brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));
1256 brw_pop_insn_state(p);
1257 }
1258
1259 if (index.file == BRW_IMMEDIATE_VALUE) {
1260
1261 uint32_t surf_index = index.ud;
1262
1263 brw_push_insn_state(p);
1264 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1265 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1266 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1267 brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
1268 brw_pop_insn_state(p);
1269
1270 brw_set_dest(p, send, dst);
1271 brw_set_src0(p, send, src);
1272 brw_set_sampler_message(p, send,
1273 surf_index,
1274 0, /* LD message ignores sampler unit */
1275 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1276 1, /* rlen */
1277 inst->mlen,
1278 header_present,
1279 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1280 0);
1281 } else {
1282
1283 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1284
1285 brw_push_insn_state(p);
1286 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1287 brw_set_default_access_mode(p, BRW_ALIGN_1);
1288
1289 /* a0.0 = surf_index & 0xff */
1290 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1291 brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
1292 brw_set_dest(p, insn_and, addr);
1293 brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
1294 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1295
1296 /* dst = send(payload, a0.0 | <descriptor>) */
1297 brw_inst *insn = brw_send_indirect_message(
1298 p, BRW_SFID_SAMPLER, dst, src, addr);
1299 brw_set_sampler_message(p, insn,
1300 0,
1301 0, /* LD message ignores sampler unit */
1302 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1303 1, /* rlen */
1304 inst->mlen,
1305 header_present,
1306 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1307 0);
1308
1309 brw_pop_insn_state(p);
1310 }
1311 }
1312
1313 void
1314 fs_generator::generate_varying_pull_constant_load(fs_inst *inst,
1315 struct brw_reg dst,
1316 struct brw_reg index,
1317 struct brw_reg offset)
1318 {
1319 assert(devinfo->gen < 7); /* Should use the gen7 variant. */
1320 assert(inst->header_size != 0);
1321 assert(inst->mlen);
1322
1323 assert(index.file == BRW_IMMEDIATE_VALUE &&
1324 index.type == BRW_REGISTER_TYPE_UD);
1325 uint32_t surf_index = index.ud;
1326
1327 uint32_t simd_mode, rlen, msg_type;
1328 if (dispatch_width == 16) {
1329 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1330 rlen = 8;
1331 } else {
1332 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1333 rlen = 4;
1334 }
1335
1336 if (devinfo->gen >= 5)
1337 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
1338 else {
1339 /* We always use the SIMD16 message so that we only have to load U, and
1340 * not V or R.
1341 */
1342 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
1343 assert(inst->mlen == 3);
1344 assert(inst->regs_written == 8);
1345 rlen = 8;
1346 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1347 }
1348
1349 struct brw_reg offset_mrf = retype(brw_message_reg(inst->base_mrf + 1),
1350 BRW_REGISTER_TYPE_D);
1351 brw_MOV(p, offset_mrf, offset);
1352
1353 struct brw_reg header = brw_vec8_grf(0, 0);
1354 gen6_resolve_implied_move(p, &header, inst->base_mrf);
1355
1356 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1357 brw_inst_set_qtr_control(p->devinfo, send, BRW_COMPRESSION_NONE);
1358 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
1359 brw_set_src0(p, send, header);
1360 if (devinfo->gen < 6)
1361 brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);
1362
1363 /* Our surface is set up as floats, regardless of what actual data is
1364 * stored in it.
1365 */
1366 uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
1367 brw_set_sampler_message(p, send,
1368 surf_index,
1369 0, /* sampler (unused) */
1370 msg_type,
1371 rlen,
1372 inst->mlen,
1373 inst->header_size != 0,
1374 simd_mode,
1375 return_format);
1376 }
1377
1378 void
1379 fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
1380 struct brw_reg dst,
1381 struct brw_reg index,
1382 struct brw_reg offset)
1383 {
1384 assert(devinfo->gen >= 7);
1385 /* Varying-offset pull constant loads are treated as a normal expression on
1386 * gen7, so the fact that it's a send message is hidden at the IR level.
1387 */
1388 assert(inst->header_size == 0);
1389 assert(!inst->mlen);
1390 assert(index.type == BRW_REGISTER_TYPE_UD);
1391
1392 uint32_t simd_mode, rlen, mlen;
1393 if (dispatch_width == 16) {
1394 mlen = 2;
1395 rlen = 8;
1396 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1397 } else {
1398 mlen = 1;
1399 rlen = 4;
1400 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1401 }
1402
1403 if (index.file == BRW_IMMEDIATE_VALUE) {
1404
1405 uint32_t surf_index = index.ud;
1406
1407 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1408 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
1409 brw_set_src0(p, send, offset);
1410 brw_set_sampler_message(p, send,
1411 surf_index,
1412 0, /* LD message ignores sampler unit */
1413 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1414 rlen,
1415 mlen,
1416 false, /* no header */
1417 simd_mode,
1418 0);
1419
1420 } else {
1421
1422 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1423
1424 brw_push_insn_state(p);
1425 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1426 brw_set_default_access_mode(p, BRW_ALIGN_1);
1427
1428 /* a0.0 = surf_index & 0xff */
1429 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1430 brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
1431 brw_set_dest(p, insn_and, addr);
1432 brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
1433 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1434
1435 brw_pop_insn_state(p);
1436
1437 /* dst = send(offset, a0.0 | <descriptor>) */
1438 brw_inst *insn = brw_send_indirect_message(
1439 p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
1440 offset, addr);
1441 brw_set_sampler_message(p, insn,
1442 0 /* surface */,
1443 0 /* sampler */,
1444 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1445 rlen /* rlen */,
1446 mlen /* mlen */,
1447 false /* header */,
1448 simd_mode,
1449 0);
1450 }
1451 }
1452
1453 /**
 1454  * Cause the current pixel/sample mask to be transferred into the flags
 1455  * register (f0.0).
 1456  *
 1457  * On Gen6+ the mask comes from R1.7 bits 15:0; on older gens, from g0.
 1458  */
1459 void
1460 fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
1461 {
1462 struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
1463 struct brw_reg dispatch_mask;
1464
1465 if (devinfo->gen >= 6)
1466 dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
1467 else
1468 dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
1469
1470 brw_push_insn_state(p);
1471 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1472 brw_MOV(p, flags, dispatch_mask);
1473 brw_pop_insn_state(p);
1474 }
1475
1476 void
1477 fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
1478 struct brw_reg dst,
1479 struct brw_reg src,
1480 struct brw_reg msg_data,
1481 unsigned msg_type)
1482 {
1483 assert(msg_data.type == BRW_REGISTER_TYPE_UD);
1484
1485 brw_pixel_interpolator_query(p,
1486 retype(dst, BRW_REGISTER_TYPE_UW),
1487 src,
1488 inst->pi_noperspective,
1489 msg_type,
1490 msg_data,
1491 inst->mlen,
1492 inst->regs_written);
1493 }
1494
1495
1496 /**
1497 * Sets the first word of a vgrf for gen7+ simd4x2 uniform pull constant
1498 * sampler LD messages.
1499 *
1500 * We don't want to bake it into the send message's code generation because
1501 * that means we don't get a chance to schedule the instructions.
1502 */
1503 void
1504 fs_generator::generate_set_simd4x2_offset(fs_inst *inst,
1505 struct brw_reg dst,
1506 struct brw_reg value)
1507 {
1508 assert(value.file == BRW_IMMEDIATE_VALUE);
1509
1510 brw_push_insn_state(p);
1511 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1512 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1513 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1514 brw_MOV(p, retype(brw_vec1_reg(dst.file, dst.nr, 0), value.type), value);
1515 brw_pop_insn_state(p);
1516 }
1517
1518 /* Sets vstride=1, width=4, hstride=0 of register src1 during
1519 * the ADD instruction.
1520 */
1521 void
1522 fs_generator::generate_set_sample_id(fs_inst *inst,
1523 struct brw_reg dst,
1524 struct brw_reg src0,
1525 struct brw_reg src1)
1526 {
1527 assert(dst.type == BRW_REGISTER_TYPE_D ||
1528 dst.type == BRW_REGISTER_TYPE_UD);
1529 assert(src0.type == BRW_REGISTER_TYPE_D ||
1530 src0.type == BRW_REGISTER_TYPE_UD);
1531
1532 struct brw_reg reg = stride(src1, 1, 4, 0);
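   /* <1;4,0> replicates each element of src1 four times, s0 s0 s0 s0
    * s1 s1 s1 s1 ..., presumably one sample id per 2x2 subspan.
    */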
1533 if (devinfo->gen >= 8 || dispatch_width == 8) {
1534 brw_ADD(p, dst, src0, reg);
1535 } else if (dispatch_width == 16) {
1536 brw_push_insn_state(p);
1537 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1538 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1539 brw_ADD(p, firsthalf(dst), firsthalf(src0), reg);
1540 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1541 brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2));
1542 brw_pop_insn_state(p);
1543 }
1544 }
1545
1546 void
1547 fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
1548 struct brw_reg dst,
1549 struct brw_reg x,
1550 struct brw_reg y)
1551 {
1552 assert(devinfo->gen >= 7);
1553 assert(dst.type == BRW_REGISTER_TYPE_UD);
1554 assert(x.type == BRW_REGISTER_TYPE_F);
1555 assert(y.type == BRW_REGISTER_TYPE_F);
1556
1557 /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
1558 *
1559 * Because this instruction does not have a 16-bit floating-point type,
1560 * the destination data type must be Word (W).
1561 *
1562 * The destination must be DWord-aligned and specify a horizontal stride
1563 * (HorzStride) of 2. The 16-bit result is stored in the lower word of
1564 * each destination channel and the upper word is not modified.
1565 */
1566 struct brw_reg dst_w = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
1567
1568 /* Give each 32-bit channel of dst the form below, where "." means
1569 * unchanged.
1570 * 0x....hhhh
1571 */
1572 brw_F32TO16(p, dst_w, y);
1573
1574 /* Now the form:
1575 * 0xhhhh0000
1576 */
1577 brw_SHL(p, dst, dst, brw_imm_ud(16u));
1578
1579 /* And, finally the form of packHalf2x16's output:
1580 * 0xhhhhllll
1581 */
1582 brw_F32TO16(p, dst_w, x);
1583 }
1584
1585 void
1586 fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
1587 struct brw_reg dst,
1588 struct brw_reg src)
1589 {
1590 assert(devinfo->gen >= 7);
1591 assert(dst.type == BRW_REGISTER_TYPE_F);
1592 assert(src.type == BRW_REGISTER_TYPE_UD);
1593
1594 /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
1595 *
1596 * Because this instruction does not have a 16-bit floating-point type,
1597 * the source data type must be Word (W). The destination type must be
1598 * F (Float).
1599 */
1600 struct brw_reg src_w = spread(retype(src, BRW_REGISTER_TYPE_W), 2);
1601
1602 /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
1603 * For the Y case, we wish to access only the upper word; therefore
1604 * a 16-bit subregister offset is needed.
1605 */
1606 assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
1607 inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
1608 if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
1609 src_w.subnr += 2;
1610
1611 brw_F16TO32(p, dst, src_w);
1612 }
1613
1614 void
1615 fs_generator::generate_shader_time_add(fs_inst *inst,
1616 struct brw_reg payload,
1617 struct brw_reg offset,
1618 struct brw_reg value)
1619 {
1620 assert(devinfo->gen >= 7);
1621 brw_push_insn_state(p);
1622 brw_set_default_mask_control(p, true);
1623
1624 assert(payload.file == BRW_GENERAL_REGISTER_FILE);
1625 struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
1626 offset.type);
1627 struct brw_reg payload_value = retype(brw_vec1_grf(payload.nr + 1, 0),
1628 value.type);
1629
1630 assert(offset.file == BRW_IMMEDIATE_VALUE);
1631 if (value.file == BRW_GENERAL_REGISTER_FILE) {
1632 value.width = BRW_WIDTH_1;
1633 value.hstride = BRW_HORIZONTAL_STRIDE_0;
1634 value.vstride = BRW_VERTICAL_STRIDE_0;
1635 } else {
1636 assert(value.file == BRW_IMMEDIATE_VALUE);
1637 }
1638
1639 /* Trying to deal with setup of the params from the IR is crazy in the FS8
1640 * case, and we don't really care about squeezing every bit of performance
1641 * out of this path, so we just emit the MOVs from here.
1642 */
1643 brw_MOV(p, payload_offset, offset);
1644 brw_MOV(p, payload_value, value);
1645 brw_shader_time_add(p, payload,
1646 prog_data->binding_table.shader_time_start);
1647 brw_pop_insn_state(p);
1648
1649 brw_mark_surface_used(prog_data,
1650 prog_data->binding_table.shader_time_start);
1651 }
1652
1653 void
1654 fs_generator::enable_debug(const char *shader_name)
1655 {
1656 debug_flag = true;
1657 this->shader_name = shader_name;
1658 }
1659
1660 int
1661 fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
1662 {
1663 /* align to 64 byte boundary. */
1664 while (p->next_insn_offset % 64)
1665 brw_NOP(p);
1666
1667 this->dispatch_width = dispatch_width;
1668 if (dispatch_width == 16)
1669 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1670
1671 int start_offset = p->next_insn_offset;
1672 int spill_count = 0, fill_count = 0;
1673 int loop_count = 0;
1674
1675 struct annotation_info annotation;
1676 memset(&annotation, 0, sizeof(annotation));
1677
1678 foreach_block_and_inst (block, fs_inst, inst, cfg) {
1679 struct brw_reg src[3], dst;
1680 unsigned int last_insn_offset = p->next_insn_offset;
1681 bool multiple_instructions_emitted = false;
1682
1683 if (unlikely(debug_flag))
1684 annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
1685
1686 for (unsigned int i = 0; i < inst->sources; i++) {
1687 src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen);
1688
1689 /* The accumulator result appears to get used for the
1690 * conditional modifier generation. When negating a UD
1691 * value, there is a 33rd bit generated for the sign in the
1692 * accumulator value, so now you can't check, for example,
1693 * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
1694 */
1695 assert(!inst->conditional_mod ||
1696 inst->src[i].type != BRW_REGISTER_TYPE_UD ||
1697 !inst->src[i].negate);
1698 }
1699 dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen);
1700
1701 brw_set_default_predicate_control(p, inst->predicate);
1702 brw_set_default_predicate_inverse(p, inst->predicate_inverse);
1703 brw_set_default_flag_reg(p, 0, inst->flag_subreg);
1704 brw_set_default_saturate(p, inst->saturate);
1705 brw_set_default_mask_control(p, inst->force_writemask_all);
1706 brw_set_default_acc_write_control(p, inst->writes_accumulator);
1707 brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
1708
1709 assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
1710 assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
1711
1712 switch (inst->exec_size) {
1713 case 1:
1714 case 2:
1715 case 4:
1716 assert(inst->force_writemask_all);
1717 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1718 break;
1719 case 8:
1720 if (inst->force_sechalf) {
1721 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1722 } else {
1723 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1724 }
1725 break;
1726 case 16:
1727 case 32:
1728 /* If the instruction writes to more than one register, it needs to
1729 * be a "compressed" instruction on Gen <= 5.
1730 */
1731 if (inst->dst.component_size(inst->exec_size) > REG_SIZE)
1732 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1733 else
1734 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1735 break;
1736 default:
1737 unreachable("Invalid instruction width");
1738 }
1739
1740 switch (inst->opcode) {
1741 case BRW_OPCODE_MOV:
1742 brw_MOV(p, dst, src[0]);
1743 break;
1744 case BRW_OPCODE_ADD:
1745 brw_ADD(p, dst, src[0], src[1]);
1746 break;
1747 case BRW_OPCODE_MUL:
1748 brw_MUL(p, dst, src[0], src[1]);
1749 break;
1750 case BRW_OPCODE_AVG:
1751 brw_AVG(p, dst, src[0], src[1]);
1752 break;
1753 case BRW_OPCODE_MACH:
1754 brw_MACH(p, dst, src[0], src[1]);
1755 break;
1756
1757 case BRW_OPCODE_LINE:
1758 brw_LINE(p, dst, src[0], src[1]);
1759 break;
1760
1761 case BRW_OPCODE_MAD:
1762 assert(devinfo->gen >= 6);
1763 brw_set_default_access_mode(p, BRW_ALIGN_16);
1764 if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
1765 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1766 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1767 brw_inst *f = brw_MAD(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
1768 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1769 brw_inst *s = brw_MAD(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
1770 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1771
1772 if (inst->conditional_mod) {
1773 brw_inst_set_cond_modifier(p->devinfo, f, inst->conditional_mod);
1774 brw_inst_set_cond_modifier(p->devinfo, s, inst->conditional_mod);
1775 multiple_instructions_emitted = true;
1776 }
1777 } else {
1778 brw_MAD(p, dst, src[0], src[1], src[2]);
1779 }
1780 brw_set_default_access_mode(p, BRW_ALIGN_1);
1781 break;
1782
1783 case BRW_OPCODE_LRP:
1784 assert(devinfo->gen >= 6);
1785 brw_set_default_access_mode(p, BRW_ALIGN_16);
1786 if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
1787 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1788 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1789 brw_inst *f = brw_LRP(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
1790 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1791 brw_inst *s = brw_LRP(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
1792 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1793
1794 if (inst->conditional_mod) {
1795 brw_inst_set_cond_modifier(p->devinfo, f, inst->conditional_mod);
1796 brw_inst_set_cond_modifier(p->devinfo, s, inst->conditional_mod);
1797 multiple_instructions_emitted = true;
1798 }
1799 } else {
1800 brw_LRP(p, dst, src[0], src[1], src[2]);
1801 }
1802 brw_set_default_access_mode(p, BRW_ALIGN_1);
1803 break;
1804
1805 case BRW_OPCODE_FRC:
1806 brw_FRC(p, dst, src[0]);
1807 break;
1808 case BRW_OPCODE_RNDD:
1809 brw_RNDD(p, dst, src[0]);
1810 break;
1811 case BRW_OPCODE_RNDE:
1812 brw_RNDE(p, dst, src[0]);
1813 break;
1814 case BRW_OPCODE_RNDZ:
1815 brw_RNDZ(p, dst, src[0]);
1816 break;
1817
1818 case BRW_OPCODE_AND:
1819 brw_AND(p, dst, src[0], src[1]);
1820 break;
1821 case BRW_OPCODE_OR:
1822 brw_OR(p, dst, src[0], src[1]);
1823 break;
1824 case BRW_OPCODE_XOR:
1825 brw_XOR(p, dst, src[0], src[1]);
1826 break;
1827 case BRW_OPCODE_NOT:
1828 brw_NOT(p, dst, src[0]);
1829 break;
1830 case BRW_OPCODE_ASR:
1831 brw_ASR(p, dst, src[0], src[1]);
1832 break;
1833 case BRW_OPCODE_SHR:
1834 brw_SHR(p, dst, src[0], src[1]);
1835 break;
1836 case BRW_OPCODE_SHL:
1837 brw_SHL(p, dst, src[0], src[1]);
1838 break;
1839 case BRW_OPCODE_F32TO16:
1840 assert(devinfo->gen >= 7);
1841 brw_F32TO16(p, dst, src[0]);
1842 break;
1843 case BRW_OPCODE_F16TO32:
1844 assert(devinfo->gen >= 7);
1845 brw_F16TO32(p, dst, src[0]);
1846 break;
1847 case BRW_OPCODE_CMP:
1848 /* The Ivybridge/BayTrail WaCMPInstFlagDepClearedEarly workaround says
1849 * that when the destination is a GRF, the dependency-clear bit on
1850 * the flag register is cleared early.
1851 *
1852 * Suggested workarounds are to disable coissuing CMP instructions
1853 * or to split CMP(16) instructions into two CMP(8) instructions.
1854 *
1855 * We choose to split into CMP(8) instructions since disabling
1856 * coissuing would affect CMP instructions not otherwise affected by
1857 * the erratum.
1858 */
1859 if (dispatch_width == 16 && devinfo->gen == 7 && !devinfo->is_haswell) {
1860 if (dst.file == BRW_GENERAL_REGISTER_FILE) {
1861 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1862 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1863 brw_CMP(p, firsthalf(dst), inst->conditional_mod,
1864 firsthalf(src[0]), firsthalf(src[1]));
1865 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1866 brw_CMP(p, sechalf(dst), inst->conditional_mod,
1867 sechalf(src[0]), sechalf(src[1]));
1868 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1869
1870 multiple_instructions_emitted = true;
1871 } else if (dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
1872 /* For unknown reasons, the aforementioned workaround is not
1873 * sufficient. Overriding the type when the destination is the
1874 * null register is necessary but not sufficient by itself.
1875 */
1876 assert(dst.nr == BRW_ARF_NULL);
1877 dst.type = BRW_REGISTER_TYPE_D;
1878 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
1879 } else {
1880 unreachable("not reached");
1881 }
1882 } else {
1883 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
1884 }
1885 break;
1886 case BRW_OPCODE_SEL:
1887 brw_SEL(p, dst, src[0], src[1]);
1888 break;
1889 case BRW_OPCODE_BFREV:
1890 assert(devinfo->gen >= 7);
1891 /* BFREV only supports UD type for src and dst. */
1892 brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
1893 retype(src[0], BRW_REGISTER_TYPE_UD));
1894 break;
1895 case BRW_OPCODE_FBH:
1896 assert(devinfo->gen >= 7);
1897 /* FBH only supports UD type for dst. */
1898 brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1899 break;
1900 case BRW_OPCODE_FBL:
1901 assert(devinfo->gen >= 7);
1902 /* FBL only supports UD type for dst. */
1903 brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1904 break;
1905 case BRW_OPCODE_CBIT:
1906 assert(devinfo->gen >= 7);
1907 /* CBIT only supports UD type for dst. */
1908 brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1909 break;
1910 case BRW_OPCODE_ADDC:
1911 assert(devinfo->gen >= 7);
1912 brw_ADDC(p, dst, src[0], src[1]);
1913 break;
1914 case BRW_OPCODE_SUBB:
1915 assert(devinfo->gen >= 7);
1916 brw_SUBB(p, dst, src[0], src[1]);
1917 break;
1918 case BRW_OPCODE_MAC:
1919 brw_MAC(p, dst, src[0], src[1]);
1920 break;
1921
1922 case BRW_OPCODE_BFE:
1923 assert(devinfo->gen >= 7);
1924 brw_set_default_access_mode(p, BRW_ALIGN_16);
1925 if (dispatch_width == 16 && !devinfo->supports_simd16_3src) {
1926 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1927 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1928 brw_BFE(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
1929 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1930 brw_BFE(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
1931 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1932 } else {
1933 brw_BFE(p, dst, src[0], src[1], src[2]);
1934 }
1935 brw_set_default_access_mode(p, BRW_ALIGN_1);
1936 break;
1937
1938 case BRW_OPCODE_BFI1:
1939 assert(devinfo->gen >= 7);
1940 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
1941 * should
1942 *
1943 * "Force BFI instructions to be executed always in SIMD8."
1944 */
1945 if (dispatch_width == 16 && devinfo->is_haswell) {
1946 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1947 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1948 brw_BFI1(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]));
1949 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1950 brw_BFI1(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]));
1951 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1952 } else {
1953 brw_BFI1(p, dst, src[0], src[1]);
1954 }
1955 break;
1956 case BRW_OPCODE_BFI2:
1957 assert(devinfo->gen >= 7);
1958 brw_set_default_access_mode(p, BRW_ALIGN_16);
1959 /* The Haswell WaForceSIMD8ForBFIInstruction workaround says that we
1960 * should
1961 *
1962 * "Force BFI instructions to be executed always in SIMD8."
1963 *
1964 * Otherwise we would be able to emit compressed instructions like we
1965 * do for the other three-source instructions.
1966 */
1967 if (dispatch_width == 16 &&
1968 (devinfo->is_haswell || !devinfo->supports_simd16_3src)) {
1969 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1970 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1971 brw_BFI2(p, firsthalf(dst), firsthalf(src[0]), firsthalf(src[1]), firsthalf(src[2]));
1972 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1973 brw_BFI2(p, sechalf(dst), sechalf(src[0]), sechalf(src[1]), sechalf(src[2]));
1974 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1975 } else {
1976 brw_BFI2(p, dst, src[0], src[1], src[2]);
1977 }
1978 brw_set_default_access_mode(p, BRW_ALIGN_1);
1979 break;
1980
1981 case BRW_OPCODE_IF:
1982 if (inst->src[0].file != BAD_FILE) {
1983 /* The instruction has an embedded compare (only allowed on gen6) */
1984 assert(devinfo->gen == 6);
1985 gen6_IF(p, inst->conditional_mod, src[0], src[1]);
1986 } else {
1987 brw_IF(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
1988 }
1989 break;
1990
1991 case BRW_OPCODE_ELSE:
1992 brw_ELSE(p);
1993 break;
1994 case BRW_OPCODE_ENDIF:
1995 brw_ENDIF(p);
1996 break;
1997
1998 case BRW_OPCODE_DO:
1999 brw_DO(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
2000 break;
2001
2002 case BRW_OPCODE_BREAK:
2003 brw_BREAK(p);
2004 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2005 break;
2006 case BRW_OPCODE_CONTINUE:
2007 brw_CONT(p);
2008 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2009 break;
2010
2011 case BRW_OPCODE_WHILE:
2012 brw_WHILE(p);
2013 loop_count++;
2014 break;
2015
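/* Math differs a lot across generations: Gen4/5 implement it as a send to
 * the shared math unit with operands staged in MRFs (hence the mlen
 * asserts), while Gen6+ has a native math instruction. Note that on Gen7
 * only POW takes the direct gen6_math() path below; the integer-division
 * opcodes still need the extra handling in generate_math_gen6().
 */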
2016 case SHADER_OPCODE_RCP:
2017 case SHADER_OPCODE_RSQ:
2018 case SHADER_OPCODE_SQRT:
2019 case SHADER_OPCODE_EXP2:
2020 case SHADER_OPCODE_LOG2:
2021 case SHADER_OPCODE_SIN:
2022 case SHADER_OPCODE_COS:
2023 assert(devinfo->gen < 6 || inst->mlen == 0);
2024 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
2025 if (devinfo->gen >= 7) {
2026 gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
2027 brw_null_reg());
2028 } else if (devinfo->gen == 6) {
2029 generate_math_gen6(inst, dst, src[0], brw_null_reg());
2030 } else if (devinfo->gen == 5 || devinfo->is_g4x) {
2031 generate_math_g45(inst, dst, src[0]);
2032 } else {
2033 generate_math_gen4(inst, dst, src[0]);
2034 }
2035 break;
2036 case SHADER_OPCODE_INT_QUOTIENT:
2037 case SHADER_OPCODE_INT_REMAINDER:
2038 case SHADER_OPCODE_POW:
2039 assert(devinfo->gen < 6 || inst->mlen == 0);
2040 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
2041 if (devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) {
2042 gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
2043 } else if (devinfo->gen >= 6) {
2044 generate_math_gen6(inst, dst, src[0], src[1]);
2045 } else {
2046 generate_math_gen4(inst, dst, src[0]);
2047 }
2048 break;
2049 case FS_OPCODE_CINTERP:
2050 brw_MOV(p, dst, src[0]);
2051 break;
2052 case FS_OPCODE_LINTERP:
2053 generate_linterp(inst, dst, src);
2054 break;
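/* PIXEL_X/PIXEL_Y pull the pixel coordinates out of the thread payload,
 * where (as the region math below implies) each group of eight UWs holds
 * the four X coordinates of a 2x2 subspan followed by its four Y
 * coordinates. A <8;4,1>UW region with a sub-register offset of 0 or 4
 * elements selects one or the other.
 */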
2055 case FS_OPCODE_PIXEL_X:
2056 assert(src[0].type == BRW_REGISTER_TYPE_UW);
2057 src[0].subnr = 0 * type_sz(src[0].type);
2058 brw_MOV(p, dst, stride(src[0], 8, 4, 1));
2059 break;
2060 case FS_OPCODE_PIXEL_Y:
2061 assert(src[0].type == BRW_REGISTER_TYPE_UW);
2062 src[0].subnr = 4 * type_sz(src[0].type);
2063 brw_MOV(p, dst, stride(src[0], 8, 4, 1));
2064 break;
2065 case FS_OPCODE_GET_BUFFER_SIZE:
2066 generate_get_buffer_size(inst, dst, src[0], src[1]);
2067 break;
2068 case SHADER_OPCODE_TEX:
2069 case FS_OPCODE_TXB:
2070 case SHADER_OPCODE_TXD:
2071 case SHADER_OPCODE_TXF:
2072 case SHADER_OPCODE_TXF_CMS:
2073 case SHADER_OPCODE_TXF_CMS_W:
2074 case SHADER_OPCODE_TXF_UMS:
2075 case SHADER_OPCODE_TXF_MCS:
2076 case SHADER_OPCODE_TXL:
2077 case SHADER_OPCODE_TXS:
2078 case SHADER_OPCODE_LOD:
2079 case SHADER_OPCODE_TG4:
2080 case SHADER_OPCODE_TG4_OFFSET:
2081 case SHADER_OPCODE_SAMPLEINFO:
2082 generate_tex(inst, dst, src[0], src[1], src[2]);
2083 break;
2084 case FS_OPCODE_DDX_COARSE:
2085 case FS_OPCODE_DDX_FINE:
2086 generate_ddx(inst->opcode, dst, src[0]);
2087 break;
2088 case FS_OPCODE_DDY_COARSE:
2089 case FS_OPCODE_DDY_FINE:
2090 assert(src[1].file == BRW_IMMEDIATE_VALUE);
2091 generate_ddy(inst->opcode, dst, src[0], src[1].ud);
2092 break;
2093
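/* Scratch accesses are register spills and fills; the counters here only
 * feed the statistics reported through shader_debug_log() and the
 * INTEL_DEBUG output at the end of generate_code().
 */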
2094 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
2095 generate_scratch_write(inst, src[0]);
2096 spill_count++;
2097 break;
2098
2099 case SHADER_OPCODE_GEN4_SCRATCH_READ:
2100 generate_scratch_read(inst, dst);
2101 fill_count++;
2102 break;
2103
2104 case SHADER_OPCODE_GEN7_SCRATCH_READ:
2105 generate_scratch_read_gen7(inst, dst);
2106 fill_count++;
2107 break;
2108
2109 case SHADER_OPCODE_MOV_INDIRECT:
2110 generate_mov_indirect(inst, dst, src[0], src[1]);
2111 break;
2112
2113 case SHADER_OPCODE_URB_READ_SIMD8:
2114 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
2115 generate_urb_read(inst, dst, src[0]);
2116 break;
2117
2118 case SHADER_OPCODE_URB_WRITE_SIMD8:
2119 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
2120 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
2121 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
2122 generate_urb_write(inst, src[0]);
2123 break;
2124
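/* The UNIFORM_* loads fetch a run of constants from a constant buffer at a
 * single offset, while the VARYING_* loads take a per-channel offset. The
 * _GEN7 variants presumably exist because Gen7 drops the MRFs, so the
 * message payload has to be built in GRFs instead.
 */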
2125 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
2126 generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
2127 break;
2128
2129 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
2130 generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
2131 break;
2132
2133 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
2134 generate_varying_pull_constant_load(inst, dst, src[0], src[1]);
2135 break;
2136
2137 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
2138 generate_varying_pull_constant_load_gen7(inst, dst, src[0], src[1]);
2139 break;
2140
2141 case FS_OPCODE_REP_FB_WRITE:
2142 case FS_OPCODE_FB_WRITE:
2143 generate_fb_write(inst, src[0]);
2144 break;
2145
2146 case FS_OPCODE_BLORP_FB_WRITE:
2147 generate_blorp_fb_write(inst);
2148 break;
2149
2150 case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
2151 generate_mov_dispatch_to_flags(inst);
2152 break;
2153
2154 case FS_OPCODE_DISCARD_JUMP:
2155 generate_discard_jump(inst);
2156 break;
2157
2158 case SHADER_OPCODE_SHADER_TIME_ADD:
2159 generate_shader_time_add(inst, src[0], src[1], src[2]);
2160 break;
2161
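/* For all of the surface messages below, src[0] is the message payload,
 * src[1] is the surface index, and src[2] is a compile-time immediate (the
 * atomic opcode or the channel count), which is what the asserts enforce.
 */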
2162 case SHADER_OPCODE_UNTYPED_ATOMIC:
2163 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2164 brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
2165 inst->mlen, !inst->dst.is_null());
2166 break;
2167
2168 case SHADER_OPCODE_UNTYPED_SURFACE_READ:
2169 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2170 brw_untyped_surface_read(p, dst, src[0], src[1],
2171 inst->mlen, src[2].ud);
2172 break;
2173
2174 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
2175 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2176 brw_untyped_surface_write(p, src[0], src[1],
2177 inst->mlen, src[2].ud);
2178 break;
2179
2180 case SHADER_OPCODE_TYPED_ATOMIC:
2181 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2182 brw_typed_atomic(p, dst, src[0], src[1],
2183 src[2].ud, inst->mlen, !inst->dst.is_null());
2184 break;
2185
2186 case SHADER_OPCODE_TYPED_SURFACE_READ:
2187 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2188 brw_typed_surface_read(p, dst, src[0], src[1],
2189 inst->mlen, src[2].ud);
2190 break;
2191
2192 case SHADER_OPCODE_TYPED_SURFACE_WRITE:
2193 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2194 brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud);
2195 break;
2196
2197 case SHADER_OPCODE_MEMORY_FENCE:
2198 brw_memory_fence(p, dst);
2199 break;
2200
2201 case FS_OPCODE_SET_SIMD4X2_OFFSET:
2202 generate_set_simd4x2_offset(inst, dst, src[0]);
2203 break;
2204
2205 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2206 brw_find_live_channel(p, dst);
2207 break;
2208
2209 case SHADER_OPCODE_BROADCAST:
2210 brw_broadcast(p, dst, src[0], src[1]);
2211 break;
2212
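/* EXTRACT_BYTE/WORD reinterpret each 32-bit channel as packed bytes/words,
 * then use a strided region to read every 4th byte or every 2nd word.
 * Extracting byte 1 of a D source should end up looking roughly like:
 *
 *    mov(8) dst<1>D src.1<32;8,4>B
 */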
2213 case SHADER_OPCODE_EXTRACT_BYTE: {
2214 assert(src[0].type == BRW_REGISTER_TYPE_D ||
2215 src[0].type == BRW_REGISTER_TYPE_UD);
2216
2217 enum brw_reg_type type =
2218 src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_B
2219 : BRW_REGISTER_TYPE_UB;
2220 brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 4));
2221 break;
2222 }
2223
2224 case SHADER_OPCODE_EXTRACT_WORD: {
2225 assert(src[0].type == BRW_REGISTER_TYPE_D ||
2226 src[0].type == BRW_REGISTER_TYPE_UD);
2227
2228 enum brw_reg_type type =
2229 src[0].type == BRW_REGISTER_TYPE_D ? BRW_REGISTER_TYPE_W
2230 : BRW_REGISTER_TYPE_UW;
2231 brw_MOV(p, dst, spread(suboffset(retype(src[0], type), src[1].ud), 2));
2232 break;
2233 }
2234
2235 case FS_OPCODE_SET_SAMPLE_ID:
2236 generate_set_sample_id(inst, dst, src[0], src[1]);
2237 break;
2238
2239 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
2240 generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
2241 break;
2242
2243 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
2244 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
2245 generate_unpack_half_2x16_split(inst, dst, src[0]);
2246 break;
2247
2248 case FS_OPCODE_PLACEHOLDER_HALT:
2249 /* This is the place where the final HALT needs to be inserted if
2250 * we've emitted any discards. If not, this will emit no code.
2251 */
2252 if (!patch_discard_jumps_to_fb_writes()) {
2253 if (unlikely(debug_flag)) {
2254 annotation.ann_count--;
2255 }
2256 }
2257 break;
2258
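/* The four interpolate-at variants all use the same Gen7+ pixel
 * interpolator message; only the GEN7_PIXEL_INTERPOLATOR_LOC_* mode passed
 * to the helper differs.
 */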
2259 case FS_OPCODE_INTERPOLATE_AT_CENTROID:
2260 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2261 GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID);
2262 break;
2263
2264 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
2265 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2266 GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
2267 break;
2268
2269 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
2270 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2271 GEN7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
2272 break;
2273
2274 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
2275 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2276 GEN7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
2277 break;
2278
2279 case CS_OPCODE_CS_TERMINATE:
2280 generate_cs_terminate(inst, src[0]);
2281 break;
2282
2283 case SHADER_OPCODE_BARRIER:
2284 generate_barrier(inst, src[0]);
2285 break;
2286
2287 case FS_OPCODE_PACK_STENCIL_REF:
2288 generate_stencil_ref_packing(inst, dst, src[0]);
2289 break;
2290
2291 default:
2292 unreachable("Unsupported opcode");
2293
2294 case SHADER_OPCODE_LOAD_PAYLOAD:
2295 unreachable("Should be lowered by lower_load_payload()");
2296 }
2297
2298 if (multiple_instructions_emitted)
2299 continue;
2300
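/* Any of these flags requires that the IR instruction above emitted exactly
 * one native instruction, since the bits are patched onto the instruction
 * at last_insn_offset after the fact (uncompacted EU instructions are 16
 * bytes, hence the / 16 indexing). The split SIMD16 cases above set
 * multiple_instructions_emitted and skip this patching.
 */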
2301 if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
2302 assert(p->next_insn_offset == last_insn_offset + 16 ||
2303 !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
2304 "emitting more than 1 instruction");
2305
2306 brw_inst *last = &p->store[last_insn_offset / 16];
2307
2308 if (inst->conditional_mod)
2309 brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
2310 brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
2311 brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
2312 }
2313 }
2314
2315 brw_set_uip_jip(p);
2316 annotation_finalize(&annotation, p->next_insn_offset);
2317
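/* Validate before compacting: compaction rewrites the stream in place,
 * shrinking eligible instructions from 16 bytes to 8. In debug builds the
 * result feeds the assert(validated) below; in release builds validation
 * only runs when the debug flag is set.
 */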
2318 #ifndef NDEBUG
2319 bool validated = brw_validate_instructions(p, start_offset, &annotation);
2320 #else
2321 if (unlikely(debug_flag))
2322 brw_validate_instructions(p, start_offset, &annotation);
2323 #endif
2324
2325 int before_size = p->next_insn_offset - start_offset;
2326 brw_compact_instructions(p, start_offset, annotation.ann_count,
2327 annotation.ann);
2328 int after_size = p->next_insn_offset - start_offset;
2329
2330 if (unlikely(debug_flag)) {
2331 fprintf(stderr, "Native code for %s\n"
2332 "SIMD%d shader: %d instructions. %d loops. %u cycles. %d:%d spills:fills. Promoted %u constants. Compacted %d to %d"
2333 " bytes (%.0f%%)\n",
2334 shader_name, dispatch_width, before_size / 16, loop_count, cfg->cycle_count,
2335 spill_count, fill_count, promoted_constants, before_size, after_size,
2336 100.0f * (before_size - after_size) / before_size);
2337
2338 dump_assembly(p->store, annotation.ann_count, annotation.ann,
2339 p->devinfo);
2340 ralloc_free(annotation.mem_ctx);
2341 }
2342 assert(validated);
2343
2344 compiler->shader_debug_log(log_data,
2345 "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
2346 "%d:%d spills:fills, Promoted %u constants, "
2347 "compacted %d to %d bytes.",
2348 _mesa_shader_stage_to_abbrev(stage),
2349 dispatch_width, before_size / 16,
2350 loop_count, cfg->cycle_count, spill_count,
2351 fill_count, promoted_constants, before_size,
2352 after_size);
2353
2354 return start_offset;
2355 }
2356
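/* Hand the caller the finished native code; the size is returned in bytes
 * through assembly_size.
 */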
2357 const unsigned *
2358 fs_generator::get_assembly(unsigned int *assembly_size)
2359 {
2360 return brw_get_program(p, assembly_size);
2361 }