i965/vec4: Replace vec4_instruction::regs_written with ::size_written field in bytes.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_generator.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file brw_fs_generator.cpp
25 *
26 * This file supports generating code from the FS LIR to the actual
27 * native instructions.
28 */
29
30 #include "brw_eu.h"
31 #include "brw_fs.h"
32 #include "brw_cfg.h"
33 #include "brw_program.h"
34
35 static enum brw_reg_file
36 brw_file_from_reg(fs_reg *reg)
37 {
38 switch (reg->file) {
39 case ARF:
40 return BRW_ARCHITECTURE_REGISTER_FILE;
41 case FIXED_GRF:
42 case VGRF:
43 return BRW_GENERAL_REGISTER_FILE;
44 case MRF:
45 return BRW_MESSAGE_REGISTER_FILE;
46 case IMM:
47 return BRW_IMMEDIATE_VALUE;
48 case BAD_FILE:
49 case ATTR:
50 case UNIFORM:
51 unreachable("not reached");
52 }
53 return BRW_ARCHITECTURE_REGISTER_FILE;
54 }
55
56 static struct brw_reg
57 brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen, bool compressed)
58 {
59 assert(reg->offset / REG_SIZE == 0);
60 struct brw_reg brw_reg;
61
62 switch (reg->file) {
63 case MRF:
64 assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(gen));
65 /* Fallthrough */
66 case VGRF:
67 if (reg->stride == 0) {
68 brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
69 } else {
70 /* From the Haswell PRM:
71 *
72 * "VertStride must be used to cross GRF register boundaries. This
73 * rule implies that elements within a 'Width' cannot cross GRF
74 * boundaries."
75 *
76 * The maximum width value that could satisfy this restriction is:
77 */
78 const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));
79
80 /* Because the hardware can only split source regions at a whole
81 * multiple of width during decompression (i.e. vertically), clamp
82 * the value obtained above to the physical execution size of a
83 * single decompressed chunk of the instruction:
84 */
85 const unsigned phys_width = compressed ? inst->exec_size / 2 :
86 inst->exec_size;
87
88 /* XXX - The equation above is strictly speaking not correct on
89 * hardware that supports unbalanced GRF writes -- On Gen9+
90 * each decompressed chunk of the instruction may have a
91 * different execution size when the number of components
92 * written to each destination GRF is not the same.
93 */
94 const unsigned width = MIN2(reg_width, phys_width);
95 brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
96 brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);
97 }
98
99 brw_reg = retype(brw_reg, reg->type);
100 brw_reg = byte_offset(brw_reg, reg->offset % REG_SIZE);
101 brw_reg.abs = reg->abs;
102 brw_reg.negate = reg->negate;
103 break;
104 case ARF:
105 case FIXED_GRF:
106 case IMM:
107 assert(reg->offset % REG_SIZE == 0);
108 brw_reg = reg->as_brw_reg();
109 break;
110 case BAD_FILE:
111 /* Probably unused. */
112 brw_reg = brw_null_reg();
113 break;
114 case ATTR:
115 case UNIFORM:
116 unreachable("not reached");
117 }
118
119 return brw_reg;
120 }
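
/* Worked example of the region computation above (illustrative, not from
 * the PRM): for a VGRF source with stride == 2 and a 4-byte type such as
 * float, reg_width = 32 / (2 * 4) = 4, so at most four elements fit in a
 * GRF without a single row crossing a register boundary.  In a compressed
 * SIMD16 instruction each decompressed half executes 8 channels, giving
 * phys_width = 8, so width = MIN2(4, 8) = 4 and the resulting region is
 * <8;4,2> in the usual VertStride;Width,HorzStride notation.
 */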
121
122 fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
123 void *mem_ctx,
124 const void *key,
125 struct brw_stage_prog_data *prog_data,
126 unsigned promoted_constants,
127 bool runtime_check_aads_emit,
128 gl_shader_stage stage)
129
130 : compiler(compiler), log_data(log_data),
131 devinfo(compiler->devinfo), key(key),
132 prog_data(prog_data),
133 promoted_constants(promoted_constants),
134 runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
135 stage(stage), mem_ctx(mem_ctx)
136 {
137 p = rzalloc(mem_ctx, struct brw_codegen);
138 brw_init_codegen(devinfo, p, mem_ctx);
139 }
140
141 fs_generator::~fs_generator()
142 {
143 }
144
145 class ip_record : public exec_node {
146 public:
147 DECLARE_RALLOC_CXX_OPERATORS(ip_record)
148
149 ip_record(int ip)
150 {
151 this->ip = ip;
152 }
153
154 int ip;
155 };
156
157 bool
158 fs_generator::patch_discard_jumps_to_fb_writes()
159 {
160 if (devinfo->gen < 6 || this->discard_halt_patches.is_empty())
161 return false;
162
163 int scale = brw_jump_scale(p->devinfo);
164
165 /* There is a somewhat strange undocumented requirement of using
166 * HALT, according to the simulator. If some channel has HALTed to
167 * a particular UIP, then by the end of the program, every channel
168 * must have HALTed to that UIP. Furthermore, the tracking is a
169 * stack, so you can't do the final halt of a UIP after starting
170 * halting to a new UIP.
171 *
172 * Symptoms of not emitting this instruction on actual hardware
173 * included GPU hangs and sparkly rendering on the piglit discard
174 * tests.
175 */
176 brw_inst *last_halt = gen6_HALT(p);
177 brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
178 brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);
179
180 int ip = p->nr_insn;
181
182 foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
183 brw_inst *patch = &p->store[patch_ip->ip];
184
185 assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
186 /* HALT takes a half-instruction distance from the pre-incremented IP. */
187 brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
188 }
189
190 this->discard_halt_patches.make_empty();
191 return true;
192 }
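
/* Sketch of the patching above (illustrative numbers): if a discard HALT
 * was recorded at instruction index 10 and the final HALT lands at index
 * 50, its UIP is rewritten to (50 - 10) * brw_jump_scale(devinfo), since
 * jump distances are expressed in per-generation jump units rather than
 * in instructions.
 */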
193
194 void
195 fs_generator::fire_fb_write(fs_inst *inst,
196 struct brw_reg payload,
197 struct brw_reg implied_header,
198 GLuint nr)
199 {
200 uint32_t msg_control;
201
202 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
203
204 if (devinfo->gen < 6) {
205 brw_push_insn_state(p);
206 brw_set_default_exec_size(p, BRW_EXECUTE_8);
207 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
208 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
209 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
210 brw_MOV(p, offset(payload, 1), brw_vec8_grf(1, 0));
211 brw_pop_insn_state(p);
212 }
213
214 if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
215 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
216 else if (prog_data->dual_src_blend) {
217 if (!inst->group)
218 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
219 else
220 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
221 } else if (inst->exec_size == 16)
222 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
223 else
224 msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
225
226 uint32_t surf_index =
227 prog_data->binding_table.render_target_start + inst->target;
228
229 bool last_render_target = inst->eot ||
230 (prog_data->dual_src_blend && dispatch_width == 16);
231
232
233 brw_fb_WRITE(p,
234 payload,
235 implied_header,
236 msg_control,
237 surf_index,
238 nr,
239 0,
240 inst->eot,
241 last_render_target,
242 inst->header_size != 0);
243
244 brw_mark_surface_used(&prog_data->base, surf_index);
245 }
246
247 void
248 fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
249 {
250 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
251 const brw_wm_prog_key * const key = (brw_wm_prog_key * const) this->key;
252 struct brw_reg implied_header;
253
254 if (devinfo->gen < 8 && !devinfo->is_haswell) {
255 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
256 }
257
258 if (inst->base_mrf >= 0)
259 payload = brw_message_reg(inst->base_mrf);
260
261    /* The header is two registers containing g0 and g1. g0 is handled by
262     * the SEND's implied move; g1 has to be set up explicitly.
263     */
264 if (inst->header_size != 0) {
265 brw_push_insn_state(p);
266 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
267 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
268 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
269 brw_set_default_flag_reg(p, 0, 0);
270
271 /* On HSW, the GPU will use the predicate on SENDC, unless the header is
272 * present.
273 */
274 if (prog_data->uses_kill) {
275 struct brw_reg pixel_mask;
276
277 if (devinfo->gen >= 6)
278 pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
279 else
280 pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
281
282 brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
283 }
284
285 if (devinfo->gen >= 6) {
286 brw_push_insn_state(p);
287 brw_set_default_exec_size(p, BRW_EXECUTE_16);
288 brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
289 brw_MOV(p,
290 retype(payload, BRW_REGISTER_TYPE_UD),
291 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
292 brw_pop_insn_state(p);
293
294 if (inst->target > 0 && key->replicate_alpha) {
295 /* Set "Source0 Alpha Present to RenderTarget" bit in message
296 * header.
297 */
298 brw_OR(p,
299 vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
300 vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
301 brw_imm_ud(0x1 << 11));
302 }
303
304 if (inst->target > 0) {
305 /* Set the render target index for choosing BLEND_STATE. */
306 brw_MOV(p, retype(vec1(suboffset(payload, 2)),
307 BRW_REGISTER_TYPE_UD),
308 brw_imm_ud(inst->target));
309 }
310
311          /* Set the "computes stencil" bit in the message header. */
312 if (prog_data->computed_stencil) {
313 brw_OR(p,
314 vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
315 vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
316 brw_imm_ud(0x1 << 14));
317 }
318
319 implied_header = brw_null_reg();
320 } else {
321 implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
322 }
323
324 brw_pop_insn_state(p);
325 } else {
326 implied_header = brw_null_reg();
327 }
328
329 if (!runtime_check_aads_emit) {
330 fire_fb_write(inst, payload, implied_header, inst->mlen);
331 } else {
332 /* This can only happen in gen < 6 */
333 assert(devinfo->gen < 6);
334
335 struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
336
337 /* Check runtime bit to detect if we have to send AA data or not */
338 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
339 brw_AND(p,
340 v1_null_ud,
341 retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
342 brw_imm_ud(1<<26));
343 brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);
344
345 int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
346 brw_inst_set_exec_size(p->devinfo, brw_last_inst, BRW_EXECUTE_1);
347 {
348 /* Don't send AA data */
349 fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);
350 }
351 brw_land_fwd_jump(p, jmp);
352 fire_fb_write(inst, payload, implied_header, inst->mlen);
353 }
354 }
355
356 void
357 fs_generator::generate_fb_read(fs_inst *inst, struct brw_reg dst,
358 struct brw_reg payload)
359 {
360 assert(inst->size_written % REG_SIZE == 0);
361 brw_wm_prog_data *prog_data =
362 reinterpret_cast<brw_wm_prog_data *>(this->prog_data);
363 const unsigned surf_index =
364 prog_data->binding_table.render_target_start + inst->target;
365
366 gen9_fb_READ(p, dst, payload, surf_index,
367 inst->header_size, inst->size_written / REG_SIZE,
368 prog_data->persample_dispatch);
369
370 brw_mark_surface_used(&prog_data->base, surf_index);
371 }
372
373 void
374 fs_generator::generate_mov_indirect(fs_inst *inst,
375 struct brw_reg dst,
376 struct brw_reg reg,
377 struct brw_reg indirect_byte_offset)
378 {
379 assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
380 assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);
381
382 unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;
383
384 if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
385 imm_byte_offset += indirect_byte_offset.ud;
386
387 reg.nr = imm_byte_offset / REG_SIZE;
388 reg.subnr = imm_byte_offset % REG_SIZE;
389 brw_MOV(p, dst, reg);
390 } else {
391 /* Prior to Broadwell, there are only 8 address registers. */
392 assert(inst->exec_size == 8 || devinfo->gen >= 8);
393
394 /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
395 struct brw_reg addr = vec8(brw_address_reg(0));
396
397 /* The destination stride of an instruction (in bytes) must be greater
398 * than or equal to the size of the rest of the instruction. Since the
399 * address register is of type UW, we can't use a D-type instruction.
400        * In order to get around this, we retype to UW and use a stride.
401 */
402 indirect_byte_offset =
403 retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
404
405 struct brw_reg ind_src;
406 if (devinfo->gen < 8) {
407 /* From the Haswell PRM section "Register Region Restrictions":
408 *
409 * "The lower bits of the AddressImmediate must not overflow to
410 * change the register address. The lower 5 bits of Address
411 * Immediate when added to lower 5 bits of address register gives
412 * the sub-register offset. The upper bits of Address Immediate
413 * when added to upper bits of address register gives the register
414 * address. Any overflow from sub-register offset is dropped."
415 *
416        * This restriction is only listed in the Haswell PRM but empirical
417 * testing indicates that it applies on all older generations and is
418 * lifted on Broadwell.
419 *
420 * Since the indirect may cause us to cross a register boundary, this
421 * makes the base offset almost useless. We could try and do
422        * something clever where we use an actual base offset if
423 * base_offset % 32 == 0 but that would mean we were generating
424 * different code depending on the base offset. Instead, for the
425 * sake of consistency, we'll just do the add ourselves.
426 */
427 brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
428 ind_src = brw_VxH_indirect(0, 0);
429 } else {
430 brw_MOV(p, addr, indirect_byte_offset);
431 ind_src = brw_VxH_indirect(0, imm_byte_offset);
432 }
433
434 brw_inst *mov = brw_MOV(p, dst, retype(ind_src, dst.type));
435
436 if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
437 !inst->get_next()->is_tail_sentinel() &&
438 ((fs_inst *)inst->get_next())->mlen > 0) {
439 /* From the Sandybridge PRM:
440 *
441 * "[Errata: DevSNB(SNB)] If MRF register is updated by any
442 * instruction that “indexed/indirect” source AND is followed by a
443 * send, the instruction requires a “Switch”. This is to avoid
444 * race condition where send may dispatch before MRF is updated."
445 */
446 brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
447 }
448 }
449 }
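
/* Illustrative example of the Haswell restriction above: with an immediate
 * base of 20 bytes and a run-time offset of 16 in a0, letting the hardware
 * add the 20 as an AddressImmediate would sum the lower 5 bits
 * (20 + 16 = 36 -> 4) and drop the carry, reading byte 4 of the same GRF
 * instead of byte 36.  Doing the ADD into a0 ourselves sidesteps the
 * dropped carry.
 */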
450
451 void
452 fs_generator::generate_urb_read(fs_inst *inst,
453 struct brw_reg dst,
454 struct brw_reg header)
455 {
456 assert(inst->size_written % REG_SIZE == 0);
457 assert(header.file == BRW_GENERAL_REGISTER_FILE);
458 assert(header.type == BRW_REGISTER_TYPE_UD);
459
460 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
461 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
462 brw_set_src0(p, send, header);
463 brw_set_src1(p, send, brw_imm_ud(0u));
464
465 brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
466 brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ);
467
468 if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
469 brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);
470
471 brw_inst_set_mlen(p->devinfo, send, inst->mlen);
472 brw_inst_set_rlen(p->devinfo, send, inst->size_written / REG_SIZE);
473 brw_inst_set_header_present(p->devinfo, send, true);
474 brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
475 }
476
477 void
478 fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
479 {
480 brw_inst *insn;
481
482 insn = brw_next_insn(p, BRW_OPCODE_SEND);
483
484 brw_set_dest(p, insn, brw_null_reg());
485 brw_set_src0(p, insn, payload);
486 brw_set_src1(p, insn, brw_imm_d(0));
487
488 brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
489 brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE);
490
491 if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
492 inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
493 brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);
494
495 if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
496 inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
497 brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);
498
499 brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
500 brw_inst_set_rlen(p->devinfo, insn, 0);
501 brw_inst_set_eot(p->devinfo, insn, inst->eot);
502 brw_inst_set_header_present(p->devinfo, insn, true);
503 brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
504 }
505
506 void
507 fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
508 {
509 struct brw_inst *insn;
510
511 insn = brw_next_insn(p, BRW_OPCODE_SEND);
512
513 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
514 brw_set_src0(p, insn, payload);
515 brw_set_src1(p, insn, brw_imm_d(0));
516
517 /* Terminate a compute shader by sending a message to the thread spawner.
518 */
519 brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
520 brw_inst_set_mlen(devinfo, insn, 1);
521 brw_inst_set_rlen(devinfo, insn, 0);
522 brw_inst_set_eot(devinfo, insn, inst->eot);
523 brw_inst_set_header_present(devinfo, insn, false);
524
525 brw_inst_set_ts_opcode(devinfo, insn, 0); /* Dereference resource */
526 brw_inst_set_ts_request_type(devinfo, insn, 0); /* Root thread */
527
528 /* Note that even though the thread has a URB resource associated with it,
529 * we set the "do not dereference URB" bit, because the URB resource is
530 * managed by the fixed-function unit, so it will free it automatically.
531 */
532 brw_inst_set_ts_resource_select(devinfo, insn, 1); /* Do not dereference URB */
533
534 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
535 }
536
537 void
538 fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
539 {
540 brw_barrier(p, src);
541 brw_WAIT(p);
542 }
543
544 void
545 fs_generator::generate_linterp(fs_inst *inst,
546 struct brw_reg dst, struct brw_reg *src)
547 {
548 /* PLN reads:
549 * / in SIMD16 \
550 * -----------------------------------
551 * | src1+0 | src1+1 | src1+2 | src1+3 |
552 * |-----------------------------------|
553 * |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
554 * -----------------------------------
555 *
556 * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
557 *
558 * -----------------------------------
559 * | src1+0 | src1+1 | src1+2 | src1+3 |
560 * |-----------------------------------|
561 * |(x0, x1)|(y0, y1)| | | in SIMD8
562 * |-----------------------------------|
563 * |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
564 * -----------------------------------
565 *
566 * See also: emit_interpolation_setup_gen4().
567 */
568 struct brw_reg delta_x = src[0];
569 struct brw_reg delta_y = offset(src[0], inst->exec_size / 8);
570 struct brw_reg interp = src[1];
571
572 if (devinfo->has_pln &&
573 (devinfo->gen >= 7 || (delta_x.nr & 1) == 0)) {
574 brw_PLN(p, dst, interp, delta_x);
575 } else {
576 brw_LINE(p, brw_null_reg(), interp, delta_x);
577 brw_MAC(p, dst, suboffset(interp, 1), delta_y);
578 }
579 }
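
/* For reference, a sketch of the semantics (assuming the plane coefficients
 * are laid out as (p0, p1, _, p3) in "interp"): both paths compute
 *
 *    dst = p0 * delta_x + p1 * delta_y + p3
 *
 * PLN does it in a single instruction, while the LINE/MAC pair first forms
 * p0 * delta_x + p3 in the accumulator and then accumulates p1 * delta_y,
 * which is why the MAC reads "interp" at suboffset 1.
 */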
580
581 void
582 fs_generator::generate_get_buffer_size(fs_inst *inst,
583 struct brw_reg dst,
584 struct brw_reg src,
585 struct brw_reg surf_index)
586 {
587 assert(devinfo->gen >= 7);
588 assert(surf_index.file == BRW_IMMEDIATE_VALUE);
589
590 uint32_t simd_mode;
591 int rlen = 4;
592
593 switch (inst->exec_size) {
594 case 8:
595 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
596 break;
597 case 16:
598 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
599 break;
600 default:
601 unreachable("Invalid width for texture instruction");
602 }
603
604 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
605 rlen = 8;
606 dst = vec16(dst);
607 }
608
609 brw_SAMPLE(p,
610 retype(dst, BRW_REGISTER_TYPE_UW),
611 inst->base_mrf,
612 src,
613 surf_index.ud,
614 0,
615 GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
616 rlen, /* response length */
617 inst->mlen,
618 inst->header_size > 0,
619 simd_mode,
620 BRW_SAMPLER_RETURN_FORMAT_SINT32);
621
622 brw_mark_surface_used(prog_data, surf_index.ud);
623 }
624
625 void
626 fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
627 struct brw_reg surface_index,
628 struct brw_reg sampler_index)
629 {
630 assert(inst->size_written % REG_SIZE == 0);
631 int msg_type = -1;
632 uint32_t simd_mode;
633 uint32_t return_format;
634 bool is_combined_send = inst->eot;
635
636 switch (dst.type) {
637 case BRW_REGISTER_TYPE_D:
638 return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
639 break;
640 case BRW_REGISTER_TYPE_UD:
641 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
642 break;
643 default:
644 return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
645 break;
646 }
647
648 /* Stomp the resinfo output type to UINT32. On gens 4-5, the output type
649 * is set as part of the message descriptor. On gen4, the PRM seems to
650 * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
651 * later gens UINT32 is required. Once you hit Sandy Bridge, the bit is
652 * gone from the message descriptor entirely and you just get UINT32 all
653     * the time regardless.  Since we can really only do non-UINT32 on gen4,
654 * just stomp it to UINT32 all the time.
655 */
656 if (inst->opcode == SHADER_OPCODE_TXS)
657 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
658
659 switch (inst->exec_size) {
660 case 8:
661 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
662 break;
663 case 16:
664 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
665 break;
666 default:
667 unreachable("Invalid width for texture instruction");
668 }
669
670 if (devinfo->gen >= 5) {
671 switch (inst->opcode) {
672 case SHADER_OPCODE_TEX:
673 if (inst->shadow_compare) {
674 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
675 } else {
676 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
677 }
678 break;
679 case FS_OPCODE_TXB:
680 if (inst->shadow_compare) {
681 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
682 } else {
683 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
684 }
685 break;
686 case SHADER_OPCODE_TXL:
687 if (inst->shadow_compare) {
688 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
689 } else {
690 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
691 }
692 break;
693 case SHADER_OPCODE_TXL_LZ:
694 assert(devinfo->gen >= 9);
695 if (inst->shadow_compare) {
696 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
697 } else {
698 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
699 }
700 break;
701 case SHADER_OPCODE_TXS:
702 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
703 break;
704 case SHADER_OPCODE_TXD:
705 if (inst->shadow_compare) {
706 /* Gen7.5+. Otherwise, lowered by brw_lower_texture_gradients(). */
707 assert(devinfo->gen >= 8 || devinfo->is_haswell);
708 msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
709 } else {
710 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
711 }
712 break;
713 case SHADER_OPCODE_TXF:
714 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
715 break;
716 case SHADER_OPCODE_TXF_LZ:
717 assert(devinfo->gen >= 9);
718 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
719 break;
720 case SHADER_OPCODE_TXF_CMS_W:
721 assert(devinfo->gen >= 9);
722 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
723 break;
724 case SHADER_OPCODE_TXF_CMS:
725 if (devinfo->gen >= 7)
726 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
727 else
728 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
729 break;
730 case SHADER_OPCODE_TXF_UMS:
731 assert(devinfo->gen >= 7);
732 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
733 break;
734 case SHADER_OPCODE_TXF_MCS:
735 assert(devinfo->gen >= 7);
736 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
737 break;
738 case SHADER_OPCODE_LOD:
739 msg_type = GEN5_SAMPLER_MESSAGE_LOD;
740 break;
741 case SHADER_OPCODE_TG4:
742 if (inst->shadow_compare) {
743 assert(devinfo->gen >= 7);
744 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
745 } else {
746 assert(devinfo->gen >= 6);
747 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
748 }
749 break;
750 case SHADER_OPCODE_TG4_OFFSET:
751 assert(devinfo->gen >= 7);
752 if (inst->shadow_compare) {
753 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
754 } else {
755 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
756 }
757 break;
758 case SHADER_OPCODE_SAMPLEINFO:
759 msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
760 break;
761 default:
762 unreachable("not reached");
763 }
764 } else {
765 switch (inst->opcode) {
766 case SHADER_OPCODE_TEX:
767       /* Note that G45 and older determine shadow compare and dispatch width
768 * from message length for most messages.
769 */
770 if (inst->exec_size == 8) {
771 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
772 if (inst->shadow_compare) {
773 assert(inst->mlen == 6);
774 } else {
775 assert(inst->mlen <= 4);
776 }
777 } else {
778 if (inst->shadow_compare) {
779 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
780 assert(inst->mlen == 9);
781 } else {
782 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
783 assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
784 }
785 }
786 break;
787 case FS_OPCODE_TXB:
788 if (inst->shadow_compare) {
789 assert(inst->exec_size == 8);
790 assert(inst->mlen == 6);
791 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
792 } else {
793 assert(inst->mlen == 9);
794 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
795 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
796 }
797 break;
798 case SHADER_OPCODE_TXL:
799 if (inst->shadow_compare) {
800 assert(inst->exec_size == 8);
801 assert(inst->mlen == 6);
802 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
803 } else {
804 assert(inst->mlen == 9);
805 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
806 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
807 }
808 break;
809 case SHADER_OPCODE_TXD:
810 /* There is no sample_d_c message; comparisons are done manually */
811 assert(inst->exec_size == 8);
812 assert(inst->mlen == 7 || inst->mlen == 10);
813 msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
814 break;
815 case SHADER_OPCODE_TXF:
816 assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
817 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
818 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
819 break;
820 case SHADER_OPCODE_TXS:
821 assert(inst->mlen == 3);
822 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
823 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
824 break;
825 default:
826 unreachable("not reached");
827 }
828 }
829 assert(msg_type != -1);
830
831 if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
832 dst = vec16(dst);
833 }
834
835 assert(devinfo->gen < 7 || inst->header_size == 0 ||
836 src.file == BRW_GENERAL_REGISTER_FILE);
837
838 assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
839
840 /* Load the message header if present. If there's a texture offset,
841 * we need to set it up explicitly and load the offset bitfield.
842 * Otherwise, we can use an implied move from g0 to the first message reg.
843 */
844 if (inst->header_size != 0) {
845 if (devinfo->gen < 6 && !inst->offset) {
846 /* Set up an implied move from g0 to the MRF. */
847 src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
848 } else {
849 struct brw_reg header_reg;
850
851 if (devinfo->gen >= 7) {
852 header_reg = src;
853 } else {
854 assert(inst->base_mrf != -1);
855 header_reg = brw_message_reg(inst->base_mrf);
856 }
857
858 brw_push_insn_state(p);
859 brw_set_default_exec_size(p, BRW_EXECUTE_8);
860 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
861 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
862 /* Explicitly set up the message header by copying g0 to the MRF. */
863 brw_MOV(p, header_reg, brw_vec8_grf(0, 0));
864
865 if (inst->offset) {
866 /* Set the offset bits in DWord 2. */
867 brw_MOV(p, get_element_ud(header_reg, 2),
868 brw_imm_ud(inst->offset));
869 } else if (stage != MESA_SHADER_VERTEX &&
870 stage != MESA_SHADER_FRAGMENT) {
871 /* The vertex and fragment stages have g0.2 set to 0, so
872 * header0.2 is 0 when g0 is copied. Other stages may not, so we
873 * must set it to 0 to avoid setting undesirable bits in the
874 * message.
875 */
876 brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
877 }
878
879 brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
880 brw_pop_insn_state(p);
881 }
882 }
883
884 uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
885 inst->opcode == SHADER_OPCODE_TG4_OFFSET)
886 ? prog_data->binding_table.gather_texture_start
887 : prog_data->binding_table.texture_start;
888
889 if (surface_index.file == BRW_IMMEDIATE_VALUE &&
890 sampler_index.file == BRW_IMMEDIATE_VALUE) {
891 uint32_t surface = surface_index.ud;
892 uint32_t sampler = sampler_index.ud;
893
894 brw_SAMPLE(p,
895 retype(dst, BRW_REGISTER_TYPE_UW),
896 inst->base_mrf,
897 src,
898 surface + base_binding_table_index,
899 sampler % 16,
900 msg_type,
901 inst->size_written / REG_SIZE,
902 inst->mlen,
903 inst->header_size != 0,
904 simd_mode,
905 return_format);
906
907 brw_mark_surface_used(prog_data, surface + base_binding_table_index);
908 } else {
909 /* Non-const sampler index */
910
911 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
912 struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
913 struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));
914
915 brw_push_insn_state(p);
916 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
917 brw_set_default_access_mode(p, BRW_ALIGN_1);
918
919 if (brw_regs_equal(&surface_reg, &sampler_reg)) {
920 brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
921 } else {
922 brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
923 brw_OR(p, addr, addr, surface_reg);
924 }
925 if (base_binding_table_index)
926 brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
927 brw_AND(p, addr, addr, brw_imm_ud(0xfff));
928
929 brw_pop_insn_state(p);
930
931 /* dst = send(offset, a0.0 | <descriptor>) */
932 brw_inst *insn = brw_send_indirect_message(
933 p, BRW_SFID_SAMPLER, dst, src, addr);
934 brw_set_sampler_message(p, insn,
935 0 /* surface */,
936 0 /* sampler */,
937 msg_type,
938 inst->size_written / REG_SIZE,
939 inst->mlen /* mlen */,
940 inst->header_size != 0 /* header */,
941 simd_mode,
942 return_format);
943
944       /* The visitor knows more than we do about the surface limit
945        * required, so it has already done the marking.
946        */
947 }
948
949 if (is_combined_send) {
950 brw_inst_set_eot(p->devinfo, brw_last_inst, true);
951 brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
952 }
953 }
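
/* A sketch of the descriptor built in the non-constant index path above:
 * in the sampler message descriptor the binding table index occupies bits
 * 7:0 and the sampler index bits 11:8, so for a dynamic surface s and
 * sampler n the code computes roughly
 *
 *    a0.0 = ((n << 8) | s) + base_binding_table_index, masked to 12 bits
 *
 * (or n * 0x101 when surface == sampler, filling both fields with one MUL),
 * and the indirect SEND ORs a0.0 into the rest of the descriptor.
 */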
954
955
956 /* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
957 * looking like:
958 *
959 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
960 *
961 * Ideally, we want to produce:
962 *
963 * DDX DDY
964 * dst: (ss0.tr - ss0.tl) (ss0.tl - ss0.bl)
965 * (ss0.tr - ss0.tl) (ss0.tr - ss0.br)
966 * (ss0.br - ss0.bl) (ss0.tl - ss0.bl)
967 * (ss0.br - ss0.bl) (ss0.tr - ss0.br)
968 * (ss1.tr - ss1.tl) (ss1.tl - ss1.bl)
969 * (ss1.tr - ss1.tl) (ss1.tr - ss1.br)
970 * (ss1.br - ss1.bl) (ss1.tl - ss1.bl)
971 * (ss1.br - ss1.bl) (ss1.tr - ss1.br)
972 *
973 * and add another set of two more subspans if in 16-pixel dispatch mode.
974 *
975 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
976 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
977 * pair. But the ideal approximation may impose a huge performance cost on
978  * sample_d.  On at least Haswell, the sample_d instruction does some
979 * optimizations if the same LOD is used for all pixels in the subspan.
980 *
981 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
982 * appropriate swizzling.
983 */
984 void
985 fs_generator::generate_ddx(enum opcode opcode,
986 struct brw_reg dst, struct brw_reg src)
987 {
988 unsigned vstride, width;
989
990 if (opcode == FS_OPCODE_DDX_FINE) {
991 /* produce accurate derivatives */
992 vstride = BRW_VERTICAL_STRIDE_2;
993 width = BRW_WIDTH_2;
994 } else {
995 /* replicate the derivative at the top-left pixel to other pixels */
996 vstride = BRW_VERTICAL_STRIDE_4;
997 width = BRW_WIDTH_4;
998 }
999
1000 struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
1001 src.negate, src.abs,
1002 BRW_REGISTER_TYPE_F,
1003 vstride,
1004 width,
1005 BRW_HORIZONTAL_STRIDE_0,
1006 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1007 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
1008 src.negate, src.abs,
1009 BRW_REGISTER_TYPE_F,
1010 vstride,
1011 width,
1012 BRW_HORIZONTAL_STRIDE_0,
1013 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1014 brw_ADD(p, dst, src0, negate(src1));
1015 }
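
/* Worked example (illustrative): in the FINE case both sources use a
 * <2;2,0> region, with src0 starting one element into the GRF.  For a
 * subspan laid out as (tl, tr, bl, br) this reads
 *
 *    src0: tr tr br br      src1: tl tl bl bl
 *
 * so src0 - src1 yields (tr - tl) twice followed by (br - bl) twice,
 * matching the DDX column in the table above.
 */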
1016
1017 /* The Y derivative is sign-sensitive to the render target orientation:
1018  * FBOs place the origin at the upper left instead of the lower left, so
1019  * any required negation is applied to the source before it reaches this
1020  * function.
1021  */
1021 void
1022 fs_generator::generate_ddy(enum opcode opcode,
1023 struct brw_reg dst, struct brw_reg src)
1024 {
1025 if (opcode == FS_OPCODE_DDY_FINE) {
1026 /* produce accurate derivatives */
1027 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
1028 src.negate, src.abs,
1029 BRW_REGISTER_TYPE_F,
1030 BRW_VERTICAL_STRIDE_4,
1031 BRW_WIDTH_4,
1032 BRW_HORIZONTAL_STRIDE_1,
1033 BRW_SWIZZLE_XYXY, WRITEMASK_XYZW);
1034 struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
1035 src.negate, src.abs,
1036 BRW_REGISTER_TYPE_F,
1037 BRW_VERTICAL_STRIDE_4,
1038 BRW_WIDTH_4,
1039 BRW_HORIZONTAL_STRIDE_1,
1040 BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
1041 brw_push_insn_state(p);
1042 brw_set_default_access_mode(p, BRW_ALIGN_16);
1043 brw_ADD(p, dst, negate(src0), src1);
1044 brw_pop_insn_state(p);
1045 } else {
1046 /* replicate the derivative at the top-left pixel to other pixels */
1047 struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
1048 src.negate, src.abs,
1049 BRW_REGISTER_TYPE_F,
1050 BRW_VERTICAL_STRIDE_4,
1051 BRW_WIDTH_4,
1052 BRW_HORIZONTAL_STRIDE_0,
1053 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1054 struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
1055 src.negate, src.abs,
1056 BRW_REGISTER_TYPE_F,
1057 BRW_VERTICAL_STRIDE_4,
1058 BRW_WIDTH_4,
1059 BRW_HORIZONTAL_STRIDE_0,
1060 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
1061 brw_ADD(p, dst, negate(src0), src1);
1062 }
1063 }
1064
1065 void
1066 fs_generator::generate_discard_jump(fs_inst *inst)
1067 {
1068 assert(devinfo->gen >= 6);
1069
1070 /* This HALT will be patched up at FB write time to point UIP at the end of
1071 * the program, and at brw_uip_jip() JIP will be set to the end of the
1072 * current block (or the program).
1073 */
1074 this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
1075 gen6_HALT(p);
1076 }
1077
1078 void
1079 fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
1080 {
1081 /* The 32-wide messages only respect the first 16-wide half of the channel
1082 * enable signals which are replicated identically for the second group of
1083 * 16 channels, so we cannot use them unless the write is marked
1084 * force_writemask_all.
1085 */
1086 const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
1087 MIN2(16, inst->exec_size);
1088 const unsigned block_size = 4 * lower_size / REG_SIZE;
1089 assert(inst->mlen != 0);
1090
1091 brw_push_insn_state(p);
1092 brw_set_default_exec_size(p, cvt(lower_size) - 1);
1093 brw_set_default_compression(p, lower_size > 8);
1094
1095 for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
1096 brw_set_default_group(p, inst->group + lower_size * i);
1097
1098 brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
1099 retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));
1100
1101 brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
1102 block_size,
1103 inst->offset + block_size * REG_SIZE * i);
1104 }
1105
1106 brw_pop_insn_state(p);
1107 }
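
/* Example of the splitting above (a sketch): a hypothetical SIMD32 write
 * without force_writemask_all uses lower_size = 16, so block_size =
 * 4 * 16 / REG_SIZE = 2 GRFs per message and the loop emits two MOV/write
 * pairs, one per 16-channel group, the second landing block_size *
 * REG_SIZE = 64 bytes further into the scratch space.
 */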
1108
1109 void
1110 fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
1111 {
1112 assert(inst->exec_size <= 16 || inst->force_writemask_all);
1113 assert(inst->mlen != 0);
1114
1115 brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
1116 inst->exec_size / 8, inst->offset);
1117 }
1118
1119 void
1120 fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
1121 {
1122 assert(inst->exec_size <= 16 || inst->force_writemask_all);
1123
1124 gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
1125 }
1126
1127 void
1128 fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
1129 struct brw_reg dst,
1130 struct brw_reg index,
1131 struct brw_reg offset)
1132 {
1133 assert(inst->mlen != 0);
1134
1135 assert(index.file == BRW_IMMEDIATE_VALUE &&
1136 index.type == BRW_REGISTER_TYPE_UD);
1137 uint32_t surf_index = index.ud;
1138
1139 assert(offset.file == BRW_IMMEDIATE_VALUE &&
1140 offset.type == BRW_REGISTER_TYPE_UD);
1141 uint32_t read_offset = offset.ud;
1142
1143 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
1144 read_offset, surf_index);
1145 }
1146
1147 void
1148 fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
1149 struct brw_reg dst,
1150 struct brw_reg index,
1151 struct brw_reg offset)
1152 {
1153 assert(index.type == BRW_REGISTER_TYPE_UD);
1154
1155 assert(offset.file == BRW_GENERAL_REGISTER_FILE);
1156 /* Reference just the dword we need, to avoid angering validate_reg(). */
1157 offset = brw_vec1_grf(offset.nr, 0);
1158
1159 /* We use the SIMD4x2 mode because we want to end up with 4 components in
1160 * the destination loaded consecutively from the same offset (which appears
1161 * in the first component, and the rest are ignored).
1162 */
1163 dst.width = BRW_WIDTH_4;
1164
1165 struct brw_reg src = offset;
1166 bool header_present = false;
1167
1168 if (devinfo->gen >= 9) {
1169 /* Skylake requires a message header in order to use SIMD4x2 mode. */
1170 src = retype(brw_vec4_grf(offset.nr, 0), BRW_REGISTER_TYPE_UD);
1171 header_present = true;
1172
1173 brw_push_insn_state(p);
1174 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1175 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1176 brw_MOV(p, vec8(src), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1177 brw_set_default_access_mode(p, BRW_ALIGN_1);
1178
1179 brw_MOV(p, get_element_ud(src, 2),
1180 brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));
1181 brw_pop_insn_state(p);
1182 }
1183
1184 if (index.file == BRW_IMMEDIATE_VALUE) {
1185
1186 uint32_t surf_index = index.ud;
1187
1188 brw_push_insn_state(p);
1189 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1190 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1191 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1192 brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
1193 brw_pop_insn_state(p);
1194
1195 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
1196 brw_set_src0(p, send, src);
1197 brw_set_sampler_message(p, send,
1198 surf_index,
1199 0, /* LD message ignores sampler unit */
1200 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1201 1, /* rlen */
1202 inst->mlen,
1203 header_present,
1204 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1205 0);
1206 } else {
1207
1208 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1209
1210 brw_push_insn_state(p);
1211 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1212 brw_set_default_access_mode(p, BRW_ALIGN_1);
1213
1214 /* a0.0 = surf_index & 0xff */
1215 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1216 brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
1217 brw_set_dest(p, insn_and, addr);
1218 brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
1219 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1220
1221 /* dst = send(payload, a0.0 | <descriptor>) */
1222 brw_inst *insn = brw_send_indirect_message(
1223 p, BRW_SFID_SAMPLER, dst, src, addr);
1224 brw_set_sampler_message(p, insn,
1225 0,
1226 0, /* LD message ignores sampler unit */
1227 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1228 1, /* rlen */
1229 inst->mlen,
1230 header_present,
1231 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1232 0);
1233
1234 brw_pop_insn_state(p);
1235 }
1236 }
1237
1238 void
1239 fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
1240 struct brw_reg dst,
1241 struct brw_reg index)
1242 {
1243 assert(devinfo->gen < 7); /* Should use the gen7 variant. */
1244 assert(inst->header_size != 0);
1245 assert(inst->mlen);
1246
1247 assert(index.file == BRW_IMMEDIATE_VALUE &&
1248 index.type == BRW_REGISTER_TYPE_UD);
1249 uint32_t surf_index = index.ud;
1250
1251 uint32_t simd_mode, rlen, msg_type;
1252 if (inst->exec_size == 16) {
1253 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1254 rlen = 8;
1255 } else {
1256 assert(inst->exec_size == 8);
1257 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1258 rlen = 4;
1259 }
1260
1261 if (devinfo->gen >= 5)
1262 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
1263 else {
1264 /* We always use the SIMD16 message so that we only have to load U, and
1265 * not V or R.
1266 */
1267 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
1268 assert(inst->mlen == 3);
1269 assert(inst->size_written == 8 * REG_SIZE);
1270 rlen = 8;
1271 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1272 }
1273
1274 struct brw_reg header = brw_vec8_grf(0, 0);
1275 gen6_resolve_implied_move(p, &header, inst->base_mrf);
1276
1277 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1278 brw_inst_set_compression(devinfo, send, false);
1279 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
1280 brw_set_src0(p, send, header);
1281 if (devinfo->gen < 6)
1282 brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);
1283
1284 /* Our surface is set up as floats, regardless of what actual data is
1285 * stored in it.
1286 */
1287 uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
1288 brw_set_sampler_message(p, send,
1289 surf_index,
1290 0, /* sampler (unused) */
1291 msg_type,
1292 rlen,
1293 inst->mlen,
1294 inst->header_size != 0,
1295 simd_mode,
1296 return_format);
1297 }
1298
1299 void
1300 fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
1301 struct brw_reg dst,
1302 struct brw_reg index,
1303 struct brw_reg offset)
1304 {
1305 assert(devinfo->gen >= 7);
1306 /* Varying-offset pull constant loads are treated as a normal expression on
1307 * gen7, so the fact that it's a send message is hidden at the IR level.
1308 */
1309 assert(inst->header_size == 0);
1310 assert(!inst->mlen);
1311 assert(index.type == BRW_REGISTER_TYPE_UD);
1312
1313 uint32_t simd_mode, rlen, mlen;
1314 if (inst->exec_size == 16) {
1315 mlen = 2;
1316 rlen = 8;
1317 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1318 } else {
1319 assert(inst->exec_size == 8);
1320 mlen = 1;
1321 rlen = 4;
1322 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1323 }
1324
1325 if (index.file == BRW_IMMEDIATE_VALUE) {
1326
1327 uint32_t surf_index = index.ud;
1328
1329 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1330 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
1331 brw_set_src0(p, send, offset);
1332 brw_set_sampler_message(p, send,
1333 surf_index,
1334 0, /* LD message ignores sampler unit */
1335 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1336 rlen,
1337 mlen,
1338 false, /* no header */
1339 simd_mode,
1340 0);
1341
1342 } else {
1343
1344 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1345
1346 brw_push_insn_state(p);
1347 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1348 brw_set_default_access_mode(p, BRW_ALIGN_1);
1349
1350 /* a0.0 = surf_index & 0xff */
1351 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1352 brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
1353 brw_set_dest(p, insn_and, addr);
1354 brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
1355 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1356
1357 brw_pop_insn_state(p);
1358
1359 /* dst = send(offset, a0.0 | <descriptor>) */
1360 brw_inst *insn = brw_send_indirect_message(
1361 p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
1362 offset, addr);
1363 brw_set_sampler_message(p, insn,
1364 0 /* surface */,
1365 0 /* sampler */,
1366 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1367 rlen /* rlen */,
1368 mlen /* mlen */,
1369 false /* header */,
1370 simd_mode,
1371 0);
1372 }
1373 }
1374
1375 /**
1376 * Cause the current pixel/sample mask (from R1.7 bits 15:0) to be transferred
1377 * into the flags register (f0.0).
1378 *
1379 * Used only on Gen6 and above.
1380 */
1381 void
1382 fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
1383 {
1384 struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
1385 struct brw_reg dispatch_mask;
1386
1387 if (devinfo->gen >= 6)
1388 dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
1389 else
1390 dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
1391
1392 brw_push_insn_state(p);
1393 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1394 brw_MOV(p, flags, dispatch_mask);
1395 brw_pop_insn_state(p);
1396 }
1397
1398 void
1399 fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
1400 struct brw_reg dst,
1401 struct brw_reg src,
1402 struct brw_reg msg_data,
1403 unsigned msg_type)
1404 {
1405 assert(inst->size_written % REG_SIZE == 0);
1406 assert(msg_data.type == BRW_REGISTER_TYPE_UD);
1407
1408 brw_pixel_interpolator_query(p,
1409 retype(dst, BRW_REGISTER_TYPE_UW),
1410 src,
1411 inst->pi_noperspective,
1412 msg_type,
1413 msg_data,
1414 inst->mlen,
1415 inst->size_written / REG_SIZE);
1416 }
1417
1418
1419 /**
1420 * Sets the first word of a vgrf for gen7+ simd4x2 uniform pull constant
1421 * sampler LD messages.
1422 *
1423 * We don't want to bake it into the send message's code generation because
1424 * that means we don't get a chance to schedule the instructions.
1425 */
1426 void
1427 fs_generator::generate_set_simd4x2_offset(fs_inst *inst,
1428 struct brw_reg dst,
1429 struct brw_reg value)
1430 {
1431 assert(value.file == BRW_IMMEDIATE_VALUE);
1432
1433 brw_push_insn_state(p);
1434 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1435 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1436 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1437 brw_MOV(p, retype(brw_vec1_reg(dst.file, dst.nr, 0), value.type), value);
1438 brw_pop_insn_state(p);
1439 }
1440
1441 /* Sets vstride=1, width=4, hstride=0 of register src1 during
1442 * the ADD instruction.
1443 */
1444 void
1445 fs_generator::generate_set_sample_id(fs_inst *inst,
1446 struct brw_reg dst,
1447 struct brw_reg src0,
1448 struct brw_reg src1)
1449 {
1450 assert(dst.type == BRW_REGISTER_TYPE_D ||
1451 dst.type == BRW_REGISTER_TYPE_UD);
1452 assert(src0.type == BRW_REGISTER_TYPE_D ||
1453 src0.type == BRW_REGISTER_TYPE_UD);
1454
1455 struct brw_reg reg = stride(src1, 1, 4, 0);
1456 if (devinfo->gen >= 8 || inst->exec_size == 8) {
1457 brw_ADD(p, dst, src0, reg);
1458 } else if (inst->exec_size == 16) {
1459 brw_push_insn_state(p);
1460 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1461 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1462 brw_ADD(p, firsthalf(dst), firsthalf(src0), reg);
1463 brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
1464 brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2));
1465 brw_pop_insn_state(p);
1466 }
1467 }
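
/* An illustrative reading of the <1;4,0> region set up above: if src1
 * holds (s0, s1, ...) then the region yields s0 s0 s0 s0 s1 s1 s1 s1,
 * i.e. one sample id replicated across each 4-channel subspan, which the
 * ADD combines with the per-channel values in src0.  The suboffset of 2
 * in the second-half ADD skips the two rows consumed by the first half.
 */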
1468
1469 void
1470 fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
1471 struct brw_reg dst,
1472 struct brw_reg x,
1473 struct brw_reg y)
1474 {
1475 assert(devinfo->gen >= 7);
1476 assert(dst.type == BRW_REGISTER_TYPE_UD);
1477 assert(x.type == BRW_REGISTER_TYPE_F);
1478 assert(y.type == BRW_REGISTER_TYPE_F);
1479
1480 /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
1481 *
1482 * Because this instruction does not have a 16-bit floating-point type,
1483 * the destination data type must be Word (W).
1484 *
1485 * The destination must be DWord-aligned and specify a horizontal stride
1486 * (HorzStride) of 2. The 16-bit result is stored in the lower word of
1487 * each destination channel and the upper word is not modified.
1488 */
1489 struct brw_reg dst_w = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
1490
1491 /* Give each 32-bit channel of dst the form below, where "." means
1492 * unchanged.
1493 * 0x....hhhh
1494 */
1495 brw_F32TO16(p, dst_w, y);
1496
1497 /* Now the form:
1498 * 0xhhhh0000
1499 */
1500 brw_SHL(p, dst, dst, brw_imm_ud(16u));
1501
1502 /* And, finally the form of packHalf2x16's output:
1503 * 0xhhhhllll
1504 */
1505 brw_F32TO16(p, dst_w, x);
1506 }
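
/* Bit-level sketch of the sequence above for x = 0.5f, y = 1.0f, using the
 * half-float encodings half(0.5) = 0x3800 and half(1.0) = 0x3C00:
 *
 *    after F32TO16(dst_w, y):   0x....3C00
 *    after SHL(dst, dst, 16):   0x3C000000
 *    after F32TO16(dst_w, x):   0x3C003800
 *
 * which is packHalf2x16(vec2(0.5, 1.0)), with y in the high word.
 */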
1507
1508 void
1509 fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
1510 struct brw_reg dst,
1511 struct brw_reg src)
1512 {
1513 assert(devinfo->gen >= 7);
1514 assert(dst.type == BRW_REGISTER_TYPE_F);
1515 assert(src.type == BRW_REGISTER_TYPE_UD);
1516
1517 /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
1518 *
1519 * Because this instruction does not have a 16-bit floating-point type,
1520 * the source data type must be Word (W). The destination type must be
1521 * F (Float).
1522 */
1523 struct brw_reg src_w = spread(retype(src, BRW_REGISTER_TYPE_W), 2);
1524
1525 /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
1526 * For the Y case, we wish to access only the upper word; therefore
1527 * a 16-bit subregister offset is needed.
1528 */
1529 assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
1530 inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
1531 if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
1532 src_w.subnr += 2;
1533
1534 brw_F16TO32(p, dst, src_w);
1535 }
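
/* Continuing the example above: for a source channel holding 0x3C003800,
 * SPLIT_X reads the low word (0x3800 -> 0.5f) while SPLIT_Y advances
 * src_w.subnr by two bytes and reads the high word (0x3C00 -> 1.0f).
 */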
1536
1537 void
1538 fs_generator::generate_shader_time_add(fs_inst *inst,
1539 struct brw_reg payload,
1540 struct brw_reg offset,
1541 struct brw_reg value)
1542 {
1543 assert(devinfo->gen >= 7);
1544 brw_push_insn_state(p);
1545 brw_set_default_mask_control(p, true);
1546
1547 assert(payload.file == BRW_GENERAL_REGISTER_FILE);
1548 struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
1549 offset.type);
1550 struct brw_reg payload_value = retype(brw_vec1_grf(payload.nr + 1, 0),
1551 value.type);
1552
1553 assert(offset.file == BRW_IMMEDIATE_VALUE);
1554 if (value.file == BRW_GENERAL_REGISTER_FILE) {
1555 value.width = BRW_WIDTH_1;
1556 value.hstride = BRW_HORIZONTAL_STRIDE_0;
1557 value.vstride = BRW_VERTICAL_STRIDE_0;
1558 } else {
1559 assert(value.file == BRW_IMMEDIATE_VALUE);
1560 }
1561
1562 /* Trying to deal with setup of the params from the IR is crazy in the FS8
1563 * case, and we don't really care about squeezing every bit of performance
1564 * out of this path, so we just emit the MOVs from here.
1565 */
1566 brw_MOV(p, payload_offset, offset);
1567 brw_MOV(p, payload_value, value);
1568 brw_shader_time_add(p, payload,
1569 prog_data->binding_table.shader_time_start);
1570 brw_pop_insn_state(p);
1571
1572 brw_mark_surface_used(prog_data,
1573 prog_data->binding_table.shader_time_start);
1574 }
1575
1576 void
1577 fs_generator::enable_debug(const char *shader_name)
1578 {
1579 debug_flag = true;
1580 this->shader_name = shader_name;
1581 }
1582
1583 int
1584 fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
1585 {
1586 /* align to 64 byte boundary. */
1587 while (p->next_insn_offset % 64)
1588 brw_NOP(p);
1589
1590 this->dispatch_width = dispatch_width;
1591
1592 int start_offset = p->next_insn_offset;
1593 int spill_count = 0, fill_count = 0;
1594 int loop_count = 0;
1595
1596 struct annotation_info annotation;
1597 memset(&annotation, 0, sizeof(annotation));
1598
1599 foreach_block_and_inst (block, fs_inst, inst, cfg) {
1600 struct brw_reg src[3], dst;
1601 unsigned int last_insn_offset = p->next_insn_offset;
1602 bool multiple_instructions_emitted = false;
1603
1604 /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
1605 * "Register Region Restrictions" section: for BDW, SKL:
1606 *
1607 * "A POW/FDIV operation must not be followed by an instruction
1608 * that requires two destination registers."
1609 *
1610 * The documentation is often lacking annotations for Atom parts,
1611 * and empirically this affects CHV as well.
1612 */
1613 if (devinfo->gen >= 8 &&
1614 p->nr_insn > 1 &&
1615 brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
1616 brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
1617 inst->dst.component_size(inst->exec_size) > REG_SIZE) {
1618 brw_NOP(p);
1619 last_insn_offset = p->next_insn_offset;
1620 }
1621
1622 if (unlikely(debug_flag))
1623 annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);
1624
1625 /* If the instruction writes to more than one register, it needs to be
1626 * explicitly marked as compressed on Gen <= 5. On Gen >= 6 the
1627 * hardware figures out by itself what the right compression mode is,
1628 * but we still need to know whether the instruction is compressed to
1629 * set up the source register regions appropriately.
1630 *
1631 * XXX - This is wrong for instructions that write a single register but
1632 * read more than one which should strictly speaking be treated as
1633 * compressed. For instructions that don't write any registers it
1634 * relies on the destination being a null register of the correct
1635 * type and regioning so the instruction is considered compressed
1636 * or not accordingly.
1637 */
1638 const bool compressed =
1639 inst->dst.component_size(inst->exec_size) > REG_SIZE;
1640 brw_set_default_compression(p, compressed);
1641 brw_set_default_group(p, inst->group);
1642
1643 for (unsigned int i = 0; i < inst->sources; i++) {
1644 src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen,
1645 compressed);
1646
1647 /* The accumulator result appears to get used for the
1648 * conditional modifier generation. When negating a UD
1649 * value, there is a 33rd bit generated for the sign in the
1650 * accumulator value, so now you can't check, for example,
1651 * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
1652 */
1653 assert(!inst->conditional_mod ||
1654 inst->src[i].type != BRW_REGISTER_TYPE_UD ||
1655 !inst->src[i].negate);
1656 }
1657 dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen, compressed);
1658
1659 brw_set_default_access_mode(p, BRW_ALIGN_1);
1660 brw_set_default_predicate_control(p, inst->predicate);
1661 brw_set_default_predicate_inverse(p, inst->predicate_inverse);
1662 brw_set_default_flag_reg(p, 0, inst->flag_subreg);
1663 brw_set_default_saturate(p, inst->saturate);
1664 brw_set_default_mask_control(p, inst->force_writemask_all);
1665 brw_set_default_acc_write_control(p, inst->writes_accumulator);
1666 brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
1667
1668 assert(inst->force_writemask_all || inst->exec_size >= 4);
1669 assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
1670 assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
1671 assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
1672
1673 switch (inst->opcode) {
1674 case BRW_OPCODE_MOV:
1675 brw_MOV(p, dst, src[0]);
1676 break;
1677 case BRW_OPCODE_ADD:
1678 brw_ADD(p, dst, src[0], src[1]);
1679 break;
1680 case BRW_OPCODE_MUL:
1681 brw_MUL(p, dst, src[0], src[1]);
1682 break;
1683 case BRW_OPCODE_AVG:
1684 brw_AVG(p, dst, src[0], src[1]);
1685 break;
1686 case BRW_OPCODE_MACH:
1687 brw_MACH(p, dst, src[0], src[1]);
1688 break;
1689
1690 case BRW_OPCODE_LINE:
1691 brw_LINE(p, dst, src[0], src[1]);
1692 break;
1693
      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;
      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;
      case BRW_OPCODE_CMP:
         if (inst->exec_size >= 16 && devinfo->gen == 7 && !devinfo->is_haswell &&
             dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
             * implemented in the compiler is not sufficient.  Overriding the
             * type when the destination is the null register is necessary but
             * not sufficient by itself.
             */
            assert(dst.nr == BRW_ARF_NULL);
            dst.type = BRW_REGISTER_TYPE_D;
         }
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_LZD:
         brw_LZD(p, dst, src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_IF(p, brw_inst_exec_size(devinfo, p->current));
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, brw_inst_exec_size(devinfo, p->current));
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

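      /* On gen6+ the math instructions are native ALU instructions with no
       * message payload, while on gen4/5 they are messages to the math
       * shared function, which is why a message length is required there.
       */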
      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert(devinfo->gen >= 7 || inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode),
                      src[0], brw_null_reg());
         } else {
            assert(inst->mlen >= 1);
            assert(devinfo->gen == 5 || devinfo->is_g4x || inst->exec_size == 8);
            gen4_math(p, dst,
                      brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert((devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
                   inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else {
            assert(inst->mlen >= 1);
            assert(inst->exec_size == 8);
            gen4_math(p, dst, brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
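
      /* The pixel X/Y coordinates come in the payload as interleaved
       * unsigned words, four X values followed by four Y values per
       * subspan, so select the appropriate halves with an (8, 4, 1) region
       * and a subregister offset.
       */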
      case FS_OPCODE_PIXEL_X:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 0 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_PIXEL_Y:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 4 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(inst, dst, src[0], src[1]);
         break;
      case SHADER_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_LZ:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_UMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXL_LZ:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_LOD:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(inst, dst, src[0], src[1], src[2]);
         break;
      case FS_OPCODE_DDX_COARSE:
      case FS_OPCODE_DDX_FINE:
         generate_ddx(inst->opcode, dst, src[0]);
         break;
      case FS_OPCODE_DDY_COARSE:
      case FS_OPCODE_DDY_FINE:
         generate_ddy(inst->opcode, dst, src[0]);
         break;

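      /* Scratch reads and writes are counted so that the spill/fill
       * statistics show up in the debug output and shader_debug_log below.
       */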
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(inst, src[0]);
         spill_count++;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         generate_scratch_read_gen7(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_URB_READ_SIMD8:
      case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
         generate_urb_read(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_URB_WRITE_SIMD8:
      case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
         generate_urb_write(inst, src[0]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
         generate_varying_pull_constant_load_gen4(inst, dst, src[0]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
         generate_varying_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_REP_FB_WRITE:
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst, src[0]);
         break;

      case FS_OPCODE_FB_READ:
         generate_fb_read(inst, dst, src[0]);
         break;

      case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
         generate_mov_dispatch_to_flags(inst);
         break;

      case FS_OPCODE_DISCARD_JUMP:
         generate_discard_jump(inst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         generate_shader_time_add(inst, src[0], src[1], src[2]);
         break;

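      /* For the untyped and typed surface messages below, src[2] is an
       * immediate holding the atomic operation or the number of components
       * accessed, respectively.  Atomics request a response only when the
       * destination is non-null.
       */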
      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
                            inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1],
                                  inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1],
                                   inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1],
                          src[2].ud, inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1],
                                inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case FS_OPCODE_SET_SIMD4X2_OFFSET:
         generate_set_simd4x2_offset(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         assert(inst->force_writemask_all);
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case FS_OPCODE_SET_SAMPLE_ID:
         generate_set_sample_id(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
         generate_unpack_half_2x16_split(inst, dst, src[0]);
         break;

      case FS_OPCODE_PLACEHOLDER_HALT:
         /* This is the place where the final HALT needs to be inserted if
          * we've emitted any discards.  If not, this will emit no code.
          */
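         /* If patch_discard_jumps_to_fb_writes() emitted nothing, drop the
          * annotation allocated for this IR instruction so the annotation
          * list stays in sync with the code that was actually generated.
          */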
         if (!patch_discard_jumps_to_fb_writes()) {
            if (unlikely(debug_flag)) {
               annotation.ann_count--;
            }
         }
         break;

      case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
         break;

      case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
         break;

      case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
         break;

      case CS_OPCODE_CS_TERMINATE:
         generate_cs_terminate(inst, src[0]);
         break;

      case SHADER_OPCODE_BARRIER:
         generate_barrier(inst, src[0]);
         break;

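      /* DIM is a Haswell-only instruction used to load DF immediates, which
       * a plain MOV cannot encode there; the source holds a DF value but
       * must be encoded with the F register type, hence the retype.
       */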
      case BRW_OPCODE_DIM:
         assert(devinfo->is_haswell);
         assert(src[0].type == BRW_REGISTER_TYPE_DF);
         assert(dst.type == BRW_REGISTER_TYPE_DF);
         brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
         break;

      default:
         unreachable("Unsupported opcode");

      case SHADER_OPCODE_LOAD_PAYLOAD:
         unreachable("Should be lowered by lower_load_payload()");
      }

      if (multiple_instructions_emitted)
         continue;

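      /* An uncompacted native instruction is 16 bytes, so comparing
       * next_insn_offset against last_insn_offset + 16 verifies that exactly
       * one instruction was emitted for this IR instruction.
       */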
      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->next_insn_offset == last_insn_offset + 16 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[last_insn_offset / 16];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p, start_offset);
   annotation_finalize(&annotation, p->next_insn_offset);

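   /* In debug builds the generated code is always validated and the result
    * checked by the assert() below; in release builds validation only runs
    * when the debug flag is set, purely for its diagnostic output.
    */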
#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, start_offset, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, start_offset, &annotation);
#endif

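   /* Instruction compaction rewrites the program in place; the before/after
    * sizes are in bytes, so dividing by 16 yields the uncompacted
    * instruction count reported below.
    */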
   int before_size = p->next_insn_offset - start_offset;
   brw_compact_instructions(p, start_offset, annotation.ann_count,
                            annotation.ann);
   int after_size = p->next_insn_offset - start_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s\n"
              "SIMD%d shader: %d instructions. %d loops. %u cycles. "
              "%d:%d spills:fills. Promoted %u constants. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              shader_name, dispatch_width, before_size / 16,
              loop_count, cfg->cycle_count, spill_count, fill_count,
              promoted_constants, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                              "%d:%d spills:fills, Promoted %u constants, "
                              "compacted %d to %d bytes.",
                              _mesa_shader_stage_to_abbrev(stage),
                              dispatch_width, before_size / 16,
                              loop_count, cfg->cycle_count, spill_count,
                              fill_count, promoted_constants, before_size,
                              after_size);

   return start_offset;
}

const unsigned *
fs_generator::get_assembly(unsigned int *assembly_size)
{
   return brw_get_program(p, assembly_size);
}