2 * Copyright © 2015 Intel Corporation
3 * Copyright © 2014-2015 Broadcom
4 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_builder.h"
28 #include "compiler/glsl/list.h"
29 #include "main/imports.h"
30 #include "util/ralloc.h"
32 #include "prog_to_nir.h"
33 #include "prog_instruction.h"
34 #include "prog_parameter.h"
35 #include "prog_print.h"
41 * A translator from Mesa IR (prog_instruction.h) to NIR. This is primarily
42 * intended to support ARB_vertex_program, ARB_fragment_program, and fixed-function
43 * vertex processing. Full GLSL support should use glsl_to_nir instead.
47 const struct gl_program
*prog
;
51 nir_variable
*parameters
;
52 nir_variable
*input_vars
[VARYING_SLOT_MAX
];
53 nir_variable
*output_vars
[VARYING_SLOT_MAX
];
54 nir_register
**output_regs
;
55 nir_register
**temp_regs
;
57 nir_register
*addr_reg
;
/* Build an unsigned[4] swizzle from channel names (X/Y/Z/W/ZERO/ONE/NIL). */
#define SWIZ(X, Y, Z, W) \
   (unsigned[4]){ SWIZZLE_##X, SWIZZLE_##Y, SWIZZLE_##Z, SWIZZLE_##W }

/* Extract a single channel of src as a 1-component SSA value. */
#define ptn_channel(b, src, ch) nir_swizzle(b, src, SWIZ(ch, ch, ch, ch), 1, true)
65 ptn_src_for_dest(struct ptn_compile
*c
, nir_alu_dest
*dest
)
67 nir_builder
*b
= &c
->build
;
70 memset(&src
, 0, sizeof(src
));
72 if (dest
->dest
.is_ssa
)
73 src
.src
= nir_src_for_ssa(&dest
->dest
.ssa
);
75 assert(!dest
->dest
.reg
.indirect
);
76 src
.src
= nir_src_for_reg(dest
->dest
.reg
.reg
);
77 src
.src
.reg
.base_offset
= dest
->dest
.reg
.base_offset
;
80 for (int i
= 0; i
< 4; i
++)
83 return nir_fmov_alu(b
, src
, 4);
87 ptn_get_dest(struct ptn_compile
*c
, const struct prog_dst_register
*prog_dst
)
91 memset(&dest
, 0, sizeof(dest
));
93 switch (prog_dst
->File
) {
94 case PROGRAM_TEMPORARY
:
95 dest
.dest
.reg
.reg
= c
->temp_regs
[prog_dst
->Index
];
98 dest
.dest
.reg
.reg
= c
->output_regs
[prog_dst
->Index
];
100 case PROGRAM_ADDRESS
:
101 assert(prog_dst
->Index
== 0);
102 dest
.dest
.reg
.reg
= c
->addr_reg
;
104 case PROGRAM_UNDEFINED
:
108 dest
.write_mask
= prog_dst
->WriteMask
;
109 dest
.saturate
= false;
111 assert(!prog_dst
->RelAddr
);
117 ptn_get_src(struct ptn_compile
*c
, const struct prog_src_register
*prog_src
)
119 nir_builder
*b
= &c
->build
;
122 memset(&src
, 0, sizeof(src
));
124 switch (prog_src
->File
) {
125 case PROGRAM_UNDEFINED
:
126 return nir_imm_float(b
, 0.0);
127 case PROGRAM_TEMPORARY
:
128 assert(!prog_src
->RelAddr
&& prog_src
->Index
>= 0);
129 src
.src
.reg
.reg
= c
->temp_regs
[prog_src
->Index
];
131 case PROGRAM_INPUT
: {
132 /* ARB_vertex_program doesn't allow relative addressing on vertex
133 * attributes; ARB_fragment_program has no relative addressing at all.
135 assert(!prog_src
->RelAddr
);
137 assert(prog_src
->Index
>= 0 && prog_src
->Index
< VARYING_SLOT_MAX
);
139 nir_intrinsic_instr
*load
=
140 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_var
);
141 load
->num_components
= 4;
142 load
->variables
[0] = nir_deref_var_create(load
, c
->input_vars
[prog_src
->Index
]);
144 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, 32, NULL
);
145 nir_builder_instr_insert(b
, &load
->instr
);
147 src
.src
= nir_src_for_ssa(&load
->dest
.ssa
);
150 case PROGRAM_STATE_VAR
:
151 case PROGRAM_CONSTANT
: {
152 /* We actually want to look at the type in the Parameters list for this,
153 * because it lets us upload constant builtin uniforms as actual
156 struct gl_program_parameter_list
*plist
= c
->prog
->Parameters
;
157 gl_register_file file
= prog_src
->RelAddr
? prog_src
->File
:
158 plist
->Parameters
[prog_src
->Index
].Type
;
161 case PROGRAM_CONSTANT
:
162 if ((c
->prog
->IndirectRegisterFiles
& (1 << PROGRAM_CONSTANT
)) == 0) {
163 float *v
= (float *) plist
->ParameterValues
[prog_src
->Index
];
164 src
.src
= nir_src_for_ssa(nir_imm_vec4(b
, v
[0], v
[1], v
[2], v
[3]));
168 case PROGRAM_STATE_VAR
: {
169 assert(c
->parameters
!= NULL
);
171 nir_intrinsic_instr
*load
=
172 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_var
);
173 nir_ssa_dest_init(&load
->instr
, &load
->dest
, 4, 32, NULL
);
174 load
->num_components
= 4;
176 load
->variables
[0] = nir_deref_var_create(load
, c
->parameters
);
177 nir_deref_array
*deref_arr
=
178 nir_deref_array_create(load
->variables
[0]);
179 deref_arr
->deref
.type
= glsl_vec4_type();
180 load
->variables
[0]->deref
.child
= &deref_arr
->deref
;
182 if (prog_src
->RelAddr
) {
183 deref_arr
->deref_array_type
= nir_deref_array_type_indirect
;
185 nir_alu_src addr_src
= { NIR_SRC_INIT
};
186 addr_src
.src
= nir_src_for_reg(c
->addr_reg
);
187 nir_ssa_def
*reladdr
= nir_imov_alu(b
, addr_src
, 1);
189 if (prog_src
->Index
< 0) {
190 /* This is a negative offset which should be added to the address
193 reladdr
= nir_iadd(b
, reladdr
, nir_imm_int(b
, prog_src
->Index
));
195 deref_arr
->base_offset
= 0;
197 deref_arr
->base_offset
= prog_src
->Index
;
199 deref_arr
->indirect
= nir_src_for_ssa(reladdr
);
201 deref_arr
->deref_array_type
= nir_deref_array_type_direct
;
202 deref_arr
->base_offset
= prog_src
->Index
;
205 nir_builder_instr_insert(b
, &load
->instr
);
207 src
.src
= nir_src_for_ssa(&load
->dest
.ssa
);
211 fprintf(stderr
, "bad uniform src register file: %s (%d)\n",
212 _mesa_register_file_name(file
), file
);
218 fprintf(stderr
, "unknown src register file: %s (%d)\n",
219 _mesa_register_file_name(prog_src
->File
), prog_src
->File
);
224 if (!HAS_EXTENDED_SWIZZLE(prog_src
->Swizzle
) &&
225 (prog_src
->Negate
== NEGATE_NONE
|| prog_src
->Negate
== NEGATE_XYZW
)) {
226 /* The simple non-SWZ case. */
227 for (int i
= 0; i
< 4; i
++)
228 src
.swizzle
[i
] = GET_SWZ(prog_src
->Swizzle
, i
);
230 def
= nir_fmov_alu(b
, src
, 4);
232 if (prog_src
->Negate
)
233 def
= nir_fneg(b
, def
);
235 /* The SWZ instruction allows per-component zero/one swizzles, and also
236 * per-component negation.
238 nir_ssa_def
*chans
[4];
239 for (int i
= 0; i
< 4; i
++) {
240 int swizzle
= GET_SWZ(prog_src
->Swizzle
, i
);
241 if (swizzle
== SWIZZLE_ZERO
) {
242 chans
[i
] = nir_imm_float(b
, 0.0);
243 } else if (swizzle
== SWIZZLE_ONE
) {
244 chans
[i
] = nir_imm_float(b
, 1.0);
246 assert(swizzle
!= SWIZZLE_NIL
);
247 nir_alu_instr
*mov
= nir_alu_instr_create(b
->shader
, nir_op_fmov
);
248 nir_ssa_dest_init(&mov
->instr
, &mov
->dest
.dest
, 1, 32, NULL
);
249 mov
->dest
.write_mask
= 0x1;
251 mov
->src
[0].swizzle
[0] = swizzle
;
252 nir_builder_instr_insert(b
, &mov
->instr
);
254 chans
[i
] = &mov
->dest
.dest
.ssa
;
257 if (prog_src
->Negate
& (1 << i
))
258 chans
[i
] = nir_fneg(b
, chans
[i
]);
260 def
= nir_vec4(b
, chans
[0], chans
[1], chans
[2], chans
[3]);
267 ptn_alu(nir_builder
*b
, nir_op op
, nir_alu_dest dest
, nir_ssa_def
**src
)
269 unsigned num_srcs
= nir_op_infos
[op
].num_inputs
;
270 nir_alu_instr
*instr
= nir_alu_instr_create(b
->shader
, op
);
273 for (i
= 0; i
< num_srcs
; i
++)
274 instr
->src
[i
].src
= nir_src_for_ssa(src
[i
]);
277 nir_builder_instr_insert(b
, &instr
->instr
);
281 ptn_move_dest_masked(nir_builder
*b
, nir_alu_dest dest
,
282 nir_ssa_def
*def
, unsigned write_mask
)
284 if (!(dest
.write_mask
& write_mask
))
287 nir_alu_instr
*mov
= nir_alu_instr_create(b
->shader
, nir_op_fmov
);
292 mov
->dest
.write_mask
&= write_mask
;
293 mov
->src
[0].src
= nir_src_for_ssa(def
);
294 for (unsigned i
= def
->num_components
; i
< 4; i
++)
295 mov
->src
[0].swizzle
[i
] = def
->num_components
- 1;
296 nir_builder_instr_insert(b
, &mov
->instr
);
300 ptn_move_dest(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
*def
)
302 ptn_move_dest_masked(b
, dest
, def
, WRITEMASK_XYZW
);
306 ptn_arl(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
308 ptn_move_dest(b
, dest
, nir_f2i(b
, nir_ffloor(b
, src
[0])));
311 /* EXP - Approximate Exponential Base 2
312 * dst.x = 2^{\lfloor src.x\rfloor}
313 * dst.y = src.x - \lfloor src.x\rfloor
318 ptn_exp(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
320 nir_ssa_def
*srcx
= ptn_channel(b
, src
[0], X
);
322 ptn_move_dest_masked(b
, dest
, nir_fexp2(b
, nir_ffloor(b
, srcx
)), WRITEMASK_X
);
323 ptn_move_dest_masked(b
, dest
, nir_fsub(b
, srcx
, nir_ffloor(b
, srcx
)), WRITEMASK_Y
);
324 ptn_move_dest_masked(b
, dest
, nir_fexp2(b
, srcx
), WRITEMASK_Z
);
325 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_W
);
328 /* LOG - Approximate Logarithm Base 2
329 * dst.x = \lfloor\log_2{|src.x|}\rfloor
330 * dst.y = |src.x| * 2^{-\lfloor\log_2{|src.x|}\rfloor}}
331 * dst.z = \log_2{|src.x|}
335 ptn_log(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
337 nir_ssa_def
*abs_srcx
= nir_fabs(b
, ptn_channel(b
, src
[0], X
));
338 nir_ssa_def
*log2
= nir_flog2(b
, abs_srcx
);
339 nir_ssa_def
*floor_log2
= nir_ffloor(b
, log2
);
341 ptn_move_dest_masked(b
, dest
, floor_log2
, WRITEMASK_X
);
342 ptn_move_dest_masked(b
, dest
,
343 nir_fmul(b
, abs_srcx
,
344 nir_fexp2(b
, nir_fneg(b
, floor_log2
))),
346 ptn_move_dest_masked(b
, dest
, log2
, WRITEMASK_Z
);
347 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_W
);
350 /* DST - Distance Vector
352 * dst.y = src0.y \times src1.y
357 ptn_dst(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
359 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_X
);
360 ptn_move_dest_masked(b
, dest
, nir_fmul(b
, src
[0], src
[1]), WRITEMASK_Y
);
361 ptn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[0]), WRITEMASK_Z
);
362 ptn_move_dest_masked(b
, dest
, nir_fmov(b
, src
[1]), WRITEMASK_W
);
365 /* LIT - Light Coefficients
367 * dst.y = max(src.x, 0.0)
368 * dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
372 ptn_lit(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
374 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_XW
);
376 ptn_move_dest_masked(b
, dest
, nir_fmax(b
, ptn_channel(b
, src
[0], X
),
377 nir_imm_float(b
, 0.0)), WRITEMASK_Y
);
379 if (dest
.write_mask
& WRITEMASK_Z
) {
380 nir_ssa_def
*src0_y
= ptn_channel(b
, src
[0], Y
);
381 nir_ssa_def
*wclamp
= nir_fmax(b
, nir_fmin(b
, ptn_channel(b
, src
[0], W
),
382 nir_imm_float(b
, 128.0)),
383 nir_imm_float(b
, -128.0));
384 nir_ssa_def
*pow
= nir_fpow(b
, nir_fmax(b
, src0_y
, nir_imm_float(b
, 0.0)),
388 if (b
->shader
->options
->native_integers
) {
390 nir_fge(b
, nir_imm_float(b
, 0.0), ptn_channel(b
, src
[0], X
)),
391 nir_imm_float(b
, 0.0),
395 nir_sge(b
, nir_imm_float(b
, 0.0), ptn_channel(b
, src
[0], X
)),
396 nir_imm_float(b
, 0.0),
400 ptn_move_dest_masked(b
, dest
, z
, WRITEMASK_Z
);
405 * dst.x = \cos{src.x}
406 * dst.y = \sin{src.x}
411 ptn_scs(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
413 ptn_move_dest_masked(b
, dest
, nir_fcos(b
, ptn_channel(b
, src
[0], X
)),
415 ptn_move_dest_masked(b
, dest
, nir_fsin(b
, ptn_channel(b
, src
[0], X
)),
417 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 0.0), WRITEMASK_Z
);
418 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_W
);
422 * Emit SLT. For platforms with integers, prefer b2f(flt(...)).
425 ptn_slt(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
427 if (b
->shader
->options
->native_integers
) {
428 ptn_move_dest(b
, dest
, nir_b2f(b
, nir_flt(b
, src
[0], src
[1])));
430 ptn_move_dest(b
, dest
, nir_slt(b
, src
[0], src
[1]));
435 * Emit SGE. For platforms with integers, prefer b2f(fge(...)).
438 ptn_sge(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
440 if (b
->shader
->options
->native_integers
) {
441 ptn_move_dest(b
, dest
, nir_b2f(b
, nir_fge(b
, src
[0], src
[1])));
443 ptn_move_dest(b
, dest
, nir_sge(b
, src
[0], src
[1]));
448 ptn_xpd(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
450 ptn_move_dest_masked(b
, dest
,
453 nir_swizzle(b
, src
[0], SWIZ(Y
, Z
, X
, W
), 3, true),
454 nir_swizzle(b
, src
[1], SWIZ(Z
, X
, Y
, W
), 3, true)),
456 nir_swizzle(b
, src
[1], SWIZ(Y
, Z
, X
, W
), 3, true),
457 nir_swizzle(b
, src
[0], SWIZ(Z
, X
, Y
, W
), 3, true))),
459 ptn_move_dest_masked(b
, dest
, nir_imm_float(b
, 1.0), WRITEMASK_W
);
463 ptn_dp2(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
465 ptn_move_dest(b
, dest
, nir_fdot2(b
, src
[0], src
[1]));
469 ptn_dp3(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
471 ptn_move_dest(b
, dest
, nir_fdot3(b
, src
[0], src
[1]));
475 ptn_dp4(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
477 ptn_move_dest(b
, dest
, nir_fdot4(b
, src
[0], src
[1]));
481 ptn_dph(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
483 ptn_move_dest(b
, dest
, nir_fdph(b
, src
[0], src
[1]));
487 ptn_cmp(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
489 if (b
->shader
->options
->native_integers
) {
490 ptn_move_dest(b
, dest
, nir_bcsel(b
,
491 nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)),
494 ptn_move_dest(b
, dest
, nir_fcsel(b
,
495 nir_slt(b
, src
[0], nir_imm_float(b
, 0.0)),
501 ptn_lrp(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
)
503 ptn_move_dest(b
, dest
, nir_flrp(b
, src
[2], src
[1], src
[0]));
507 ptn_kil(nir_builder
*b
, nir_ssa_def
**src
)
509 nir_ssa_def
*cmp
= b
->shader
->options
->native_integers
?
510 nir_bany_inequal4(b
, nir_flt(b
, src
[0], nir_imm_float(b
, 0.0)), nir_imm_int(b
, 0)) :
511 nir_fany_nequal4(b
, nir_slt(b
, src
[0], nir_imm_float(b
, 0.0)), nir_imm_float(b
, 0.0));
513 nir_intrinsic_instr
*discard
=
514 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard_if
);
515 discard
->src
[0] = nir_src_for_ssa(cmp
);
516 nir_builder_instr_insert(b
, &discard
->instr
);
520 ptn_tex(nir_builder
*b
, nir_alu_dest dest
, nir_ssa_def
**src
,
521 struct prog_instruction
*prog_inst
)
523 nir_tex_instr
*instr
;
527 switch (prog_inst
->Opcode
) {
549 fprintf(stderr
, "unknown tex op %d\n", prog_inst
->Opcode
);
553 if (prog_inst
->TexShadow
)
556 instr
= nir_tex_instr_create(b
->shader
, num_srcs
);
558 instr
->dest_type
= nir_type_float
;
559 instr
->is_shadow
= prog_inst
->TexShadow
;
560 instr
->texture_index
= prog_inst
->TexSrcUnit
;
561 instr
->sampler_index
= prog_inst
->TexSrcUnit
;
563 switch (prog_inst
->TexSrcTarget
) {
564 case TEXTURE_1D_INDEX
:
565 instr
->sampler_dim
= GLSL_SAMPLER_DIM_1D
;
567 case TEXTURE_2D_INDEX
:
568 instr
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
570 case TEXTURE_3D_INDEX
:
571 instr
->sampler_dim
= GLSL_SAMPLER_DIM_3D
;
573 case TEXTURE_CUBE_INDEX
:
574 instr
->sampler_dim
= GLSL_SAMPLER_DIM_CUBE
;
576 case TEXTURE_RECT_INDEX
:
577 instr
->sampler_dim
= GLSL_SAMPLER_DIM_RECT
;
580 fprintf(stderr
, "Unknown texture target %d\n", prog_inst
->TexSrcTarget
);
584 switch (instr
->sampler_dim
) {
585 case GLSL_SAMPLER_DIM_1D
:
586 case GLSL_SAMPLER_DIM_BUF
:
587 instr
->coord_components
= 1;
589 case GLSL_SAMPLER_DIM_2D
:
590 case GLSL_SAMPLER_DIM_RECT
:
591 case GLSL_SAMPLER_DIM_EXTERNAL
:
592 case GLSL_SAMPLER_DIM_MS
:
593 instr
->coord_components
= 2;
595 case GLSL_SAMPLER_DIM_3D
:
596 case GLSL_SAMPLER_DIM_CUBE
:
597 instr
->coord_components
= 3;
599 case GLSL_SAMPLER_DIM_SUBPASS
:
600 unreachable("can't reach");
603 unsigned src_number
= 0;
605 instr
->src
[src_number
].src
=
606 nir_src_for_ssa(nir_swizzle(b
, src
[0], SWIZ(X
, Y
, Z
, W
),
607 instr
->coord_components
, true));
608 instr
->src
[src_number
].src_type
= nir_tex_src_coord
;
611 if (prog_inst
->Opcode
== OPCODE_TXP
) {
612 instr
->src
[src_number
].src
= nir_src_for_ssa(ptn_channel(b
, src
[0], W
));
613 instr
->src
[src_number
].src_type
= nir_tex_src_projector
;
617 if (prog_inst
->Opcode
== OPCODE_TXB
) {
618 instr
->src
[src_number
].src
= nir_src_for_ssa(ptn_channel(b
, src
[0], W
));
619 instr
->src
[src_number
].src_type
= nir_tex_src_bias
;
623 if (prog_inst
->Opcode
== OPCODE_TXL
) {
624 instr
->src
[src_number
].src
= nir_src_for_ssa(ptn_channel(b
, src
[0], W
));
625 instr
->src
[src_number
].src_type
= nir_tex_src_lod
;
629 if (instr
->is_shadow
) {
630 if (instr
->coord_components
< 3)
631 instr
->src
[src_number
].src
= nir_src_for_ssa(ptn_channel(b
, src
[0], Z
));
633 instr
->src
[src_number
].src
= nir_src_for_ssa(ptn_channel(b
, src
[0], W
));
635 instr
->src
[src_number
].src_type
= nir_tex_src_comparitor
;
639 assert(src_number
== num_srcs
);
641 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 4, 32, NULL
);
642 nir_builder_instr_insert(b
, &instr
->instr
);
644 /* Resolve the writemask on the texture op. */
645 ptn_move_dest(b
, dest
, &instr
->dest
.ssa
);
/* Table mapping simple one-to-one Mesa IR opcodes to their NIR ALU
 * equivalents; consulted by ptn_emit_instruction()'s default case.  An
 * entry of 0 (nir_op_*'s zero value) means the opcode has no direct
 * translation here.
 *
 * NOTE(review): the extraction this was reviewed from dropped several table
 * entries (original lines 652-683, 686-687) and the closing "};" —
 * confirm the full entry list against upstream before relying on it.
 */
648 static const nir_op op_trans
[MAX_OPCODE
] = {
650 [OPCODE_ABS
] = nir_op_fabs
,
651 [OPCODE_ADD
] = nir_op_fadd
,
655 [OPCODE_DDX
] = nir_op_fddx
,
656 [OPCODE_DDY
] = nir_op_fddy
,
665 [OPCODE_FLR
] = nir_op_ffloor
,
666 [OPCODE_FRC
] = nir_op_ffract
,
672 [OPCODE_MAX
] = nir_op_fmax
,
673 [OPCODE_MIN
] = nir_op_fmin
,
674 [OPCODE_MOV
] = nir_op_fmov
,
675 [OPCODE_MUL
] = nir_op_fmul
,
684 [OPCODE_SSG
] = nir_op_fsign
,
685 [OPCODE_SUB
] = nir_op_fsub
,
688 [OPCODE_TRUNC
] = nir_op_ftrunc
697 ptn_emit_instruction(struct ptn_compile
*c
, struct prog_instruction
*prog_inst
)
699 nir_builder
*b
= &c
->build
;
701 const unsigned op
= prog_inst
->Opcode
;
703 if (op
== OPCODE_END
)
707 for (i
= 0; i
< 3; i
++) {
708 src
[i
] = ptn_get_src(c
, &prog_inst
->SrcReg
[i
]);
710 nir_alu_dest dest
= ptn_get_dest(c
, &prog_inst
->DstReg
);
716 ptn_move_dest(b
, dest
,
717 nir_frsq(b
, nir_fabs(b
, ptn_channel(b
, src
[0], X
))));
721 ptn_move_dest(b
, dest
, nir_frcp(b
, ptn_channel(b
, src
[0], X
)));
725 ptn_move_dest(b
, dest
, nir_fexp2(b
, ptn_channel(b
, src
[0], X
)));
729 ptn_move_dest(b
, dest
, nir_flog2(b
, ptn_channel(b
, src
[0], X
)));
733 ptn_move_dest(b
, dest
, nir_fpow(b
,
734 ptn_channel(b
, src
[0], X
),
735 ptn_channel(b
, src
[1], X
)));
739 ptn_move_dest(b
, dest
, nir_fcos(b
, ptn_channel(b
, src
[0], X
)));
743 ptn_move_dest(b
, dest
, nir_fsin(b
, ptn_channel(b
, src
[0], X
)));
747 ptn_arl(b
, dest
, src
);
751 ptn_exp(b
, dest
, src
);
755 ptn_log(b
, dest
, src
);
759 ptn_lrp(b
, dest
, src
);
763 ptn_move_dest(b
, dest
, nir_fadd(b
, nir_fmul(b
, src
[0], src
[1]), src
[2]));
767 ptn_dst(b
, dest
, src
);
771 ptn_lit(b
, dest
, src
);
775 ptn_xpd(b
, dest
, src
);
779 ptn_dp2(b
, dest
, src
);
783 ptn_dp3(b
, dest
, src
);
787 ptn_dp4(b
, dest
, src
);
791 ptn_dph(b
, dest
, src
);
799 ptn_cmp(b
, dest
, src
);
803 ptn_scs(b
, dest
, src
);
807 ptn_slt(b
, dest
, src
);
811 ptn_sge(b
, dest
, src
);
819 ptn_tex(b
, dest
, src
, prog_inst
);
823 /* Extended swizzles were already handled in ptn_get_src(). */
824 ptn_alu(b
, nir_op_fmov
, dest
, src
);
831 if (op_trans
[op
] != 0) {
832 ptn_alu(b
, op_trans
[op
], dest
, src
);
834 fprintf(stderr
, "unknown opcode: %s\n", _mesa_opcode_string(op
));
840 if (prog_inst
->Saturate
) {
841 assert(prog_inst
->Saturate
);
842 assert(!dest
.dest
.is_ssa
);
843 ptn_move_dest(b
, dest
, nir_fsat(b
, ptn_src_for_dest(c
, &dest
)));
848 * Puts a NIR intrinsic to store of each PROGRAM_OUTPUT value to the output
849 * variables at the end of the shader.
851 * We don't generate these incrementally as the PROGRAM_OUTPUT values are
852 * written, because there's no output load intrinsic, which means we couldn't
856 ptn_add_output_stores(struct ptn_compile
*c
)
858 nir_builder
*b
= &c
->build
;
860 nir_foreach_variable(var
, &b
->shader
->outputs
) {
861 nir_intrinsic_instr
*store
=
862 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_var
);
863 store
->num_components
= glsl_get_vector_elements(var
->type
);
864 nir_intrinsic_set_write_mask(store
, (1 << store
->num_components
) - 1);
865 store
->variables
[0] =
866 nir_deref_var_create(store
, c
->output_vars
[var
->data
.location
]);
868 if (c
->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
&&
869 var
->data
.location
== FRAG_RESULT_DEPTH
) {
870 /* result.depth has this strange convention of being the .z component of
871 * a vec4 with undefined .xyw components. We resolve it to a scalar, to
872 * match GLSL's gl_FragDepth and the expectations of most backends.
874 nir_alu_src alu_src
= { NIR_SRC_INIT
};
875 alu_src
.src
= nir_src_for_reg(c
->output_regs
[FRAG_RESULT_DEPTH
]);
876 alu_src
.swizzle
[0] = SWIZZLE_Z
;
877 store
->src
[0] = nir_src_for_ssa(nir_fmov_alu(b
, alu_src
, 1));
879 store
->src
[0].reg
.reg
= c
->output_regs
[var
->data
.location
];
881 nir_builder_instr_insert(b
, &store
->instr
);
886 setup_registers_and_variables(struct ptn_compile
*c
)
888 nir_builder
*b
= &c
->build
;
889 struct nir_shader
*shader
= b
->shader
;
891 /* Create input variables. */
892 const int num_inputs
= util_last_bit64(c
->prog
->InputsRead
);
893 for (int i
= 0; i
< num_inputs
; i
++) {
894 if (!(c
->prog
->InputsRead
& BITFIELD64_BIT(i
)))
898 nir_variable_create(shader
, nir_var_shader_in
, glsl_vec4_type(),
899 ralloc_asprintf(shader
, "in_%d", i
));
900 var
->data
.location
= i
;
903 if (c
->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
) {
904 struct gl_fragment_program
*fp
=
905 (struct gl_fragment_program
*) c
->prog
;
907 if (i
== VARYING_SLOT_POS
) {
908 var
->data
.origin_upper_left
= fp
->OriginUpperLeft
;
909 var
->data
.pixel_center_integer
= fp
->PixelCenterInteger
;
910 } else if (i
== VARYING_SLOT_FOGC
) {
911 /* fogcoord is defined as <f, 0.0, 0.0, 1.0>. Make the actual
912 * input variable a float, and create a local containing the
915 var
->type
= glsl_float_type();
917 nir_intrinsic_instr
*load_x
=
918 nir_intrinsic_instr_create(shader
, nir_intrinsic_load_var
);
919 load_x
->num_components
= 1;
920 load_x
->variables
[0] = nir_deref_var_create(load_x
, var
);
921 nir_ssa_dest_init(&load_x
->instr
, &load_x
->dest
, 1, 32, NULL
);
922 nir_builder_instr_insert(b
, &load_x
->instr
);
924 nir_ssa_def
*f001
= nir_vec4(b
, &load_x
->dest
.ssa
, nir_imm_float(b
, 0.0),
925 nir_imm_float(b
, 0.0), nir_imm_float(b
, 1.0));
927 nir_variable
*fullvar
=
928 nir_local_variable_create(b
->impl
, glsl_vec4_type(),
930 nir_intrinsic_instr
*store
=
931 nir_intrinsic_instr_create(shader
, nir_intrinsic_store_var
);
932 store
->num_components
= 4;
933 nir_intrinsic_set_write_mask(store
, WRITEMASK_XYZW
);
934 store
->variables
[0] = nir_deref_var_create(store
, fullvar
);
935 store
->src
[0] = nir_src_for_ssa(f001
);
936 nir_builder_instr_insert(b
, &store
->instr
);
938 /* We inserted the real input into the list so the driver has real
939 * inputs, but we set c->input_vars[i] to the temporary so we use
940 * the splatted value.
942 c
->input_vars
[i
] = fullvar
;
947 c
->input_vars
[i
] = var
;
950 /* Create output registers and variables. */
951 int max_outputs
= util_last_bit(c
->prog
->OutputsWritten
);
952 c
->output_regs
= rzalloc_array(c
, nir_register
*, max_outputs
);
954 for (int i
= 0; i
< max_outputs
; i
++) {
955 if (!(c
->prog
->OutputsWritten
& BITFIELD64_BIT(i
)))
958 /* Since we can't load from outputs in the IR, we make temporaries
959 * for the outputs and emit stores to the real outputs at the end of
962 nir_register
*reg
= nir_local_reg_create(b
->impl
);
963 reg
->num_components
= 4;
965 nir_variable
*var
= rzalloc(shader
, nir_variable
);
966 if (c
->prog
->Target
== GL_FRAGMENT_PROGRAM_ARB
&& i
== FRAG_RESULT_DEPTH
)
967 var
->type
= glsl_float_type();
969 var
->type
= glsl_vec4_type();
970 var
->data
.mode
= nir_var_shader_out
;
971 var
->name
= ralloc_asprintf(var
, "out_%d", i
);
973 var
->data
.location
= i
;
976 c
->output_regs
[i
] = reg
;
978 exec_list_push_tail(&shader
->outputs
, &var
->node
);
979 c
->output_vars
[i
] = var
;
982 /* Create temporary registers. */
983 c
->temp_regs
= rzalloc_array(c
, nir_register
*, c
->prog
->NumTemporaries
);
986 for (unsigned i
= 0; i
< c
->prog
->NumTemporaries
; i
++) {
987 reg
= nir_local_reg_create(b
->impl
);
992 reg
->num_components
= 4;
993 c
->temp_regs
[i
] = reg
;
996 /* Create the address register (for ARB_vertex_program). */
997 reg
= nir_local_reg_create(b
->impl
);
1002 reg
->num_components
= 1;
1007 prog_to_nir(const struct gl_program
*prog
,
1008 const nir_shader_compiler_options
*options
)
1010 struct ptn_compile
*c
;
1011 struct nir_shader
*s
;
1012 gl_shader_stage stage
= _mesa_program_enum_to_shader_stage(prog
->Target
);
1014 c
= rzalloc(NULL
, struct ptn_compile
);
1019 nir_builder_init_simple_shader(&c
->build
, NULL
, stage
, options
);
1021 /* Use the shader_info from gl_program rather than the one nir_builder
1022 * created for us. nir_sweep should clean up the other one for us.
1024 c
->build
.shader
->info
= (shader_info
*) &prog
->info
;
1026 s
= c
->build
.shader
;
1028 if (prog
->Parameters
->NumParameters
> 0) {
1029 c
->parameters
= rzalloc(s
, nir_variable
);
1030 c
->parameters
->type
=
1031 glsl_array_type(glsl_vec4_type(), prog
->Parameters
->NumParameters
);
1032 c
->parameters
->name
= "parameters";
1033 c
->parameters
->data
.read_only
= true;
1034 c
->parameters
->data
.mode
= nir_var_uniform
;
1035 exec_list_push_tail(&s
->uniforms
, &c
->parameters
->node
);
1038 setup_registers_and_variables(c
);
1039 if (unlikely(c
->error
))
1042 for (unsigned int i
= 0; i
< prog
->NumInstructions
; i
++) {
1043 ptn_emit_instruction(c
, &prog
->Instructions
[i
]);
1045 if (unlikely(c
->error
))
1049 ptn_add_output_stores(c
);
1051 s
->info
->name
= ralloc_asprintf(s
, "ARB%d", prog
->Id
);
1052 s
->info
->num_textures
= util_last_bit(prog
->SamplersUsed
);
1053 s
->info
->num_ubos
= 0;
1054 s
->info
->num_abos
= 0;
1055 s
->info
->num_ssbos
= 0;
1056 s
->info
->num_images
= 0;
1057 s
->info
->inputs_read
= prog
->InputsRead
;
1058 s
->info
->outputs_written
= prog
->OutputsWritten
;
1059 s
->info
->system_values_read
= prog
->SystemValuesRead
;
1060 s
->info
->uses_texture_gather
= false;
1061 s
->info
->uses_clip_distance_out
= false;
1062 s
->info
->separate_shader
= false;