/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_instr_set.h"
#include "nir_vla.h"

#define HASH(hash, data) _mesa_fnv32_1a_accumulate((hash), (data))
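
/* HASH() folds the raw bytes of its second argument into the running FNV-1a
 * hash, so pointers such as src->ssa below are hashed by address -- that is,
 * by SSA-def identity -- which is exactly what value numbering over SSA
 * wants.
 */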

static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
   assert(src->is_ssa);

   hash = HASH(hash, src->ssa);

   return hash;
}

static uint32_t
hash_alu_src(uint32_t hash, const nir_alu_src *src, unsigned num_components)
{
   hash = HASH(hash, src->abs);
   hash = HASH(hash, src->negate);

   for (unsigned i = 0; i < num_components; i++)
      hash = HASH(hash, src->swizzle[i]);

   hash = hash_src(hash, &src->src);

   return hash;
}

static uint32_t
hash_alu(uint32_t hash, const nir_alu_instr *instr)
{
   hash = HASH(hash, instr->op);
   hash = HASH(hash, instr->dest.dest.ssa.num_components);

   if (nir_op_infos[instr->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
      assert(nir_op_infos[instr->op].num_inputs == 2);
      uint32_t hash0 = hash_alu_src(hash, &instr->src[0],
                                    nir_ssa_alu_instr_src_components(instr, 0));
      uint32_t hash1 = hash_alu_src(hash, &instr->src[1],
                                    nir_ssa_alu_instr_src_components(instr, 1));
      /* For commutative operations, we need some commutative way of
       * combining the hashes. One option would be to XOR them but that
       * means that anything with two identical sources will hash to 0 and
       * that's common enough we probably don't want the guaranteed
       * collision. Either addition or multiplication will also work.
       */
      hash = hash0 * hash1;
   } else {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         hash = hash_alu_src(hash, &instr->src[i],
                             nir_ssa_alu_instr_src_components(instr, i));
      }
   }

   return hash;
}
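
/* Worked example of the commutative case above (illustrative, not from the
 * original source): for "fadd ssa_1, ssa_2" and "fadd ssa_2, ssa_1", hash0
 * and hash1 simply trade places, and hash0 * hash1 == hash1 * hash0, so both
 * instructions land in the same bucket -- mirroring the swapped-source
 * comparison in nir_instrs_equal().
 */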

static uint32_t
hash_load_const(uint32_t hash, const nir_load_const_instr *instr)
{
   hash = HASH(hash, instr->def.num_components);

   hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value.f,
                                          instr->def.num_components
                                             * sizeof(instr->value.f[0]));

   return hash;
}
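
/* Note that constant values are hashed and compared bit-exactly: under this
 * scheme 0.0 and -0.0 are distinct, and two NaNs with the same bit pattern
 * compare equal, which is what the memcmp()-based equality check in
 * nir_instrs_equal() requires.
 */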

static int
cmp_phi_src(const void *data1, const void *data2)
{
   nir_phi_src *src1 = *(nir_phi_src **)data1;
   nir_phi_src *src2 = *(nir_phi_src **)data2;
   return src1->pred - src2->pred;
}

static uint32_t
hash_phi(uint32_t hash, const nir_phi_instr *instr)
{
   hash = HASH(hash, instr->instr.block);

   /* sort sources by predecessor, since the order shouldn't matter */
   unsigned num_preds = instr->instr.block->predecessors->entries;
   NIR_VLA(nir_phi_src *, srcs, num_preds);
   unsigned i = 0;
   nir_foreach_phi_src(instr, src) {
      srcs[i++] = src;
   }

   qsort(srcs, num_preds, sizeof(nir_phi_src *), cmp_phi_src);

   for (i = 0; i < num_preds; i++) {
      hash = hash_src(hash, &srcs[i]->src);
      hash = HASH(hash, srcs[i]->pred);
   }

   return hash;
}

static uint32_t
hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   hash = HASH(hash, instr->intrinsic);

   if (info->has_dest)
      hash = HASH(hash, instr->dest.ssa.num_components);

   assert(info->num_variables == 0);

   hash = _mesa_fnv32_1a_accumulate_block(hash, instr->const_index,
                                          info->num_indices
                                             * sizeof(instr->const_index[0]));

   return hash;
}

static uint32_t
hash_tex(uint32_t hash, const nir_tex_instr *instr)
{
   hash = HASH(hash, instr->op);
   hash = HASH(hash, instr->num_srcs);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      hash = HASH(hash, instr->src[i].src_type);
      hash = hash_src(hash, &instr->src[i].src);
   }

   hash = HASH(hash, instr->coord_components);
   hash = HASH(hash, instr->sampler_dim);
   hash = HASH(hash, instr->is_array);
   hash = HASH(hash, instr->is_shadow);
   hash = HASH(hash, instr->is_new_style_shadow);
   hash = HASH(hash, instr->const_offset);
   unsigned component = instr->component;
   hash = HASH(hash, component);
   hash = HASH(hash, instr->texture_index);
   hash = HASH(hash, instr->texture_array_size);
   hash = HASH(hash, instr->sampler_index);

   assert(!instr->sampler);

   return hash;
}
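
/* Aside (our gloss, not from the original source): instr->component is
 * likely copied into a local before hashing because HASH() takes the address
 * of its argument, and a bitfield's address cannot be taken.
 */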

/* Computes a hash of an instruction for use in a hash table. Note that this
 * will only work for instructions where instr_can_rewrite() returns true, and
 * it should return identical hashes for two instructions that are the same
 * according to nir_instrs_equal().
 */

static uint32_t
hash_instr(const void *data)
{
   const nir_instr *instr = data;
   uint32_t hash = _mesa_fnv32_1a_offset_bias;

   switch (instr->type) {
   case nir_instr_type_alu:
      hash = hash_alu(hash, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_load_const:
      hash = hash_load_const(hash, nir_instr_as_load_const(instr));
      break;
   case nir_instr_type_phi:
      hash = hash_phi(hash, nir_instr_as_phi(instr));
      break;
   case nir_instr_type_intrinsic:
      hash = hash_intrinsic(hash, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_tex:
      hash = hash_tex(hash, nir_instr_as_tex(instr));
      break;
   default:
      unreachable("Invalid instruction type");
   }

   return hash;
}

bool
nir_srcs_equal(nir_src src1, nir_src src2)
{
   if (src1.is_ssa) {
      if (src2.is_ssa) {
         return src1.ssa == src2.ssa;
      } else {
         return false;
      }
   } else {
      if (src2.is_ssa) {
         return false;
      } else {
         if ((src1.reg.indirect == NULL) != (src2.reg.indirect == NULL))
            return false;

         if (src1.reg.indirect) {
            if (!nir_srcs_equal(*src1.reg.indirect, *src2.reg.indirect))
               return false;
         }

         return src1.reg.reg == src2.reg.reg &&
                src1.reg.base_offset == src2.reg.base_offset;
      }
   }
}
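
/* Within this file only the SSA branch above is ever taken, since
 * instr_can_rewrite() filters out anything touching registers; the register
 * comparison exists because nir_srcs_equal() is a public helper used by
 * other passes as well.
 */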

static bool
nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                   unsigned src1, unsigned src2)
{
   if (alu1->src[src1].abs != alu2->src[src2].abs ||
       alu1->src[src1].negate != alu2->src[src2].negate)
      return false;

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1->src[src1].swizzle[i] != alu2->src[src2].swizzle[i])
         return false;
   }

   return nir_srcs_equal(alu1->src[src1].src, alu2->src[src2].src);
}

/* Returns "true" if two instructions are equal. Note that this will only
 * work for the subset of instructions defined by instr_can_rewrite(). Also,
 * it should only return "true" for instructions that hash_instr() will return
 * the same hash for (ignoring collisions, of course).
 */

static bool
nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
{
   if (instr1->type != instr2->type)
      return false;

   switch (instr1->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

      if (alu1->op != alu2->op)
         return false;

      /* TODO: We can probably actually do something more intelligent such
       * as allowing different numbers and taking a maximum or something
       * here */
      if (alu1->dest.dest.ssa.num_components !=
          alu2->dest.dest.ssa.num_components)
         return false;

      if (nir_op_infos[alu1->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
         assert(nir_op_infos[alu1->op].num_inputs == 2);
         return (nir_alu_srcs_equal(alu1, alu2, 0, 0) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 1)) ||
                (nir_alu_srcs_equal(alu1, alu2, 0, 1) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 0));
      } else {
         for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
            if (!nir_alu_srcs_equal(alu1, alu2, i, i))
               return false;
         }
      }

      return true;
   }
   case nir_instr_type_tex: {
      nir_tex_instr *tex1 = nir_instr_as_tex(instr1);
      nir_tex_instr *tex2 = nir_instr_as_tex(instr2);

      if (tex1->op != tex2->op)
         return false;

      if (tex1->num_srcs != tex2->num_srcs)
         return false;
      for (unsigned i = 0; i < tex1->num_srcs; i++) {
         if (tex1->src[i].src_type != tex2->src[i].src_type ||
             !nir_srcs_equal(tex1->src[i].src, tex2->src[i].src)) {
            return false;
         }
      }

      if (tex1->coord_components != tex2->coord_components ||
          tex1->sampler_dim != tex2->sampler_dim ||
          tex1->is_array != tex2->is_array ||
          tex1->is_shadow != tex2->is_shadow ||
          tex1->is_new_style_shadow != tex2->is_new_style_shadow ||
          memcmp(tex1->const_offset, tex2->const_offset,
                 sizeof(tex1->const_offset)) != 0 ||
          tex1->component != tex2->component ||
          tex1->texture_index != tex2->texture_index ||
          tex1->texture_array_size != tex2->texture_array_size ||
          tex1->sampler_index != tex2->sampler_index) {
         return false;
      }

      /* Don't support un-lowered sampler derefs currently. */
      assert(!tex1->texture && !tex1->sampler &&
             !tex2->texture && !tex2->sampler);

      return true;
   }
   case nir_instr_type_load_const: {
      nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
      nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);

      if (load1->def.num_components != load2->def.num_components)
         return false;

      return memcmp(load1->value.f, load2->value.f,
                    load1->def.num_components * sizeof(*load2->value.f)) == 0;
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
      nir_phi_instr *phi2 = nir_instr_as_phi(instr2);

      if (phi1->instr.block != phi2->instr.block)
         return false;

      nir_foreach_phi_src(phi1, src1) {
         nir_foreach_phi_src(phi2, src2) {
            if (src1->pred == src2->pred) {
               if (!nir_srcs_equal(src1->src, src2->src))
                  return false;

               break;
            }
         }
      }

      return true;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrinsic1 = nir_instr_as_intrinsic(instr1);
      nir_intrinsic_instr *intrinsic2 = nir_instr_as_intrinsic(instr2);
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[intrinsic1->intrinsic];

      if (intrinsic1->intrinsic != intrinsic2->intrinsic ||
          intrinsic1->num_components != intrinsic2->num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.num_components !=
                            intrinsic2->dest.ssa.num_components)
         return false;

      for (unsigned i = 0; i < info->num_srcs; i++) {
         if (!nir_srcs_equal(intrinsic1->src[i], intrinsic2->src[i]))
            return false;
      }

      assert(info->num_variables == 0);

      for (unsigned i = 0; i < info->num_indices; i++) {
         if (intrinsic1->const_index[i] != intrinsic2->const_index[i])
            return false;
      }

      return true;
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}

static bool
src_is_ssa(nir_src *src, void *data)
{
   (void) data;
   return src->is_ssa;
}

static bool
dest_is_ssa(nir_dest *dest, void *data)
{
   (void) data;
   return dest->is_ssa;
}

/* This function determines if uses of an instruction can safely be rewritten
 * to use another identical instruction instead. Note that this function must
 * be kept in sync with hash_instr() and nir_instrs_equal() -- only
 * instructions that pass this test will be handed on to those functions, and
 * conversely they must handle everything that this function returns true for.
 */

static bool
instr_can_rewrite(nir_instr *instr)
{
   /* We only handle SSA. */
   if (!nir_foreach_dest(instr, dest_is_ssa, NULL) ||
       !nir_foreach_src(instr, src_is_ssa, NULL))
      return false;

   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_load_const:
   case nir_instr_type_phi:
      return true;
   case nir_instr_type_tex: {
      nir_tex_instr *tex = nir_instr_as_tex(instr);

      /* Don't support un-lowered sampler derefs currently. */
      if (tex->texture || tex->sampler)
         return false;

      return true;
   }
   case nir_instr_type_intrinsic: {
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];
      return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER) &&
             info->num_variables == 0; /* not implemented yet */
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return false;
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}
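
/* Rationale for the intrinsic flags above (our gloss, not original): letting
 * one intrinsic stand in for another both removes an instruction
 * (NIR_INTRINSIC_CAN_ELIMINATE) and makes its result live at a different
 * point in the program (NIR_INTRINSIC_CAN_REORDER), so an intrinsic must
 * carry both flags before it is safe to deduplicate.
 */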

static nir_ssa_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;
   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;
   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;
   case nir_instr_type_intrinsic:
      assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
      return &nir_instr_as_intrinsic(instr)->dest.ssa;
   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;
   default:
      unreachable("We never ask for any of these");
   }
}

static bool
cmp_func(const void *data1, const void *data2)
{
   return nir_instrs_equal(data1, data2);
}

struct set *
nir_instr_set_create(void *mem_ctx)
{
   return _mesa_set_create(mem_ctx, hash_instr, cmp_func);
}

void
nir_instr_set_destroy(struct set *instr_set)
{
   _mesa_set_destroy(instr_set, NULL);
}

bool
nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return false;

   struct set_entry *entry = _mesa_set_search(instr_set, instr);
   if (entry) {
      nir_ssa_def *def = nir_instr_get_dest_ssa_def(instr);
      nir_ssa_def *new_def =
         nir_instr_get_dest_ssa_def((nir_instr *) entry->key);
      nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(new_def));
      return true;
   }

   _mesa_set_add(instr_set, instr);
   return false;
}
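
/* A minimal usage sketch (hypothetical caller, not part of this file): a CSE
 * pass visiting blocks in dominance order could do
 *
 *    nir_foreach_instr_safe(block, instr) {
 *       if (nir_instr_set_add_or_rewrite(instr_set, instr))
 *          nir_instr_remove(instr);
 *    }
 *
 * and then call nir_instr_set_remove() for each surviving instruction of the
 * block once its children in the dominance tree have been processed.
 */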

void
nir_instr_set_remove(struct set *instr_set, nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return;

   struct set_entry *entry = _mesa_set_search(instr_set, instr);
   if (entry)
      _mesa_set_remove(instr_set, entry);
}