The middle operand would have gone entirely unchecked, allowing e.g.
	vmovss %xmm0, %esp, %xmm2
to assemble successfully, and e.g.
	vmovss %xmm0, $4, %xmm2
to trigger an internal error. While at it, also drop a related comment:
it hasn't been applicable since the introduction of 3-operand patterns
with D set (and arguably never belonged there in the first place, as
reverse-matched insns don't make it to that code anyway).
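For illustration only (a sketch in the AT&T operand order used by the testcase
additions further down, not part of the patch itself), the forms involved look
like this; the register-only 3-operand VMOVSS is the template being
reverse-matched, while the bogus middle operands are what must now be refused:

	# Legitimate 3-operand register form (Intel order: vmovss xmm2, xmm4, xmm0).
	vmovss	%xmm0, %xmm4, %xmm2
	# Middle operand is not an XMM register; previously assembled, now rejected.
	vmovss	%xmm0, %esp, %xmm2
	# Immediate as middle operand; previously an internal error, now a diagnostic.
	vmovss	%xmm0, $4, %xmm2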
/* Try reversing direction of operands. */
overlap0 = operand_type_and (i.types[0], operand_types[i.operands - 1]);
overlap1 = operand_type_and (i.types[i.operands - 1], operand_types[0]);
+ overlap2 = operand_type_and (i.types[1], operand_types[1]);
+ gas_assert (t->operands != 3 || !check_register);
if (!operand_type_match (overlap0, i.types[0])
|| !operand_type_match (overlap1, i.types[i.operands - 1])
+ || (t->operands == 3
+ && !operand_type_match (overlap2, i.types[1]))
|| (check_register
&& !operand_type_register_match (i.types[0],
operand_types[i.operands - 1],
continue;
/* Fall through. */
case 3:
- /* Here we make use of the fact that there are no
- reverse match 3 operand instructions. */
if (!operand_type_match (overlap2, i.types[2])
|| ((check_register & 5) == 5
&& !operand_type_register_match (i.types[0],
.*:4: Error: .*
.*:5: Error: .*
.*:6: Error: .*
-.*:9: Error:.* ambiguous .* `vcvtpd2dq'
-.*:10: Error:.* ambiguous .* `vcvtpd2ps'
-.*:11: Error:.* ambiguous .* `vcvttpd2dq'
+.*:8: Error: .*
+.*:9: Error: .*
+.*:10: Error: .*
+.*:11: Error: .*
+.*:12: Error: .*
+.*:15: Error:.* ambiguous .* `vcvtpd2dq'
+.*:16: Error:.* ambiguous .* `vcvtpd2ps'
+.*:17: Error:.* ambiguous .* `vcvttpd2dq'
GAS LISTING .*
-
-
-[ ]*1[ ]+\# Check illegal AVX instructions
-[ ]*2[ ]+\.text
-[ ]*3[ ]+_start:
-[ ]*4[ ]+vcvtpd2dq \(%ecx\),%xmm2
-[ ]*5[ ]+vcvtpd2ps \(%ecx\),%xmm2
-[ ]*6[ ]+vcvttpd2dq \(%ecx\),%xmm2
-[ ]*7[ ]+
-[ ]*8[ ]+\.intel_syntax noprefix
-[ ]*9[ ]+vcvtpd2dq xmm2,\[ecx\]
-[ ]*10[ ]+vcvtpd2ps xmm2,\[ecx\]
-[ ]*11[ ]+vcvttpd2dq xmm2,\[ecx\]
+#pass
vcvtpd2ps (%ecx),%xmm2
vcvttpd2dq (%ecx),%xmm2
+ vmovss %xmm0, (%esp), %xmm2
+ vmovss %xmm0, $4, %xmm2
+ vmovss %xmm0, %cr0, %xmm2
+ vmovss %xmm0, %ymm4, %xmm2
+ vmovss %xmm0, %mm4, %xmm2
+
.intel_syntax noprefix
vcvtpd2dq xmm2,[ecx]
vcvtpd2ps xmm2,[ecx]