[AArch64] Rename cmp_result iterator
authorRichard Sandiford <richard.sandiford@linaro.org>
Thu, 31 Aug 2017 09:52:38 +0000 (09:52 +0000)
committerRichard Sandiford <rsandifo@gcc.gnu.org>
Thu, 31 Aug 2017 09:52:38 +0000 (09:52 +0000)
The comparison results provided by the V_cmp_result/v_cmp_result
attribute were simply the corresponding integer vector.  We'd also
like to have easy access to the integer vector for SVE, but using
"cmp_result" would be confusing because SVE comparisons return
predicates instead of vectors.  This patch therefore renames the
attributes to the more general V_INT_EQUIV/v_int_equiv instead.

As to the capitalisation: there are already many iterators that use
all lowercase vs. all uppercase names to distinguish all lowercase
vs. all uppercase expansions (e.g. fcvt_target and FCVT_TARGET).
It's also the convention used for the built-in mode/MODE/code/CODE/etc.
attributes.  IMO those names are easier to read at a glance, rather than
relying on a single letter's difference.

2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
    Alan Hayward  <alan.hayward@arm.com>
    David Sherwood  <david.sherwood@arm.com>

gcc/
* config/aarch64/iterators.md (V_cmp_result): Rename to...
(V_INT_EQUIV): ...this.
(v_cmp_result): Rename to...
(v_int_equiv): ...this.
* config/aarch64/aarch64.md (xorsign<mode>3): Update accordingly.
* config/aarch64/aarch64-simd.md (xorsign<mode>3): Likewise.
(copysign<mode>3): Likewise.
(aarch64_simd_bsl<mode>_internal): Likewise.
(aarch64_simd_bsl<mode>): Likewise.
(vec_cmp<mode><mode>): Likewise.
(vcond<mode><mode>): Likewise.
(vcond<v_cmp_mixed><mode>): Likewise.
(vcondu<mode><v_cmp_mixed>): Likewise.
(aarch64_cm<optab><mode>): Likewise.
(aarch64_cmtst<mode>): Likewise.
(aarch64_fac<optab><mode>): Likewise.
(vec_perm_const<mode>): Likewise.
(vcond_mask_<mode><v_cmp_result>): Rename to...
(vcond_mask_<mode><v_int_equiv>): ...this.
(vec_cmp<mode><v_cmp_result>): Rename to...
(vec_cmp<mode><v_int_equiv>): ...this.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>
From-SVN: r251556

gcc/ChangeLog
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64.md
gcc/config/aarch64/iterators.md

index 71728b74ae522fad2550c83c9fe329526b1e969d..8d724510a4528ba2785d32551e909a4f9b468f0a 100644 (file)
@@ -1,3 +1,29 @@
+2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
+
+       * config/aarch64/iterators.md (V_cmp_result): Rename to...
+       (V_INT_EQUIV): ...this.
+       (v_cmp_result): Rename to...
+       (v_int_equiv): ...this.
+       * config/aarch64/aarch64.md (xorsign<mode>3): Update accordingly.
+       * config/aarch64/aarch64-simd.md (xorsign<mode>3): Likewise.
+       (copysign<mode>3): Likewise.
+       (aarch64_simd_bsl<mode>_internal): Likewise.
+       (aarch64_simd_bsl<mode>): Likewise.
+       (vec_cmp<mode><mode>): Likewise.
+       (vcond<mode><mode>): Likewise.
+       (vcond<v_cmp_mixed><mode>): Likewise.
+       (vcondu<mode><v_cmp_mixed>): Likewise.
+       (aarch64_cm<optab><mode>): Likewise.
+       (aarch64_cmtst<mode>): Likewise.
+       (aarch64_fac<optab><mode>): Likewise.
+       (vec_perm_const<mode>): Likewise.
+       (vcond_mask_<mode><v_cmp_result>): Rename to...
+       (vcond_mask_<mode><v_int_equiv>): ...this.
+       (vec_cmp<mode><v_cmp_result>): Rename to...
+       (vec_cmp<mode><v_int_equiv>): ...this.
+
 2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
            Alan Hayward  <alan.hayward@arm.com>
            David Sherwood  <david.sherwood@arm.com>
index f7609616c34d333f0ca6afda79e9780f8dd6ed7f..a94c6fdabdc2bcdc8e8fcdfec70f41e87f752a17 100644 (file)
   "TARGET_SIMD"
 {
 
-  machine_mode imode = <V_cmp_result>mode;
+  machine_mode imode = <V_INT_EQUIV>mode;
   rtx v_bitmask = gen_reg_rtx (imode);
   rtx op1x = gen_reg_rtx (imode);
   rtx op2x = gen_reg_rtx (imode);
   int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
 
   emit_move_insn (v_bitmask,
-                 aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
+                 aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
                                                     HOST_WIDE_INT_M1U << bits));
 
-  emit_insn (gen_and<v_cmp_result>3 (op2x, v_bitmask, arg2));
-  emit_insn (gen_xor<v_cmp_result>3 (op1x, arg1, op2x));
+  emit_insn (gen_and<v_int_equiv>3 (op2x, v_bitmask, arg2));
+  emit_insn (gen_xor<v_int_equiv>3 (op1x, arg1, op2x));
   emit_move_insn (operands[0],
                  lowpart_subreg (<MODE>mode, op1x, imode));
   DONE;
    (match_operand:VHSDF 2 "register_operand")]
   "TARGET_FLOAT && TARGET_SIMD"
 {
-  rtx v_bitmask = gen_reg_rtx (<V_cmp_result>mode);
+  rtx v_bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
   int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
 
   emit_move_insn (v_bitmask,
-                 aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
+                 aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
                                                     HOST_WIDE_INT_M1U << bits));
   emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], v_bitmask,
                                         operands[2], operands[1]));
        (xor:VSDQ_I_DI
           (and:VSDQ_I_DI
             (xor:VSDQ_I_DI
-              (match_operand:<V_cmp_result> 3 "register_operand" "w,0,w")
+              (match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
               (match_operand:VSDQ_I_DI 2 "register_operand" "w,w,0"))
             (match_operand:VSDQ_I_DI 1 "register_operand" "0,w,w"))
-         (match_dup:<V_cmp_result> 3)
+         (match_dup:<V_INT_EQUIV> 3)
        ))]
   "TARGET_SIMD"
   "@
 
 (define_expand "aarch64_simd_bsl<mode>"
   [(match_operand:VALLDIF 0 "register_operand")
-   (match_operand:<V_cmp_result> 1 "register_operand")
+   (match_operand:<V_INT_EQUIV> 1 "register_operand")
    (match_operand:VALLDIF 2 "register_operand")
    (match_operand:VALLDIF 3 "register_operand")]
  "TARGET_SIMD"
   rtx tmp = operands[0];
   if (FLOAT_MODE_P (<MODE>mode))
     {
-      operands[2] = gen_lowpart (<V_cmp_result>mode, operands[2]);
-      operands[3] = gen_lowpart (<V_cmp_result>mode, operands[3]);
-      tmp = gen_reg_rtx (<V_cmp_result>mode);
+      operands[2] = gen_lowpart (<V_INT_EQUIV>mode, operands[2]);
+      operands[3] = gen_lowpart (<V_INT_EQUIV>mode, operands[3]);
+      tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
     }
-  operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
-  emit_insn (gen_aarch64_simd_bsl<v_cmp_result>_internal (tmp,
-                                                         operands[1],
-                                                         operands[2],
-                                                         operands[3]));
+  operands[1] = gen_lowpart (<V_INT_EQUIV>mode, operands[1]);
+  emit_insn (gen_aarch64_simd_bsl<v_int_equiv>_internal (tmp,
+                                                        operands[1],
+                                                        operands[2],
+                                                        operands[3]));
   if (tmp != operands[0])
     emit_move_insn (operands[0], gen_lowpart (<MODE>mode, tmp));
 
   DONE;
 })
 
-(define_expand "vcond_mask_<mode><v_cmp_result>"
+(define_expand "vcond_mask_<mode><v_int_equiv>"
   [(match_operand:VALLDI 0 "register_operand")
    (match_operand:VALLDI 1 "nonmemory_operand")
    (match_operand:VALLDI 2 "nonmemory_operand")
-   (match_operand:<V_cmp_result> 3 "register_operand")]
+   (match_operand:<V_INT_EQUIV> 3 "register_operand")]
   "TARGET_SIMD"
 {
   /* If we have (a = (P) ? -1 : 0);
   /* Similarly, (a = (P) ? 0 : -1) is just inverting the generated mask.  */
   else if (operands[1] == CONST0_RTX (<MODE>mode)
           && operands[2] == CONSTM1_RTX (<MODE>mode))
-    emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[3]));
+    emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[3]));
   else
     {
       if (!REG_P (operands[1]))
     case NE:
       /* Handle NE as !EQ.  */
       emit_insn (gen_aarch64_cmeq<mode> (mask, operands[2], operands[3]));
-      emit_insn (gen_one_cmpl<v_cmp_result>2 (mask, mask));
+      emit_insn (gen_one_cmpl<v_int_equiv>2 (mask, mask));
       break;
 
     case EQ:
   DONE;
 })
 
-(define_expand "vec_cmp<mode><v_cmp_result>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand")
+(define_expand "vec_cmp<mode><v_int_equiv>"
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
        (match_operator 1 "comparison_operator"
            [(match_operand:VDQF 2 "register_operand")
             (match_operand:VDQF 3 "nonmemory_operand")]))]
 {
   int use_zero_form = 0;
   enum rtx_code code = GET_CODE (operands[1]);
-  rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
+  rtx tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
 
   rtx (*comparison) (rtx, rtx, rtx) = NULL;
 
         a   NE b -> !(a EQ b)  */
       gcc_assert (comparison != NULL);
       emit_insn (comparison (operands[0], operands[2], operands[3]));
-      emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+      emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
       break;
 
     case LT:
       emit_insn (gen_aarch64_cmgt<mode> (operands[0],
                                         operands[2], operands[3]));
       emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[3], operands[2]));
-      emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
-      emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+      emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
+      emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
       break;
 
     case UNORDERED:
       emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
       emit_insn (gen_aarch64_cmge<mode> (operands[0],
                                         operands[3], operands[2]));
-      emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
-      emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
+      emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
+      emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
       break;
 
     case ORDERED:
       emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
       emit_insn (gen_aarch64_cmge<mode> (operands[0],
                                         operands[3], operands[2]));
-      emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
+      emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
       break;
 
     default:
          (match_operand:VALLDI 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
-  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+  rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
   enum rtx_code code = GET_CODE (operands[3]);
 
   /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
                                    operands[4], operands[5]);
       std::swap (operands[1], operands[2]);
     }
-  emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
-                                             operands[4], operands[5]));
-  emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
-                                                 operands[2], mask));
+  emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
+                                            operands[4], operands[5]));
+  emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+                                                operands[2], mask));
 
   DONE;
 })
          (match_operand:<V_cmp_mixed> 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
-  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+  rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
   enum rtx_code code = GET_CODE (operands[3]);
 
   /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
                                    operands[4], operands[5]);
       std::swap (operands[1], operands[2]);
     }
-  emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
-                                             operands[4], operands[5]));
-  emit_insn (gen_vcond_mask_<v_cmp_mixed><v_cmp_result> (
+  emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
+                                            operands[4], operands[5]));
+  emit_insn (gen_vcond_mask_<v_cmp_mixed><v_int_equiv> (
                                                operands[0], operands[1],
                                                operands[2], mask));
 
     }
   emit_insn (gen_vec_cmp<mode><mode> (mask, operands[3],
                                      operands[4], operands[5]));
-  emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
-                                                 operands[2], mask));
+  emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+                                                operands[2], mask));
   DONE;
 })
 
          (match_operand:VDQF 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
-  rtx mask = gen_reg_rtx (<V_cmp_result>mode);
+  rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
   enum rtx_code code = GET_CODE (operands[3]);
 
   /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
   emit_insn (gen_vec_cmp<v_cmp_mixed><v_cmp_mixed> (
                                                  mask, operands[3],
                                                  operands[4], operands[5]));
-  emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
-                                                 operands[2], mask));
+  emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
+                                                operands[2], mask));
   DONE;
 })
 
 ;; have different ideas of what should be passed to this pattern.
 
 (define_insn "aarch64_cm<optab><mode>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
-       (neg:<V_cmp_result>
-         (COMPARISONS:<V_cmp_result>
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+       (neg:<V_INT_EQUIV>
+         (COMPARISONS:<V_INT_EQUIV>
            (match_operand:VDQ_I 1 "register_operand" "w,w")
            (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero" "w,ZDz")
          )))]
 ;; cm(hs|hi)
 
 (define_insn "aarch64_cm<optab><mode>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
-       (neg:<V_cmp_result>
-         (UCOMPARISONS:<V_cmp_result>
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+       (neg:<V_INT_EQUIV>
+         (UCOMPARISONS:<V_INT_EQUIV>
            (match_operand:VDQ_I 1 "register_operand" "w")
            (match_operand:VDQ_I 2 "register_operand" "w")
          )))]
 ;; plus (eq (and x y) 0) -1.
 
 (define_insn "aarch64_cmtst<mode>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
-       (plus:<V_cmp_result>
-         (eq:<V_cmp_result>
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+       (plus:<V_INT_EQUIV>
+         (eq:<V_INT_EQUIV>
            (and:VDQ_I
              (match_operand:VDQ_I 1 "register_operand" "w")
              (match_operand:VDQ_I 2 "register_operand" "w"))
            (match_operand:VDQ_I 3 "aarch64_simd_imm_zero"))
-         (match_operand:<V_cmp_result> 4 "aarch64_simd_imm_minus_one")))
+         (match_operand:<V_INT_EQUIV> 4 "aarch64_simd_imm_minus_one")))
   ]
   "TARGET_SIMD"
   "cmtst\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
 ;; fcm(eq|ge|gt|le|lt)
 
 (define_insn "aarch64_cm<optab><mode>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
-       (neg:<V_cmp_result>
-         (COMPARISONS:<V_cmp_result>
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
+       (neg:<V_INT_EQUIV>
+         (COMPARISONS:<V_INT_EQUIV>
            (match_operand:VHSDF_HSDF 1 "register_operand" "w,w")
            (match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero" "w,YDz")
          )))]
 ;; generating fac(ge|gt).
 
 (define_insn "aarch64_fac<optab><mode>"
-  [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
-       (neg:<V_cmp_result>
-         (FAC_COMPARISONS:<V_cmp_result>
+  [(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
+       (neg:<V_INT_EQUIV>
+         (FAC_COMPARISONS:<V_INT_EQUIV>
            (abs:VHSDF_HSDF
              (match_operand:VHSDF_HSDF 1 "register_operand" "w"))
            (abs:VHSDF_HSDF
   [(match_operand:VALL_F16 0 "register_operand")
    (match_operand:VALL_F16 1 "register_operand")
    (match_operand:VALL_F16 2 "register_operand")
-   (match_operand:<V_cmp_result> 3)]
+   (match_operand:<V_INT_EQUIV> 3)]
   "TARGET_SIMD"
 {
   if (aarch64_expand_vec_perm_const (operands[0], operands[1],
index 7552bf36ba29044c0806330c05bb8bf24f865dd8..bb7f2c045329b378ccc9d09b2cdee9f9fa8c1459 100644 (file)
   "TARGET_FLOAT && TARGET_SIMD"
 {
 
-  machine_mode imode = <V_cmp_result>mode;
+  machine_mode imode = <V_INT_EQUIV>mode;
   rtx mask = gen_reg_rtx (imode);
   rtx op1x = gen_reg_rtx (imode);
   rtx op2x = gen_reg_rtx (imode);
   emit_move_insn (mask, GEN_INT (trunc_int_for_mode (HOST_WIDE_INT_M1U << bits,
                                                     imode)));
 
-  emit_insn (gen_and<v_cmp_result>3 (op2x, mask,
-                                    lowpart_subreg (imode, operands[2],
-                                                    <MODE>mode)));
-  emit_insn (gen_xor<v_cmp_result>3 (op1x,
-                                    lowpart_subreg (imode, operands[1],
-                                                    <MODE>mode),
-                                    op2x));
+  emit_insn (gen_and<v_int_equiv>3 (op2x, mask,
+                                   lowpart_subreg (imode, operands[2],
+                                                   <MODE>mode)));
+  emit_insn (gen_xor<v_int_equiv>3 (op1x,
+                                   lowpart_subreg (imode, operands[1],
+                                                   <MODE>mode),
+                                   op2x));
   emit_move_insn (operands[0],
                  lowpart_subreg (<MODE>mode, op1x, imode));
   DONE;
index c8cb54f71ab05b07a5e51a88d346e12952dbc986..3e387678b45a45a575825b3d95ec215b228c5fc6 100644 (file)
 ;; Double vector types for ALLX.
 (define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
 
-;; Mode of result of comparison operations.
-(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
-                               (V4HI "V4HI") (V8HI  "V8HI")
-                               (V2SI "V2SI") (V4SI  "V4SI")
-                               (DI   "DI")   (V2DI  "V2DI")
-                               (V4HF "V4HI") (V8HF  "V8HI")
-                               (V2SF "V2SI") (V4SF  "V4SI")
-                               (V2DF "V2DI") (DF    "DI")
-                               (SF   "SI")   (HF    "HI")])
-
-;; Lower case mode of results of comparison operations.
-(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
-                               (V4HI "v4hi") (V8HI  "v8hi")
-                               (V2SI "v2si") (V4SI  "v4si")
-                               (DI   "di")   (V2DI  "v2di")
-                               (V4HF "v4hi") (V8HF  "v8hi")
-                               (V2SF "v2si") (V4SF  "v4si")
-                               (V2DF "v2di") (DF    "di")
-                               (SF   "si")])
+;; Mode with floating-point values replaced by like-sized integers.
+(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
+                              (V4HI "V4HI") (V8HI  "V8HI")
+                              (V2SI "V2SI") (V4SI  "V4SI")
+                              (DI   "DI")   (V2DI  "V2DI")
+                              (V4HF "V4HI") (V8HF  "V8HI")
+                              (V2SF "V2SI") (V4SF  "V4SI")
+                              (V2DF "V2DI") (DF    "DI")
+                              (SF   "SI")   (HF    "HI")])
+
+;; Lower case mode with floating-point values replaced by like-sized integers.
+(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi")
+                              (V4HI "v4hi") (V8HI  "v8hi")
+                              (V2SI "v2si") (V4SI  "v4si")
+                              (DI   "di")   (V2DI  "v2di")
+                              (V4HF "v4hi") (V8HF  "v8hi")
+                              (V2SF "v2si") (V4SF  "v4si")
+                              (V2DF "v2di") (DF    "di")
+                              (SF   "si")])
 
 ;; Mode for vector conditional operations where the comparison has
 ;; different type from the lhs.