VAX: Make `extv' an expander matching the remaining bit-field operations
We have matching insns defined for `sign_extract' and `zero_extract'
expressions, so make the three named patterns for bit-field operations
consistent: make `extv' an expander rather than an insn, taking an
SImode, a QImode, and an SImode general operand for the LOC, SIZE, and
POS operands respectively, just like the `extzv' and `insv' patterns
do.  This matches the machine instructions and gives the middle end
more choice as to which actual insn to choose in a given situation.
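
For illustration, a named expander of that shape would look roughly as
follows; this is a sketch only, and the `general_operand' predicates
shown are an assumption rather than a quote of the actual vax.md
change:

(define_expand "extv"
  [(set (match_operand:SI 0 "general_operand" "")
	(sign_extract:SI (match_operand:SI 1 "general_operand" "")
			 (match_operand:QI 2 "general_operand" "")
			 (match_operand:SI 3 "general_operand" "")))]
  ""
  "")

With an empty condition and no preparation code the expander merely
emits the `sign_extract' RTL and leaves it to the matching insns (such
as `*extv' and `*extv_non_const') to supply the final instruction.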
Given this program:
typedef struct
{
  int f0:1;
  int f1:7;
  int f8:8;
  int f16:16;
} bit_t;

typedef struct
{
  unsigned int f0:1;
  unsigned int f1:7;
  unsigned int f8:8;
  unsigned int f16:16;
} ubit_t;

typedef union
{
  bit_t b;
  int i;
} bit_u;

typedef union
{
  ubit_t b;
  unsigned int i;
} ubit_u;

int
ins1 (bit_u x, int y)
{
  asm volatile ("" : "+r" (x), "+r" (y));
  x.b.f1 = y;
  return x.i;
}

int
ext1 (bit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f1;
}

unsigned int
extz1 (ubit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f1;
}

int
ins8 (bit_u x, int y)
{
  asm volatile ("" : "+r" (x), "+r" (y));
  x.b.f8 = y;
  return x.i;
}

int
ext8 (bit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f8;
}

unsigned int
extz8 (ubit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f8;
}

int
ins16 (bit_u x, int y)
{
  asm volatile ("" : "+r" (x), "+r" (y));
  x.b.f16 = y;
  return x.i;
}

int
ext16 (bit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f16;
}

unsigned int
extz16 (ubit_u x)
{
  asm volatile ("" : "+r" (x));
  return x.b.f16;
}
this results in the following code change:
@@ -16,12 +16,12 @@ ins1:
.globl ext1
.type ext1, @function
ext1:
- .word 0 # 19 [c=0] procedure_entry_mask
- subl2 $4,%sp # 20 [c=32] addsi3
+ .word 0 # 18 [c=0] procedure_entry_mask
+ subl2 $4,%sp # 19 [c=32] addsi3
movl 4(%ap),%r0 # 2 [c=16] movsi_2
- cvtbl %r0,%r0 # 7 [c=4] extendqisi2
- ashl $-1,%r0,%r0 # 14 [c=40] *vax.md:624
- ret # 24 [c=0] return
+ extv $1,$7,%r0,%r0 # 7 [c=60] *extv_non_const
+ cvtbl %r0,%r0 # 13 [c=4] extendqisi2
+ ret # 23 [c=0] return
.size ext1, .-ext1
.align 1
.globl extz1
@@ -49,12 +49,12 @@ ins8:
.globl ext8
.type ext8, @function
ext8:
- .word 0 # 20 [c=0] procedure_entry_mask
- subl2 $4,%sp # 21 [c=32] addsi3
+ .word 0 # 18 [c=0] procedure_entry_mask
+ subl2 $4,%sp # 19 [c=32] addsi3
movl 4(%ap),%r0 # 2 [c=16] movsi_2
- cvtwl %r0,%r0 # 7 [c=4] extendhisi2
- ashl $-8,%r0,%r0 # 15 [c=40] *vax.md:624
- ret # 25 [c=0] return
+ rotl $24,%r0,%r0 # 13 [c=60] *extv_non_const
+ cvtbl %r0,%r0
+ ret # 23 [c=0] return
.size ext8, .-ext8
.align 1
.globl extz8
If there is a performance degradation with the replacement sequences,
then it can and should be addressed within the `*extv_non_const' insn.
gcc/
	* config/vax/vax.md (extv): Rename insn to...
	(*extv): ... this.
	(extv): New expander.