* Only the default rounding mode is intended for best performance.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performance.
+ *
+ * In the CFI related comments, 'previousOffset' refers to the previous offset
+ * from sp used to compute the CFA.
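+ *
+ * As an illustration only (no routine below is reproduced here), a prologue
+ * that pushes two registers would be annotated like this:
+ *
+ *   push  {r4, lr}             @ sp -= 8
+ *   .cfi_adjust_cfa_offset 8   @ CFA is now sp + previousOffset + 8
+ *   .cfi_rel_offset r4, 0      @ previous r4 is saved at sp
+ *   .cfi_rel_offset lr, 4      @ previous lr is saved at sp + 4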
*/
+ .cfi_sections .debug_frame
#ifndef __ARMEB__
#define xl r0
ARM_FUNC_START negdf2
ARM_FUNC_ALIAS aeabi_dneg negdf2
+ CFI_START_FUNCTION
@ flip sign bit
eor xh, xh, #0x80000000
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_dneg
FUNC_END negdf2
#ifdef L_arm_addsubdf3
ARM_FUNC_START aeabi_drsub
+ CFI_START_FUNCTION
eor xh, xh, #0x80000000 @ flip sign bit of first arg
b 1f
ARM_FUNC_START adddf3
ARM_FUNC_ALIAS aeabi_dadd adddf3
-1: do_push {r4, r5, lr}
+1: do_push {r4, r5, lr} @ sp -= 12
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
@ Look for zeroes, equal values, INF, or NAN.
shift1 lsl, r4, xh, #1
@ Since this is not common case, rescale them off line.
teq r4, r5
beq LSYM(Lad_d)
+
+@ CFI note: we're lucky that the branches to Lad_* that appear after this function
+@ have a CFI state that's exactly the same as the one we're in at this
+@ point. Otherwise the CFI would change to a different state after the branch,
+@ which would be disastrous for backtracing.
LSYM(Lad_x):
@ Compensate for the exponent overlapping the mantissa MSB added later
orrne xh, xh, #0x00080000 @ quiet NAN
RETLDM "r4, r5"
+ CFI_END_FUNCTION
FUNC_END aeabi_dsub
FUNC_END subdf3
FUNC_END aeabi_dadd
ARM_FUNC_START floatunsidf
ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
+ CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
- do_push {r4, r5, lr}
+
+ do_push {r4, r5, lr} @ sp -= 12
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
+
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
mov r5, #0 @ sign bit is 0
mov xh, #0
b LSYM(Lad_l)
+ CFI_END_FUNCTION
FUNC_END aeabi_ui2d
FUNC_END floatunsidf
ARM_FUNC_START floatsidf
ARM_FUNC_ALIAS aeabi_i2d floatsidf
+ CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
- do_push {r4, r5, lr}
+
+ do_push {r4, r5, lr} @ sp -= 12
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
+
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
ands r5, r0, #0x80000000 @ sign bit in r5
mov xh, #0
b LSYM(Lad_l)
+ CFI_END_FUNCTION
FUNC_END aeabi_i2d
FUNC_END floatsidf
ARM_FUNC_START extendsfdf2
ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
+ CFI_START_FUNCTION
movs r2, r0, lsl #1 @ toss sign bit
mov xh, r2, asr #3 @ stretch exponent
@ value was denormalized. We can normalize it now.
do_push {r4, r5, lr}
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
+
mov r4, #0x380 @ setup corresponding exponent
and r5, xh, #0x80000000 @ move sign bit in r5
bic xh, xh, #0x80000000
b LSYM(Lad_l)
+ CFI_END_FUNCTION
FUNC_END aeabi_f2d
FUNC_END extendsfdf2
ARM_FUNC_START floatundidf
ARM_FUNC_ALIAS aeabi_ul2d floatundidf
+ CFI_START_FUNCTION
+ .cfi_remember_state @ Save the current CFI state.
orrs r2, r0, r1
do_it eq
RETc(eq)
- do_push {r4, r5, lr}
+ do_push {r4, r5, lr} @ sp -= 12
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
mov r5, #0
b 2f
ARM_FUNC_START floatdidf
ARM_FUNC_ALIAS aeabi_l2d floatdidf
+ .cfi_restore_state
+ @ Restore the CFI state we saved above. If we didn't do this then the
+ @ following instructions would have the CFI state that was set by the
+ @ offset adjustments made in floatundidf.
orrs r2, r0, r1
do_it eq
RETc(eq)
- do_push {r4, r5, lr}
+ do_push {r4, r5, lr} @ sp -= 12
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset lr, 8
ands r5, ah, #0x80000000 @ sign bit in r5
bpl 2f
add r4, r4, r2
b LSYM(Lad_p)
+ CFI_END_FUNCTION
FUNC_END floatdidf
FUNC_END aeabi_l2d
FUNC_END floatundidf
ARM_FUNC_START muldf3
ARM_FUNC_ALIAS aeabi_dmul muldf3
- do_push {r4, r5, r6, lr}
+ CFI_START_FUNCTION
+
+ do_push {r4, r5, r6, lr} @ sp -= 16
+ .cfi_adjust_cfa_offset 16 @ CFA is now sp + previousOffset + 16
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 12.
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
and r6, r6, #0x80000000
@ Well, no way to make it shorter without the umull instruction.
- stmfd sp!, {r6, r7, r8, r9, sl, fp}
+ stmfd sp!, {r6, r7, r8, r9, sl, fp} @ sp -= 24
+ .cfi_remember_state @ Save the current CFI state.
+ .cfi_adjust_cfa_offset 24 @ CFA is now sp + previousOffset + 24.
+ .cfi_rel_offset r6, 0 @ Registers are saved from sp to sp + 20.
+ .cfi_rel_offset r7, 4
+ .cfi_rel_offset r8, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset sl, 16
+ .cfi_rel_offset fp, 20
+
mov r7, xl, lsr #16
mov r8, yl, lsr #16
mov r9, xh, lsr #16
mul fp, xh, yh
adcs r5, r5, fp
adc r6, r6, #0
- ldmfd sp!, {yl, r7, r8, r9, sl, fp}
-
+ ldmfd sp!, {yl, r7, r8, r9, sl, fp} @ sp += 24
+ .cfi_restore_state @ Restore the previous CFI state.
#else
@ Here is the actual multiplication.
orr xh, xh, #0x00100000
mov lr, #0
subs r4, r4, #1
-
LSYM(Lml_u):
@ Overflow?
bgt LSYM(Lml_o)
orr xh, xh, #0x00f80000
RETLDM "r4, r5, r6"
+ CFI_END_FUNCTION
FUNC_END aeabi_dmul
FUNC_END muldf3
ARM_FUNC_START divdf3
ARM_FUNC_ALIAS aeabi_ddiv divdf3
+ CFI_START_FUNCTION
do_push {r4, r5, r6, lr}
+ .cfi_adjust_cfa_offset 16 @ CFA is now sp + previousOffset + 16
+ .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 12.
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
+ CFI_END_FUNCTION
FUNC_END aeabi_ddiv
FUNC_END divdf3
ARM_FUNC_START gtdf2
ARM_FUNC_ALIAS gedf2 gtdf2
+ CFI_START_FUNCTION
mov ip, #-1
b 1f
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
+ @ We're not adding CFI for ip as it's pushed into the stack only because
+ @ it may be popped off later as a return value (i.e. we're not preserving
+ @ it anyways).
@ Trap any INF/NAN first.
mov ip, xh, lsl #1
do_it ne
COND(mvn,s,ne) ip, ip, asr #21
beq 3f
-
- @ Test for equality.
- @ Note that 0.0 is equal to -0.0.
+ .cfi_remember_state
+ @ Save the current CFI state. This is done because the branch is conditional,
+ @ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
+ @ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
+ @ code will affect the branch code as well. To avoid this we'll restore
+ @ the current state before executing the branch code.
+
+ @ Test for equality.
+ @ Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
+ .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
+
orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
do_it eq, e
COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
orr r0, r0, #1
RET
- @ Look for a NAN.
-3: mov ip, xh, lsl #1
+3: @ Look for a NAN.
+
+ @ Restore the previous CFI state (i.e. keep the CFI state as it was
+ @ before the branch).
+ .cfi_restore_state
+
+ mov ip, xh, lsl #1
mvns ip, ip, asr #21
bne 4f
orrs ip, xl, xh, lsl #12
bne 2b
orrs ip, yl, yh, lsl #12
beq 2b @ y is not NAN
+
5: ldr r0, [sp], #4 @ unordered return code
+ .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
+
RET
+ CFI_END_FUNCTION
FUNC_END gedf2
FUNC_END gtdf2
FUNC_END ledf2
FUNC_END cmpdf2
ARM_FUNC_START aeabi_cdrcmple
+ CFI_START_FUNCTION
mov ip, r0
mov r0, r2
mov r1, r3
mov r3, ip
b 6f
-
+
ARM_FUNC_START aeabi_cdcmpeq
ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, lr}
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8.
+ .cfi_rel_offset r0, 0 @ Previous r0 is saved at sp.
+ .cfi_rel_offset lr, 4 @ Previous lr is saved at sp + 4.
+
ARM_CALL cmpdf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@ that the first operand was smaller than the second.
do_it mi
cmnmi r0, #0
+
RETLDM "r0"
+ CFI_END_FUNCTION
FUNC_END aeabi_cdcmple
FUNC_END aeabi_cdcmpeq
FUNC_END aeabi_cdrcmple
ARM_FUNC_START aeabi_dcmpeq
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
+
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmpeq
ARM_FUNC_START aeabi_dcmplt
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmplt
ARM_FUNC_START aeabi_dcmple
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmple
ARM_FUNC_START aeabi_dcmpge
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmpge
ARM_FUNC_START aeabi_dcmpgt
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
@ or they are unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmpgt
#endif /* L_cmpdf2 */
ARM_FUNC_START unorddf2
ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
+ CFI_START_FUNCTION
mov ip, xh, lsl #1
mvns ip, ip, asr #21
3: mov r0, #1 @ arguments are unordered.
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_dcmpun
FUNC_END unorddf2
ARM_FUNC_START fixdfsi
ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
+ CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
4: mov r0, #0 @ How should we convert NAN?
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_d2iz
FUNC_END fixdfsi
ARM_FUNC_START fixunsdfsi
ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
+ CFI_START_FUNCTION
@ check exponent range.
movs r2, xh, lsl #1
4: mov r0, #0 @ How should we convert NAN?
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_d2uiz
FUNC_END fixunsdfsi
ARM_FUNC_START truncdfsf2
ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
+ CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
orr r0, r0, #0x00800000
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_d2f
FUNC_END truncdfsf2
* Only the default rounding mode is intended for best performance.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performance.
+ *
+ * In the CFI related comments, 'previousOffset' refers to the previous offset
+ * from sp used to compute the CFA.
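+ *
+ * Illustrative sketch only (again, not copied from any routine below): when
+ * two paths need the same unwind rules, the state can be snapshotted and
+ * reused instead of being described twice:
+ *
+ *   .cfi_remember_state        @ snapshot the current CFA/register rules
+ *   push  {r4, lr}             @ sp -= 8
+ *   .cfi_adjust_cfa_offset 8   @ CFA is now sp + previousOffset + 8
+ *   ...
+ *   .cfi_restore_state         @ back to the rules snapshotted above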
*/
#ifdef L_arm_negsf2
ARM_FUNC_START negsf2
ARM_FUNC_ALIAS aeabi_fneg negsf2
+ CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_fneg
FUNC_END negsf2
#ifdef L_arm_addsubsf3
ARM_FUNC_START aeabi_frsub
+ CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit of first arg
b 1f
orrne r0, r0, #0x00400000 @ quiet NAN
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_frsub
FUNC_END aeabi_fadd
FUNC_END addsf3
ARM_FUNC_START floatunsisf
ARM_FUNC_ALIAS aeabi_ui2f floatunsisf
+ CFI_START_FUNCTION
mov r3, #0
b 1f
mov al, #0
b 2f
+ CFI_END_FUNCTION
FUNC_END aeabi_i2f
FUNC_END floatsisf
FUNC_END aeabi_ui2f
ARM_FUNC_START floatundisf
ARM_FUNC_ALIAS aeabi_ul2f floatundisf
+ CFI_START_FUNCTION
orrs r2, r0, r1
do_it eq
biceq r0, r0, ip, lsr #31
RET
+ CFI_END_FUNCTION
FUNC_END floatdisf
FUNC_END aeabi_l2f
FUNC_END floatundisf
ARM_FUNC_START mulsf3
ARM_FUNC_ALIAS aeabi_fmul mulsf3
+ CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
and r3, ip, #0x80000000
@ Well, no way to make it shorter without the umull instruction.
- do_push {r3, r4, r5}
+ do_push {r3, r4, r5} @ sp -= 12
+ .cfi_remember_state @ Save the current CFI state
+ .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
+ .cfi_rel_offset r3, 0 @ Registers are saved from sp to sp + 8
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+
mov r4, r0, lsr #16
mov r5, r1, lsr #16
bic r0, r0, r4, lsl #16
mla r0, r4, r1, r0
adds r3, r3, r0, lsl #16
adc r1, ip, r0, lsr #16
- do_pop {r0, r4, r5}
+ do_pop {r0, r4, r5} @ sp += 12
+ .cfi_restore_state @ Restore the previous CFI state
#else
orr r0, r0, #0x00c00000
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_fmul
FUNC_END mulsf3
ARM_FUNC_START divsf3
ARM_FUNC_ALIAS aeabi_fdiv divsf3
+ CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
+ CFI_END_FUNCTION
FUNC_END aeabi_fdiv
FUNC_END divsf3
ARM_FUNC_START gtsf2
ARM_FUNC_ALIAS gesf2 gtsf2
+ CFI_START_FUNCTION
mov ip, #-1
b 1f
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
+ @ We're not adding CFI for ip as it's pushed into the stack only because
+ @ it may be popped off later as a return value (i.e. we're not preserving
+ @ it anyways).
@ Trap any INF/NAN first.
mov r2, r0, lsl #1
do_it ne
COND(mvn,s,ne) ip, r3, asr #24
beq 3f
+ .cfi_remember_state
+ @ Save the current CFI state. This is done because the branch is conditional,
+ @ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
+ @ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
+ @ code will affect the branch code as well. To avoid this we'll restore
+ @ the current state before executing the branch code.
@ Compare values.
@ Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
+ .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
+
orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
do_it ne
teqne r0, r1 @ if not 0 compare sign
orrne r0, r0, #1
RET
- @ Look for a NAN.
-3: mvns ip, r2, asr #24
+3: @ Look for a NAN.
+
+ @ Restore the previous CFI state (i.e. keep the CFI state as it was
+ @ before the branch).
+ .cfi_restore_state
+
+ mvns ip, r2, asr #24
bne 4f
movs ip, r0, lsl #9
bne 5f @ r0 is NAN
bne 2b
movs ip, r1, lsl #9
beq 2b @ r1 is not NAN
+
5: ldr r0, [sp], #4 @ return unordered code.
+ .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
RET
+ CFI_END_FUNCTION
FUNC_END gesf2
FUNC_END gtsf2
FUNC_END lesf2
FUNC_END cmpsf2
ARM_FUNC_START aeabi_cfrcmple
+ CFI_START_FUNCTION
mov ip, r0
mov r0, r1
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, r1, r2, r3, lr}
+ .cfi_adjust_cfa_offset 20 @ CFA is at sp + previousOffset + 20
+ .cfi_rel_offset r0, 0 @ Registers are saved from sp to sp + 16
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset lr, 16
+
ARM_CALL cmpsf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
cmnmi r0, #0
RETLDM "r0, r1, r2, r3"
+ CFI_END_FUNCTION
FUNC_END aeabi_cfcmple
FUNC_END aeabi_cfcmpeq
FUNC_END aeabi_cfrcmple
ARM_FUNC_START aeabi_fcmpeq
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmpeq
ARM_FUNC_START aeabi_fcmplt
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmplt
ARM_FUNC_START aeabi_fcmple
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmple
ARM_FUNC_START aeabi_fcmpge
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmpge
ARM_FUNC_START aeabi_fcmpgt
+ CFI_START_FUNCTION
+
- str lr, [sp, #-8]!
+ str lr, [sp, #-8]! @ sp -= 8
+ .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
+ .cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
@ or they are unordered.
RETLDM
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmpgt
#endif /* L_cmpsf2 */
ARM_FUNC_START unordsf2
ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
+ CFI_START_FUNCTION
mov r2, r0, lsl #1
mov r3, r1, lsl #1
3: mov r0, #1 @ arguments are unordered.
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_fcmpun
FUNC_END unordsf2
ARM_FUNC_START fixsfsi
ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
+ CFI_START_FUNCTION
@ check exponent range.
mov r2, r0, lsl #1
4: mov r0, #0 @ What should we convert NAN to?
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_f2iz
FUNC_END fixsfsi
ARM_FUNC_START fixunssfsi
ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
+ CFI_START_FUNCTION
@ check exponent range.
movs r2, r0, lsl #1
4: mov r0, #0 @ What should we convert NAN to?
RET
+ CFI_END_FUNCTION
FUNC_END aeabi_f2uiz
FUNC_END fixunssfsi