1 # IEEE754 Floating Point Conversion
2 # Copyright (C) 2019 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
4 from nmigen
import Module
, Signal
, Const
5 from nmigen
.cli
import main
, verilog
7 from nmutil
.pipemodbase
import PipeModBase
8 from ieee754
.fpcommon
.basedata
import FPBaseData
9 from ieee754
.fpcommon
.postcalc
import FPPostCalcData
11 from ieee754
.fpcommon
.fpbase
import FPNumDecode
, FPNumBaseRecord
14 class FPCVTDownConvertMod(PipeModBase
):
15 """ FP down-conversion (higher to lower bitwidth)
def __init__(self, in_pspec, out_pspec):
    """Set up a down-conversion pipeline stage.

    Records the incoming (wider) and outgoing (narrower) pipeline
    specs, then registers this stage with PipeModBase under the
    name "downconvert".
    """
    self.out_pspec = out_pspec
    self.in_pspec = in_pspec
    super().__init__(in_pspec, "downconvert")
def ispec(self):
    """Input spec: FPBaseData shaped by the incoming (wider) pspec.

    NOTE(review): the `def ispec(self):` header was missing from the
    mangled source and has been restored per the PipeModBase
    ispec/ospec convention — confirm against upstream.
    """
    return FPBaseData(self.in_pspec)
def ospec(self):
    """Output spec: FPPostCalcData at the outgoing (narrower) width.

    e_extra=True requests extra exponent bits so that out-of-range
    exponents can still be represented and detected downstream.

    NOTE(review): the `def ospec(self):` header was missing from the
    mangled source and has been restored per the PipeModBase
    ispec/ospec convention — confirm against upstream.
    """
    return FPPostCalcData(self.out_pspec, e_extra=True)
def elaborate(self, platform):
    """Build the combinatorial FP down-conversion stage.

    Decodes the wider input operand `a`, then drives the narrower
    output `z` through a chain of special cases (zero, below the
    target's minimum exponent range, inf, NaN, exponent overflow),
    falling through to a rounded mantissa truncation when the value
    fits the narrower format directly.

    NOTE(review): several structural statements (`m = Module()`,
    `comb = m.d.comb`, `z1 = self.o.z`, `me = a1.rmw`, the
    `with m.Else():` lines and `return m`) were dropped from the
    mangled source and have been reconstructed from the visible
    uses — confirm against upstream.
    """
    m = Module()
    comb = m.d.comb

    print("in_width out", self.in_pspec.width, self.out_pspec.width)

    # here we make room (in temporary constants / ospec) for extra
    # bits in the exponent, at the size of the *incoming* number
    # bitwidth. in this way it is possible to detect, in the
    # *outgoing* number, if the exponent is too large and needs
    # adjustment. otherwise we have to mess about with all sorts
    # of width-detection and the normalisation, special cases etc.
    # all become horribly complicated.

    # decode the incoming (wider) operand
    a1 = FPNumBaseRecord(self.in_pspec.width, False)
    print("a1", a1.width, a1.rmw, a1.e_width, a1.e_start, a1.e_end)
    m.submodules.sc_decode_a = a1 = FPNumDecode(None, a1)
    comb += a1.v.eq(self.i.a)

    # convenience alias for the (narrower) output number
    z1 = self.o.z  # NOTE(review): reconstructed binding — confirm upstream
    print("z1", z1.width, z1.rmw, z1.e_width, z1.e_start, z1.e_end)

    # mantissa slice bounds: [ms:me] selects the top output-width
    # bits of the wider input mantissa
    me = a1.rmw  # NOTE(review): reconstructed — confirm upstream
    ms = a1.rmw - self.o.z.rmw
    print("ms-me", ms, me)

    # intermediaries: signed exponent comparisons at a1's width
    exp_sub_n126 = Signal((a1.e_width, True), reset_less=True)
    exp_gt127 = Signal(reset_less=True)
    # constants from z1, at the bit-width of a1.
    N126 = Const(z1.fp.N126.value, (a1.e_width, True))
    P127 = Const(z1.fp.P127.value, (a1.e_width, True))
    comb += exp_sub_n126.eq(a1.e - N126)
    comb += exp_gt127.eq(a1.e > P127)

    # bypass (always enabled except for normalisation, below)
    comb += self.o.out_do_z.eq(1)

    # if a zero, return zero (signed)
    with m.If(a1.exp_n127):
        comb += self.o.z.zero(a1.s)

    # if a range outside z's min range (-126)
    with m.Elif(exp_sub_n126 < 0):
        comb += self.o.of.guard.eq(a1.m[ms-1])
        comb += self.o.of.round_bit.eq(a1.m[ms-2])
        comb += self.o.of.sticky.eq(a1.m[:ms-2].bool())
        comb += self.o.of.m0.eq(a1.m[ms])  # LSB bit of a1

        comb += self.o.z.s.eq(a1.s)
        comb += self.o.z.e.eq(a1.e)
        comb += self.o.z.m.eq(a1.m[-self.o.z.rmw-1:])
        comb += self.o.z.m[-1].eq(1)

        # normalisation required
        comb += self.o.out_do_z.eq(0)

    # if a is inf return inf
    with m.Elif(a1.is_inf):
        comb += self.o.z.inf(a1.s)

    # if a is NaN return NaN
    with m.Elif(a1.is_nan):
        comb += self.o.z.nan(0)

    # if exponent is greater than +127, return inf (the original
    # comment said "mantissa" but exp_gt127 tests the exponent)
    with m.Elif(exp_gt127):
        print("inf", self.o.z.inf(a1.s))
        comb += self.o.z.inf(a1.s)

    # ok after all that, anything else should fit fine (whew)
    with m.Else():
        comb += self.o.of.guard.eq(a1.m[ms-1])
        comb += self.o.of.round_bit.eq(a1.m[ms-2])
        comb += self.o.of.sticky.eq(a1.m[:ms-2].bool())
        comb += self.o.of.m0.eq(a1.m[ms])  # bit of a1

        # XXX TODO: this is basically duplicating FPRoundMod. hmmm...
        print("alen", a1.e_start, z1.fp.N126, N126)
        print("m1", self.o.z.rmw, a1.m[-self.o.z.rmw-1:])
        mo = Signal(self.o.z.m_width-1)
        comb += mo.eq(a1.m[ms:me])
        with m.If(self.o.of.roundz):
            with m.If((mo.all())):  # mantissa-out is all 1s
                # rounding overflows the mantissa: bump the exponent
                comb += self.o.z.create(a1.s, a1.e+1, mo+1)
            with m.Else():
                comb += self.o.z.create(a1.s, a1.e, mo+1)
        with m.Else():
            comb += self.o.z.create(a1.s, a1.e, a1.m[-self.o.z.rmw-1:])

    # copy the context (muxid, operator)
    comb += self.o.oz.eq(self.o.z.v)
    comb += self.o.ctx.eq(self.i.ctx)

    return m