Allow the formal engine to perform a same-cycle result in the ALU
[soc.git] / src / soc / experiment / compalu_multi.py
1 """Computation Unit (aka "ALU Manager").
2
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
9
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
11 """
12
13 from nmigen import Module, Signal, Mux, Elaboratable, Repl, Cat, Const
14 from nmigen.hdl.rec import (Record, DIR_FANIN, DIR_FANOUT)
15
16 from nmutil.latch import SRLatch, latchregister
17 from nmutil.iocontrol import RecordObject
18 from nmutil.util import rising_edge
19
20 from soc.fu.regspec import RegSpec, RegSpecALUAPI
21
22
def find_ok(fields):
    """Return the first field name ending in "_ok", or None if absent.

    Used to locate the "ok"/valid flag inside a Record's field dict
    (e.g. a Data-style output with an "ok" qualifier).
    """
    return next((fname for fname in fields if fname.endswith("_ok")), None)
30
31
def go_record(n, name):
    """Create an n-bit go/release handshake Record.

    ``go_i`` (fan-in) carries the "go" commands from the scoreboard;
    ``rel_o`` (fan-out) carries the "request release" indications back.
    Both are marked reset_less: they are driven combinatorially.
    """
    layout = [
        ('go_i', n, DIR_FANIN),
        ('rel_o', n, DIR_FANOUT),
    ]
    rec = Record(layout, name=name)
    rec.go_i.reset_less = True
    rec.rel_o.reset_less = True
    return rec
38
39
40 # see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
41
class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    base class for Computation Units, to provide a uniform API
    and allow "record.connect" etc. to be used, particularly when
    it comes to connecting multiple Computation Units up as a block
    (very laborious)

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid: either an integer (specifies width of all regs) or a "regspec"
    :n_src: number of source operands (src1_i..srcN_i); may be None,
            in which case RegSpec derives it from the regspec
    :n_dst: number of destination operands (dest1_o..destN_o); likewise
            may be None
    :name: suffix used to disambiguate the oper_i sub-record's name

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
    """

    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        # RegSpec decodes rwid (plain width or regspec) and establishes
        # self._n_src / self._n_dst and the per-operand widths
        RegSpec.__init__(self, rwid, n_src, n_dst)
        print ("name", name)
        RecordObject.__init__(self)
        self._subkls = subkls
        # pick up the (possibly regspec-derived) operand counts
        n_src, n_dst = self._n_src, self._n_dst

        # create source operands: one Signal per src, named src1_i..srcN_i
        # (1-based to match existing src1/src2 conventions), each at its
        # regspec-determined width.  also collected into self._src_i
        src = []
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            sname = "src%d_i" % j
            rw = self._get_srcwid(i)
            sreg = Signal(rw, name=sname, reset_less=True)
            setattr(self, sname, sreg)
            src.append(sreg)
        self._src_i = src

        # create dest operands: dest1_o..destN_o, collected into self._dest
        dst = []
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            dname = "dest%d_o" % j
            rw = self._get_dstwid(i)
            # dreg = Data(rw, name=name) XXX ??? output needs to be a Data type?
            dreg = Signal(rw, name=dname, reset_less=True)
            setattr(self, dname, dreg)
            dst.append(dreg)
        self._dest = dst

        # operation / data input: an instance of the opcode-subset class
        self.oper_i = subkls(name="oper_i_%s" % name)  # operand

        # create read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="cu_rd")  # read in, req out
        self.wr = go_record(n_dst, name="cu_wr")  # write in, req out
        # read / write mask (rdmaskn is active-low: 1 = "do not read")
        self.rdmaskn = Signal(n_src, name="cu_rdmaskn_i", reset_less=True)
        self.wrmask = Signal(n_dst, name="cu_wrmask_o", reset_less=True)

        # fn issue in
        self.issue_i = Signal(name="cu_issue_i", reset_less=True)
        # shadow function, defaults to ON (reset=1 means "not shadowed")
        self.shadown_i = Signal(name="cu_shadown_i", reset=1)
        # go die (reset)
        self.go_die_i = Signal(name="cu_go_die_i")

        # output (busy/done)
        self.busy_o = Signal(name="cu_busy_o", reset_less=True)  # fn busy out
        self.done_o = Signal(name="cu_done_o", reset_less=True)
        self.alu_done_o = Signal(name="cu_alu_done_o", reset_less=True)
110
111
class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    """MultiCompUnit: a Computation Unit ("ALU Manager") wrapping one ALU.

    Latches source operands on issue/go_read, holds the ALU's result(s)
    until go_write is received, and raises the rd.rel_o / wr.rel_o request
    signals that the Dependency Matrices respond to.  busy_o stays asserted
    from issue until all requested writes have completed.

    The external signal API is provided by CompUnitRecord (self.cu), with
    convenience aliases attached directly on this class.
    """

    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1, name=None,
                 sync_rw=True):
        """MultiCompUnit

        * :rwid: width of register latches (TODO: allocate per regspec)
        * :alu: ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src: number of src operands
        * :n_dst: number of destination operands
        * :name: submodule name for the ALU (defaults to "alu")
        * :sync_rw: if True (default) the read/write-phase latches are
          driven from the sync domain; if False, combinatorially
          (allows a same-cycle result - used by the formal engine)
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.sync_rw = sync_rw
        self.alu_name = name or "alu"
        self.opsubsetkls = opsubsetkls
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst,
                                      name=name)
        # counts may have been derived from a regspec inside CompUnitRecord
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names
        self.rd = cu.rd
        self.wr = cu.wr
        self.rdmaskn = cu.rdmaskn
        self.wrmask = cu.wrmask
        self.alu_done_o = cu.alu_done_o
        self.go_rd_i = self.rd.go_i  # temporary naming
        self.go_wr_i = self.wr.go_i  # temporary naming
        self.rd_rel_o = self.rd.rel_o  # temporary naming
        self.req_rel_o = self.wr.rel_o  # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        self.dest = cu._dest
        self.o_data = self.dest[0]  # Dest out
        self.done_o = cu.done_o

    def _mux_op(self, m, sl, op_is_imm, imm, i):
        """Overwrite entry i of the src list ``sl`` so that an immediate
        (or zero) from the opcode is muxed in place of the register value.

        select imm if opcode says so.  however also change the latch
        to trigger *from* the opcode latch instead.
        """
        src_or_imm = Signal(self.cu._get_srcwid(i), reset_less=True)
        src_sel = Signal(reset_less=True)
        m.d.comb += src_sel.eq(Mux(op_is_imm, self.opc_l.q, sl[i][2]))
        m.d.comb += src_or_imm.eq(Mux(op_is_imm, imm, self.src_i[i]))
        # overwrite 1st src-latch with immediate-muxed stuff
        sl[i][0] = src_or_imm
        sl[i][2] = src_sel
        sl[i][3] = ~op_is_imm  # change rd.rel[i] gate condition

    def elaborate(self, platform):
        """Build the latch network: issue -> read-phase -> ALU -> write-phase.

        NOTE(review): statement order and the comb/sync domain split here
        are delicate (see the "XXX LOOP" markers) - treat with care.
        """
        m = Module()
        # rw_domain selects whether the read-ok / read-done / dest-request
        # latches are clocked (sync) or same-cycle (comb, for formal proof)
        if self.sync_rw:
            rw_domain = m.d.sync
        else:
            rw_domain = m.d.comb
        # generate a pulse on system reset, to reset any latches, if needed
        system_reset = Signal(reset=1)
        m.d.sync += system_reset.eq(0)

        # add the ALU to the MultiCompUnit only if it is a "real" ALU
        # see AllFunctionUnits as to why: a FunctionUnitBaseMulti
        # only has one "real" ALU but multiple pseudo front-ends,
        # aka "ReservationStations" (ALUProxy "fronts")
        if isinstance(self.alu, Elaboratable):
            setattr(m.submodules, self.alu_name, self.alu)
        # per-phase SR latches: src (operand capture), opc (busy/opcode),
        # req (write requests), rst (read-phase done), rok (read-ok)
        m.submodules.src_l = src_l = SRLatch(False, self.n_src, name="src")
        m.submodules.opc_l = opc_l = SRLatch(sync=False, name="opc")
        m.submodules.req_l = req_l = SRLatch(False, self.n_dst, name="req")
        m.submodules.rst_l = rst_l = SRLatch(sync=False, name="rst")
        m.submodules.rok_l = rok_l = SRLatch(sync=False, name="rdok")
        self.opc_l, self.src_l = opc_l, src_l  # needed by _mux_op

        # ALU only proceeds when all src are ready.  rd_rel_o is delayed
        # so combine it with go_rd_i.  if all bits are set we're good
        all_rd = Signal(reset_less=True)
        m.d.comb += all_rd.eq(self.busy_o &  # rok_l.q & # XXX LOOP
                              (((~self.rd.rel_o) | self.rd.go_i).all()))

        # generate read-done pulse
        all_rd_pulse = Signal(reset_less=True)
        m.d.comb += all_rd_pulse.eq(rising_edge(m, all_rd))  # XXX LOOP

        # create rising pulse from alu valid condition.
        alu_done = self.cu.alu_done_o
        alu_pulse = Signal(reset_less=True)
        alu_pulsem = Signal(self.n_dst, reset_less=True)  # pulse, replicated
        m.d.comb += alu_done.eq(self.alu.n.o_valid)
        m.d.comb += alu_pulse.eq(rising_edge(m, alu_done))
        m.d.comb += alu_pulsem.eq(Repl(alu_pulse, self.n_dst))

        # sigh bug where req_l gets both set and reset raised at same time
        # (prev_wr_go remembers last cycle's go_write, gated by busy)
        prev_wr_go = Signal(self.n_dst)
        brd = Repl(self.busy_o, self.n_dst)
        m.d.sync += prev_wr_go.eq(self.wr.go_i & brd)

        # write_requests all done
        # req_done works because any one of the last of the writes
        # is enough, when combined with when read-phase is done (rst_l.q)
        wr_any = Signal(reset_less=True)
        req_done = Signal(reset_less=True)
        m.d.comb += self.done_o.eq(self.busy_o & ~(self.wr.rel_o).bool())
        m.d.comb += wr_any.eq(self.wr.go_i.bool() | prev_wr_go.bool())
        m.d.comb += req_done.eq(wr_any & ~self.alu.n.i_ready & (req_l.q == 0))
        # argh, complicated hack: if there are no regs to write,
        # instead of waiting for regs that are never going to happen,
        # we indicate "done" when the ALU is "done"
        with m.If((self.wrmask == 0) &
                  self.alu.n.i_ready & self.alu.n.o_valid & self.busy_o):
            m.d.comb += req_done.eq(1)

        # shadow/go_die: reset conditions for the various latches
        reset = Signal(reset_less=True)
        rst_r = Signal(reset_less=True)  # reset latch off
        reset_w = Signal(self.n_dst, reset_less=True)  # per-dest write reset
        reset_r = Signal(self.n_src, reset_less=True)  # per-src read reset
        m.d.comb += reset.eq(req_done | self.go_die_i)
        m.d.comb += rst_r.eq(self.issue_i | self.go_die_i)
        m.d.comb += reset_w.eq(self.wr.go_i | Repl(self.go_die_i, self.n_dst))
        m.d.comb += reset_r.eq(self.rd.go_i | Repl(rst_r, self.n_src))

        # read-done,wr-proceed latch
        rw_domain += rok_l.s.eq(self.issue_i)  # set up when issue starts
        rw_domain += rok_l.r.eq(self.alu.n.o_valid & self.busy_o)  # ALUdone LOOP

        # wr-done, back-to-start latch
        rw_domain += rst_l.s.eq(all_rd)  # set when read-phase is fully done
        rw_domain += rst_l.r.eq(rst_r)  # *off* on issue

        # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
        m.d.sync += opc_l.s.eq(self.issue_i)  # set on issue
        m.d.sync += opc_l.r.eq(req_done)  # reset on ALU

        # src operand latch (not using go_wr_i) ANDed with rdmask
        # (rdmaskn captured at issue: masked-off srcs never raise rd.rel_o)
        rdmaskn = Signal(self.n_src)
        latchregister(m, self.rdmaskn, rdmaskn, self.issue_i, name="rdmask_l")
        m.d.sync += src_l.s.eq(Repl(self.issue_i, self.n_src) & ~rdmaskn)
        m.d.sync += src_l.r.eq(reset_r)

        # dest operand latch (not using issue_i)
        rw_domain += req_l.s.eq(alu_pulsem & self.wrmask)
        m.d.comb += req_l.r.eq(reset_w | prev_wr_go |
                               Repl(system_reset, self.n_dst))

        # pass operation to the ALU (sync: plenty time to wait for src reads)
        op = self.get_op()
        with m.If(self.issue_i):
            m.d.sync += op.eq(self.oper_i)

        # and for each output from the ALU: capture when ALU output is valid
        drl = []   # data-result latches, one per dest
        wrok = []  # per-dest "output is valid" conditions -> wrmask
        for i in range(self.n_dst):
            name = "data_r%d" % i
            lro = self.get_out(i)
            ok = Const(1, 1)
            data_r_ok = Const(1, 1)
            if isinstance(lro, Record):
                print("wr fields", i, lro, lro.fields)
                data_r = Record.like(lro, name=name)
                # bye-bye abstract interface design..
                fname = find_ok(lro.fields)
                if fname:
                    ok = getattr(lro, fname)
                    data_r_ok = getattr(data_r, fname)
                # write-ok based on incoming output *and* whether the latched
                # data was ok.
                # XXX fails - wrok.append((ok|data_r_ok) & self.busy_o)
                wrok.append(ok & self.busy_o)
            else:
                data_r = Signal.like(lro, name=name)
                # really should retire this but it's part of unit tests
                wrok.append(ok & self.busy_o)
            #latchregister(m, lro, data_r, ok & self.busy_o, name=name)
            latchregister(m, lro, data_r, alu_pulse, name=name)
            with m.If(self.issue_i):
                m.d.comb += data_r.eq(0)
            drl.append(data_r)

        # ok, above we collated anything with an "ok" on the output side
        # now actually use those to create a write-mask.  this basically
        # is now the Function Unit API tells the Comp Unit "do not request
        # a regfile port because this particular output is not valid"
        m.d.comb += self.wrmask.eq(Cat(*wrok))

        # create list of src/alu-src/src-latch.  override 1st and 2nd one below.
        # in the case, for ALU and Logical pipelines, we assume RB is the
        # 2nd operand in the input "regspec".  see for example
        # soc.fu.alu.pipe_data.ALUInputData
        # entries: [src signal, alu input, latch enable, rd.rel gate]
        sl = []
        print("src_i", self.src_i)
        for i in range(self.n_src):
            sl.append([self.src_i[i], self.get_in(i), src_l.q[i], Const(1, 1)])

        # if the operand subset has "zero_a" we implicitly assume that means
        # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
        # see https://bugs.libre-soc.org/show_bug.cgi?id=336
        if hasattr(op, "zero_a"):
            # select zero imm if opcode says so.  however also change the latch
            # to trigger *from* the opcode latch instead.
            self._mux_op(m, sl, op.zero_a, 0, 0)

        # if the operand subset has "imm_data" we implicitly assume that means
        # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
        if hasattr(op, "imm_data"):
            # select immediate if opcode says so.  however also change the latch
            # to trigger *from* the opcode latch instead.
            op_is_imm = op.imm_data.ok
            imm = op.imm_data.data
            self._mux_op(m, sl, op_is_imm, imm, 1)

        # create a latch/register for src1/src2 (even if it is a copy of imm)
        for i in range(self.n_src):
            src, alusrc, latch, _ = sl[i]
            reg = latchregister(m, src, alusrc, latch, name="src_r%d" % i)
            # rdmask stops src latches from being set.  clear all if not busy
            with m.If(~self.busy_o):
                m.d.sync += reg.eq(0)

        # -----
        # ALU connection / interaction
        # -----

        # on a go_read, tell the ALU we're accepting data.
        # (p.i_valid held by alui_l until the ALU accepts - p.o_ready)
        m.submodules.alui_l = alui_l = SRLatch(False, name="alui")
        m.d.comb += self.alu.p.i_valid.eq(alui_l.q)
        m.d.sync += alui_l.r.eq(self.alu.p.o_ready & alui_l.q)
        m.d.comb += alui_l.s.eq(all_rd_pulse)

        # ALU output "ready" side.  alu "ready" indication stays hi until
        # ALU says "valid".
        m.submodules.alu_l = alu_l = SRLatch(False, name="alu")
        m.d.comb += self.alu.n.i_ready.eq(alu_l.q)
        m.d.sync += alu_l.r.eq(self.alu.n.o_valid & alu_l.q)
        m.d.comb += alu_l.s.eq(all_rd_pulse)  # XXX LOOP

        # -----
        # outputs
        # -----

        slg = Cat(*map(lambda x: x[3], sl))  # get req gate conditions
        # all request signals gated by busy_o.  prevents picker problems
        m.d.comb += self.busy_o.eq(opc_l.q)  # busy out

        # read-release gated by busy (and read-mask)
        if True: #self.sync_rw: - experiment (doesn't work)
            bro = Repl(self.busy_o, self.n_src)
        else:
            bro = Repl(self.busy_o|self.issue_i, self.n_src)
        m.d.comb += self.rd.rel_o.eq(src_l.q & bro & slg)

        # write-release gated by busy and by shadow (and write-mask)
        brd = Repl(self.busy_o & self.shadown_i, self.n_dst)
        m.d.comb += self.wr.rel_o.eq(req_l.q_int & brd)

        # output the data from the latch on go_write
        for i in range(self.n_dst):
            with m.If(self.wr.go_i[i] & self.busy_o):
                m.d.comb += self.dest[i].eq(drl[i])

        return m

    def get_fu_out(self, i):
        """Return destination output signal i (Function Unit API)."""
        return self.dest[i]

    def __iter__(self):
        """Yield this unit's externally-visible ports (for simulation
        traces / unit tests).  NOTE(review): assumes at least two src
        operands (src1_i/src2_i) and yields only the first dest (o_data).
        """
        yield self.rd.go_i
        yield self.wr.go_i
        yield self.issue_i
        yield self.shadown_i
        yield self.go_die_i
        yield from self.oper_i.ports()
        yield self.src1_i
        yield self.src2_i
        yield self.busy_o
        yield self.rd.rel_o
        yield self.wr.rel_o
        yield self.o_data

    def ports(self):
        """Return the port list (see __iter__)."""
        return list(self)