# * clarifying core function unit enable
# [soc.git] / src / soc / experiment / compalu_multi.py
1 """Computation Unit (aka "ALU Manager").
2
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
9
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
11 """
12
13 from nmigen import Module, Signal, Mux, Elaboratable, Repl, Cat, Const
14 from nmigen.hdl.rec import (Record, DIR_FANIN, DIR_FANOUT)
15
16 from nmutil.latch import SRLatch, latchregister
17 from nmutil.iocontrol import RecordObject
18
19 from soc.fu.regspec import RegSpec, RegSpecALUAPI
20
21
def find_ok(fields):
    """Return the first field name ending in "_ok", or None if absent.

    Used to locate the "ok"/valid flag inside an ALU output Record's
    field list.
    """
    return next((fname for fname in fields if fname.endswith("_ok")), None)
29
30
def go_record(n, name):
    """Create an n-bit go/rel handshake Record.

    'go' fans in (driven by the scoreboard), 'rel' fans out (request
    from this Function Unit).  Both are marked reset_less.
    """
    rec = Record([('go', n, DIR_FANIN),
                  ('rel', n, DIR_FANOUT)], name=name)
    for sig in (rec.go, rec.rel):
        sig.reset_less = True
    return rec
37
38
39 # see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
40
class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    base class for Computation Units, to provide a uniform API
    and allow "record.connect" etc. to be used, particularly when
    it comes to connecting multiple Computation Units up as a block
    (very laborious)

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid: either an integer (specifies width of all regs) or a "regspec"

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
    """
    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        RegSpec.__init__(self, rwid, n_src, n_dst)
        RecordObject.__init__(self, name)
        self._subkls = subkls
        # RegSpec.__init__ establishes the definitive operand counts
        # (e.g. derived from a regspec), so re-read them rather than
        # trusting the raw arguments
        n_src, n_dst = self._n_src, self._n_dst

        # create source operands (src1_i, src2_i, ...) with per-operand
        # widths from the regspec.
        # NOTE(review): setattr on a RecordObject presumably also registers
        # the Signal as a Record field - confirm in nmutil.iocontrol
        src = []
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            rw = self._get_srcwid(i)
            sreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, sreg)
            src.append(sreg)
        self._src_i = src  # ordered list view of all source operands

        # create dest operands (dest1_o, dest2_o, ...)
        dst = []
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            rw = self._get_dstwid(i)
            #dreg = Data(rw, name=name) XXX ??? output needs to be a Data type?
            dreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, dreg)
            dst.append(dreg)
        self._dest = dst  # ordered list view of all destination operands

        # operation / data input: an instance of the opcode-subset class
        self.oper_i = subkls(name="oper_i")  # operand

        # create read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="rd")  # read in, req out
        self.wr = go_record(n_dst, name="wr")  # write in, req out
        self.rdmaskn = Signal(n_src, reset_less=True)  # read mask
        self.wrmask = Signal(n_dst, reset_less=True)  # write mask
        self.issue_i = Signal(reset_less=True)  # fn issue in
        self.shadown_i = Signal(reset=1)  # shadow function, defaults to ON
        self.go_die_i = Signal()  # go die (reset)

        # output (busy/done)
        self.busy_o = Signal(reset_less=True)  # fn busy out
        self.done_o = Signal(reset_less=True)
101
102
class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1, name=None):
        """MultiCompUnit

        * :rwid: width of register latches (TODO: allocate per regspec)
        * :alu: ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src: number of src operands
        * :n_dst: number of destination operands
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.alu_name = name or "alu"
        self.opsubsetkls = opsubsetkls
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst)
        # the CompUnitRecord is the authority on operand counts (it may
        # derive them from a regspec), so re-read them from it
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print ("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands (self.src1_i etc. alias cu's)
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands (self.dest1_o etc.)
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names (all aliases into the CompUnitRecord)
        self.rd = cu.rd
        self.wr = cu.wr
        self.rdmaskn = cu.rdmaskn
        self.wrmask = cu.wrmask
        self.go_rd_i = self.rd.go  # temporary naming
        self.go_wr_i = self.wr.go  # temporary naming
        self.rd_rel_o = self.rd.rel  # temporary naming
        self.req_rel_o = self.wr.rel  # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        self.dest = cu._dest
        self.data_o = self.dest[0]  # Dest out
        self.done_o = cu.done_o

    def _mux_op(self, m, sl, op_is_imm, imm, i):
        """Mux an immediate into src operand i in place of the regfile value.

        Modifies sl[i] in-place (see elaborate for the layout:
        [src, alu-input, latch-enable, rel-gate]) so that when op_is_imm
        is set, the src latch captures the immediate, triggered from the
        opcode latch, and rd.rel[i] is gated off (no regfile read is
        requested for an immediate).
        """
        # select imm if opcode says so. however also change the latch
        # to trigger *from* the opcode latch instead.
        src_or_imm = Signal(self.cu._get_srcwid(i), reset_less=True)
        src_sel = Signal(reset_less=True)
        m.d.comb += src_sel.eq(Mux(op_is_imm, self.opc_l.q, self.src_l.q[i]))
        m.d.comb += src_or_imm.eq(Mux(op_is_imm, imm, self.src_i[i]))
        # overwrite 1st src-latch with immediate-muxed stuff
        sl[i][0] = src_or_imm
        sl[i][2] = src_sel
        sl[i][3] = ~op_is_imm # change rd.rel[i] gate condition

    def elaborate(self, platform):
        """Construct the latches and handshaking that sequence one operation:

        issue -> read-request(s) -> ALU capture/compute -> write-request(s)
        -> done.  busy_o is held for the whole of that window; go_die_i
        resets everything back to idle at any point.
        Documented at http://libre-soc.org/3d_gpu/architecture/compunit
        """
        m = Module()
        setattr(m.submodules, self.alu_name, self.alu)
        # per-src read-pending latch, per-dst write-pending latch, plus
        # single-bit latches for opcode/busy, reset-phase and read-ok
        m.submodules.src_l = src_l = SRLatch(False, self.n_src, name="src")
        m.submodules.opc_l = opc_l = SRLatch(sync=False, name="opc")
        m.submodules.req_l = req_l = SRLatch(False, self.n_dst, name="req")
        m.submodules.rst_l = rst_l = SRLatch(sync=False, name="rst")
        m.submodules.rok_l = rok_l = SRLatch(sync=False, name="rdok")
        self.opc_l, self.src_l = opc_l, src_l  # saved for _mux_op's use

        # ALU only proceeds when all src are ready.  rd_rel_o is delayed
        # so combine it with go_rd_i. if all bits are set we're good
        all_rd = Signal(reset_less=True)
        m.d.comb += all_rd.eq(self.busy_o & rok_l.q &
                    (((~self.rd.rel) | self.rd.go).all()))

        # generate read-done pulse (one-cycle rising edge of all_rd)
        all_rd_dly = Signal(reset_less=True)
        all_rd_pulse = Signal(reset_less=True)
        m.d.sync += all_rd_dly.eq(all_rd)
        m.d.comb += all_rd_pulse.eq(all_rd & ~all_rd_dly)

        # create rising pulse from alu valid condition.
        alu_done = Signal(reset_less=True)
        alu_done_dly = Signal(reset_less=True)
        alu_pulse = Signal(reset_less=True)
        alu_pulsem = Signal(self.n_dst, reset_less=True)  # pulse, replicated per-dst
        m.d.comb += alu_done.eq(self.alu.n.valid_o)
        m.d.sync += alu_done_dly.eq(alu_done)
        m.d.comb += alu_pulse.eq(alu_done & ~alu_done_dly)
        m.d.comb += alu_pulsem.eq(Repl(alu_pulse, self.n_dst))

        # sigh bug where req_l gets both set and reset raised at same time:
        # remember last cycle's wr.go so the reset can win a cycle later
        prev_wr_go = Signal(self.n_dst)
        brd = Repl(self.busy_o, self.n_dst)
        m.d.sync += prev_wr_go.eq(self.wr.go & brd)

        # write_requests all done
        # req_done works because any one of the last of the writes
        # is enough, when combined with when read-phase is done (rst_l.q)
        wr_any = Signal(reset_less=True)
        req_done = Signal(reset_less=True)
        m.d.comb += self.done_o.eq(self.busy_o & \
                    ~((self.wr.rel & ~self.wrmask).bool()))
        m.d.comb += wr_any.eq(self.wr.go.bool() | prev_wr_go.bool())
        m.d.comb += req_done.eq(wr_any & ~self.alu.n.ready_i & \
                    ((req_l.q & self.wrmask) == 0))
        # argh, complicated hack: if there are no regs to write,
        # instead of waiting for regs that are never going to happen,
        # we indicate "done" when the ALU is "done"
        with m.If((self.wrmask == 0) & \
                    self.alu.n.ready_i & self.alu.n.valid_o & self.busy_o):
            m.d.comb += req_done.eq(1)

        # shadow/go_die: reset conditions for the various latches.
        # go_die_i forces every reset at once, abandoning the operation
        reset = Signal(reset_less=True)
        rst_r = Signal(reset_less=True) # reset latch off
        reset_w = Signal(self.n_dst, reset_less=True)
        reset_r = Signal(self.n_src, reset_less=True)
        m.d.comb += reset.eq(req_done | self.go_die_i)
        m.d.comb += rst_r.eq(self.issue_i | self.go_die_i)
        m.d.comb += reset_w.eq(self.wr.go | Repl(self.go_die_i, self.n_dst))
        m.d.comb += reset_r.eq(self.rd.go | Repl(self.go_die_i, self.n_src))

        # read-done,wr-proceed latch
        m.d.comb += rok_l.s.eq(self.issue_i)  # set up when issue starts
        m.d.sync += rok_l.r.eq(self.alu.n.valid_o & self.busy_o)  # ALU done

        # wr-done, back-to-start latch
        m.d.comb += rst_l.s.eq(all_rd)  # set when read-phase is fully done
        m.d.comb += rst_l.r.eq(rst_r)  # *off* on issue

        # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
        m.d.sync += opc_l.s.eq(self.issue_i)  # set on issue
        m.d.sync += opc_l.r.eq(req_done)  # reset on ALU

        # src operand latch (not using go_wr_i)
        m.d.sync += src_l.s.eq(Repl(self.issue_i, self.n_src))
        m.d.sync += src_l.r.eq(reset_r)

        # dest operand latch (not using issue_i)
        m.d.comb += req_l.s.eq(alu_pulsem & self.wrmask)
        m.d.comb += req_l.r.eq(reset_w | prev_wr_go)

        # create a latch/register for the operand (captured on issue)
        oper_r = self.opsubsetkls(name="oper_r")
        latchregister(m, self.oper_i, oper_r, self.issue_i, "oper_l")

        # and for each output from the ALU: capture when ALU output is valid
        drl = []
        wrok = []
        for i in range(self.n_dst):
            name = "data_r%d" % i
            lro = self.get_out(i)
            ok = Const(1, 1)  # default: output always valid
            if isinstance(lro, Record):
                data_r = Record.like(lro, name=name)
                print ("wr fields", i, lro, data_r.fields)
                # bye-bye abstract interface design..
                fname = find_ok(data_r.fields)
                if fname:
                    ok = data_r[fname]  # use the record's own "ok" flag
            else:
                data_r = Signal.like(lro, name=name, reset_less=True)
            wrok.append(ok & self.busy_o)
            latchregister(m, lro, data_r, alu_pulsem, name + "_l")
            drl.append(data_r)

        # ok, above we collated anything with an "ok" on the output side
        # now actually use those to create a write-mask.  this basically
        # is now the Function Unit API tells the Comp Unit "do not request
        # a regfile port because this particular output is not valid"
        m.d.comb += self.wrmask.eq(Cat(*wrok))

        # pass the operation to the ALU
        m.d.comb += self.get_op().eq(oper_r)

        # create list of src/alu-src/src-latch. override 1st and 2nd one below.
        # in the case, for ALU and Logical pipelines, we assume RB is the
        # 2nd operand in the input "regspec".  see for example
        # soc.fu.alu.pipe_data.ALUInputData
        sl = []
        print ("src_i", self.src_i)
        for i in range(self.n_src):
            # layout: [src value, alu input, latch enable, rel gate]
            sl.append([self.src_i[i], self.get_in(i), src_l.q[i], Const(1,1)])

        # if the operand subset has "zero_a" we implicitly assume that means
        # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
        # see https://bugs.libre-soc.org/show_bug.cgi?id=336
        if hasattr(oper_r, "zero_a"):
            # select zero imm if opcode says so.  however also change the latch
            # to trigger *from* the opcode latch instead.
            self._mux_op(m, sl, oper_r.zero_a, 0, 0)

        # if the operand subset has "imm_data" we implicitly assume that means
        # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
        if hasattr(oper_r, "imm_data"):
            # select immediate if opcode says so. however also change the latch
            # to trigger *from* the opcode latch instead.
            op_is_imm = oper_r.imm_data.imm_ok
            imm = oper_r.imm_data.imm
            self._mux_op(m, sl, op_is_imm, imm, 1)

        # create a latch/register for src1/src2 (even if it is a copy of imm)
        for i in range(self.n_src):
            src, alusrc, latch, _ = sl[i]
            latchregister(m, src, alusrc, latch, name="src_r%d" % i)

        # -----
        # ALU connection / interaction
        # -----

        # on a go_read, tell the ALU we're accepting data.
        # valid is raised on the read-done pulse and held until the ALU
        # accepts (p.ready_o)
        m.submodules.alui_l = alui_l = SRLatch(False, name="alui")
        m.d.comb += self.alu.p.valid_i.eq(alui_l.q)
        m.d.sync += alui_l.r.eq(self.alu.p.ready_o & alui_l.q)
        m.d.comb += alui_l.s.eq(all_rd_pulse)

        # ALU output "ready" side.  alu "ready" indication stays hi until
        # ALU says "valid".
        m.submodules.alu_l = alu_l = SRLatch(False, name="alu")
        m.d.comb += self.alu.n.ready_i.eq(alu_l.q)
        m.d.sync += alu_l.r.eq(self.alu.n.valid_o & alu_l.q)
        m.d.comb += alu_l.s.eq(all_rd_pulse)

        # -----
        # outputs
        # -----

        slg = Cat(*map(lambda x: x[3], sl)) # get req gate conditions
        # all request signals gated by busy_o.  prevents picker problems
        m.d.comb += self.busy_o.eq(opc_l.q) # busy out

        # read-release gated by busy (and read-mask)
        bro = Repl(self.busy_o, self.n_src)
        m.d.comb += self.rd.rel.eq(src_l.q & bro & slg & ~self.rdmaskn)

        # write-release gated by busy and by shadow (and write-mask)
        brd = Repl(self.busy_o & self.shadown_i, self.n_dst)
        m.d.comb += self.wr.rel.eq(req_l.q & brd & self.wrmask)

        # output the data from the latch on go_write
        for i in range(self.n_dst):
            with m.If(self.wr.go[i]):
                m.d.comb += self.dest[i].eq(drl[i])

        return m

    def __iter__(self):
        """Yield the public ports of this Function Unit.

        NOTE(review): src1_i/src2_i and data_o are hard-coded here, i.e.
        this list is only complete for n_src == 2 and n_dst == 1 -
        confirm before using ports() on other shapes
        """
        yield self.rd.go
        yield self.wr.go
        yield self.issue_i
        yield self.shadown_i
        yield self.go_die_i
        yield from self.oper_i.ports()
        yield self.src1_i
        yield self.src2_i
        yield self.busy_o
        yield self.rd.rel
        yield self.wr.rel
        yield self.data_o

    def ports(self):
        """Return the list of public ports (see __iter__)."""
        return list(self)
370
371