1 """Computation Unit (aka "ALU Manager").
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
13 from nmigen
.compat
.sim
import run_simulation
, Settle
14 from nmigen
.cli
import verilog
, rtlil
15 from nmigen
import Module
, Signal
, Mux
, Elaboratable
, Repl
, Array
, Cat
, Const
16 from nmigen
.hdl
.rec
import (Record
, DIR_FANIN
, DIR_FANOUT
)
18 from nmutil
.latch
import SRLatch
, latchregister
19 from nmutil
.iocontrol
import RecordObject
21 from soc
.decoder
.power_decoder2
import Data
22 from soc
.decoder
.power_enums
import InternalOp
23 from soc
.fu
.regspec
import RegSpec
, RegSpecALUAPI
def go_record(n, name):
    """Create a paired go/rel Record used for scoreboard handshaking.

    :param n: width (number of bits, one per operand port)
    :param name: base name for the Record
    :returns: Record with a fan-in "go" field and a fan-out "rel" field,
              both marked reset_less

    Fix: the constructed Record was never returned, yet every caller
    binds the result (e.g. ``self.rd = go_record(...)``).
    """
    r = Record([('go', n, DIR_FANIN),
                ('rel', n, DIR_FANOUT)], name=name)
    r.go.reset_less = True
    r.rel.reset_less = True
    return r
33 # see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    base class for Computation Units, to provide a uniform API
    and allow "record.connect" etc. to be used, particularly when
    it comes to connecting multiple Computation Units up as a block

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid:   either an integer (specifies width of all regs) or a "regspec"

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
    """

    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        RegSpec.__init__(self, rwid, n_src, n_dst)
        RecordObject.__init__(self, name)

        # widths resolved by RegSpec (regspec or fixed rwid)
        n_src, n_dst = self._n_src, self._n_dst

        # create source operands: named src1_i, src2_i, ... and also
        # collected in self._src_i (MultiCompUnit reads cu._src_i).
        # Fix: the per-operand name ("src%d_i") was never computed, so the
        # constructor's `name` argument was being clobbered/reused, and the
        # _src_i list was never populated.
        src = []
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            sname = "src%d_i" % j
            rw = self._get_srcwid(i)
            sreg = Signal(rw, name=sname, reset_less=True)
            setattr(self, sname, sreg)
            src.append(sreg)
        self._src_i = src

        # create dest operands: named dest1_o, dest2_o, ... and collected
        # in self._dest (MultiCompUnit reads cu._dest).
        dst = []
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            dname = "dest%d_o" % j
            rw = self._get_dstwid(i)
            dreg = Signal(rw, name=dname, reset_less=True)
            setattr(self, dname, dreg)
            dst.append(dreg)
        self._dest = dst

        # operation / data input
        self.oper_i = subkls(name="oper_i")  # operand

        # create read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="rd")  # read in, req out
        self.wr = go_record(n_dst, name="wr")  # write in, req out
        self.rdmaskn = Signal(n_src, reset_less=True)  # read mask
        self.issue_i = Signal(reset_less=True)  # fn issue in
        self.shadown_i = Signal(reset=1)  # shadow function, defaults to ON
        self.go_die_i = Signal()  # go die (reset)

        # status outputs
        self.busy_o = Signal(reset_less=True)  # fn busy out
        self.done_o = Signal(reset_less=True)
class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1):
        """MultiCompUnit

        * :rwid:        width of register latches (TODO: allocate per regspec)
        * :alu:         ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src:       number of src operands
        * :n_dst:       number of destination operands
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.opsubsetkls = opsubsetkls
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst)
        # RegSpec may override the requested counts: take them from the record
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print ("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands (src1_i, src2_i, ...)
        # Fix: the name was never computed before the setattr, so the
        # loop was referencing an undefined `name`.
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands (dest1_o, dest2_o, ...)
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names.
        # Fix: self.rd / self.wr aliases were missing, yet they are used
        # immediately below (and by elaborate()).
        self.rd = cu.rd
        self.wr = cu.wr
        self.rdmaskn = cu.rdmaskn
        self.go_rd_i = self.rd.go  # temporary naming
        self.go_wr_i = self.wr.go  # temporary naming
        self.rd_rel_o = self.rd.rel  # temporary naming
        self.req_rel_o = self.wr.rel  # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        # Fix: self.dest was never assigned although self.data_o reads it
        self.dest = cu._dest
        self.data_o = self.dest[0]  # Dest out
        self.done_o = cu.done_o
145 def _mux_op(self
, m
, sl
, op_is_imm
, imm
, i
):
146 # select imm if opcode says so. however also change the latch
147 # to trigger *from* the opcode latch instead.
148 src_or_imm
= Signal(self
.cu
._get
_srcwid
(i
), reset_less
=True)
149 src_sel
= Signal(reset_less
=True)
150 m
.d
.comb
+= src_sel
.eq(Mux(op_is_imm
, self
.opc_l
.q
, self
.src_l
.q
[i
]))
151 m
.d
.comb
+= src_or_imm
.eq(Mux(op_is_imm
, imm
, self
.src_i
[i
]))
152 # overwrite 1st src-latch with immediate-muxed stuff
153 sl
[i
][0] = src_or_imm
155 sl
[i
][3] = ~op_is_imm
# change rd.rel[i] gate condition
157 def elaborate(self
, platform
):
159 m
.submodules
.alu
= self
.alu
160 m
.submodules
.src_l
= src_l
= SRLatch(False, self
.n_src
, name
="src")
161 m
.submodules
.opc_l
= opc_l
= SRLatch(sync
=False, name
="opc")
162 m
.submodules
.req_l
= req_l
= SRLatch(False, self
.n_dst
, name
="req")
163 m
.submodules
.rst_l
= rst_l
= SRLatch(sync
=False, name
="rst")
164 m
.submodules
.rok_l
= rok_l
= SRLatch(sync
=False, name
="rdok")
165 self
.opc_l
, self
.src_l
= opc_l
, src_l
167 # ALU only proceeds when all src are ready. rd_rel_o is delayed
168 # so combine it with go_rd_i. if all bits are set we're good
169 all_rd
= Signal(reset_less
=True)
170 m
.d
.comb
+= all_rd
.eq(self
.busy_o
& rok_l
.q
&
171 (((~self
.rd
.rel
) | self
.rd
.go
).all()))
173 # generate read-done pulse
174 all_rd_dly
= Signal(reset_less
=True)
175 all_rd_pulse
= Signal(reset_less
=True)
176 m
.d
.sync
+= all_rd_dly
.eq(all_rd
)
177 m
.d
.comb
+= all_rd_pulse
.eq(all_rd
& ~all_rd_dly
)
179 # create rising pulse from alu valid condition.
180 alu_done
= Signal(reset_less
=True)
181 alu_done_dly
= Signal(reset_less
=True)
182 alu_pulse
= Signal(reset_less
=True)
183 alu_pulsem
= Signal(self
.n_dst
, reset_less
=True)
184 m
.d
.comb
+= alu_done
.eq(self
.alu
.n
.valid_o
)
185 m
.d
.sync
+= alu_done_dly
.eq(alu_done
)
186 m
.d
.comb
+= alu_pulse
.eq(alu_done
& ~alu_done_dly
)
187 m
.d
.comb
+= alu_pulsem
.eq(Repl(alu_pulse
, self
.n_dst
))
189 # write_requests all done
190 # req_done works because any one of the last of the writes
191 # is enough, when combined with when read-phase is done (rst_l.q)
192 wr_any
= Signal(reset_less
=True)
193 req_done
= Signal(reset_less
=True)
194 m
.d
.comb
+= self
.done_o
.eq(self
.busy_o
& ~
(self
.wr
.rel
.bool()))
195 m
.d
.comb
+= wr_any
.eq(self
.wr
.go
.bool())
196 m
.d
.comb
+= req_done
.eq(wr_any
& ~self
.alu
.n
.ready_i
& (req_l
.q
== 0))
199 reset
= Signal(reset_less
=True)
200 rst_r
= Signal(reset_less
=True) # reset latch off
201 reset_w
= Signal(self
.n_dst
, reset_less
=True)
202 reset_r
= Signal(self
.n_src
, reset_less
=True)
203 m
.d
.comb
+= reset
.eq(req_done | self
.go_die_i
)
204 m
.d
.comb
+= rst_r
.eq(self
.issue_i | self
.go_die_i
)
205 m
.d
.comb
+= reset_w
.eq(self
.wr
.go |
Repl(self
.go_die_i
, self
.n_dst
))
206 m
.d
.comb
+= reset_r
.eq(self
.rd
.go |
Repl(self
.go_die_i
, self
.n_src
))
208 # read-done,wr-proceed latch
209 m
.d
.comb
+= rok_l
.s
.eq(self
.issue_i
) # set up when issue starts
210 m
.d
.comb
+= rok_l
.r
.eq(self
.alu
.n
.valid_o
& self
.busy_o
) # ALU done
212 # wr-done, back-to-start latch
213 m
.d
.comb
+= rst_l
.s
.eq(all_rd
) # set when read-phase is fully done
214 m
.d
.comb
+= rst_l
.r
.eq(rst_r
) # *off* on issue
216 # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
217 m
.d
.sync
+= opc_l
.s
.eq(self
.issue_i
) # set on issue
218 m
.d
.sync
+= opc_l
.r
.eq(req_done
) # reset on ALU
220 # src operand latch (not using go_wr_i)
221 m
.d
.sync
+= src_l
.s
.eq(Repl(self
.issue_i
, self
.n_src
))
222 m
.d
.sync
+= src_l
.r
.eq(reset_r
)
224 # dest operand latch (not using issue_i)
225 m
.d
.comb
+= req_l
.s
.eq(alu_pulsem
)
226 m
.d
.comb
+= req_l
.r
.eq(reset_w
)
228 # create a latch/register for the operand
229 oper_r
= self
.opsubsetkls(name
="oper_r")
230 latchregister(m
, self
.oper_i
, oper_r
, self
.issue_i
, "oper_l")
232 # and for each output from the ALU: capture when ALU output is valid
234 for i
in range(self
.n_dst
):
235 name
= "data_r%d" % i
236 data_r
= Signal(self
.cu
._get
_dstwid
(i
), name
=name
, reset_less
=True)
237 latchregister(m
, self
.get_out(i
), data_r
, alu_pulsem
, name
+ "_l")
240 # pass the operation to the ALU
241 m
.d
.comb
+= self
.get_op().eq(oper_r
)
243 # create list of src/alu-src/src-latch. override 1st and 2nd one below.
244 # in the case, for ALU and Logical pipelines, we assume RB is the
245 # 2nd operand in the input "regspec". see for example
246 # soc.fu.alu.pipe_data.ALUInputData
248 print ("src_i", self
.src_i
)
249 for i
in range(self
.n_src
):
250 sl
.append([self
.src_i
[i
], self
.get_in(i
), src_l
.q
[i
], Const(1,1)])
252 # if the operand subset has "zero_a" we implicitly assume that means
253 # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
254 # see https://bugs.libre-soc.org/show_bug.cgi?id=336
255 if hasattr(oper_r
, "zero_a"):
256 # select zero imm if opcode says so. however also change the latch
257 # to trigger *from* the opcode latch instead.
258 self
._mux
_op
(m
, sl
, oper_r
.zero_a
, 0, 0)
260 # if the operand subset has "imm_data" we implicitly assume that means
261 # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
262 if hasattr(oper_r
, "imm_data"):
263 # select immediate if opcode says so. however also change the latch
264 # to trigger *from* the opcode latch instead.
265 op_is_imm
= oper_r
.imm_data
.imm_ok
266 imm
= oper_r
.imm_data
.imm
267 self
._mux
_op
(m
, sl
, op_is_imm
, imm
, 1)
269 # create a latch/register for src1/src2 (even if it is a copy of imm)
270 for i
in range(self
.n_src
):
271 src
, alusrc
, latch
, _
= sl
[i
]
272 latchregister(m
, src
, alusrc
, latch
, name
="src_r%d" % i
)
275 # ALU connection / interaction
278 # on a go_read, tell the ALU we're accepting data.
279 m
.submodules
.alui_l
= alui_l
= SRLatch(False, name
="alui")
280 m
.d
.comb
+= self
.alu
.p
.valid_i
.eq(alui_l
.q
)
281 m
.d
.sync
+= alui_l
.r
.eq(self
.alu
.p
.ready_o
& alui_l
.q
)
282 m
.d
.comb
+= alui_l
.s
.eq(all_rd_pulse
)
284 # ALU output "ready" side. alu "ready" indication stays hi until
286 m
.submodules
.alu_l
= alu_l
= SRLatch(False, name
="alu")
287 m
.d
.comb
+= self
.alu
.n
.ready_i
.eq(alu_l
.q
)
288 m
.d
.sync
+= alu_l
.r
.eq(self
.alu
.n
.valid_o
& alu_l
.q
)
289 m
.d
.comb
+= alu_l
.s
.eq(all_rd_pulse
)
295 slg
= Cat(*map(lambda x
: x
[3], sl
)) # get req gate conditions
296 # all request signals gated by busy_o. prevents picker problems
297 m
.d
.comb
+= self
.busy_o
.eq(opc_l
.q
) # busy out
299 # read-release gated by busy (and read-mask)
300 bro
= Repl(self
.busy_o
, self
.n_src
)
301 m
.d
.comb
+= self
.rd
.rel
.eq(src_l
.q
& bro
& slg
& ~self
.rdmaskn
)
303 # write-release gated by busy and by shadow
304 brd
= Repl(self
.busy_o
& self
.shadown_i
, self
.n_dst
)
305 m
.d
.comb
+= self
.wr
.rel
.eq(req_l
.q
& brd
)
307 # output the data from the latch on go_write
308 for i
in range(self
.n_dst
):
309 with m
.If(self
.wr
.go
[i
]):
310 m
.d
.comb
+= self
.dest
[i
].eq(drl
[i
])
320 yield from self
.oper_i
.ports()
def op_sim(dut, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
    """Drive one complete operation cycle on the CompUnit DUT.

    Issues the opcode/operands, performs the rd.go and wr.go handshakes,
    and returns the captured result.

    Fixes relative to the broken text: the rd/wr handshakes lost their
    polling loops and clock ``yield``s, and the final ``return result``
    was missing although every caller binds the generator's return value.
    """
    yield dut.issue_i.eq(0)
    yield
    # present operands and operation
    yield dut.src_i[0].eq(a)
    yield dut.src_i[1].eq(b)
    yield dut.oper_i.insn_type.eq(op)
    yield dut.oper_i.invert_a.eq(inv_a)
    yield dut.oper_i.imm_data.imm.eq(imm)
    yield dut.oper_i.imm_data.imm_ok.eq(imm_ok)
    yield dut.oper_i.zero_a.eq(zero_a)
    # pulse issue_i for exactly one cycle
    yield dut.issue_i.eq(1)
    yield
    yield dut.issue_i.eq(0)
    yield
    if not imm_ok or not zero_a:
        # acknowledge both register reads; wait for rd.rel to respond
        yield dut.rd.go.eq(0b11)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print ("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)
    if len(dut.src_i) == 3:
        # 3-operand ALU: read the 3rd operand as well
        yield dut.rd.go.eq(0b100)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print ("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)

    # wait for the write-request to be released
    req_rel_o = yield dut.wr.rel
    result = yield dut.data_o
    print ("req_rel", req_rel_o, result)
    while True:
        req_rel_o = yield dut.wr.rel
        result = yield dut.data_o
        print ("req_rel", req_rel_o, result)
        if req_rel_o:
            break
        yield

    # acknowledge the write with a one-cycle go pulse; Settle() lets the
    # combinatorial output be sampled in the same cycle
    yield dut.wr.go[0].eq(1)
    yield Settle()
    result = yield dut.data_o
    yield
    print ("result", result)
    yield dut.wr.go[0].eq(0)
    yield
    return result
def scoreboard_sim_dummy(dut):
    """Exercise the DUT with a NOP ALU: the result must pass src1 through.

    Fix: both op_sim call argument lists were left unterminated
    (continuation lines lost); completed with the immediate kwargs so the
    NOP path is also checked to ignore the immediate.
    """
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 5, result

    result = yield from op_sim(dut, 9, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 9, result
def scoreboard_sim(dut):
    """Exercise the DUT with a real 16-bit ADD ALU, covering the plain,
    immediate, invert-a and zero-a operand paths.

    Fix: several op_sim call argument lists were left unterminated and
    some result assertions were lost; reconstructed with the expected
    16-bit results (5+8=13, 5+2=7, (~5)+2=65532 mod 2**16, 0+2+8=10,
    0+2=2).
    """
    # zero (no) immediate, inv_a=0: result = a + b
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 13, result

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD)
    assert result == 7, result

    # 16-bit two's complement: (~5) + 2 == 65532
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=1)
    assert result == 65532, result

    # zero_a replaces src1 with 0; immediate replaces src2
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1,
                               imm=8, imm_ok=1)
    assert result == 10, result

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1)
    assert result == 2, result
def test_compunit():
    """Convert the CompUnit to RTLIL and run the scoreboard simulation.

    Fix: the ``def`` line was lost (leaving the body orphaned at module
    level), ``m``/``alu`` were never created, and the converted RTLIL was
    never written to the opened file.
    """
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(16, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut), vcd_name='test_compunit1.vcd')
class CompUnitParallelTest:
    """Drive the CompUnit with several parallel simulator processes: one
    driver plus one monitor per read/write port, each checking the
    handshake protocol independently.

    Fixes relative to the broken text: the ``driver`` and ``issue`` method
    headers were lost (leaving invalid class-level ``yield``s), and the
    wait-loops/assertions inside ``issue`` and ``rd`` were lost.
    Reconstructed to match the documented protocol; NOTE(review): exact
    assertion placement reconstructed - confirm against upstream.
    """

    def __init__(self, dut):
        self.dut = dut

        # Operation cycle should not take longer than this:
        self.MAX_BUSY_WAIT = 50

        # Minimum duration in which issue_i will be kept inactive,
        # during which busy_o must remain low.
        self.MIN_BUSY_LOW = 5

        # Number of cycles to stall until the assertion of go.
        # One value, for each port. Can be zero, for no delay.
        self.RD_GO_DELAY = [0, 3]

        # store common data for the input operation of the processes
        # input operation:
        self.op = 0
        self.inv_a = self.zero_a = 0
        self.imm = self.imm_ok = 0
        # input data:
        self.a = self.b = 0

    def driver(self):
        """Main process: runs one operation through the DUT."""
        print("Begin parallel test.")
        yield from self.operation(5, 2, InternalOp.OP_ADD, inv_a=0,
                                  imm=8, imm_ok=1)

    def operation(self, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
        """Store operation parameters (shared with the rd/wr monitors)
        and trigger one operation cycle."""
        # store data for the operation
        self.a = a
        self.b = b
        self.op = op
        self.inv_a = inv_a
        self.imm = imm
        self.imm_ok = imm_ok
        self.zero_a = zero_a

        # trigger operation cycle
        yield from self.issue()

    def issue(self):
        """Pulse issue_i for one cycle, presenting the operation, then
        wait (bounded) for busy_o to drop again."""
        # issue_i starts inactive
        yield self.dut.issue_i.eq(0)

        for n in range(self.MIN_BUSY_LOW):
            yield
            # busy_o must remain inactive. It cannot rise on its own.
            busy_o = yield self.dut.busy_o
            assert not busy_o

        # activate issue_i to begin the operation cycle
        yield self.dut.issue_i.eq(1)

        # at the same time, present the operation
        yield self.dut.oper_i.insn_type.eq(self.op)
        yield self.dut.oper_i.invert_a.eq(self.inv_a)
        yield self.dut.oper_i.imm_data.imm.eq(self.imm)
        yield self.dut.oper_i.imm_data.imm_ok.eq(self.imm_ok)
        yield self.dut.oper_i.zero_a.eq(self.zero_a)

        # give one cycle for the CompUnit to latch the data
        yield

        # busy_o must keep being low in this cycle, because issue_i was
        # low on the previous cycle.
        # It cannot rise on its own.
        # Also, busy_o and issue_i must never be active at the same time, ever
        busy_o = yield self.dut.busy_o
        assert not busy_o

        # lower issue_i again: it is a single-cycle pulse
        yield self.dut.issue_i.eq(0)

        # deactivate inputs along with issue_i, so we can be sure the data
        # was latched at the correct cycle
        yield self.dut.oper_i.insn_type.eq(0)
        yield self.dut.oper_i.invert_a.eq(0)
        yield self.dut.oper_i.imm_data.imm.eq(0)
        yield self.dut.oper_i.imm_data.imm_ok.eq(0)
        yield self.dut.oper_i.zero_a.eq(0)
        yield

        # wait for busy_o to lower
        # timeout after self.MAX_BUSY_WAIT cycles
        for n in range(self.MAX_BUSY_WAIT):
            # sample busy_o in the current cycle
            busy_o = yield self.dut.busy_o
            if not busy_o:
                # operation cycle ends when busy_o becomes inactive
                break
            yield

        # if busy_o is still active, a timeout has occurred
        # TODO: Uncomment this, once the test is complete:
        # assert not busy_o

        if busy_o:
            print("If you are reading this, "
                  "it's because the above test failed, as expected,\n"
                  "with a timeout. It must pass, once the test is complete.")
            return

        print("If you are reading this, "
              "it's because the above test unexpectedly passed.")

    def rd(self, rd_idx):
        """Monitor one read port: check rd.rel protocol and pulse rd.go."""
        # wait for issue_i to rise
        while True:
            issue_i = yield self.dut.issue_i
            if issue_i:
                break
            # issue_i has not risen yet, so rd must keep low
            rel = yield self.dut.rd.rel[rd_idx]
            assert not rel
            yield

        # we do not want rd to rise on an immediate operand
        # if it is immediate, exit the process
        # TODO: don't exit the process, monitor rd instead to ensure it
        # doesn't rise on its own
        if (self.zero_a and rd_idx == 0) or (self.imm_ok and rd_idx == 1):
            return

        # issue_i has risen. rel must rise on the next cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel
        yield

        # stall for additional cycles. Check that rel doesn't fall on its own
        for n in range(self.RD_GO_DELAY[rd_idx]):
            rel = yield self.dut.rd.rel[rd_idx]
            assert rel
            yield

        # Before asserting "go", make sure "rel" has risen.
        # The use of Settle allows "go" to be set combinatorially,
        # rising on the same cycle as "rel".
        yield Settle()
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # assert go for one cycle
        yield self.dut.rd.go[rd_idx].eq(1)
        yield

        # rel must keep high, since go was inactive in the last cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # finish the go one-clock pulse
        yield self.dut.rd.go[rd_idx].eq(0)
        yield

        # rel must have gone low in response to go being high
        # on the previous cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel

        # TODO: also when dut.rd.go is set, put the expected value into
        # the src_i. use dut.get_in[rd_idx] to do so

    def wr(self, wr_idx):
        """Monitor one write port (incomplete upstream stub)."""
        # monitor self.dut.wr.req[rd_idx] and sets dut.wr.go[idx] for 1 cycle
        yield
        # TODO: also when dut.wr.go is set, check the output against the
        # self.expected_o and assert. use dut.get_out(wr_idx) to do so.

    def run_simulation(self, vcd_name):
        """Run all processes in parallel against the DUT."""
        run_simulation(self.dut, [self.driver(),
                                  self.rd(0),  # one read port (a)
                                  self.rd(1),  # one read port (b)
                                  self.wr(0),  # one write port (o)
                                  ],
                       vcd_name=vcd_name)
def test_compunit_regspec3():
    """Run the 3-operand (dummy NOP ALU) regspec-based simulation.

    Fix: ``m``/``alu`` were never created and the outspec list was left
    unterminated.
    """
    from alu_hier import DummyALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15'),
              ('INT', 'c', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = DummyALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    run_simulation(m, scoreboard_sim_dummy(dut),
                   vcd_name='test_compunit_regspec3.vcd')
def test_compunit_regspec1():
    """Run the 2-operand (real ADD ALU) regspec-based simulation, write
    out the RTLIL, then run the parallel protocol test.

    Fix: ``m``/``alu`` were never created, the outspec list was left
    unterminated, and the converted RTLIL was never written to the
    opened file.
    """
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit_regspec1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut),
                   vcd_name='test_compunit_regspec1.vcd')

    test = CompUnitParallelTest(dut)
    test.run_simulation("test_compunit_parallel.vcd")
if __name__ == '__main__':
    # run both regspec-based testbenches (each writes .vcd / .il files
    # into the current directory)
    test_compunit_regspec1()
    test_compunit_regspec3()