[soc.git] / src / soc / simple / issuer.py
1 """simple core issuer
2
3 not in any way intended for production use. this runs a FSM that:
4
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
9 * increments the PC
10 * does it all over again
11
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
15 improved.
16 """
17
18 from nmigen import (Elaboratable, Module, Signal, ClockSignal, ResetSignal,
19 ClockDomain, DomainRenamer, Mux, Const, Repl, Cat)
20 from nmigen.cli import rtlil
21 from nmigen.cli import main
22 import sys
23
24 from nmutil.singlepipe import ControlBase
25 from soc.simple.core_data import FetchOutput, FetchInput
26
27 from nmigen.lib.coding import PriorityEncoder
28
29 from openpower.decoder.power_decoder import create_pdecode
30 from openpower.decoder.power_decoder2 import PowerDecode2, SVP64PrefixDecoder
31 from openpower.decoder.decode2execute1 import IssuerDecode2ToOperand
32 from openpower.decoder.decode2execute1 import Data
33 from openpower.decoder.power_enums import (MicrOp, SVP64PredInt, SVP64PredCR,
34 SVP64PredMode)
35 from openpower.state import CoreState
36 from openpower.consts import (CR, SVP64CROffs, MSR)
37 from soc.experiment.testmem import TestMemory # test only for instructions
38 from soc.regfile.regfiles import StateRegs, FastRegs
39 from soc.simple.core import NonProductionCore
40 from soc.config.test.test_loadstore import TestMemPspec
41 from soc.config.ifetch import ConfigFetchUnit
42 from soc.debug.dmi import CoreDebug, DMIInterface
43 from soc.debug.jtag import JTAG
44 from soc.config.pinouts import get_pinspecs
45 from soc.interrupts.xics import XICS_ICP, XICS_ICS
46 from soc.bus.simple_gpio import SimpleGPIO
47 from soc.bus.SPBlock512W64B8W import SPBlock512W64B8W
48 from soc.clock.select import ClockSelect
49 from soc.clock.dummypll import DummyPLL
50 from openpower.sv.svstate import SVSTATERec
51 from soc.experiment.icache import ICache
52
53 from nmutil.util import rising_edge
54
55
56 def get_insn(f_instr_o, pc):
57 if f_instr_o.width == 32:
58 return f_instr_o
59 else:
60 # 64-bit: bit 2 of pc decides which word to select
61 return f_instr_o.word_select(pc[2], 32)
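# usage sketch (illustrative only, mirroring the call made in fetch_fsm
# below): with a 64-bit f_instr_o, bit 2 of the PC picks the 32-bit word:
#     insn = get_insn(self.imem.f_instr_o, cur_state.pc)
#     # pc = 0x0 / 0x8 -> bits [0:32], pc = 0x4 / 0xc -> bits [32:64]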
62
63 # gets state input or reads from state regfile
64
65
66 def state_get(m, res, core_rst, state_i, name, regfile, regnum):
67 comb = m.d.comb
68 sync = m.d.sync
69 # read the {insert state variable here}
70 res_ok_delay = Signal(name="%s_ok_delay" % name)
71 with m.If(~core_rst):
72 sync += res_ok_delay.eq(~state_i.ok)
73 with m.If(state_i.ok):
74 # incoming override (start from pc_i)
75 comb += res.eq(state_i.data)
76 with m.Else():
77 # otherwise read StateRegs regfile for {insert state here}...
78 comb += regfile.ren.eq(1 << regnum)
79 # ... but on a 1-clock delay
80 with m.If(res_ok_delay):
81 comb += res.eq(regfile.o_data)
82
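# timing sketch (an informal note on the regfile protocol assumed above):
# asserting regfile.ren in cycle N makes regfile.o_data valid in cycle N+1,
# which is why res_ok_delay - a one-cycle-delayed "no override" flag -
# gates the final capture.  typical use, as in elaborate() below:
#     state_get(m, state.pc, core_rst, self.pc_i, "pc",
#               self.state_r_pc, StateRegs.PC)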
83
84 def get_predint(m, mask, name):
85 """decode SVP64 predicate integer mask field to reg number and invert
86 this is identical to the equivalent function in ISACaller except that
87 it doesn't read the INT directly, it just decodes "what needs to be done"
88 i.e. which INT reg, whether it is shifted and whether it is bit-inverted.
89
90 * all1s is set to indicate that no mask is to be applied.
91 * regread indicates the GPR register number to be read
92 * invert is set to indicate that the register value is to be inverted
93 * unary indicates that the register contents are used as a shift amount (mask = 1<<r3)
94 """
95 comb = m.d.comb
96 regread = Signal(5, name=name+"regread")
97 invert = Signal(name=name+"invert")
98 unary = Signal(name=name+"unary")
99 all1s = Signal(name=name+"all1s")
100 with m.Switch(mask):
101 with m.Case(SVP64PredInt.ALWAYS.value):
102 comb += all1s.eq(1) # use 0b1111 (all ones)
103 with m.Case(SVP64PredInt.R3_UNARY.value):
104 comb += regread.eq(3)
105 comb += unary.eq(1) # 1<<r3 - shift r3 (single bit)
106 with m.Case(SVP64PredInt.R3.value):
107 comb += regread.eq(3)
108 with m.Case(SVP64PredInt.R3_N.value):
109 comb += regread.eq(3)
110 comb += invert.eq(1)
111 with m.Case(SVP64PredInt.R10.value):
112 comb += regread.eq(10)
113 with m.Case(SVP64PredInt.R10_N.value):
114 comb += regread.eq(10)
115 comb += invert.eq(1)
116 with m.Case(SVP64PredInt.R30.value):
117 comb += regread.eq(30)
118 with m.Case(SVP64PredInt.R30_N.value):
119 comb += regread.eq(30)
120 comb += invert.eq(1)
121 return regread, invert, unary, all1s
122
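# decode examples (a sketch of the mapping implemented above):
#     SVP64PredInt.ALWAYS   -> all1s=1              (no mask applied)
#     SVP64PredInt.R3_UNARY -> regread=3,  unary=1  (mask = 1<<GPR[3])
#     SVP64PredInt.R10_N    -> regread=10, invert=1 (mask = ~GPR[10])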
123
124 def get_predcr(m, mask, name):
125 """decode SVP64 predicate CR to reg number field and invert status
126 this is identical to _get_predcr in ISACaller
127 """
128 comb = m.d.comb
129 idx = Signal(2, name=name+"idx")
130 invert = Signal(name=name+"crinvert")
131 with m.Switch(mask):
132 with m.Case(SVP64PredCR.LT.value):
133 comb += idx.eq(CR.LT)
134 comb += invert.eq(0)
135 with m.Case(SVP64PredCR.GE.value):
136 comb += idx.eq(CR.LT)
137 comb += invert.eq(1)
138 with m.Case(SVP64PredCR.GT.value):
139 comb += idx.eq(CR.GT)
140 comb += invert.eq(0)
141 with m.Case(SVP64PredCR.LE.value):
142 comb += idx.eq(CR.GT)
143 comb += invert.eq(1)
144 with m.Case(SVP64PredCR.EQ.value):
145 comb += idx.eq(CR.EQ)
146 comb += invert.eq(0)
147 with m.Case(SVP64PredCR.NE.value):
148 comb += idx.eq(CR.EQ)
149 comb += invert.eq(1)
150 with m.Case(SVP64PredCR.SO.value):
151 comb += idx.eq(CR.SO)
152 comb += invert.eq(0)
153 with m.Case(SVP64PredCR.NS.value):
154 comb += idx.eq(CR.SO)
155 comb += invert.eq(1)
156 return idx, invert
157
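# decode examples (a sketch): each "negative" predicate selects the same
# CR bit as its complement, with invert set:
#     SVP64PredCR.LT -> idx=CR.LT, invert=0
#     SVP64PredCR.GE -> idx=CR.LT, invert=1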
158
159 class TestIssuerBase(Elaboratable):
160 """TestIssuerBase - common base class for Issuers
161
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
164 """
165
166 def __init__(self, pspec):
167
168 # test if microwatt compatibility is to be enabled
169 self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
170 (pspec.microwatt_compat == True))
171 self.alt_reset = Signal(reset_less=True) # not connected yet (microwatt)
172
173 # test if SVP64 is to be enabled
174 self.svp64_en = hasattr(pspec, "svp64") and (pspec.svp64 == True)
175
176 # and if regfiles are reduced
177 self.regreduce_en = (hasattr(pspec, "regreduce") and
178 (pspec.regreduce == True))
179
180 # and if overlap requested
181 self.allow_overlap = (hasattr(pspec, "allow_overlap") and
182 (pspec.allow_overlap == True))
183
184 # and get the core domain
185 self.core_domain = "coresync"
186 if (hasattr(pspec, "core_domain") and
187 isinstance(pspec.core_domain, str)):
188 self.core_domain = pspec.core_domain
189
190 # JTAG interface. add this right at the start because if it's
191 # added it *modifies* the pspec, by adding enable/disable signals
192 # for parts of the rest of the core
193 self.jtag_en = hasattr(pspec, "debug") and pspec.debug == 'jtag'
194 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
195 self.dbg_domain = "dbgsync" # domain for DMI/JTAG clock
196 if self.jtag_en:
197 # XXX MUST keep this up-to-date with litex, and
198 # soc-cocotb-sim, and err.. all needs sorting out, argh
199 subset = ['uart',
200 'mtwi',
201 'eint', 'gpio', 'mspi0',
202 # 'mspi1', - disabled for now
203 # 'pwm', 'sd0', - disabled for now
204 'sdr']
205 self.jtag = JTAG(get_pinspecs(subset=subset),
206 domain=self.dbg_domain)
207 # add signals to pspec to enable/disable icache and dcache
208 # (or data and instruction wishbone if icache/dcache not included)
209 # https://bugs.libre-soc.org/show_bug.cgi?id=520
210 # TODO: do we actually care if these are not domain-synchronised?
211 # honestly probably not.
212 pspec.wb_icache_en = self.jtag.wb_icache_en
213 pspec.wb_dcache_en = self.jtag.wb_dcache_en
214 self.wb_sram_en = self.jtag.wb_sram_en
215 else:
216 self.wb_sram_en = Const(1)
217
218 # add 4k sram blocks?
219 self.sram4x4k = (hasattr(pspec, "sram4x4kblock") and
220 pspec.sram4x4kblock == True)
221 if self.sram4x4k:
222 self.sram4k = []
223 for i in range(4):
224 self.sram4k.append(SPBlock512W64B8W(name="sram4k_%d" % i,
225 # features={'err'}
226 ))
227
228 # add interrupt controller?
229 self.xics = hasattr(pspec, "xics") and pspec.xics == True
230 if self.xics:
231 self.xics_icp = XICS_ICP()
232 self.xics_ics = XICS_ICS()
233 self.int_level_i = self.xics_ics.int_level_i
234 else:
235 self.ext_irq = Signal()
236
237 # add GPIO peripheral?
238 self.gpio = hasattr(pspec, "gpio") and pspec.gpio == True
239 if self.gpio:
240 self.simple_gpio = SimpleGPIO()
241 self.gpio_o = self.simple_gpio.gpio_o
242
243 # main instruction core. suitable for prototyping / demo only
244 self.core = core = NonProductionCore(pspec)
245 self.core_rst = ResetSignal(self.core_domain)
246
247 # instruction decoder. goes into Trap Record
248 #pdecode = create_pdecode()
249 self.cur_state = CoreState("cur") # current state (MSR/PC/SVSTATE)
250 self.pdecode2 = PowerDecode2(None, state=self.cur_state,
251 opkls=IssuerDecode2ToOperand,
252 svp64_en=self.svp64_en,
253 regreduce_en=self.regreduce_en)
254 pdecode = self.pdecode2.dec
255
256 if self.svp64_en:
257 self.svp64 = SVP64PrefixDecoder() # for decoding SVP64 prefix
258
259 self.update_svstate = Signal() # set this if updating svstate
260 self.new_svstate = new_svstate = SVSTATERec("new_svstate")
261
262 # Test Instruction memory
263 if hasattr(core, "icache"):
264 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
265 # truly dreadful. needs a huge reorg.
266 pspec.icache = core.icache
267 self.imem = ConfigFetchUnit(pspec).fu
268
269 # DMI interface
270 self.dbg = CoreDebug()
271 self.dbg_rst_i = Signal(reset_less=True)
272
273 # instruction go/monitor
274 self.pc_o = Signal(64, reset_less=True)
275 self.pc_i = Data(64, "pc_i") # set "ok" to indicate "please change me"
276 self.msr_i = Data(64, "msr_i") # set "ok" to indicate "please change me"
277 self.svstate_i = Data(64, "svstate_i") # ditto
278 self.core_bigendian_i = Signal() # TODO: set based on MSR.LE
279 self.busy_o = Signal(reset_less=True)
280 self.memerr_o = Signal(reset_less=True)
281
282 # STATE regfile read/write ports for PC, MSR, SVSTATE
283 staterf = self.core.regs.rf['state']
284 self.state_r_msr = staterf.r_ports['msr'] # MSR rd
285 self.state_r_pc = staterf.r_ports['cia'] # PC rd
286 self.state_r_sv = staterf.r_ports['sv'] # SVSTATE rd
287
288 self.state_w_msr = staterf.w_ports['d_wr2'] # MSR wr
289 self.state_w_pc = staterf.w_ports['d_wr1'] # PC wr
290 self.state_w_sv = staterf.w_ports['sv'] # SVSTATE wr
291
292 # DMI interface access
293 intrf = self.core.regs.rf['int']
294 crrf = self.core.regs.rf['cr']
295 xerrf = self.core.regs.rf['xer']
296 self.int_r = intrf.r_ports['dmi'] # INT read
297 self.cr_r = crrf.r_ports['full_cr_dbg'] # CR read
298 self.xer_r = xerrf.r_ports['full_xer'] # XER read
299
300 if self.svp64_en:
301 # for predication
302 self.int_pred = intrf.r_ports['pred'] # INT predicate read
303 self.cr_pred = crrf.r_ports['cr_pred'] # CR predicate read
304
305 # hack method of keeping an eye on whether branch/trap set the PC
306 self.state_nia = self.core.regs.rf['state'].w_ports['nia']
307 self.state_nia.wen.name = 'state_nia_wen'
308
309 # pulse to synchronize the simulator at instruction end
310 self.insn_done = Signal()
311
312 # indicate any instruction still outstanding, in execution
313 self.any_busy = Signal()
314
315 if self.svp64_en:
316 # store copies of predicate masks
317 self.srcmask = Signal(64)
318 self.dstmask = Signal(64)
319
320 # sigh, the wishbone addresses are not wishbone-compliant in microwatt
321 if self.microwatt_compat:
322 self.ibus_adr = Signal(32, name='wishbone_insn_out.adr')
323 self.dbus_adr = Signal(32, name='wishbone_data_out.adr')
324
325 # add an output of the PC and instruction, and whether it was requested
326 # this is for verilator debug purposes
327 if self.microwatt_compat:
328 self.nia = Signal(64)
329 self.msr_o = Signal(64)
330 self.nia_req = Signal(1)
331 self.insn = Signal(32)
332 self.ldst_req = Signal(1)
333 self.ldst_addr = Signal(1)
334
335 # for pausing dec/tb during an SPR pipeline event, this
336 # ensures that an SPR write (mtspr) to TB or DEC does not
337 # get overwritten by the DEC/TB FSM
338 self.pause_dec_tb = Signal()
339
340 def setup_peripherals(self, m):
341 comb, sync = m.d.comb, m.d.sync
342
343 # okaaaay so the debug module must be in coresync clock domain
344 # but NOT its reset signal. to cope with this, set every single
345 # submodule explicitly in coresync domain, debug and JTAG
346 # in their own one but using *external* reset.
347 csd = DomainRenamer(self.core_domain)
348 dbd = DomainRenamer(self.dbg_domain)
349
350 if self.microwatt_compat:
351 m.submodules.core = core = self.core
352 else:
353 m.submodules.core = core = csd(self.core)
354
355 # this _so_ needs sorting out. ICache is added down inside
356 # LoadStore1 and is already a submodule of LoadStore1
357 if not isinstance(self.imem, ICache):
358 m.submodules.imem = imem = csd(self.imem)
359
360 m.submodules.dbg = dbg = dbd(self.dbg)
361 if self.jtag_en:
362 m.submodules.jtag = jtag = dbd(self.jtag)
363 # TODO: UART2GDB mux, here, from external pin
364 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
365 sync += dbg.dmi.connect_to(jtag.dmi)
366
367 # fixup the clocks in microwatt-compat mode (but leave resets alone
368 # so that microwatt soc.vhdl can pull a reset on the core or DMI
369 # can do it, just like in TestIssuer)
370 if self.microwatt_compat:
371 intclk = ClockSignal(self.core_domain)
372 dbgclk = ClockSignal(self.dbg_domain)
373 if self.core_domain != 'sync':
374 comb += intclk.eq(ClockSignal())
375 if self.dbg_domain != 'sync':
376 comb += dbgclk.eq(ClockSignal())
377
378 # re-insert the 3 address LSBs that wishbone word-addressing drops
379 # this can go if using later versions of microwatt (not now)
380 if self.microwatt_compat:
381 ibus = self.imem.ibus
382 dbus = self.core.l0.cmpi.wb_bus()
383 comb += self.ibus_adr.eq(Cat(Const(0, 3), ibus.adr))
384 comb += self.dbus_adr.eq(Cat(Const(0, 3), dbus.adr))
385 # microwatt verilator debug purposes
386 pi = self.core.l0.cmpi.pi.pi
387 comb += self.ldst_req.eq(pi.addr_ok_o)
388 comb += self.ldst_addr.eq(pi.addr)
389
390 cur_state = self.cur_state
391
392 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
393 if self.sram4x4k:
394 for i, sram in enumerate(self.sram4k):
395 m.submodules["sram4k_%d" % i] = csd(sram)
396 comb += sram.enable.eq(self.wb_sram_en)
397
398 # XICS interrupt handler
399 if self.xics:
400 m.submodules.xics_icp = icp = csd(self.xics_icp)
401 m.submodules.xics_ics = ics = csd(self.xics_ics)
402 comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
403 sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
404 else:
405 sync += cur_state.eint.eq(self.ext_irq) # connect externally
406
407 # GPIO test peripheral
408 if self.gpio:
409 m.submodules.simple_gpio = simple_gpio = csd(self.simple_gpio)
410
411 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
412 # XXX causes litex ECP5 test to get wrong idea about input and output
413 # (but works with verilator sim *sigh*)
414 # if self.gpio and self.xics:
415 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
416
417 # instruction decoder
418 pdecode = create_pdecode()
419 m.submodules.dec2 = pdecode2 = csd(self.pdecode2)
420 if self.svp64_en:
421 m.submodules.svp64 = svp64 = csd(self.svp64)
422
423 # convenience
424 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
425 intrf = self.core.regs.rf['int']
426
427 # clock delay power-on reset
428 cd_por = ClockDomain(reset_less=True)
429 cd_sync = ClockDomain()
430 m.domains += cd_por, cd_sync
431 core_sync = ClockDomain(self.core_domain)
432 if self.core_domain != "sync":
433 m.domains += core_sync
434 if self.dbg_domain != "sync":
435 dbg_sync = ClockDomain(self.dbg_domain)
436 m.domains += dbg_sync
437
438 ti_rst = Signal(reset_less=True)
439 delay = Signal(range(4), reset=3)
440 with m.If(delay != 0):
441 m.d.por += delay.eq(delay - 1)
442 comb += cd_por.clk.eq(ClockSignal())
443
444 # power-on reset delay
445 core_rst = ResetSignal(self.core_domain)
446 if self.core_domain != "sync":
447 comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
448 comb += core_rst.eq(ti_rst)
449 else:
450 with m.If((delay != 0) | dbg.core_rst_o):
451 comb += core_rst.eq(1)
452
453 # connect external reset signal to DMI Reset
454 if self.dbg_domain != "sync":
455 dbg_rst = ResetSignal(self.dbg_domain)
456 comb += dbg_rst.eq(self.dbg_rst_i)
457
458 # busy/halted signals from core
459 core_busy_o = ~core.p.o_ready | core.n.o_data.busy_o # core is busy
460 comb += self.busy_o.eq(core_busy_o)
461 comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
462
463 # temporary hack: says "go" immediately for both address gen and ST
464 l0 = core.l0
465 ldst = core.fus.fus['ldst0']
466 st_go_edge = rising_edge(m, ldst.st.rel_o)
467 # link addr-go direct to rel
468 m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)
469 m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
470
471 def do_dmi(self, m, dbg):
472 """deals with DMI debug requests
473
474 currently only provides read requests for the INT regfile, CR and XER
475 it will later also deal with *writing* to these regfiles.
476 """
477 comb = m.d.comb
478 sync = m.d.sync
479 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
480 intrf = self.core.regs.rf['int']
481
482 with m.If(d_reg.req): # request for regfile access being made
483 # TODO: error-check this
484 # XXX should this be combinatorial? sync better?
485 if intrf.unary:
486 comb += self.int_r.ren.eq(1 << d_reg.addr)
487 else:
488 comb += self.int_r.addr.eq(d_reg.addr)
489 comb += self.int_r.ren.eq(1)
490 d_reg_delay = Signal()
491 sync += d_reg_delay.eq(d_reg.req)
492 with m.If(d_reg_delay):
493 # data arrives one clock later
494 comb += d_reg.data.eq(self.int_r.o_data)
495 comb += d_reg.ack.eq(1)
496
497 # sigh same thing for CR debug
498 with m.If(d_cr.req): # request for regfile access being made
499 comb += self.cr_r.ren.eq(0b11111111) # enable all
500 d_cr_delay = Signal()
501 sync += d_cr_delay.eq(d_cr.req)
502 with m.If(d_cr_delay):
503 # data arrives one clock later
504 comb += d_cr.data.eq(self.cr_r.o_data)
505 comb += d_cr.ack.eq(1)
506
507 # aaand XER...
508 with m.If(d_xer.req): # request for regfile access being made
509 comb += self.xer_r.ren.eq(0b111111) # enable all
510 d_xer_delay = Signal()
511 sync += d_xer_delay.eq(d_xer.req)
512 with m.If(d_xer_delay):
513 # data arrives one clock later
514 comb += d_xer.data.eq(self.xer_r.o_data)
515 comb += d_xer.ack.eq(1)
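# DMI read-timing sketch (an informal summary of the pattern above): the
# regfile read-enable is driven combinatorially from the request, and the
# data is returned one clock later together with the ack:
#     cycle N:   d_reg.req=1  ->  int_r.ren driven
#     cycle N+1: d_reg.data <- int_r.o_data, d_reg.ack=1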
516
517 def tb_dec_fsm(self, m, spr_dec):
518 """tb_dec_fsm
519
520 this is a FSM for updating either dec or tb. it runs alternately
521 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
522 value to DEC, however the regfile has "passthrough" on it so this
523 *should* be ok.
524
525 see v3.0B p1097-1099 for Timer Resource and p1065 and p1076
526 """
527
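# state-sequence sketch (a summary of the FSM below):
#     DEC_READ -> DEC_WRITE -> TB_READ -> TB_WRITE -> DEC_READ -> ...
# so DEC is decremented and TB incremented on alternate read/write pairs,
# and the whole loop stalls while self.pause_dec_tb is asserted
# (an mtspr to DEC or TB is in flight).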
528 comb, sync = m.d.comb, m.d.sync
529 fast_rf = self.core.regs.rf['fast']
530 fast_r_dectb = fast_rf.r_ports['issue'] # DEC/TB
531 fast_w_dectb = fast_rf.w_ports['issue'] # DEC/TB
532
533 with m.FSM() as fsm:
534
535 # initiates read of current DEC
536 with m.State("DEC_READ"):
537 comb += fast_r_dectb.addr.eq(FastRegs.DEC)
538 comb += fast_r_dectb.ren.eq(1)
539 with m.If(~self.pause_dec_tb):
540 m.next = "DEC_WRITE"
541
542 # waits for DEC read to arrive (1 cycle), updates with new value
543 # respects if dec/tb writing has been paused
544 with m.State("DEC_WRITE"):
545 with m.If(self.pause_dec_tb):
546 # if paused, return to reading
547 m.next = "DEC_READ"
548 with m.Else():
549 new_dec = Signal(64)
550 # TODO: MSR.LPCR 32-bit decrement mode
551 comb += new_dec.eq(fast_r_dectb.o_data - 1)
552 comb += fast_w_dectb.addr.eq(FastRegs.DEC)
553 comb += fast_w_dectb.wen.eq(1)
554 comb += fast_w_dectb.i_data.eq(new_dec)
555 # copy to cur_state for decoder, for an interrupt
556 sync += spr_dec.eq(new_dec)
557 m.next = "TB_READ"
558
559 # initiates read of current TB
560 with m.State("TB_READ"):
561 comb += fast_r_dectb.addr.eq(FastRegs.TB)
562 comb += fast_r_dectb.ren.eq(1)
563 with m.If(~self.pause_dec_tb):
564 m.next = "TB_WRITE"
565
566 # waits for TB read to arrive (1 cycle), writes back the incremented TB
567 # respects if dec/tb writing has been paused
568 with m.State("TB_WRITE"):
569 with m.If(self.pause_dec_tb):
570 # if paused, return to reading
571 m.next = "TB_READ"
572 with m.Else():
573 new_tb = Signal(64)
574 comb += new_tb.eq(fast_r_dectb.o_data + 1)
575 comb += fast_w_dectb.addr.eq(FastRegs.TB)
576 comb += fast_w_dectb.wen.eq(1)
577 comb += fast_w_dectb.i_data.eq(new_tb)
578 m.next = "DEC_READ"
579
580 return m
581
582 def elaborate(self, platform):
583 m = Module()
584 # convenience
585 comb, sync = m.d.comb, m.d.sync
586 cur_state = self.cur_state
587 pdecode2 = self.pdecode2
588 dbg = self.dbg
589
590 # set up peripherals and core
591 core_rst = self.core_rst
592 self.setup_peripherals(m)
593
594 # reset current state if core reset requested
595 with m.If(core_rst):
596 m.d.sync += self.cur_state.eq(0)
597
598 # check halted condition: if the PC requested for execution matches the
599 # DMI stop address, stop immediately. an address of 0xffff_ffff_ffff_ffff
600 # can never match
601 halted = Signal()
602 comb += halted.eq(dbg.stop_addr_o == dbg.state.pc)
603 with m.If(halted):
604 comb += dbg.core_stopped_i.eq(1)
605 comb += dbg.terminate_i.eq(1)
606
607 # PC and instruction from I-Memory
608 comb += self.pc_o.eq(cur_state.pc)
609 self.pc_changed = Signal() # note write to PC
610 self.msr_changed = Signal() # note write to MSR
611 self.sv_changed = Signal() # note write to SVSTATE
612
613 # read state either from incoming override or from regfile
614 state = CoreState("get") # current state (MSR/PC/SVSTATE)
615 state_get(m, state.msr, core_rst, self.msr_i,
616 "msr", # read MSR
617 self.state_r_msr, StateRegs.MSR)
618 state_get(m, state.pc, core_rst, self.pc_i,
619 "pc", # read PC
620 self.state_r_pc, StateRegs.PC)
621 state_get(m, state.svstate, core_rst, self.svstate_i,
622 "svstate", # read SVSTATE
623 self.state_r_sv, StateRegs.SVSTATE)
624
625 # don't write pc every cycle
626 comb += self.state_w_pc.wen.eq(0)
627 comb += self.state_w_pc.i_data.eq(0)
628
629 # connect up debug state. note "combinatorially same" below,
630 # this is a bit naff, passing state over in the dbg class, but
631 # because it is combinatorial it achieves the desired goal
632 comb += dbg.state.eq(state)
633
634 # this bit doesn't have to be in the FSM: connect up to read
635 # regfiles on demand from DMI
636 self.do_dmi(m, dbg)
637
638 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
639 # (which uses that in PowerDecoder2 to raise 0x900 exception)
640 self.tb_dec_fsm(m, cur_state.dec)
641
642 # while stopped, allow updating the MSR, PC and SVSTATE.
643 # these are mainly for debugging purposes (including DMI/JTAG)
644 with m.If(dbg.core_stopped_i):
645 with m.If(self.pc_i.ok):
646 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
647 comb += self.state_w_pc.i_data.eq(self.pc_i.data)
648 sync += self.pc_changed.eq(1)
649 with m.If(self.msr_i.ok):
650 comb += self.state_w_msr.wen.eq(1 << StateRegs.MSR)
651 comb += self.state_w_msr.i_data.eq(self.msr_i.data)
652 sync += self.msr_changed.eq(1)
653 with m.If(self.svstate_i.ok | self.update_svstate):
654 with m.If(self.svstate_i.ok): # over-ride from external source
655 comb += self.new_svstate.eq(self.svstate_i.data)
656 comb += self.state_w_sv.wen.eq(1 << StateRegs.SVSTATE)
657 comb += self.state_w_sv.i_data.eq(self.new_svstate)
658 sync += self.sv_changed.eq(1)
659
660 # start renaming some of the ports to match microwatt
661 if self.microwatt_compat:
662 self.core.o.core_terminate_o.name = "terminated_out"
663 # names of DMI interface
664 self.dbg.dmi.addr_i.name = 'dmi_addr'
665 self.dbg.dmi.din.name = 'dmi_din'
666 self.dbg.dmi.dout.name = 'dmi_dout'
667 self.dbg.dmi.req_i.name = 'dmi_req'
668 self.dbg.dmi.we_i.name = 'dmi_wr'
669 self.dbg.dmi.ack_o.name = 'dmi_ack'
670 # wishbone instruction bus
671 ibus = self.imem.ibus
672 ibus.adr.name = 'wishbone_insn_out.adr'
673 ibus.dat_w.name = 'wishbone_insn_out.dat'
674 ibus.sel.name = 'wishbone_insn_out.sel'
675 ibus.cyc.name = 'wishbone_insn_out.cyc'
676 ibus.stb.name = 'wishbone_insn_out.stb'
677 ibus.we.name = 'wishbone_insn_out.we'
678 ibus.dat_r.name = 'wishbone_insn_in.dat'
679 ibus.ack.name = 'wishbone_insn_in.ack'
680 ibus.stall.name = 'wishbone_insn_in.stall'
681 # wishbone data bus
682 dbus = self.core.l0.cmpi.wb_bus()
683 dbus.adr.name = 'wishbone_data_out.adr'
684 dbus.dat_w.name = 'wishbone_data_out.dat'
685 dbus.sel.name = 'wishbone_data_out.sel'
686 dbus.cyc.name = 'wishbone_data_out.cyc'
687 dbus.stb.name = 'wishbone_data_out.stb'
688 dbus.we.name = 'wishbone_data_out.we'
689 dbus.dat_r.name = 'wishbone_data_in.dat'
690 dbus.ack.name = 'wishbone_data_in.ack'
691 dbus.stall.name = 'wishbone_data_in.stall'
692
693 return m
694
695 def __iter__(self):
696 yield from self.pc_i.ports()
697 yield from self.msr_i.ports()
698 yield self.pc_o
699 yield self.memerr_o
700 yield from self.core.ports()
701 yield from self.imem.ports()
702 yield self.core_bigendian_i
703 yield self.busy_o
704
705 def ports(self):
706 return list(self)
707
708 def external_ports(self):
709 if self.microwatt_compat:
710 ports = [self.core.o.core_terminate_o,
711 self.ext_irq,
712 self.alt_reset, # not connected yet
713 self.nia, self.insn, self.nia_req, self.msr_o,
714 self.ldst_req, self.ldst_addr,
715 ClockSignal(),
716 ResetSignal(),
717 ]
718 ports += list(self.dbg.dmi.ports())
719 # for dbus/ibus microwatt, exclude err, bte and cti
720 for name, sig in self.imem.ibus.fields.items():
721 if name not in ['err', 'bte', 'cti', 'adr']:
722 ports.append(sig)
723 for name, sig in self.core.l0.cmpi.wb_bus().fields.items():
724 if name not in ['err', 'bte', 'cti', 'adr']:
725 ports.append(sig)
726 # microwatt non-compliant with wishbone
727 ports.append(self.ibus_adr)
728 ports.append(self.dbus_adr)
729 return ports
730
731 ports = self.pc_i.ports()
732 ports += self.msr_i.ports()
733 ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
734 ]
735
736 if self.jtag_en:
737 ports += list(self.jtag.external_ports())
738 else:
739 # JTAG not enabled: export the DMI interface directly instead
740 ports += list(self.dbg.dmi.ports())
741
742 ports += list(self.imem.ibus.fields.values())
743 ports += list(self.core.l0.cmpi.wb_bus().fields.values())
744
745 if self.sram4x4k:
746 for sram in self.sram4k:
747 ports += list(sram.bus.fields.values())
748
749 if self.xics:
750 ports += list(self.xics_icp.bus.fields.values())
751 ports += list(self.xics_ics.bus.fields.values())
752 ports.append(self.int_level_i)
753 else:
754 ports.append(self.ext_irq)
755
756 if self.gpio:
757 ports += list(self.simple_gpio.bus.fields.values())
758 ports.append(self.gpio_o)
759
760 return ports
761
762 def ports(self):
763 return list(self)
764
765
766 class TestIssuerInternal(TestIssuerBase):
767 """TestIssuer - reads instructions from TestMemory and issues them
768
769 efficiency and speed are not the main goal here: functional correctness
770 and code clarity are. optimisations (which almost 100% interfere with
771 easy understanding) come later.
772 """
773
774 def fetch_fsm(self, m, dbg, core, pc, msr, svstate, nia, is_svp64_mode,
775 fetch_pc_o_ready, fetch_pc_i_valid,
776 fetch_insn_o_valid, fetch_insn_i_ready):
777 """fetch FSM
778
779 this FSM performs fetch of raw instruction data, partial-decodes
780 it 32-bit at a time to detect SVP64 prefixes, and will optionally
781 read a 2nd 32-bit quantity if that occurs.
782 """
783 comb = m.d.comb
784 sync = m.d.sync
785 pdecode2 = self.pdecode2
786 cur_state = self.cur_state
787 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
788
789 # also note instruction fetch failed
790 if hasattr(core, "icache"):
791 fetch_failed = core.icache.i_out.fetch_failed
792 flush_needed = True
793 else:
794 fetch_failed = Const(0, 1)
795 flush_needed = False
796
797 # set priv / virt mode on I-Cache, sigh
798 if isinstance(self.imem, ICache):
799 comb += self.imem.i_in.priv_mode.eq(~msr[MSR.PR])
800 comb += self.imem.i_in.virt_mode.eq(msr[MSR.IR]) # Instr. Redir (VM)
801
802 with m.FSM(name='fetch_fsm'):
803
804 # waiting (zzz)
805 with m.State("IDLE"):
806 # fetch is allowed if fetch has not failed and the core is not stopped
807 # (see dmi.py for how core_stop_o is generated)
808 with m.If(~fetch_failed & ~dbg.core_stop_o):
809 comb += fetch_pc_o_ready.eq(1)
810 with m.If(fetch_pc_i_valid & ~pdecode2.instr_fault
811 & ~dbg.core_stop_o):
812 # instruction allowed to go: start by reading the PC
813 # capture the PC and also drop it into Insn Memory
814 # we have joined a pair of combinatorial memory
815 # lookups together. this is Generally Bad.
816 comb += self.imem.a_pc_i.eq(pc)
817 comb += self.imem.a_i_valid.eq(1)
818 comb += self.imem.f_i_valid.eq(1)
819 # transfer state to output
820 sync += cur_state.pc.eq(pc)
821 sync += cur_state.svstate.eq(svstate) # and svstate
822 sync += cur_state.msr.eq(msr) # and msr
823
824 m.next = "INSN_READ" # move to "wait for bus" phase
825
826 # dummy pause to find out why simulation is not keeping up
827 with m.State("INSN_READ"):
828 # when using "single-step" mode, checking dbg.stopping_o
829 # prevents progress. allow fetch to proceed once started
830 stopping = Const(0)
831 #if self.allow_overlap:
832 # stopping = dbg.stopping_o
833 with m.If(stopping):
834 # stopping: jump back to idle
835 m.next = "IDLE"
836 with m.Else():
837 with m.If(self.imem.f_busy_o &
838 ~pdecode2.instr_fault): # zzz...
839 # busy but not fetch failed: stay in wait-read
840 comb += self.imem.a_pc_i.eq(pc)
841 comb += self.imem.a_i_valid.eq(1)
842 comb += self.imem.f_i_valid.eq(1)
843 with m.Else():
844 # not busy (or fetch failed!): instruction fetched
845 # when fetch failed, the instruction gets ignored
846 # by the decoder
847 if hasattr(core, "icache"):
848 # blech, icache returns actual instruction
849 insn = self.imem.f_instr_o
850 else:
851 # but these return raw memory
852 insn = get_insn(self.imem.f_instr_o, cur_state.pc)
853 if self.svp64_en:
854 svp64 = self.svp64
855 # decode the SVP64 prefix, if any
856 comb += svp64.raw_opcode_in.eq(insn)
857 comb += svp64.bigendian.eq(self.core_bigendian_i)
858 # pass the decoded prefix (if any) to PowerDecoder2
859 sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
860 sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
861 # remember whether this is a prefixed instruction,
862 # so the FSM can readily loop when VL==0
863 sync += is_svp64_mode.eq(svp64.is_svp64_mode)
864 # calculate the address of the following instruction
865 insn_size = Mux(svp64.is_svp64_mode, 8, 4)
866 sync += nia.eq(cur_state.pc + insn_size)
867 with m.If(~svp64.is_svp64_mode):
868 # with no prefix, store the instruction
869 # and hand it directly to the next FSM
870 sync += dec_opcode_i.eq(insn)
871 m.next = "INSN_READY"
872 with m.Else():
873 # fetch the rest of the instruction from memory
874 comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
875 comb += self.imem.a_i_valid.eq(1)
876 comb += self.imem.f_i_valid.eq(1)
877 m.next = "INSN_READ2"
878 else:
879 # not SVP64 - 32-bit only
880 sync += nia.eq(cur_state.pc + 4)
881 sync += dec_opcode_i.eq(insn)
882 if self.microwatt_compat:
883 # for verilator debug purposes
884 comb += self.insn.eq(insn)
885 comb += self.nia.eq(cur_state.pc)
886 comb += self.msr_o.eq(cur_state.msr)
887 comb += self.nia_req.eq(1)
888 m.next = "INSN_READY"
889
890 with m.State("INSN_READ2"):
891 with m.If(self.imem.f_busy_o): # zzz...
892 # busy: stay in wait-read
893 comb += self.imem.a_i_valid.eq(1)
894 comb += self.imem.f_i_valid.eq(1)
895 with m.Else():
896 # not busy: instruction fetched
897 if hasattr(core, "icache"):
898 # blech, icache returns actual instruction
899 insn = self.imem.f_instr_o
900 else:
901 insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
902 sync += dec_opcode_i.eq(insn)
903 m.next = "INSN_READY"
904 # TODO: probably can start looking at pdecode2.rm_dec
905 # here or maybe even in INSN_READ state, if svp64_mode
906 # detected, in order to trigger - and wait for - the
907 # predicate reading.
908 if self.svp64_en:
909 pmode = pdecode2.rm_dec.predmode
910 """
911 if pmode != SVP64PredMode.ALWAYS.value:
912 fire predicate loading FSM and wait before
913 moving to INSN_READY
914 else:
915 sync += self.srcmask.eq(-1) # set to all 1s
916 sync += self.dstmask.eq(-1) # set to all 1s
917 m.next = "INSN_READY"
918 """
919
920 with m.State("INSN_READY"):
921 # hand over the instruction, to be decoded
922 comb += fetch_insn_o_valid.eq(1)
923 with m.If(fetch_insn_i_ready):
924 m.next = "IDLE"
925
926
927 def fetch_predicate_fsm(self, m,
928 pred_insn_i_valid, pred_insn_o_ready,
929 pred_mask_o_valid, pred_mask_i_ready):
930 """fetch_predicate_fsm - obtains (constructs in the case of CR)
931 src/dest predicate masks
932
933 https://bugs.libre-soc.org/show_bug.cgi?id=617
934 the predicates can be read here, by using IntRegs r_ports['pred']
935 or CRRegs r_ports['pred']. in the case of CRs it will have to
936 be done through multiple reads, extracting one relevant CR field at a time.
937 later, a faster way would be to use the 32-bit-wide CR port but
938 that needs more complex decoding here. equivalent code used in
939 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
940
941 note: this ENTIRE FSM is not to be called when svp64 is disabled
942 """
943 comb = m.d.comb
944 sync = m.d.sync
945 pdecode2 = self.pdecode2
946 rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
947 predmode = rm_dec.predmode
948 srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
949 cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
950 # get src/dst step, so we can skip already used mask bits
951 cur_state = self.cur_state
952 srcstep = cur_state.svstate.srcstep
953 dststep = cur_state.svstate.dststep
954 cur_vl = cur_state.svstate.vl
955
956 # decode predicates
957 sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
958 dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
959 sidx, scrinvert = get_predcr(m, srcpred, 's')
960 didx, dcrinvert = get_predcr(m, dstpred, 'd')
961
962 # store fetched masks, for either intpred or crpred
963 # when src/dst step is not zero, the skipped mask bits need to be
964 # shifted-out, before actually storing them in src/dest mask
965 new_srcmask = Signal(64, reset_less=True)
966 new_dstmask = Signal(64, reset_less=True)
967
968 with m.FSM(name="fetch_predicate"):
969
970 with m.State("FETCH_PRED_IDLE"):
971 comb += pred_insn_o_ready.eq(1)
972 with m.If(pred_insn_i_valid):
973 with m.If(predmode == SVP64PredMode.INT):
974 # skip fetching the destination mask register when no mask applies (all 1s)
975 with m.If(dall1s):
976 sync += new_dstmask.eq(-1)
977 # directly go to fetch source mask register
978 # guaranteed not to be zero (otherwise predmode
979 # would be SVP64PredMode.ALWAYS, not INT)
980 comb += int_pred.addr.eq(sregread)
981 comb += int_pred.ren.eq(1)
982 m.next = "INT_SRC_READ"
983 # fetch destination predicate register
984 with m.Else():
985 comb += int_pred.addr.eq(dregread)
986 comb += int_pred.ren.eq(1)
987 m.next = "INT_DST_READ"
988 with m.Elif(predmode == SVP64PredMode.CR):
989 # go fetch masks from the CR register file
990 sync += new_srcmask.eq(0)
991 sync += new_dstmask.eq(0)
992 m.next = "CR_READ"
993 with m.Else():
994 sync += self.srcmask.eq(-1)
995 sync += self.dstmask.eq(-1)
996 m.next = "FETCH_PRED_DONE"
997
998 with m.State("INT_DST_READ"):
999 # store destination mask
1000 inv = Repl(dinvert, 64)
1001 with m.If(dunary):
1002 # set selected mask bit for 1<<r3 mode
1003 dst_shift = Signal(range(64))
1004 comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
1005 sync += new_dstmask.eq(1 << dst_shift)
1006 with m.Else():
1007 # invert mask if requested
1008 sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
1009 # skip fetching the source mask register when no mask applies (all 1s)
1010 with m.If(sall1s):
1011 sync += new_srcmask.eq(-1)
1012 m.next = "FETCH_PRED_SHIFT_MASK"
1013 # fetch source predicate register
1014 with m.Else():
1015 comb += int_pred.addr.eq(sregread)
1016 comb += int_pred.ren.eq(1)
1017 m.next = "INT_SRC_READ"
1018
1019 with m.State("INT_SRC_READ"):
1020 # store source mask
1021 inv = Repl(sinvert, 64)
1022 with m.If(sunary):
1023 # set selected mask bit for 1<<r3 mode
1024 src_shift = Signal(range(64))
1025 comb += src_shift.eq(self.int_pred.o_data & 0b111111)
1026 sync += new_srcmask.eq(1 << src_shift)
1027 with m.Else():
1028 # invert mask if requested
1029 sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
1030 m.next = "FETCH_PRED_SHIFT_MASK"
1031
1032 # fetch masks from the CR register file
1033 # implements the following loop:
1034 # idx, inv = get_predcr(mask)
1035 # mask = 0
1036 # for cr_idx in range(vl):
1037 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
1038 # if cr[idx] ^ inv:
1039 # mask |= 1 << cr_idx
1040 # return mask
1041 with m.State("CR_READ"):
1042 # CR index to be read, which will be ready by the next cycle
1043 cr_idx = Signal.like(cur_vl, reset_less=True)
1044 # submit the read operation to the regfile
1045 with m.If(cr_idx != cur_vl):
1046 # the CR read port is unary ...
1047 # ren = 1 << cr_idx
1048 # ... in MSB0 convention ...
1049 # ren = 1 << (7 - cr_idx)
1050 # ... and with an offset:
1051 # ren = 1 << (7 - off - cr_idx)
1052 idx = SVP64CROffs.CRPred + cr_idx
1053 comb += cr_pred.ren.eq(1 << (7 - idx))
1054 # signal data valid in the next cycle
1055 cr_read = Signal(reset_less=True)
1056 sync += cr_read.eq(1)
1057 # load the next index
1058 sync += cr_idx.eq(cr_idx + 1)
1059 with m.Else():
1060 # exit on loop end
1061 sync += cr_read.eq(0)
1062 sync += cr_idx.eq(0)
1063 m.next = "FETCH_PRED_SHIFT_MASK"
1064 with m.If(cr_read):
1065 # compensate for the one cycle delay on the regfile
1066 cur_cr_idx = Signal.like(cur_vl)
1067 comb += cur_cr_idx.eq(cr_idx - 1)
1068 # read the CR field, select the appropriate bit
1069 cr_field = Signal(4)
1070 scr_bit = Signal()
1071 dcr_bit = Signal()
1072 comb += cr_field.eq(cr_pred.o_data)
1073 comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
1074 ^ scrinvert)
1075 comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
1076 ^ dcrinvert)
1077 # set the corresponding mask bit
1078 bit_to_set = Signal.like(self.srcmask)
1079 comb += bit_to_set.eq(1 << cur_cr_idx)
1080 with m.If(scr_bit):
1081 sync += new_srcmask.eq(new_srcmask | bit_to_set)
1082 with m.If(dcr_bit):
1083 sync += new_dstmask.eq(new_dstmask | bit_to_set)
1084
1085 with m.State("FETCH_PRED_SHIFT_MASK"):
1086 # shift-out skipped mask bits
1087 sync += self.srcmask.eq(new_srcmask >> srcstep)
1088 sync += self.dstmask.eq(new_dstmask >> dststep)
1089 m.next = "FETCH_PRED_DONE"
1090
1091 with m.State("FETCH_PRED_DONE"):
1092 comb += pred_mask_o_valid.eq(1)
1093 with m.If(pred_mask_i_ready):
1094 m.next = "FETCH_PRED_IDLE"
1095
1096 def issue_fsm(self, m, core, nia,
1097 dbg, core_rst, is_svp64_mode,
1098 fetch_pc_o_ready, fetch_pc_i_valid,
1099 fetch_insn_o_valid, fetch_insn_i_ready,
1100 pred_insn_i_valid, pred_insn_o_ready,
1101 pred_mask_o_valid, pred_mask_i_ready,
1102 exec_insn_i_valid, exec_insn_o_ready,
1103 exec_pc_o_valid, exec_pc_i_ready):
1104 """issue FSM
1105
1106 decode / issue FSM. this interacts with the "fetch" FSM
1107 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
1108 (outgoing). also interacts with the "execute" FSM
1109 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
1110 (incoming).
1111 SVP64 RM prefixes have already been set up by the
1112 "fetch" phase, so execute is fairly straightforward.
1113 """
1114
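# handshake sketch (informal, matching the signal naming used here): each
# pair follows the usual ready/valid convention, e.g. for fetch:
#     comb += fetch_pc_i_valid.eq(1)      # issue requests a fetch
#     with m.If(fetch_pc_o_ready):        # fetch accepted the PC
#         m.next = "INSN_WAIT"
# the same pattern repeats for pred_insn/pred_mask and exec_insn/exec_pc.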
1115 comb = m.d.comb
1116 sync = m.d.sync
1117 pdecode2 = self.pdecode2
1118 cur_state = self.cur_state
1119 new_svstate = self.new_svstate
1120
1121 # temporaries
1122 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
1123
1124 # for updating svstate (things like srcstep etc.)
1125 comb += new_svstate.eq(cur_state.svstate)
1126
1127 # precalculate srcstep+1 and dststep+1
1128 cur_srcstep = cur_state.svstate.srcstep
1129 cur_dststep = cur_state.svstate.dststep
1130 next_srcstep = Signal.like(cur_srcstep)
1131 next_dststep = Signal.like(cur_dststep)
1132 comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
1133 comb += next_dststep.eq(cur_state.svstate.dststep+1)
1134
1135 # note if an exception happened. in a pipelined or OoO design
1136 # this needs to be accompanied by "shadowing" (or stalling)
1137 exc_happened = self.core.o.exc_happened
1138 # also note instruction fetch failed
1139 if hasattr(core, "icache"):
1140 fetch_failed = core.icache.i_out.fetch_failed
1141 flush_needed = True
1142 # set to fault in decoder
1143 # update (highest priority) instruction fault
1144 rising_fetch_failed = rising_edge(m, fetch_failed)
1145 with m.If(rising_fetch_failed):
1146 sync += pdecode2.instr_fault.eq(1)
1147 else:
1148 fetch_failed = Const(0, 1)
1149 flush_needed = False
1150
1151 with m.FSM(name="issue_fsm"):
1152
1153 # sync with the "fetch" phase which is reading the instruction
1154 # at this point, there is no instruction running, that
1155 # could inadvertently update the PC.
1156 with m.State("ISSUE_START"):
1157 # reset instruction fault
1158 sync += pdecode2.instr_fault.eq(0)
1159 # wait on "core stop" release, before next fetch
1160 # need to do this here, in case we are in a VL==0 loop
1161 with m.If(~dbg.core_stop_o & ~core_rst):
1162 comb += fetch_pc_i_valid.eq(1) # tell fetch to start
1163 with m.If(fetch_pc_o_ready): # fetch acknowledged us
1164 m.next = "INSN_WAIT"
1165 with m.Else():
1166 # tell core it's stopped, and acknowledge debug handshake
1167 comb += dbg.core_stopped_i.eq(1)
1168 # while stopped, allow updating SVSTATE
1169 with m.If(self.svstate_i.ok):
1170 comb += new_svstate.eq(self.svstate_i.data)
1171 comb += self.update_svstate.eq(1)
1172 sync += self.sv_changed.eq(1)
1173
1174 # wait for an instruction to arrive from Fetch
1175 with m.State("INSN_WAIT"):
1176 # when using "single-step" mode, checking dbg.stopping_o
1177 # prevents progress. allow issue to proceed once started
1178 stopping = Const(0)
1179 #if self.allow_overlap:
1180 # stopping = dbg.stopping_o
1181 with m.If(stopping):
1182 # stopping: jump back to idle
1183 m.next = "ISSUE_START"
1184 if flush_needed:
1185 # request the icache to stop asserting "failed"
1186 comb += core.icache.flush_in.eq(1)
1187 # stop instruction fault
1188 sync += pdecode2.instr_fault.eq(0)
1189 with m.Else():
1190 comb += fetch_insn_i_ready.eq(1)
1191 with m.If(fetch_insn_o_valid):
1192 # loop into ISSUE_START if it's a SVP64 instruction
1193 # and VL == 0. this is because VL==0 is a for-loop
1194 # from 0 to 0, i.e. zero iterations: always a NOP.
1195 cur_vl = cur_state.svstate.vl
1196 with m.If(is_svp64_mode & (cur_vl == 0)):
1197 # update the PC before fetching the next instruction
1198 # since we are in a VL==0 loop, no instruction was
1199 # executed that we could be overwriting
1200 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1201 comb += self.state_w_pc.i_data.eq(nia)
1202 comb += self.insn_done.eq(1)
1203 m.next = "ISSUE_START"
1204 with m.Else():
1205 if self.svp64_en:
1206 m.next = "PRED_START" # fetching predicate
1207 else:
1208 m.next = "DECODE_SV" # skip predication
1209
1210 with m.State("PRED_START"):
1211 comb += pred_insn_i_valid.eq(1) # tell fetch_pred to start
1212 with m.If(pred_insn_o_ready): # fetch_pred acknowledged us
1213 m.next = "MASK_WAIT"
1214
1215 with m.State("MASK_WAIT"):
1216 comb += pred_mask_i_ready.eq(1) # ready to receive the masks
1217 with m.If(pred_mask_o_valid): # predication masks are ready
1218 m.next = "PRED_SKIP"
1219
1220 # skip zeros in predicate
1221 with m.State("PRED_SKIP"):
1222 with m.If(~is_svp64_mode):
1223 m.next = "DECODE_SV" # nothing to do
1224 with m.Else():
1225 if self.svp64_en:
1226 pred_src_zero = pdecode2.rm_dec.pred_sz
1227 pred_dst_zero = pdecode2.rm_dec.pred_dz
1228
1229 # new srcstep, after skipping zeros
1230 skip_srcstep = Signal.like(cur_srcstep)
1231 # value to be added to the current srcstep
1232 src_delta = Signal.like(cur_srcstep)
1233 # add the number of leading zeros to srcstep, if not in zero mode
1234 with m.If(~pred_src_zero):
1235 # priority encoder (count leading zeros)
1236 # append guard bit, in case the mask is all zeros
1237 pri_enc_src = PriorityEncoder(65)
1238 m.submodules.pri_enc_src = pri_enc_src
1239 comb += pri_enc_src.i.eq(Cat(self.srcmask,
1240 Const(1, 1)))
1241 comb += src_delta.eq(pri_enc_src.o)
1242 # apply delta to srcstep
1243 comb += skip_srcstep.eq(cur_srcstep + src_delta)
1244 # shift-out all leading zeros from the mask
1245 # plus the leading "one" bit
1246 # TODO count leading zeros and shift-out the zero
1247 # bits, in the same step, in hardware
1248 sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
1249
1250 # same as above, but for dststep
1251 skip_dststep = Signal.like(cur_dststep)
1252 dst_delta = Signal.like(cur_dststep)
1253 with m.If(~pred_dst_zero):
1254 pri_enc_dst = PriorityEncoder(65)
1255 m.submodules.pri_enc_dst = pri_enc_dst
1256 comb += pri_enc_dst.i.eq(Cat(self.dstmask,
1257 Const(1, 1)))
1258 comb += dst_delta.eq(pri_enc_dst.o)
1259 comb += skip_dststep.eq(cur_dststep + dst_delta)
1260 sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
1261
1262 # TODO: initialize mask[VL]=1 to avoid passing past VL
1263 with m.If((skip_srcstep >= cur_vl) |
1264 (skip_dststep >= cur_vl)):
1265 # end of VL loop. Update PC and reset src/dst step
1266 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1267 comb += self.state_w_pc.i_data.eq(nia)
1268 comb += new_svstate.srcstep.eq(0)
1269 comb += new_svstate.dststep.eq(0)
1270 comb += self.update_svstate.eq(1)
1271 # synchronize with the simulator
1272 comb += self.insn_done.eq(1)
1273 # go back to Issue
1274 m.next = "ISSUE_START"
1275 with m.Else():
1276 # update new src/dst step
1277 comb += new_svstate.srcstep.eq(skip_srcstep)
1278 comb += new_svstate.dststep.eq(skip_dststep)
1279 comb += self.update_svstate.eq(1)
1280 # proceed to Decode
1281 m.next = "DECODE_SV"
1282
1283 # pass predicate mask bits through to satellite decoders
1284 # TODO: for SIMD this will be *multiple* bits
1285 sync += core.i.sv_pred_sm.eq(self.srcmask[0])
1286 sync += core.i.sv_pred_dm.eq(self.dstmask[0])
1287
1288 # after src/dst step have been updated, we are ready
1289 # to decode the instruction
1290 with m.State("DECODE_SV"):
1291 # decode the instruction
1292 with m.If(~fetch_failed):
1293 sync += pdecode2.instr_fault.eq(0)
1294 sync += core.i.e.eq(pdecode2.e)
1295 sync += core.i.state.eq(cur_state)
1296 sync += core.i.raw_insn_i.eq(dec_opcode_i)
1297 sync += core.i.bigendian_i.eq(self.core_bigendian_i)
1298 if self.svp64_en:
1299 sync += core.i.sv_rm.eq(pdecode2.sv_rm)
1300 # set RA_OR_ZERO detection in satellite decoders
1301 sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
1302 # and svp64 detection
1303 sync += core.i.is_svp64_mode.eq(is_svp64_mode)
1304 # and svp64 bit-rev'd ldst mode
1305 ldst_dec = pdecode2.use_svp64_ldst_dec
1306 sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
1307 # after decoding, reset any previous exception condition,
1308 # allowing it to be set again during the next execution
1309 sync += pdecode2.ldst_exc.eq(0)
1310
1311 m.next = "INSN_EXECUTE" # move to "execute"
1312
1313 # handshake with execution FSM, move to "wait" once acknowledged
1314 with m.State("INSN_EXECUTE"):
1315 # when using "single-step" mode, checking dbg.stopping_o
1316 # prevents progress. allow execute to proceed once started
1317 stopping = Const(0)
1318 #if self.allow_overlap:
1319 # stopping = dbg.stopping_o
1320 with m.If(stopping):
1321 # stopping: jump back to idle
1322 m.next = "ISSUE_START"
1323 if flush_needed:
1324 # request the icache to stop asserting "failed"
1325 comb += core.icache.flush_in.eq(1)
1326 # stop instruction fault
1327 sync += pdecode2.instr_fault.eq(0)
1328 with m.Else():
1329 comb += exec_insn_i_valid.eq(1) # trigger execute
1330 with m.If(exec_insn_o_ready): # execute acknowledged us
1331 m.next = "EXECUTE_WAIT"
1332
1333 with m.State("EXECUTE_WAIT"):
1334 comb += exec_pc_i_ready.eq(1)
1335 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
1336 # the exception info needs to be blatted into
1337 # pdecode.ldst_exc, and the instruction "re-run".
1338 # when ldst_exc.happened is set, the PowerDecoder2
1339 # reacts very differently: it re-writes the instruction
1340 # with a "trap" (calls PowerDecoder2.trap()) which
1341 # will *overwrite* whatever was requested and jump the
1342 # PC to the exception address, as well as alter MSR.
1343 # nothing else needs to be done other than to note
1344 # the change of PC and MSR (and, later, SVSTATE)
1345 with m.If(exc_happened):
1346 mmu = core.fus.get_exc("mmu0")
1347 ldst = core.fus.get_exc("ldst0")
1348 if mmu is not None:
1349 with m.If(fetch_failed):
1350 # instruction fetch: exception is from MMU
1351 # reset instr_fault (highest priority)
1352 sync += pdecode2.ldst_exc.eq(mmu)
1353 sync += pdecode2.instr_fault.eq(0)
1354 if flush_needed:
1355 # request icache to stop asserting "failed"
1356 comb += core.icache.flush_in.eq(1)
1357 with m.If(~fetch_failed):
1358 # otherwise assume it was a LDST exception
1359 sync += pdecode2.ldst_exc.eq(ldst)
1360
1361 with m.If(exec_pc_o_valid):
1362
1363 # was this the last loop iteration?
1364 is_last = Signal()
1365 cur_vl = cur_state.svstate.vl
1366 comb += is_last.eq(next_srcstep == cur_vl)
1367
1368 with m.If(pdecode2.instr_fault):
1369 # reset instruction fault, try again
1370 sync += pdecode2.instr_fault.eq(0)
1371 m.next = "ISSUE_START"
1372
1373 # return directly to Decode if Execute generated an
1374 # exception.
1375 with m.Elif(pdecode2.ldst_exc.happened):
1376 m.next = "DECODE_SV"
1377
1378 # if MSR, PC or SVSTATE were changed by the previous
1379 # instruction, go directly back to Fetch, without
1380 # updating either MSR PC or SVSTATE
1381 with m.Elif(self.msr_changed | self.pc_changed |
1382 self.sv_changed):
1383 m.next = "ISSUE_START"
1384
1385 # also return to Fetch, when no output was a vector
1386 # (regardless of SRCSTEP and VL), or when the last
1387 # instruction was really the last one of the VL loop
1388 with m.Elif((~pdecode2.loop_continue) | is_last):
1389 # before going back to fetch, update the PC state
1390 # register with the NIA.
1391 # ok here we are not reading the branch unit.
1392 # TODO: this just blithely overwrites whatever
1393 # pipeline updated the PC
1394 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1395 comb += self.state_w_pc.i_data.eq(nia)
1396 # reset SRCSTEP before returning to Fetch
1397 if self.svp64_en:
1398 with m.If(pdecode2.loop_continue):
1399 comb += new_svstate.srcstep.eq(0)
1400 comb += new_svstate.dststep.eq(0)
1401 comb += self.update_svstate.eq(1)
1402 else:
1403 comb += new_svstate.srcstep.eq(0)
1404 comb += new_svstate.dststep.eq(0)
1405 comb += self.update_svstate.eq(1)
1406 m.next = "ISSUE_START"
1407
1408 # returning to Execute? then, first update SRCSTEP
1409 with m.Else():
1410 comb += new_svstate.srcstep.eq(next_srcstep)
1411 comb += new_svstate.dststep.eq(next_dststep)
1412 comb += self.update_svstate.eq(1)
1413 # return to mask skip loop
1414 m.next = "PRED_SKIP"
1415
1416
1417 # check if svstate needs updating: if so, write it to State Regfile
1418 with m.If(self.update_svstate):
1419 sync += cur_state.svstate.eq(self.new_svstate) # for next clock
1420
1421 def execute_fsm(self, m, core,
1422 exec_insn_i_valid, exec_insn_o_ready,
1423 exec_pc_o_valid, exec_pc_i_ready):
1424 """execute FSM
1425
1426 execute FSM. this interacts with the "issue" FSM
1427 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
1428 (outgoing). SVP64 RM prefixes have already been set up by the
1429 "issue" phase, so execute is fairly straightforward.
1430 """
1431
1432 comb = m.d.comb
1433 sync = m.d.sync
1434 dbg = self.dbg
1435 pdecode2 = self.pdecode2
1436
1437 # temporaries
1438 core_busy_o = core.n.o_data.busy_o # core is busy
1439 core_ivalid_i = core.p.i_valid # instruction is valid
1440
1441 if hasattr(core, "icache"):
1442 fetch_failed = core.icache.i_out.fetch_failed
1443 else:
1444 fetch_failed = Const(0, 1)
1445
1446 with m.FSM(name="exec_fsm"):
1447
1448 # waiting for instruction bus (stays there until not busy)
1449 with m.State("INSN_START"):
1450 comb += exec_insn_o_ready.eq(1)
1451 with m.If(exec_insn_i_valid):
1452 comb += core_ivalid_i.eq(1) # instruction is valid/issued
1453 sync += self.sv_changed.eq(0)
1454 sync += self.pc_changed.eq(0)
1455 sync += self.msr_changed.eq(0)
1456 with m.If(core.p.o_ready): # only move if accepted
1457 m.next = "INSN_ACTIVE" # move to "wait completion"
1458
1459 # instruction started: must wait till it finishes
1460 with m.State("INSN_ACTIVE"):
1461 # note changes to MSR, PC and SVSTATE
1462 # XXX oops, really must monitor *all* State Regfile write
1463 # ports looking for changes!
1464 with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
1465 sync += self.sv_changed.eq(1)
1466 with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
1467 sync += self.msr_changed.eq(1)
1468 with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
1469 sync += self.pc_changed.eq(1)
1470 with m.If(~core_busy_o): # instruction done!
1471 comb += exec_pc_o_valid.eq(1)
1472 with m.If(exec_pc_i_ready):
1473 # when finished, indicate "done".
1474 # however, if there was an exception, the instruction
1475 # is *not* yet done. this is an implementation
1476 # detail: we choose to implement exceptions by
1477 # taking the exception information from the LDST
1478 # unit, putting that *back* into the PowerDecoder2,
1479 # and *re-running the entire instruction*.
1480 # if we erroneously indicate "done" here, it is as if
1481 # there were *TWO* instructions:
1482 # 1) the failed LDST 2) a TRAP.
1483 with m.If(~pdecode2.ldst_exc.happened &
1484 ~pdecode2.instr_fault):
1485 comb += self.insn_done.eq(1)
1486 m.next = "INSN_START" # back to fetch
1487 # terminate returns directly to INSN_START
1488 with m.If(dbg.terminate_i):
1489 # comb += self.insn_done.eq(1) - no, because the instruction is not done
1490 m.next = "INSN_START" # back to fetch
1491
1492 def elaborate(self, platform):
1493 m = super().elaborate(platform)
1494 # convenience
1495 comb, sync = m.d.comb, m.d.sync
1496 cur_state = self.cur_state
1497 pdecode2 = self.pdecode2
1498 dbg = self.dbg
1499 core = self.core
1500
1501 # set up peripherals and core
1502 core_rst = self.core_rst
1503
1504 # indicate to outside world if any FU is still executing
1505 comb += self.any_busy.eq(core.n.o_data.any_busy_o) # any FU executing
1506
1507 # address of the next instruction, in the absence of a branch
1508 # depends on the instruction size
1509 nia = Signal(64)
1510
1511 # connect up debug signals
1512 with m.If(core.o.core_terminate_o):
1513 comb += dbg.terminate_i.eq(1)
1514
1515 # pass the prefix mode from Fetch to Issue, so the latter can loop
1516 # on VL==0
1517 is_svp64_mode = Signal()
1518
1519 # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
1520 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1521 # these are the handshake signals between each
1522
1523 # fetch FSM can run as soon as the PC is valid
1524 fetch_pc_i_valid = Signal() # Execute tells Fetch "start next read"
1525 fetch_pc_o_ready = Signal() # Fetch Tells SVSTATE "proceed"
1526
1527 # fetch FSM hands over the instruction to be decoded / issued
1528 fetch_insn_o_valid = Signal()
1529 fetch_insn_i_ready = Signal()
1530
1531 # predicate fetch FSM decodes and fetches the predicate
1532 pred_insn_i_valid = Signal()
1533 pred_insn_o_ready = Signal()
1534
1535 # predicate fetch FSM delivers the masks
1536 pred_mask_o_valid = Signal()
1537 pred_mask_i_ready = Signal()
1538
1539 # issue FSM delivers the instruction to be executed
1540 exec_insn_i_valid = Signal()
1541 exec_insn_o_ready = Signal()
1542
1543 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1544 exec_pc_o_valid = Signal()
1545 exec_pc_i_ready = Signal()
1546
1547 # the FSMs here are perhaps unusual in that they detect conditions
1548 # then "hold" information, combinatorially, for the core
1549 # (as opposed to using sync - which would be on a clock's delay)
1550 # this includes the actual opcode, valid flags and so on.
1551
1552 # Fetch, then predicate fetch, then Issue, then Execute.
1553 # Issue is where the VL for-loop lives. the ready/valid
1554 # signalling is used to communicate between the four.
1555
1556 self.fetch_fsm(m, dbg, core, dbg.state.pc, dbg.state.msr,
1557 dbg.state.svstate, nia, is_svp64_mode,
1558 fetch_pc_o_ready, fetch_pc_i_valid,
1559 fetch_insn_o_valid, fetch_insn_i_ready)
1560
1561 self.issue_fsm(m, core, nia,
1562 dbg, core_rst, is_svp64_mode,
1563 fetch_pc_o_ready, fetch_pc_i_valid,
1564 fetch_insn_o_valid, fetch_insn_i_ready,
1565 pred_insn_i_valid, pred_insn_o_ready,
1566 pred_mask_o_valid, pred_mask_i_ready,
1567 exec_insn_i_valid, exec_insn_o_ready,
1568 exec_pc_o_valid, exec_pc_i_ready)
1569
1570 if self.svp64_en:
1571 self.fetch_predicate_fsm(m,
1572 pred_insn_i_valid, pred_insn_o_ready,
1573 pred_mask_o_valid, pred_mask_i_ready)
1574
1575 self.execute_fsm(m, core,
1576 exec_insn_i_valid, exec_insn_o_ready,
1577 exec_pc_o_valid, exec_pc_i_ready)
1578
1579 # whatever was done above, over-ride it if core reset is held
1580 with m.If(core_rst):
1581 sync += nia.eq(0)
1582
1583 return m
1584
1585
1586 class TestIssuer(Elaboratable):
1587 def __init__(self, pspec):
1588 self.ti = TestIssuerInternal(pspec)
1589 self.pll = DummyPLL(instance=True)
1590
1591 self.dbg_rst_i = Signal(reset_less=True)
1592
1593 # PLL direct clock or not
1594 self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
1595 if self.pll_en:
1596 self.pll_test_o = Signal(reset_less=True)
1597 self.pll_vco_o = Signal(reset_less=True)
1598 self.clk_sel_i = Signal(2, reset_less=True)
1599 self.ref_clk = ClockSignal() # can't rename it but that's ok
1600 self.pllclk_clk = ClockSignal("pllclk")
1601
1602 def elaborate(self, platform):
1603 m = Module()
1604 comb = m.d.comb
1605
1606 # TestIssuer nominally runs at main clock, actually it is
1607 # all combinatorial internally except for coresync'd components
1608 m.submodules.ti = ti = self.ti
1609
1610 if self.pll_en:
1611 # ClockSelect runs at PLL output internal clock rate
1612 m.submodules.wrappll = pll = self.pll
1613
1614 # add clock domains from PLL
1615 cd_pll = ClockDomain("pllclk")
1616 m.domains += cd_pll
1617
1618 # PLL clock established. has the side-effect of running clksel
1619 # at the PLL's speed (see DomainRenamer("pllclk") above)
1620 pllclk = self.pllclk_clk
1621 comb += pllclk.eq(pll.clk_pll_o)
1622
1623 # wire up external 24mhz to PLL
1624 #comb += pll.clk_24_i.eq(self.ref_clk)
1625 # output 18 mhz PLL test signal, and analog oscillator out
1626 comb += self.pll_test_o.eq(pll.pll_test_o)
1627 comb += self.pll_vco_o.eq(pll.pll_vco_o)
1628
1629 # input to pll clock selection
1630 comb += pll.clk_sel_i.eq(self.clk_sel_i)
1631
1632 # now wire up ResetSignals. don't mind them being in this domain
1633 pll_rst = ResetSignal("pllclk")
1634 comb += pll_rst.eq(ResetSignal())
1635
1636 # internal clock is set to selector clock-out. has the side-effect of
1637 # running TestIssuer at this speed (see DomainRenamer("intclk") above)
1638 # debug clock runs at coresync internal clock
1639 if self.ti.dbg_domain != 'sync':
1640 cd_dbgsync = ClockDomain("dbgsync")
1641 intclk = ClockSignal(self.ti.core_domain)
1642 dbgclk = ClockSignal(self.ti.dbg_domain)
1643 # XXX BYPASS PLL XXX
1644 # XXX BYPASS PLL XXX
1645 # XXX BYPASS PLL XXX
1646 if self.pll_en:
1647 comb += intclk.eq(self.ref_clk)
1648 assert self.ti.core_domain != 'sync', \
1649 "cannot set core_domain to sync and use pll at the same time"
1650 else:
1651 if self.ti.core_domain != 'sync':
1652 comb += intclk.eq(ClockSignal())
1653 if self.ti.dbg_domain != 'sync':
1654 dbgclk = ClockSignal(self.ti.dbg_domain)
1655 comb += dbgclk.eq(intclk)
1656 comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)
1657
1658 return m
1659
1660 def ports(self):
1661 return list(self.ti.ports()) + list(self.pll.ports()) + \
1662 [ClockSignal(), ResetSignal()]
1663
1664 def external_ports(self):
1665 ports = self.ti.external_ports()
1666 ports.append(ClockSignal())
1667 ports.append(ResetSignal())
1668 if self.pll_en:
1669 ports.append(self.clk_sel_i)
1670 ports.append(self.pll.clk_24_i)
1671 ports.append(self.pll_test_o)
1672 ports.append(self.pll_vco_o)
1673 ports.append(self.pllclk_clk)
1674 ports.append(self.ref_clk)
1675 return ports
1676
1677
1678 if __name__ == '__main__':
1679 units = {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1680 'spr': 1,
1681 'div': 1,
1682 'mul': 1,
1683 'shiftrot': 1
1684 }
1685 pspec = TestMemPspec(ldst_ifacetype='bare_wb',
1686 imem_ifacetype='bare_wb',
1687 addr_wid=64,
1688 mask_wid=8,
1689 reg_wid=64,
1690 units=units)
1691 dut = TestIssuer(pspec)
1692 vl = main(dut, ports=dut.ports(), name="test_issuer")
1693
1694 if len(sys.argv) == 1:
1695 vl = rtlil.convert(dut, ports=dut.external_ports(), name="test_issuer")
1696 with open("test_issuer.il", "w") as f:
1697 f.write(vl)