3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Select the 32-bit instruction from the fetch-unit data output.

    Parameters:
    * f_instr_o - fetch-unit output value; 32 or 64 bits wide depending
                  on the memory configuration (only .width and
                  .word_select are used here)
    * pc        - program counter; only bit 2 is used

    Returns the 32-bit instruction: the entire output when it is already
    32-bit wide, otherwise bit 2 of the PC selects the lower or upper
    32-bit word of the 64-bit fetch data.
    """
    if f_instr_o.width == 32:
        # fetch data is already exactly one instruction wide
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """Drive *res* from an incoming override or from the State regfile.

    * m        - nmigen Module under construction
    * res      - Signal to receive the state value (PC/MSR/SVSTATE)
    * core_rst - core reset signal; nothing is driven while it is active
    * state_i  - Data record: when state_i.ok is set, state_i.data
                 overrides the regfile value combinatorially
    * name     - base name for the internally-created delay signal
    * regfile  - StateRegs read port to use
    * regnum   - register number within the state regfile

    The regfile read takes one clock: res_ok_delay records (one cycle
    late) that no override was requested, and only then is the regfile
    output forwarded on to *res*.
    """
    comb = m.d.comb
    sync = m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
        # ... but on a 1-clock delay
        with m.If(res_ok_delay):
            comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert
    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)     # read r3, then bit-invert
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)     # read r10, then bit-invert
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)     # read r30, then bit-invert
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status
    this is identical to _get_predcr in ISACaller

    * idx    - which CR bit (LT/GT/EQ/SO) is to be tested per element
    * invert - whether the bit is tested for being set (1) or clear (0):
      positive conditions (lt/gt/eq/so) test for 1, their negations
      (ge/le/ne/ns) share the same CR bit but test for 0
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test if microwatt compatibility is to be enabled
169 self
.microwatt_compat
= (hasattr(pspec
, "microwatt_compat") and
170 (pspec
.microwatt_compat
== True))
171 self
.alt_reset
= Signal(reset_less
=True) # not connected yet (microwatt)
173 # test is SVP64 is to be enabled
174 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
176 # and if regfiles are reduced
177 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
178 (pspec
.regreduce
== True))
180 # and if overlap requested
181 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
182 (pspec
.allow_overlap
== True))
184 # and get the core domain
185 self
.core_domain
= "coresync"
186 if (hasattr(pspec
, "core_domain") and
187 isinstance(pspec
.core_domain
, str)):
188 self
.core_domain
= pspec
.core_domain
190 # JTAG interface. add this right at the start because if it's
191 # added it *modifies* the pspec, by adding enable/disable signals
192 # for parts of the rest of the core
193 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
194 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
195 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
197 # XXX MUST keep this up-to-date with litex, and
198 # soc-cocotb-sim, and err.. all needs sorting out, argh
201 'eint', 'gpio', 'mspi0',
202 # 'mspi1', - disabled for now
203 # 'pwm', 'sd0', - disabled for now
205 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
206 domain
=self
.dbg_domain
)
207 # add signals to pspec to enable/disable icache and dcache
208 # (or data and intstruction wishbone if icache/dcache not included)
209 # https://bugs.libre-soc.org/show_bug.cgi?id=520
210 # TODO: do we actually care if these are not domain-synchronised?
211 # honestly probably not.
212 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
213 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
214 self
.wb_sram_en
= self
.jtag
.wb_sram_en
216 self
.wb_sram_en
= Const(1)
218 # add 4k sram blocks?
219 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
220 pspec
.sram4x4kblock
== True)
224 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
228 # add interrupt controller?
229 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
231 self
.xics_icp
= XICS_ICP()
232 self
.xics_ics
= XICS_ICS()
233 self
.int_level_i
= self
.xics_ics
.int_level_i
235 self
.ext_irq
= Signal()
237 # add GPIO peripheral?
238 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
240 self
.simple_gpio
= SimpleGPIO()
241 self
.gpio_o
= self
.simple_gpio
.gpio_o
243 # main instruction core. suitable for prototyping / demo only
244 self
.core
= core
= NonProductionCore(pspec
)
245 self
.core_rst
= ResetSignal(self
.core_domain
)
247 # instruction decoder. goes into Trap Record
248 #pdecode = create_pdecode()
249 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
250 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
251 opkls
=IssuerDecode2ToOperand
,
252 svp64_en
=self
.svp64_en
,
253 regreduce_en
=self
.regreduce_en
)
254 pdecode
= self
.pdecode2
.dec
257 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
259 self
.update_svstate
= Signal() # set this if updating svstate
260 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
262 # Test Instruction memory
263 if hasattr(core
, "icache"):
264 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
265 # truly dreadful. needs a huge reorg.
266 pspec
.icache
= core
.icache
267 self
.imem
= ConfigFetchUnit(pspec
).fu
270 self
.dbg
= CoreDebug()
271 self
.dbg_rst_i
= Signal(reset_less
=True)
273 # instruction go/monitor
274 self
.pc_o
= Signal(64, reset_less
=True)
275 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
276 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
277 self
.svstate_i
= Data(64, "svstate_i") # ditto
278 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
279 self
.busy_o
= Signal(reset_less
=True)
280 self
.memerr_o
= Signal(reset_less
=True)
282 # STATE regfile read /write ports for PC, MSR, SVSTATE
283 staterf
= self
.core
.regs
.rf
['state']
284 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
285 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
286 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
288 self
.state_w_msr
= staterf
.w_ports
['msr'] # MSR wr
289 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
290 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
292 # DMI interface access
293 intrf
= self
.core
.regs
.rf
['int']
294 crrf
= self
.core
.regs
.rf
['cr']
295 xerrf
= self
.core
.regs
.rf
['xer']
296 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
297 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
298 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
302 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
303 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
305 # hack method of keeping an eye on whether branch/trap set the PC
306 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
307 self
.state_nia
.wen
.name
= 'state_nia_wen'
309 # pulse to synchronize the simulator at instruction end
310 self
.insn_done
= Signal()
312 # indicate any instruction still outstanding, in execution
313 self
.any_busy
= Signal()
316 # store copies of predicate masks
317 self
.srcmask
= Signal(64)
318 self
.dstmask
= Signal(64)
320 # sigh, the wishbone addresses are not wishbone-compliant in microwatt
321 if self
.microwatt_compat
:
322 self
.ibus_adr
= Signal(32, name
='wishbone_insn_out.adr')
323 self
.dbus_adr
= Signal(32, name
='wishbone_data_out.adr')
325 def setup_peripherals(self
, m
):
326 comb
, sync
= m
.d
.comb
, m
.d
.sync
328 # okaaaay so the debug module must be in coresync clock domain
329 # but NOT its reset signal. to cope with this, set every single
330 # submodule explicitly in coresync domain, debug and JTAG
331 # in their own one but using *external* reset.
332 csd
= DomainRenamer(self
.core_domain
)
333 dbd
= DomainRenamer(self
.dbg_domain
)
335 if self
.microwatt_compat
:
336 m
.submodules
.core
= core
= self
.core
338 m
.submodules
.core
= core
= csd(self
.core
)
339 # this _so_ needs sorting out. ICache is added down inside
340 # LoadStore1 and is already a submodule of LoadStore1
341 if not isinstance(self
.imem
, ICache
):
342 m
.submodules
.imem
= imem
= csd(self
.imem
)
343 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
345 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
346 # TODO: UART2GDB mux, here, from external pin
347 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
348 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
350 # fixup the clocks in microwatt-compat mode (but leave resets alone
351 # so that microwatt soc.vhdl can pull a reset on the core or DMI
352 # can do it, just like in TestIssuer)
353 if self
.microwatt_compat
:
354 intclk
= ClockSignal(self
.core_domain
)
355 dbgclk
= ClockSignal(self
.dbg_domain
)
356 if self
.core_domain
!= 'sync':
357 comb
+= intclk
.eq(ClockSignal())
358 if self
.dbg_domain
!= 'sync':
359 comb
+= dbgclk
.eq(ClockSignal())
361 # drop the first 3 bits of the incoming wishbone addresses
362 # this can go if using later versions of microwatt (not now)
363 if self
.microwatt_compat
:
364 ibus
= self
.imem
.ibus
365 dbus
= self
.core
.l0
.cmpi
.wb_bus()
366 comb
+= self
.ibus_adr
.eq(Cat(Const(0, 3), ibus
.adr
))
367 comb
+= self
.dbus_adr
.eq(Cat(Const(0, 3), dbus
.adr
))
369 cur_state
= self
.cur_state
371 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
373 for i
, sram
in enumerate(self
.sram4k
):
374 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
375 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
377 # XICS interrupt handler
379 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
380 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
381 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
382 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
384 sync
+= cur_state
.eint
.eq(self
.ext_irq
) # connect externally
386 # GPIO test peripheral
388 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
390 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
391 # XXX causes litex ECP5 test to get wrong idea about input and output
392 # (but works with verilator sim *sigh*)
393 # if self.gpio and self.xics:
394 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
396 # instruction decoder
397 pdecode
= create_pdecode()
398 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
400 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
403 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
404 intrf
= self
.core
.regs
.rf
['int']
406 # clock delay power-on reset
407 cd_por
= ClockDomain(reset_less
=True)
408 cd_sync
= ClockDomain()
409 m
.domains
+= cd_por
, cd_sync
410 core_sync
= ClockDomain(self
.core_domain
)
411 if self
.core_domain
!= "sync":
412 m
.domains
+= core_sync
413 if self
.dbg_domain
!= "sync":
414 dbg_sync
= ClockDomain(self
.dbg_domain
)
415 m
.domains
+= dbg_sync
417 ti_rst
= Signal(reset_less
=True)
418 delay
= Signal(range(4), reset
=3)
419 with m
.If(delay
!= 0):
420 m
.d
.por
+= delay
.eq(delay
- 1)
421 comb
+= cd_por
.clk
.eq(ClockSignal())
423 # power-on reset delay
424 core_rst
= ResetSignal(self
.core_domain
)
425 if self
.core_domain
!= "sync":
426 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
427 comb
+= core_rst
.eq(ti_rst
)
429 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
430 comb
+= core_rst
.eq(1)
432 # connect external reset signal to DMI Reset
433 if self
.dbg_domain
!= "sync":
434 dbg_rst
= ResetSignal(self
.dbg_domain
)
435 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
437 # busy/halted signals from core
438 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
439 comb
+= self
.busy_o
.eq(core_busy_o
)
440 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
442 # temporary hack: says "go" immediately for both address gen and ST
444 ldst
= core
.fus
.fus
['ldst0']
445 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
446 # link addr-go direct to rel
447 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
448 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
450 def do_dmi(self
, m
, dbg
):
451 """deals with DMI debug requests
453 currently only provides read requests for the INT regfile, CR and XER
454 it will later also deal with *writing* to these regfiles.
458 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
459 intrf
= self
.core
.regs
.rf
['int']
461 with m
.If(d_reg
.req
): # request for regfile access being made
462 # TODO: error-check this
463 # XXX should this be combinatorial? sync better?
465 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
467 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
468 comb
+= self
.int_r
.ren
.eq(1)
469 d_reg_delay
= Signal()
470 sync
+= d_reg_delay
.eq(d_reg
.req
)
471 with m
.If(d_reg_delay
):
472 # data arrives one clock later
473 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
474 comb
+= d_reg
.ack
.eq(1)
476 # sigh same thing for CR debug
477 with m
.If(d_cr
.req
): # request for regfile access being made
478 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
479 d_cr_delay
= Signal()
480 sync
+= d_cr_delay
.eq(d_cr
.req
)
481 with m
.If(d_cr_delay
):
482 # data arrives one clock later
483 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
484 comb
+= d_cr
.ack
.eq(1)
487 with m
.If(d_xer
.req
): # request for regfile access being made
488 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
489 d_xer_delay
= Signal()
490 sync
+= d_xer_delay
.eq(d_xer
.req
)
491 with m
.If(d_xer_delay
):
492 # data arrives one clock later
493 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
494 comb
+= d_xer
.ack
.eq(1)
496 def tb_dec_fsm(self
, m
, spr_dec
):
499 this is a FSM for updating either dec or tb. it runs alternately
500 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
501 value to DEC, however the regfile has "passthrough" on it so this
504 see v3.0B p1097-1099 for Timeer Resource and p1065 and p1076
507 comb
, sync
= m
.d
.comb
, m
.d
.sync
508 fast_rf
= self
.core
.regs
.rf
['fast']
509 fast_r_dectb
= fast_rf
.r_ports
['issue'] # DEC/TB
510 fast_w_dectb
= fast_rf
.w_ports
['issue'] # DEC/TB
514 # initiates read of current DEC
515 with m
.State("DEC_READ"):
516 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.DEC
)
517 comb
+= fast_r_dectb
.ren
.eq(1)
520 # waits for DEC read to arrive (1 cycle), updates with new value
521 with m
.State("DEC_WRITE"):
523 # TODO: MSR.LPCR 32-bit decrement mode
524 comb
+= new_dec
.eq(fast_r_dectb
.o_data
- 1)
525 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.DEC
)
526 comb
+= fast_w_dectb
.wen
.eq(1)
527 comb
+= fast_w_dectb
.i_data
.eq(new_dec
)
528 sync
+= spr_dec
.eq(new_dec
) # copy into cur_state for decoder
531 # initiates read of current TB
532 with m
.State("TB_READ"):
533 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.TB
)
534 comb
+= fast_r_dectb
.ren
.eq(1)
537 # waits for read TB to arrive, initiates write of current TB
538 with m
.State("TB_WRITE"):
540 comb
+= new_tb
.eq(fast_r_dectb
.o_data
+ 1)
541 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.TB
)
542 comb
+= fast_w_dectb
.wen
.eq(1)
543 comb
+= fast_w_dectb
.i_data
.eq(new_tb
)
548 def elaborate(self
, platform
):
551 comb
, sync
= m
.d
.comb
, m
.d
.sync
552 cur_state
= self
.cur_state
553 pdecode2
= self
.pdecode2
556 # set up peripherals and core
557 core_rst
= self
.core_rst
558 self
.setup_peripherals(m
)
560 # reset current state if core reset requested
562 m
.d
.sync
+= self
.cur_state
.eq(0)
564 # check halted condition: requested PC to execute matches DMI stop addr
565 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
568 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
570 comb
+= dbg
.core_stopped_i
.eq(1)
571 comb
+= dbg
.terminate_i
.eq(1)
573 # PC and instruction from I-Memory
574 comb
+= self
.pc_o
.eq(cur_state
.pc
)
575 self
.pc_changed
= Signal() # note write to PC
576 self
.msr_changed
= Signal() # note write to MSR
577 self
.sv_changed
= Signal() # note write to SVSTATE
579 # read state either from incoming override or from regfile
580 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
581 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
583 self
.state_r_msr
, StateRegs
.MSR
)
584 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
586 self
.state_r_pc
, StateRegs
.PC
)
587 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
588 "svstate", # read SVSTATE
589 self
.state_r_sv
, StateRegs
.SVSTATE
)
591 # don't write pc every cycle
592 comb
+= self
.state_w_pc
.wen
.eq(0)
593 comb
+= self
.state_w_pc
.i_data
.eq(0)
595 # connect up debug state. note "combinatorially same" below,
596 # this is a bit naff, passing state over in the dbg class, but
597 # because it is combinatorial it achieves the desired goal
598 comb
+= dbg
.state
.eq(state
)
600 # this bit doesn't have to be in the FSM: connect up to read
601 # regfiles on demand from DMI
604 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
605 # (which uses that in PowerDecoder2 to raise 0x900 exception)
606 self
.tb_dec_fsm(m
, cur_state
.dec
)
608 # while stopped, allow updating the MSR, PC and SVSTATE.
609 # these are mainly for debugging purposes (including DMI/JTAG)
610 with m
.If(dbg
.core_stopped_i
):
611 with m
.If(self
.pc_i
.ok
):
612 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
613 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
614 sync
+= self
.pc_changed
.eq(1)
615 with m
.If(self
.msr_i
.ok
):
616 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
617 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
618 sync
+= self
.msr_changed
.eq(1)
619 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
620 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
621 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
622 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
623 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
624 sync
+= self
.sv_changed
.eq(1)
626 # start renaming some of the ports to match microwatt
627 if self
.microwatt_compat
:
628 self
.core
.o
.core_terminate_o
.name
= "terminated_out"
629 # names of DMI interface
630 self
.dbg
.dmi
.addr_i
.name
= 'dmi_addr'
631 self
.dbg
.dmi
.din
.name
= 'dmi_din'
632 self
.dbg
.dmi
.dout
.name
= 'dmi_dout'
633 self
.dbg
.dmi
.req_i
.name
= 'dmi_req'
634 self
.dbg
.dmi
.we_i
.name
= 'dmi_wr'
635 self
.dbg
.dmi
.ack_o
.name
= 'dmi_ack'
636 # wishbone instruction bus
637 ibus
= self
.imem
.ibus
638 ibus
.adr
.name
= 'wishbone_insn_out.adr'
639 ibus
.dat_w
.name
= 'wishbone_insn_out.dat'
640 ibus
.sel
.name
= 'wishbone_insn_out.sel'
641 ibus
.cyc
.name
= 'wishbone_insn_out.cyc'
642 ibus
.stb
.name
= 'wishbone_insn_out.stb'
643 ibus
.we
.name
= 'wishbone_insn_out.we'
644 ibus
.dat_r
.name
= 'wishbone_insn_in.dat'
645 ibus
.ack
.name
= 'wishbone_insn_in.ack'
646 ibus
.stall
.name
= 'wishbone_insn_in.stall'
648 dbus
= self
.core
.l0
.cmpi
.wb_bus()
649 dbus
.adr
.name
= 'wishbone_data_out.adr'
650 dbus
.dat_w
.name
= 'wishbone_data_out.dat'
651 dbus
.sel
.name
= 'wishbone_data_out.sel'
652 dbus
.cyc
.name
= 'wishbone_data_out.cyc'
653 dbus
.stb
.name
= 'wishbone_data_out.stb'
654 dbus
.we
.name
= 'wishbone_data_out.we'
655 dbus
.dat_r
.name
= 'wishbone_data_in.dat'
656 dbus
.ack
.name
= 'wishbone_data_in.ack'
657 dbus
.stall
.name
= 'wishbone_data_in.stall'
662 yield from self
.pc_i
.ports()
663 yield from self
.msr_i
.ports()
666 yield from self
.core
.ports()
667 yield from self
.imem
.ports()
668 yield self
.core_bigendian_i
674 def external_ports(self
):
675 if self
.microwatt_compat
:
676 ports
= [self
.core
.o
.core_terminate_o
,
678 self
.alt_reset
, # not connected yet
682 ports
+= list(self
.dbg
.dmi
.ports())
683 # for dbus/ibus microwatt, exclude err btw and cti
684 for name
, sig
in self
.imem
.ibus
.fields
.items():
685 if name
not in ['err', 'bte', 'cti', 'adr']:
687 for name
, sig
in self
.core
.l0
.cmpi
.wb_bus().fields
.items():
688 if name
not in ['err', 'bte', 'cti', 'adr']:
690 # microwatt non-compliant with wishbone
691 ports
.append(self
.ibus_adr
)
692 ports
.append(self
.dbus_adr
)
695 ports
= self
.pc_i
.ports()
696 ports
= self
.msr_i
.ports()
697 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
701 ports
+= list(self
.jtag
.external_ports())
703 # don't add DMI if JTAG is enabled
704 ports
+= list(self
.dbg
.dmi
.ports())
706 ports
+= list(self
.imem
.ibus
.fields
.values())
707 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
710 for sram
in self
.sram4k
:
711 ports
+= list(sram
.bus
.fields
.values())
714 ports
+= list(self
.xics_icp
.bus
.fields
.values())
715 ports
+= list(self
.xics_ics
.bus
.fields
.values())
716 ports
.append(self
.int_level_i
)
718 ports
.append(self
.ext_irq
)
721 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
722 ports
.append(self
.gpio_o
)
731 # Fetch Finite State Machine.
732 # WARNING: there are currently DriverConflicts but it's actually working.
733 # TODO, here: everything that is global in nature, information from the
734 # main TestIssuerInternal, needs to move to either ispec() or ospec().
735 # not only that: TestIssuerInternal.imem can entirely move into here
736 # because imem is only ever accessed inside the FetchFSM.
737 class FetchFSM(ControlBase
):
738 def __init__(self
, allow_overlap
, svp64_en
, imem
, core_rst
,
740 dbg
, core
, svstate
, nia
, is_svp64_mode
):
741 self
.allow_overlap
= allow_overlap
742 self
.svp64_en
= svp64_en
744 self
.core_rst
= core_rst
745 self
.pdecode2
= pdecode2
746 self
.cur_state
= cur_state
749 self
.svstate
= svstate
751 self
.is_svp64_mode
= is_svp64_mode
753 # set up pipeline ControlBase and allocate i/o specs
754 # (unusual: normally done by the Pipeline API)
755 super().__init
__(stage
=self
)
756 self
.p
.i_data
, self
.n
.o_data
= self
.new_specs(None)
757 self
.i
, self
.o
= self
.p
.i_data
, self
.n
.o_data
759 # next 3 functions are Stage API Compliance
760 def setup(self
, m
, i
):
769 def elaborate(self
, platform
):
772 this FSM performs fetch of raw instruction data, partial-decodes
773 it 32-bit at a time to detect SVP64 prefixes, and will optionally
774 read a 2nd 32-bit quantity if that occurs.
776 m
= super().elaborate(platform
)
782 svstate
= self
.svstate
784 is_svp64_mode
= self
.is_svp64_mode
785 fetch_pc_o_ready
= self
.p
.o_ready
786 fetch_pc_i_valid
= self
.p
.i_valid
787 fetch_insn_o_valid
= self
.n
.o_valid
788 fetch_insn_i_ready
= self
.n
.i_ready
792 pdecode2
= self
.pdecode2
793 cur_state
= self
.cur_state
794 dec_opcode_o
= pdecode2
.dec
.raw_opcode_in
# raw opcode
796 # also note instruction fetch failed
797 if hasattr(core
, "icache"):
798 fetch_failed
= core
.icache
.i_out
.fetch_failed
801 fetch_failed
= Const(0, 1)
804 # set priv / virt mode on I-Cache, sigh
805 if isinstance(self
.imem
, ICache
):
806 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
807 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
809 with m
.FSM(name
='fetch_fsm'):
812 with m
.State("IDLE"):
813 # fetch allowed if not failed and stopped but not stepping
814 # (see dmi.py for how core_stop_o is generated)
815 with m
.If(~fetch_failed
& ~dbg
.core_stop_o
):
816 comb
+= fetch_pc_o_ready
.eq(1)
817 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
819 # instruction allowed to go: start by reading the PC
820 # capture the PC and also drop it into Insn Memory
821 # we have joined a pair of combinatorial memory
822 # lookups together. this is Generally Bad.
823 comb
+= self
.imem
.a_pc_i
.eq(pc
)
824 comb
+= self
.imem
.a_i_valid
.eq(1)
825 comb
+= self
.imem
.f_i_valid
.eq(1)
826 # transfer state to output
827 sync
+= cur_state
.pc
.eq(pc
)
828 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
829 sync
+= cur_state
.msr
.eq(msr
) # and msr
831 m
.next
= "INSN_READ" # move to "wait for bus" phase
833 # dummy pause to find out why simulation is not keeping up
834 with m
.State("INSN_READ"):
835 # when using "single-step" mode, checking dbg.stopping_o
836 # prevents progress. allow fetch to proceed once started
838 #if self.allow_overlap:
839 # stopping = dbg.stopping_o
841 # stopping: jump back to idle
844 with m
.If(self
.imem
.f_busy_o
&
845 ~pdecode2
.instr_fault
): # zzz...
846 # busy but not fetch failed: stay in wait-read
847 comb
+= self
.imem
.a_pc_i
.eq(pc
)
848 comb
+= self
.imem
.a_i_valid
.eq(1)
849 comb
+= self
.imem
.f_i_valid
.eq(1)
851 # not busy (or fetch failed!): instruction fetched
852 # when fetch failed, the instruction gets ignored
854 if hasattr(core
, "icache"):
855 # blech, icache returns actual instruction
856 insn
= self
.imem
.f_instr_o
858 # but these return raw memory
859 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
862 # decode the SVP64 prefix, if any
863 comb
+= svp64
.raw_opcode_in
.eq(insn
)
864 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
865 # pass the decoded prefix (if any) to PowerDecoder2
866 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
867 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
868 # remember whether this is a prefixed instruction,
869 # so the FSM can readily loop when VL==0
870 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
871 # calculate the address of the following instruction
872 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
873 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
874 with m
.If(~svp64
.is_svp64_mode
):
875 # with no prefix, store the instruction
876 # and hand it directly to the next FSM
877 sync
+= dec_opcode_o
.eq(insn
)
878 m
.next
= "INSN_READY"
880 # fetch the rest of the instruction from memory
881 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
882 comb
+= self
.imem
.a_i_valid
.eq(1)
883 comb
+= self
.imem
.f_i_valid
.eq(1)
884 m
.next
= "INSN_READ2"
886 # not SVP64 - 32-bit only
887 sync
+= nia
.eq(cur_state
.pc
+ 4)
888 sync
+= dec_opcode_o
.eq(insn
)
889 m
.next
= "INSN_READY"
891 with m
.State("INSN_READ2"):
892 with m
.If(self
.imem
.f_busy_o
): # zzz...
893 # busy: stay in wait-read
894 comb
+= self
.imem
.a_i_valid
.eq(1)
895 comb
+= self
.imem
.f_i_valid
.eq(1)
897 # not busy: instruction fetched
898 if hasattr(core
, "icache"):
899 # blech, icache returns actual instruction
900 insn
= self
.imem
.f_instr_o
902 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
903 sync
+= dec_opcode_o
.eq(insn
)
904 m
.next
= "INSN_READY"
905 # TODO: probably can start looking at pdecode2.rm_dec
906 # here or maybe even in INSN_READ state, if svp64_mode
907 # detected, in order to trigger - and wait for - the
910 pmode
= pdecode2
.rm_dec
.predmode
912 if pmode != SVP64PredMode.ALWAYS.value:
913 fire predicate loading FSM and wait before
916 sync += self.srcmask.eq(-1) # set to all 1s
917 sync += self.dstmask.eq(-1) # set to all 1s
918 m.next = "INSN_READY"
921 with m
.State("INSN_READY"):
922 # hand over the instruction, to be decoded
923 comb
+= fetch_insn_o_valid
.eq(1)
924 with m
.If(fetch_insn_i_ready
):
927 # whatever was done above, over-ride it if core reset is held
928 with m
.If(self
.core_rst
):
class TestIssuerInternal(TestIssuerBase):
    """TestIssuer - reads instructions from TestMemory and issues them

    efficiency and speed is not the main goal here: functional correctness
    and code clarity is.  optimisations (which almost 100% interfere with
    easy understanding) come later.
    """
    # NOTE(review): this excerpt omits some intervening source lines; the
    # remainder of the class docstring is not visible in this chunk.

    def fetch_predicate_fsm(self, m,
                            pred_insn_i_valid, pred_insn_o_ready,
                            pred_mask_o_valid, pred_mask_i_ready):
        """fetch_predicate_fsm - obtains (constructs in the case of CR)
           src/dest predicate masks

        https://bugs.libre-soc.org/show_bug.cgi?id=617
        the predicates can be read here, by using IntRegs r_ports['pred']
        or CRRegs r_ports['pred'].  in the case of CRs it will have to
        be done through multiple reads, extracting one relevant at a time.
        later, a faster way would be to use the 32-bit-wide CR port but
        this is more complex decoding, here.  equivalent code used in
        ISACaller is "from openpower.decoder.isa.caller import get_predcr"

        note: this ENTIRE FSM is not to be called when svp64 is disabled

        Handshakes with the issue FSM: pred_insn_i_valid/pred_insn_o_ready
        start a predicate fetch; pred_mask_o_valid/pred_mask_i_ready report
        self.srcmask/self.dstmask ready.
        """
        # (excerpt gap: the comb/sync = m.d.comb, m.d.sync binding from the
        #  full file is not visible here)
        pdecode2 = self.pdecode2
        rm_dec = pdecode2.rm_dec  # SVP64RMModeDecode
        predmode = rm_dec.predmode
        srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
        cr_pred, int_pred = self.cr_pred, self.int_pred  # read regfiles
        # get src/dst step, so we can skip already used mask bits
        cur_state = self.cur_state
        srcstep = cur_state.svstate.srcstep
        dststep = cur_state.svstate.dststep
        cur_vl = cur_state.svstate.vl

        # decode integer and CR predicate specifiers for both src and dst
        sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
        dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
        sidx, scrinvert = get_predcr(m, srcpred, 's')
        didx, dcrinvert = get_predcr(m, dstpred, 'd')

        # store fetched masks, for either intpred or crpred
        # when src/dst step is not zero, the skipped mask bits need to be
        # shifted-out, before actually storing them in src/dest mask
        new_srcmask = Signal(64, reset_less=True)
        new_dstmask = Signal(64, reset_less=True)

        with m.FSM(name="fetch_predicate"):
            with m.State("FETCH_PRED_IDLE"):
                comb += pred_insn_o_ready.eq(1)
                with m.If(pred_insn_i_valid):
                    with m.If(predmode == SVP64PredMode.INT):
                        # skip fetching destination mask register, when zero
                        # (excerpt gap: the guarding with m.If(...) for the
                        #  all-1s destination case is not visible here)
                        sync += new_dstmask.eq(-1)
                        # directly go to fetch source mask register
                        # guaranteed not to be zero (otherwise predmode
                        # would be SVP64PredMode.ALWAYS, not INT)
                        comb += int_pred.addr.eq(sregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_SRC_READ"
                        # (excerpt gap: the matching with m.Else(): branch
                        #  is not visible here)
                        # fetch destination predicate register
                        comb += int_pred.addr.eq(dregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_DST_READ"
                    with m.Elif(predmode == SVP64PredMode.CR):
                        # go fetch masks from the CR register file
                        sync += new_srcmask.eq(0)
                        sync += new_dstmask.eq(0)
                        # (excerpt gap: intervening lines — presumably an
                        #  m.Else() / state transition — not visible here)
                        sync += self.srcmask.eq(-1)
                        sync += self.dstmask.eq(-1)
                        m.next = "FETCH_PRED_DONE"

            with m.State("INT_DST_READ"):
                # store destination mask
                inv = Repl(dinvert, 64)
                # (excerpt gap: guarding with m.If(...) for the unary case
                #  is not visible here)
                # set selected mask bit for 1<<r3 mode
                dst_shift = Signal(range(64))
                comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_dstmask.eq(1 << dst_shift)
                # (excerpt gap: matching with m.Else(): not visible here)
                # invert mask if requested
                sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
                # skip fetching source mask register, when zero
                # (excerpt gap: guarding with m.If(...) not visible here)
                sync += new_srcmask.eq(-1)
                m.next = "FETCH_PRED_SHIFT_MASK"
                # (excerpt gap: matching with m.Else(): not visible here)
                # fetch source predicate register
                comb += int_pred.addr.eq(sregread)
                comb += int_pred.ren.eq(1)
                m.next = "INT_SRC_READ"

            with m.State("INT_SRC_READ"):
                # store source mask (mirror of INT_DST_READ above)
                inv = Repl(sinvert, 64)
                # (excerpt gap: guarding with m.If(...) for the unary case
                #  is not visible here)
                # set selected mask bit for 1<<r3 mode
                src_shift = Signal(range(64))
                comb += src_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_srcmask.eq(1 << src_shift)
                # (excerpt gap: matching with m.Else(): not visible here)
                # invert mask if requested
                sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
                m.next = "FETCH_PRED_SHIFT_MASK"

            # fetch masks from the CR register file
            # implements the following loop:
            # idx, inv = get_predcr(mask)
            # for cr_idx in range(vl):
            #     cr = crl[cr_idx + SVP64CROffs.CRPred]  # takes one cycle
            #     mask |= 1 << cr_idx
            with m.State("CR_READ"):
                # CR index to be read, which will be ready by the next cycle
                cr_idx = Signal.like(cur_vl, reset_less=True)
                # submit the read operation to the regfile
                with m.If(cr_idx != cur_vl):
                    # the CR read port is unary ...
                    # ... in MSB0 convention ...
                    # ren = 1 << (7 - cr_idx)
                    # ... and with an offset:
                    # ren = 1 << (7 - off - cr_idx)
                    idx = SVP64CROffs.CRPred + cr_idx
                    comb += cr_pred.ren.eq(1 << (7 - idx))
                    # signal data valid in the next cycle
                    cr_read = Signal(reset_less=True)
                    sync += cr_read.eq(1)
                    # load the next index
                    sync += cr_idx.eq(cr_idx + 1)
                # (excerpt gap: the matching with m.Else(): — loop-complete
                #  path — is not visible here)
                    sync += cr_read.eq(0)
                    sync += cr_idx.eq(0)
                    m.next = "FETCH_PRED_SHIFT_MASK"
                # (excerpt gap: guarding with m.If(cr_read)-style condition
                #  not visible here)
                # compensate for the one cycle delay on the regfile
                cur_cr_idx = Signal.like(cur_vl)
                comb += cur_cr_idx.eq(cr_idx - 1)
                # read the CR field, select the appropriate bit
                cr_field = Signal(4)
                # (excerpt gap: scr_bit / dcr_bit Signal declarations not
                #  visible here)
                comb += cr_field.eq(cr_pred.o_data)
                comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
                # (excerpt gap: continuation of this expression — the
                #  closing parenthesis and any inversion term — missing)
                comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
                # (excerpt gap: continuation of this expression missing)
                # set the corresponding mask bit
                bit_to_set = Signal.like(self.srcmask)
                comb += bit_to_set.eq(1 << cur_cr_idx)
                # (excerpt gap: guarding with m.If(scr_bit)-style condition
                #  not visible here)
                sync += new_srcmask.eq(new_srcmask | bit_to_set)
                # (excerpt gap: guarding condition for dst not visible)
                sync += new_dstmask.eq(new_dstmask | bit_to_set)

            with m.State("FETCH_PRED_SHIFT_MASK"):
                # shift-out skipped mask bits
                sync += self.srcmask.eq(new_srcmask >> srcstep)
                sync += self.dstmask.eq(new_dstmask >> dststep)
                m.next = "FETCH_PRED_DONE"

            with m.State("FETCH_PRED_DONE"):
                comb += pred_mask_o_valid.eq(1)
                with m.If(pred_mask_i_ready):
                    m.next = "FETCH_PRED_IDLE"
    def issue_fsm(self, m, core, nia,
                  dbg, core_rst, is_svp64_mode,
                  fetch_pc_o_ready, fetch_pc_i_valid,
                  fetch_insn_o_valid, fetch_insn_i_ready,
                  pred_insn_i_valid, pred_insn_o_ready,
                  pred_mask_o_valid, pred_mask_i_ready,
                  exec_insn_i_valid, exec_insn_o_ready,
                  exec_pc_o_valid, exec_pc_i_ready):
        """issue FSM

        decode / issue FSM.  this interacts with the "fetch" FSM
        through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
        (outgoing).  also interacts with the "execute" FSM
        through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid

        SVP64 RM prefixes have already been set up by the
        "fetch" phase, so execute is fairly straightforward.
        """
        # (excerpt gap: the comb/sync = m.d.comb, m.d.sync binding from the
        #  full file is not visible here)
        pdecode2 = self.pdecode2
        cur_state = self.cur_state
        new_svstate = self.new_svstate
        # (excerpt gap: intervening lines not visible here)
        dec_opcode_i = pdecode2.dec.raw_opcode_in  # raw opcode

        # for updating svstate (things like srcstep etc.)
        comb += new_svstate.eq(cur_state.svstate)

        # precalculate srcstep+1 and dststep+1
        cur_srcstep = cur_state.svstate.srcstep
        cur_dststep = cur_state.svstate.dststep
        next_srcstep = Signal.like(cur_srcstep)
        next_dststep = Signal.like(cur_dststep)
        comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
        comb += next_dststep.eq(cur_state.svstate.dststep+1)

        # note if an exception happened.  in a pipelined or OoO design
        # this needs to be accompanied by "shadowing" (or stalling)
        exc_happened = self.core.o.exc_happened
        # also note instruction fetch failed
        if hasattr(core, "icache"):
            fetch_failed = core.icache.i_out.fetch_failed
            # (excerpt gap: flush_needed assignment for the icache case is
            #  not visible here)
            # set to fault in decoder
            # update (highest priority) instruction fault
            rising_fetch_failed = rising_edge(m, fetch_failed)
            with m.If(rising_fetch_failed):
                sync += pdecode2.instr_fault.eq(1)
        # (excerpt gap: the matching else: branch header is not visible;
        #  the following two assignments belong to the no-icache case)
            fetch_failed = Const(0, 1)
            flush_needed = False

        with m.FSM(name="issue_fsm"):
            # sync with the "fetch" phase which is reading the instruction
            # at this point, there is no instruction running, that
            # could inadvertently update the PC.
            with m.State("ISSUE_START"):
                # reset instruction fault
                sync += pdecode2.instr_fault.eq(0)
                # wait on "core stop" release, before next fetch
                # need to do this here, in case we are in a VL==0 loop
                with m.If(~dbg.core_stop_o & ~core_rst):
                    comb += fetch_pc_i_valid.eq(1)  # tell fetch to start
                    with m.If(fetch_pc_o_ready):  # fetch acknowledged us
                        m.next = "INSN_WAIT"
                # (excerpt gap: matching with m.Else(): — core stopped —
                #  is not visible here)
                    # tell core it's stopped, and acknowledge debug handshake
                    comb += dbg.core_stopped_i.eq(1)
                    # while stopped, allow updating SVSTATE
                    with m.If(self.svstate_i.ok):
                        comb += new_svstate.eq(self.svstate_i.data)
                        comb += self.update_svstate.eq(1)
                        sync += self.sv_changed.eq(1)

            # wait for an instruction to arrive from Fetch
            with m.State("INSN_WAIT"):
                # when using "single-step" mode, checking dbg.stopping_o
                # prevents progress.  allow issue to proceed once started
                # (excerpt gap: the default `stopping` binding is not
                #  visible here — presumably a constant; TODO confirm)
                #if self.allow_overlap:
                #    stopping = dbg.stopping_o
                with m.If(stopping):
                    # stopping: jump back to idle
                    m.next = "ISSUE_START"
                    # (excerpt gap: `if flush_needed:` guard not visible)
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                    # stop instruction fault
                    sync += pdecode2.instr_fault.eq(0)
                # (excerpt gap: matching with m.Else(): not visible here)
                    comb += fetch_insn_i_ready.eq(1)
                    with m.If(fetch_insn_o_valid):
                        # loop into ISSUE_START if it's a SVP64 instruction
                        # and VL == 0.  this because VL==0 is a for-loop
                        # from 0 to 0 i.e. always, always a NOP.
                        cur_vl = cur_state.svstate.vl
                        with m.If(is_svp64_mode & (cur_vl == 0)):
                            # update the PC before fetching the next instruction
                            # since we are in a VL==0 loop, no instruction was
                            # executed that we could be overwriting
                            comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                            comb += self.state_w_pc.i_data.eq(nia)
                            comb += self.insn_done.eq(1)
                            m.next = "ISSUE_START"
                        # (excerpt gap: the with m.Elif(...) condition that
                        #  selects predicate fetching is not visible here)
                            m.next = "PRED_START"  # fetching predicate
                        # (excerpt gap: matching with m.Else(): missing)
                            m.next = "DECODE_SV"  # skip predication

            with m.State("PRED_START"):
                comb += pred_insn_i_valid.eq(1)  # tell fetch_pred to start
                with m.If(pred_insn_o_ready):  # fetch_pred acknowledged us
                    m.next = "MASK_WAIT"

            with m.State("MASK_WAIT"):
                comb += pred_mask_i_ready.eq(1)  # ready to receive the masks
                with m.If(pred_mask_o_valid):  # predication masks are ready
                    m.next = "PRED_SKIP"

            # skip zeros in predicate
            with m.State("PRED_SKIP"):
                with m.If(~is_svp64_mode):
                    m.next = "DECODE_SV"  # nothing to do
                # (excerpt gap: matching with m.Else(): not visible here;
                #  everything below belongs to the svp64 branch)
                    pred_src_zero = pdecode2.rm_dec.pred_sz
                    pred_dst_zero = pdecode2.rm_dec.pred_dz

                    # new srcstep, after skipping zeros
                    skip_srcstep = Signal.like(cur_srcstep)
                    # value to be added to the current srcstep
                    src_delta = Signal.like(cur_srcstep)
                    # add leading zeros to srcstep, if not in zero mode
                    with m.If(~pred_src_zero):
                        # priority encoder (count leading zeros)
                        # append guard bit, in case the mask is all zeros
                        pri_enc_src = PriorityEncoder(65)
                        m.submodules.pri_enc_src = pri_enc_src
                        comb += pri_enc_src.i.eq(Cat(self.srcmask,
                        # (excerpt gap: the guard-bit second Cat argument
                        #  and closing parens are not visible here)
                        comb += src_delta.eq(pri_enc_src.o)
                    # apply delta to srcstep
                    comb += skip_srcstep.eq(cur_srcstep + src_delta)
                    # shift-out all leading zeros from the mask
                    # plus the leading "one" bit
                    # TODO count leading zeros and shift-out the zero
                    #      bits, in the same step, in hardware
                    sync += self.srcmask.eq(self.srcmask >> (src_delta+1))

                    # same as above, but for dststep
                    skip_dststep = Signal.like(cur_dststep)
                    dst_delta = Signal.like(cur_dststep)
                    with m.If(~pred_dst_zero):
                        pri_enc_dst = PriorityEncoder(65)
                        m.submodules.pri_enc_dst = pri_enc_dst
                        comb += pri_enc_dst.i.eq(Cat(self.dstmask,
                        # (excerpt gap: guard-bit argument and closing
                        #  parens not visible here)
                        comb += dst_delta.eq(pri_enc_dst.o)
                    comb += skip_dststep.eq(cur_dststep + dst_delta)
                    sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))

                    # TODO: initialize mask[VL]=1 to avoid passing past VL
                    with m.If((skip_srcstep >= cur_vl) |
                              (skip_dststep >= cur_vl)):
                        # end of VL loop.  Update PC and reset src/dst step
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        comb += new_svstate.srcstep.eq(0)
                        comb += new_svstate.dststep.eq(0)
                        comb += self.update_svstate.eq(1)
                        # synchronize with the simulator
                        comb += self.insn_done.eq(1)
                        # (excerpt gap: intervening line not visible)
                        m.next = "ISSUE_START"
                    # (excerpt gap: matching with m.Else(): not visible)
                        # update new src/dst step
                        comb += new_svstate.srcstep.eq(skip_srcstep)
                        comb += new_svstate.dststep.eq(skip_dststep)
                        comb += self.update_svstate.eq(1)
                        # (excerpt gap: intervening line not visible)
                        m.next = "DECODE_SV"

                    # pass predicate mask bits through to satellite decoders
                    # TODO: for SIMD this will be *multiple* bits
                    sync += core.i.sv_pred_sm.eq(self.srcmask[0])
                    sync += core.i.sv_pred_dm.eq(self.dstmask[0])

            # after src/dst step have been updated, we are ready
            # to decode the instruction
            with m.State("DECODE_SV"):
                # decode the instruction
                with m.If(~fetch_failed):
                    sync += pdecode2.instr_fault.eq(0)
                sync += core.i.e.eq(pdecode2.e)
                sync += core.i.state.eq(cur_state)
                sync += core.i.raw_insn_i.eq(dec_opcode_i)
                sync += core.i.bigendian_i.eq(self.core_bigendian_i)
                # (excerpt gap: intervening line not visible here)
                sync += core.i.sv_rm.eq(pdecode2.sv_rm)
                # set RA_OR_ZERO detection in satellite decoders
                sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
                # and svp64 detection
                sync += core.i.is_svp64_mode.eq(is_svp64_mode)
                # and svp64 bit-rev'd ldst mode
                ldst_dec = pdecode2.use_svp64_ldst_dec
                sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
                # after decoding, reset any previous exception condition,
                # allowing it to be set again during the next execution
                sync += pdecode2.ldst_exc.eq(0)
                # (excerpt gap: intervening line not visible here)
                m.next = "INSN_EXECUTE"  # move to "execute"

            # handshake with execution FSM, move to "wait" once acknowledged
            with m.State("INSN_EXECUTE"):
                # when using "single-step" mode, checking dbg.stopping_o
                # prevents progress.  allow execute to proceed once started
                # (excerpt gap: `stopping` binding not visible here)
                #if self.allow_overlap:
                #    stopping = dbg.stopping_o
                with m.If(stopping):
                    # stopping: jump back to idle
                    m.next = "ISSUE_START"
                    # (excerpt gap: `if flush_needed:` guard not visible)
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                    # stop instruction fault
                    sync += pdecode2.instr_fault.eq(0)
                # (excerpt gap: matching with m.Else(): not visible here)
                    comb += exec_insn_i_valid.eq(1)  # trigger execute
                    with m.If(exec_insn_o_ready):  # execute acknowledged us
                        m.next = "EXECUTE_WAIT"

            with m.State("EXECUTE_WAIT"):
                comb += exec_pc_i_ready.eq(1)
                # see https://bugs.libre-soc.org/show_bug.cgi?id=636
                # the exception info needs to be blatted into
                # pdecode.ldst_exc, and the instruction "re-run".
                # when ldst_exc.happened is set, the PowerDecoder2
                # reacts very differently: it re-writes the instruction
                # with a "trap" (calls PowerDecoder2.trap()) which
                # will *overwrite* whatever was requested and jump the
                # PC to the exception address, as well as alter MSR.
                # nothing else needs to be done other than to note
                # the change of PC and MSR (and, later, SVSTATE)
                with m.If(exc_happened):
                    mmu = core.fus.get_exc("mmu0")
                    ldst = core.fus.get_exc("ldst0")
                    # (excerpt gap: intervening line — presumably a guard
                    #  on mmu being present — not visible here)
                    with m.If(fetch_failed):
                        # instruction fetch: exception is from MMU
                        # reset instr_fault (highest priority)
                        sync += pdecode2.ldst_exc.eq(mmu)
                        sync += pdecode2.instr_fault.eq(0)
                        # (excerpt gap: `if flush_needed:` guard missing)
                        # request icache to stop asserting "failed"
                        comb += core.icache.flush_in.eq(1)
                    with m.If(~fetch_failed):
                        # otherwise assume it was a LDST exception
                        sync += pdecode2.ldst_exc.eq(ldst)

                with m.If(exec_pc_o_valid):

                    # was this the last loop iteration?
                    # (excerpt gap: is_last Signal declaration missing)
                    cur_vl = cur_state.svstate.vl
                    comb += is_last.eq(next_srcstep == cur_vl)

                    with m.If(pdecode2.instr_fault):
                        # reset instruction fault, try again
                        sync += pdecode2.instr_fault.eq(0)
                        m.next = "ISSUE_START"

                    # return directly to Decode if Execute generated an
                    # exception
                    with m.Elif(pdecode2.ldst_exc.happened):
                        m.next = "DECODE_SV"

                    # if MSR, PC or SVSTATE were changed by the previous
                    # instruction, go directly back to Fetch, without
                    # updating either MSR PC or SVSTATE
                    with m.Elif(self.msr_changed | self.pc_changed |
                    # (excerpt gap: continuation of this condition —
                    #  presumably self.sv_changed — not visible here)
                        m.next = "ISSUE_START"

                    # also return to Fetch, when no output was a vector
                    # (regardless of SRCSTEP and VL), or when the last
                    # instruction was really the last one of the VL loop
                    with m.Elif((~pdecode2.loop_continue) | is_last):
                        # before going back to fetch, update the PC state
                        # register with the NIA.
                        # ok here we are not reading the branch unit.
                        # TODO: this just blithely overwrites whatever
                        #       pipeline updated the PC
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        # reset SRCSTEP before returning to Fetch
                        # (excerpt gap: `if self.svp64_en:`-style guard
                        #  not visible here)
                        with m.If(pdecode2.loop_continue):
                            comb += new_svstate.srcstep.eq(0)
                            comb += new_svstate.dststep.eq(0)
                            comb += self.update_svstate.eq(1)
                        # (excerpt gap: alternative branch header missing)
                            comb += new_svstate.srcstep.eq(0)
                            comb += new_svstate.dststep.eq(0)
                            comb += self.update_svstate.eq(1)
                        m.next = "ISSUE_START"

                    # returning to Execute? then, first update SRCSTEP
                    # (excerpt gap: with m.Else(): header not visible)
                        comb += new_svstate.srcstep.eq(next_srcstep)
                        comb += new_svstate.dststep.eq(next_dststep)
                        comb += self.update_svstate.eq(1)
                        # return to mask skip loop
                        m.next = "PRED_SKIP"

        # check if svstate needs updating: if so, write it to State Regfile
        with m.If(self.update_svstate):
            sync += cur_state.svstate.eq(self.new_svstate)  # for next clock
    def execute_fsm(self, m, core,
                    exec_insn_i_valid, exec_insn_o_ready,
                    exec_pc_o_valid, exec_pc_i_ready):
        """execute FSM

        execute FSM.  this interacts with the "issue" FSM
        through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
        (outgoing).  SVP64 RM prefixes have already been set up by the
        "issue" phase, so execute is fairly straightforward.
        """
        # (excerpt gap: the comb/sync and dbg bindings from the full file
        #  are not visible here — `dbg` is used near the end of this FSM)
        pdecode2 = self.pdecode2

        # core handshake signals: busy from the "next" (output) side,
        # valid into the "previous" (input) side
        core_busy_o = core.n.o_data.busy_o  # core is busy
        core_ivalid_i = core.p.i_valid  # instruction is valid

        if hasattr(core, "icache"):
            fetch_failed = core.icache.i_out.fetch_failed
        # (excerpt gap: the matching else: header is not visible; the
        #  following assignment belongs to the no-icache case)
            fetch_failed = Const(0, 1)

        with m.FSM(name="exec_fsm"):
            # waiting for instruction bus (stays there until not busy)
            with m.State("INSN_START"):
                comb += exec_insn_o_ready.eq(1)
                with m.If(exec_insn_i_valid):
                    comb += core_ivalid_i.eq(1)  # instruction is valid/issued
                    sync += self.sv_changed.eq(0)
                    sync += self.pc_changed.eq(0)
                    sync += self.msr_changed.eq(0)
                    with m.If(core.p.o_ready):  # only move if accepted
                        m.next = "INSN_ACTIVE"  # move to "wait completion"

            # instruction started: must wait till it finishes
            with m.State("INSN_ACTIVE"):
                # note changes to MSR, PC and SVSTATE
                # XXX oops, really must monitor *all* State Regfile write
                # ports looking for changes!
                with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
                    sync += self.sv_changed.eq(1)
                with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
                    sync += self.msr_changed.eq(1)
                with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
                    sync += self.pc_changed.eq(1)
                with m.If(~core_busy_o):  # instruction done!
                    comb += exec_pc_o_valid.eq(1)
                    with m.If(exec_pc_i_ready):
                        # when finished, indicate "done".
                        # however, if there was an exception, the instruction
                        # is *not* yet done.  this is an implementation
                        # detail: we choose to implement exceptions by
                        # taking the exception information from the LDST
                        # unit, putting that *back* into the PowerDecoder2,
                        # and *re-running the entire instruction*.
                        # if we erroneously indicate "done" here, it is as if
                        # there were *TWO* instructions:
                        # 1) the failed LDST 2) a TRAP.
                        with m.If(~pdecode2.ldst_exc.happened &
                                  ~pdecode2.instr_fault):
                            comb += self.insn_done.eq(1)
                        m.next = "INSN_START"  # back to fetch
                # terminate returns directly to INSN_START
                with m.If(dbg.terminate_i):
                    # comb += self.insn_done.eq(1) - no because it's not
                    m.next = "INSN_START"  # back to fetch
    def elaborate(self, platform):
        """Build the complete TestIssuerInternal module: instantiate the
        FetchFSM submodule and wire it to the issue, predicate-fetch and
        execute FSMs defined above.
        """
        m = super().elaborate(platform)
        # (excerpt gap: intervening line not visible here)
        comb, sync = m.d.comb, m.d.sync
        cur_state = self.cur_state
        pdecode2 = self.pdecode2
        # (excerpt gap: bindings for `dbg`, `core` and related locals used
        #  below are not visible in this chunk)
        # set up peripherals and core
        core_rst = self.core_rst

        # indicate to outside world if any FU is still executing
        comb += self.any_busy.eq(core.n.o_data.any_busy_o)  # any FU executing

        # address of the next instruction, in the absence of a branch
        # depends on the instruction size
        # (excerpt gap: the `nia` signal creation used below is not
        #  visible here)

        # connect up debug signals
        with m.If(core.o.core_terminate_o):
            comb += dbg.terminate_i.eq(1)

        # pass the prefix mode from Fetch to Issue, so the latter can loop
        # on VL==0
        is_svp64_mode = Signal()

        # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
        # issue, decode/execute, now joined by "Predicate fetch/calculate".
        # these are the handshake signals between each

        # fetch FSM can run as soon as the PC is valid
        fetch_pc_i_valid = Signal()  # Execute tells Fetch "start next read"
        fetch_pc_o_ready = Signal()  # Fetch Tells SVSTATE "proceed"

        # fetch FSM hands over the instruction to be decoded / issued
        fetch_insn_o_valid = Signal()
        fetch_insn_i_ready = Signal()

        # predicate fetch FSM decodes and fetches the predicate
        pred_insn_i_valid = Signal()
        pred_insn_o_ready = Signal()

        # predicate fetch FSM delivers the masks
        pred_mask_o_valid = Signal()
        pred_mask_i_ready = Signal()

        # issue FSM delivers the instruction to the be executed
        exec_insn_i_valid = Signal()
        exec_insn_o_ready = Signal()

        # execute FSM, hands over the PC/SVSTATE back to the issue FSM
        exec_pc_o_valid = Signal()
        exec_pc_i_ready = Signal()

        # the FSMs here are perhaps unusual in that they detect conditions
        # then "hold" information, combinatorially, for the core
        # (as opposed to using sync - which would be on a clock's delay)
        # this includes the actual opcode, valid flags and so on.

        # Fetch, then predicate fetch, then Issue, then Execute.
        # Issue is where the VL for-loop # lives.  the ready/valid
        # signalling is used to communicate between the four.

        # set up the Fetch FSM as a submodule
        # (excerpt gap: some FetchFSM constructor arguments are not
        #  visible in this chunk)
        fetch = FetchFSM(self.allow_overlap, self.svp64_en,
                         self.imem, core_rst, pdecode2, cur_state,
                         dbg.state.svstate,  # combinatorially same
                         )
        m.submodules.fetch = fetch
        # connect up in/out data to existing Signals
        comb += fetch.p.i_data.pc.eq(dbg.state.pc)  # combinatorially same
        comb += fetch.p.i_data.msr.eq(dbg.state.msr)  # combinatorially same
        # and the ready/valid signalling
        comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
        comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
        comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
        comb += fetch.n.i_ready.eq(fetch_insn_i_ready)

        # issue FSM: decode / issue, drives the VL for-loop
        self.issue_fsm(m, core, nia,
                       dbg, core_rst, is_svp64_mode,
                       fetch_pc_o_ready, fetch_pc_i_valid,
                       fetch_insn_o_valid, fetch_insn_i_ready,
                       pred_insn_i_valid, pred_insn_o_ready,
                       pred_mask_o_valid, pred_mask_i_ready,
                       exec_insn_i_valid, exec_insn_o_ready,
                       exec_pc_o_valid, exec_pc_i_ready)

        # predicate fetch FSM
        # (excerpt gap: the `if self.svp64_en:`-style guard that the full
        #  file places around this call is not visible here)
        self.fetch_predicate_fsm(m,
                                 pred_insn_i_valid, pred_insn_o_ready,
                                 pred_mask_o_valid, pred_mask_i_ready)

        # execute FSM
        self.execute_fsm(m, core,
                         exec_insn_i_valid, exec_insn_o_ready,
                         exec_pc_o_valid, exec_pc_i_ready)
        # (excerpt gap: the trailing lines of this method — including the
        #  `return m` — are not visible in this chunk)
class TestIssuer(Elaboratable):
    """Wraps TestIssuerInternal together with a (dummy) PLL and clock
    selection, exposing the combined port list for synthesis/simulation.
    """

    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)
        # (excerpt gap: intervening line not visible here)
        # debug-domain reset, forwarded into TestIssuerInternal
        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        # (excerpt gap: the `if self.pll_en:`-style guard the full file
        #  places around the PLL signals is not visible here)
        self.pll_test_o = Signal(reset_less=True)
        self.pll_vco_o = Signal(reset_less=True)
        self.clk_sel_i = Signal(2, reset_less=True)
        self.ref_clk = ClockSignal()  # can't rename it but that's ok
        self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        # (excerpt gap: the `m = Module()` / comb bindings at the top of
        #  this method are not visible in this chunk)
        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        # (excerpt gap: the `if self.pll_en:`-style guard around the PLL
        #  wiring below is not visible here)
        # ClockSelect runs at PLL output internal clock rate
        m.submodules.wrappll = pll = self.pll

        # add clock domains from PLL
        cd_pll = ClockDomain("pllclk")
        # (excerpt gap: the `m.domains += cd_pll`-style registration is
        #  not visible here)

        # PLL clock established.  has the side-effect of running clklsel
        # at the PLL's speed (see DomainRenamer("pllclk") above)
        pllclk = self.pllclk_clk
        comb += pllclk.eq(pll.clk_pll_o)

        # wire up external 24mhz to PLL
        #comb += pll.clk_24_i.eq(self.ref_clk)
        # output 18 mhz PLL test signal, and analog oscillator out
        comb += self.pll_test_o.eq(pll.pll_test_o)
        comb += self.pll_vco_o.eq(pll.pll_vco_o)

        # input to pll clock selection
        comb += pll.clk_sel_i.eq(self.clk_sel_i)

        # now wire up ResetSignals.  don't mind them being in this domain
        pll_rst = ResetSignal("pllclk")
        comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # (excerpt gap: intervening line not visible here)
        comb += intclk.eq(self.ref_clk)
        assert self.ti.core_domain != 'sync', \
            "cannot set core_domain to sync and use pll at the same time"
        # (excerpt gap: the matching else: branch header for the non-PLL
        #  path is not visible; the following lines belong to that path)
        if self.ti.core_domain != 'sync':
            comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)
        # (excerpt gap: the `return m` of elaborate and the `def ports(self):`
        #  line are not visible in this chunk — the return below belongs to
        #  the ports() method of the full file)
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        # all TestIssuerInternal ports plus top-level clock/reset and the
        # PLL-related pins
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        # (excerpt gap: the `if self.pll_en:`-style guard around the PLL
        #  ports below is not visible here)
        ports.append(self.clk_sel_i)
        ports.append(self.pll.clk_24_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
        ports.append(self.pllclk_clk)
        ports.append(self.ref_clk)
        # (excerpt gap: the `return ports` of this method is not visible
        #  in this chunk)
1700 if __name__
== '__main__':
1701 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1707 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1708 imem_ifacetype
='bare_wb',
1713 dut
= TestIssuer(pspec
)
1714 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1716 if len(sys
.argv
) == 1:
1717 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1718 with
open("test_issuer.il", "w") as f
: