not in any way intended for production use. this runs a FSM that:

* reads the Program Counter from StateRegs
* reads an instruction from a fixed-size Test Memory
* issues it to the Simple Core
* waits for it to complete
* does it all over again

the purpose of this module is to verify the functional correctness
of the Function Units in the absolute simplest and clearest possible
way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Return the 32-bit instruction word from the fetch-unit output.

    Parameters:
        f_instr_o: instruction data signal from the fetch unit / test
                   memory; its ``width`` is either 32 or 64 bits.
        pc:        current program counter; only bit 2 is inspected,
                   to choose a 32-bit half of a 64-bit fetch word.

    Returns the 32-bit instruction (the whole signal when the fetch
    port is already 32 bits wide, otherwise the selected word).
    """
    if f_instr_o.width == 32:
        # fetch port is exactly one instruction wide: pass it through
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """Drive *res* from an incoming override or from the State regfile.

    When ``state_i.ok`` is asserted, the externally-supplied
    ``state_i.data`` is used combinatorially.  Otherwise a read of the
    StateRegs register *regnum* is issued, and because the regfile read
    takes one clock, the result is only forwarded to *res* on the
    following cycle (tracked by ``res_ok_delay``).

    NOTE(review): the ``~core_rst`` guard and the Else-branch nesting
    below were reconstructed from a damaged source -- confirm against
    the upstream soc.simple.issuer module.
    """
    comb = m.d.comb
    sync = m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert
    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    # NOTE(review): Switch wrapper and the invert assignments for the
    # _N (negated) cases were reconstructed from a damaged source,
    # guided by the docstring above -- confirm against upstream.
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)  # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)  # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)  # negated form: invert register value
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status
    this is identical to _get_predcr in ISACaller

    * idx selects which bit of each CR field to test (LT/GT/EQ/SO)
    * invert indicates whether the selected bit is to be inverted

    NOTE(review): the Switch wrapper and the invert assignments were
    reconstructed from a damaged source -- verify the polarity against
    ISACaller's _get_predcr before relying on it.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test is SVP64 is to be enabled
169 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
171 # and if regfiles are reduced
172 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
173 (pspec
.regreduce
== True))
175 # and if overlap requested
176 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
177 (pspec
.allow_overlap
== True))
179 # and get the core domain
180 self
.core_domain
= "coresync"
181 if (hasattr(pspec
, "core_domain") and
182 isinstance(pspec
.core_domain
, str)):
183 self
.core_domain
= pspec
.core_domain
185 # JTAG interface. add this right at the start because if it's
186 # added it *modifies* the pspec, by adding enable/disable signals
187 # for parts of the rest of the core
188 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
189 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
190 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
192 # XXX MUST keep this up-to-date with litex, and
193 # soc-cocotb-sim, and err.. all needs sorting out, argh
196 'eint', 'gpio', 'mspi0',
197 # 'mspi1', - disabled for now
198 # 'pwm', 'sd0', - disabled for now
200 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
201 domain
=self
.dbg_domain
)
202 # add signals to pspec to enable/disable icache and dcache
203 # (or data and intstruction wishbone if icache/dcache not included)
204 # https://bugs.libre-soc.org/show_bug.cgi?id=520
205 # TODO: do we actually care if these are not domain-synchronised?
206 # honestly probably not.
207 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
208 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
209 self
.wb_sram_en
= self
.jtag
.wb_sram_en
211 self
.wb_sram_en
= Const(1)
213 # add 4k sram blocks?
214 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
215 pspec
.sram4x4kblock
== True)
219 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
223 # add interrupt controller?
224 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
226 self
.xics_icp
= XICS_ICP()
227 self
.xics_ics
= XICS_ICS()
228 self
.int_level_i
= self
.xics_ics
.int_level_i
230 self
.ext_irq
= Signal()
232 # add GPIO peripheral?
233 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
235 self
.simple_gpio
= SimpleGPIO()
236 self
.gpio_o
= self
.simple_gpio
.gpio_o
238 # main instruction core. suitable for prototyping / demo only
239 self
.core
= core
= NonProductionCore(pspec
)
240 self
.core_rst
= ResetSignal(self
.core_domain
)
242 # instruction decoder. goes into Trap Record
243 #pdecode = create_pdecode()
244 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
245 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
246 opkls
=IssuerDecode2ToOperand
,
247 svp64_en
=self
.svp64_en
,
248 regreduce_en
=self
.regreduce_en
)
249 pdecode
= self
.pdecode2
.dec
252 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
254 self
.update_svstate
= Signal() # set this if updating svstate
255 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
257 # Test Instruction memory
258 if hasattr(core
, "icache"):
259 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
260 # truly dreadful. needs a huge reorg.
261 pspec
.icache
= core
.icache
262 self
.imem
= ConfigFetchUnit(pspec
).fu
265 self
.dbg
= CoreDebug()
266 self
.dbg_rst_i
= Signal(reset_less
=True)
268 # instruction go/monitor
269 self
.pc_o
= Signal(64, reset_less
=True)
270 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
271 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
272 self
.svstate_i
= Data(64, "svstate_i") # ditto
273 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
274 self
.busy_o
= Signal(reset_less
=True)
275 self
.memerr_o
= Signal(reset_less
=True)
277 # STATE regfile read /write ports for PC, MSR, SVSTATE
278 staterf
= self
.core
.regs
.rf
['state']
279 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
280 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
281 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
283 self
.state_w_msr
= staterf
.w_ports
['msr'] # MSR wr
284 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
285 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
287 # DMI interface access
288 intrf
= self
.core
.regs
.rf
['int']
289 crrf
= self
.core
.regs
.rf
['cr']
290 xerrf
= self
.core
.regs
.rf
['xer']
291 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
292 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
293 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
297 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
298 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
300 # hack method of keeping an eye on whether branch/trap set the PC
301 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
302 self
.state_nia
.wen
.name
= 'state_nia_wen'
304 # pulse to synchronize the simulator at instruction end
305 self
.insn_done
= Signal()
307 # indicate any instruction still outstanding, in execution
308 self
.any_busy
= Signal()
311 # store copies of predicate masks
312 self
.srcmask
= Signal(64)
313 self
.dstmask
= Signal(64)
315 def setup_peripherals(self
, m
):
316 comb
, sync
= m
.d
.comb
, m
.d
.sync
318 # okaaaay so the debug module must be in coresync clock domain
319 # but NOT its reset signal. to cope with this, set every single
320 # submodule explicitly in coresync domain, debug and JTAG
321 # in their own one but using *external* reset.
322 csd
= DomainRenamer(self
.core_domain
)
323 dbd
= DomainRenamer(self
.dbg_domain
)
325 m
.submodules
.core
= core
= csd(self
.core
)
326 # this _so_ needs sorting out. ICache is added down inside
327 # LoadStore1 and is already a submodule of LoadStore1
328 if not isinstance(self
.imem
, ICache
):
329 m
.submodules
.imem
= imem
= csd(self
.imem
)
330 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
332 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
333 # TODO: UART2GDB mux, here, from external pin
334 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
335 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
337 cur_state
= self
.cur_state
339 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
341 for i
, sram
in enumerate(self
.sram4k
):
342 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
343 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
345 # XICS interrupt handler
347 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
348 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
349 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
350 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
352 sync
+= cur_state
.eint
.eq(self
.ext_irq
) # connect externally
354 # GPIO test peripheral
356 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
358 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
359 # XXX causes litex ECP5 test to get wrong idea about input and output
360 # (but works with verilator sim *sigh*)
361 # if self.gpio and self.xics:
362 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
364 # instruction decoder
365 pdecode
= create_pdecode()
366 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
368 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
371 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
372 intrf
= self
.core
.regs
.rf
['int']
374 # clock delay power-on reset
375 cd_por
= ClockDomain(reset_less
=True)
376 cd_sync
= ClockDomain()
377 m
.domains
+= cd_por
, cd_sync
378 core_sync
= ClockDomain(self
.core_domain
)
379 if self
.core_domain
!= "sync":
380 m
.domains
+= core_sync
381 if self
.dbg_domain
!= "sync":
382 dbg_sync
= ClockDomain(self
.dbg_domain
)
383 m
.domains
+= dbg_sync
385 ti_rst
= Signal(reset_less
=True)
386 delay
= Signal(range(4), reset
=3)
387 with m
.If(delay
!= 0):
388 m
.d
.por
+= delay
.eq(delay
- 1)
389 comb
+= cd_por
.clk
.eq(ClockSignal())
391 # power-on reset delay
392 core_rst
= ResetSignal(self
.core_domain
)
393 if self
.core_domain
!= "sync":
394 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
395 comb
+= core_rst
.eq(ti_rst
)
397 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
398 comb
+= core_rst
.eq(1)
400 # connect external reset signal to DMI Reset
401 if self
.dbg_domain
!= "sync":
402 dbg_rst
= ResetSignal(self
.dbg_domain
)
403 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
405 # busy/halted signals from core
406 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
407 comb
+= self
.busy_o
.eq(core_busy_o
)
408 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
410 # temporary hack: says "go" immediately for both address gen and ST
412 ldst
= core
.fus
.fus
['ldst0']
413 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
414 # link addr-go direct to rel
415 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
416 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
418 def do_dmi(self
, m
, dbg
):
419 """deals with DMI debug requests
421 currently only provides read requests for the INT regfile, CR and XER
422 it will later also deal with *writing* to these regfiles.
426 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
427 intrf
= self
.core
.regs
.rf
['int']
429 with m
.If(d_reg
.req
): # request for regfile access being made
430 # TODO: error-check this
431 # XXX should this be combinatorial? sync better?
433 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
435 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
436 comb
+= self
.int_r
.ren
.eq(1)
437 d_reg_delay
= Signal()
438 sync
+= d_reg_delay
.eq(d_reg
.req
)
439 with m
.If(d_reg_delay
):
440 # data arrives one clock later
441 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
442 comb
+= d_reg
.ack
.eq(1)
444 # sigh same thing for CR debug
445 with m
.If(d_cr
.req
): # request for regfile access being made
446 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
447 d_cr_delay
= Signal()
448 sync
+= d_cr_delay
.eq(d_cr
.req
)
449 with m
.If(d_cr_delay
):
450 # data arrives one clock later
451 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
452 comb
+= d_cr
.ack
.eq(1)
455 with m
.If(d_xer
.req
): # request for regfile access being made
456 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
457 d_xer_delay
= Signal()
458 sync
+= d_xer_delay
.eq(d_xer
.req
)
459 with m
.If(d_xer_delay
):
460 # data arrives one clock later
461 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
462 comb
+= d_xer
.ack
.eq(1)
464 def tb_dec_fsm(self
, m
, spr_dec
):
467 this is a FSM for updating either dec or tb. it runs alternately
468 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
469 value to DEC, however the regfile has "passthrough" on it so this
472 see v3.0B p1097-1099 for Timeer Resource and p1065 and p1076
475 comb
, sync
= m
.d
.comb
, m
.d
.sync
476 fast_rf
= self
.core
.regs
.rf
['fast']
477 fast_r_dectb
= fast_rf
.r_ports
['issue'] # DEC/TB
478 fast_w_dectb
= fast_rf
.w_ports
['issue'] # DEC/TB
482 # initiates read of current DEC
483 with m
.State("DEC_READ"):
484 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.DEC
)
485 comb
+= fast_r_dectb
.ren
.eq(1)
488 # waits for DEC read to arrive (1 cycle), updates with new value
489 with m
.State("DEC_WRITE"):
491 # TODO: MSR.LPCR 32-bit decrement mode
492 comb
+= new_dec
.eq(fast_r_dectb
.o_data
- 1)
493 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.DEC
)
494 comb
+= fast_w_dectb
.wen
.eq(1)
495 comb
+= fast_w_dectb
.i_data
.eq(new_dec
)
496 sync
+= spr_dec
.eq(new_dec
) # copy into cur_state for decoder
499 # initiates read of current TB
500 with m
.State("TB_READ"):
501 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.TB
)
502 comb
+= fast_r_dectb
.ren
.eq(1)
505 # waits for read TB to arrive, initiates write of current TB
506 with m
.State("TB_WRITE"):
508 comb
+= new_tb
.eq(fast_r_dectb
.o_data
+ 1)
509 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.TB
)
510 comb
+= fast_w_dectb
.wen
.eq(1)
511 comb
+= fast_w_dectb
.i_data
.eq(new_tb
)
516 def elaborate(self
, platform
):
519 comb
, sync
= m
.d
.comb
, m
.d
.sync
520 cur_state
= self
.cur_state
521 pdecode2
= self
.pdecode2
524 # set up peripherals and core
525 core_rst
= self
.core_rst
526 self
.setup_peripherals(m
)
528 # reset current state if core reset requested
530 m
.d
.sync
+= self
.cur_state
.eq(0)
532 # check halted condition: requested PC to execute matches DMI stop addr
533 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
536 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
538 comb
+= dbg
.core_stopped_i
.eq(1)
539 comb
+= dbg
.terminate_i
.eq(1)
541 # PC and instruction from I-Memory
542 comb
+= self
.pc_o
.eq(cur_state
.pc
)
543 self
.pc_changed
= Signal() # note write to PC
544 self
.msr_changed
= Signal() # note write to MSR
545 self
.sv_changed
= Signal() # note write to SVSTATE
547 # read state either from incoming override or from regfile
548 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
549 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
551 self
.state_r_msr
, StateRegs
.MSR
)
552 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
554 self
.state_r_pc
, StateRegs
.PC
)
555 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
556 "svstate", # read SVSTATE
557 self
.state_r_sv
, StateRegs
.SVSTATE
)
559 # don't write pc every cycle
560 comb
+= self
.state_w_pc
.wen
.eq(0)
561 comb
+= self
.state_w_pc
.i_data
.eq(0)
563 # connect up debug state. note "combinatorially same" below,
564 # this is a bit naff, passing state over in the dbg class, but
565 # because it is combinatorial it achieves the desired goal
566 comb
+= dbg
.state
.eq(state
)
568 # this bit doesn't have to be in the FSM: connect up to read
569 # regfiles on demand from DMI
572 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
573 # (which uses that in PowerDecoder2 to raise 0x900 exception)
574 self
.tb_dec_fsm(m
, cur_state
.dec
)
576 # while stopped, allow updating the MSR, PC and SVSTATE.
577 # these are mainly for debugging purposes (including DMI/JTAG)
578 with m
.If(dbg
.core_stopped_i
):
579 with m
.If(self
.pc_i
.ok
):
580 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
581 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
582 sync
+= self
.pc_changed
.eq(1)
583 with m
.If(self
.msr_i
.ok
):
584 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
585 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
586 sync
+= self
.msr_changed
.eq(1)
587 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
588 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
589 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
590 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
591 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
592 sync
+= self
.sv_changed
.eq(1)
597 yield from self
.pc_i
.ports()
598 yield from self
.msr_i
.ports()
601 yield from self
.core
.ports()
602 yield from self
.imem
.ports()
603 yield self
.core_bigendian_i
609 def external_ports(self
):
610 ports
= self
.pc_i
.ports()
611 ports
= self
.msr_i
.ports()
612 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
616 ports
+= list(self
.jtag
.external_ports())
618 # don't add DMI if JTAG is enabled
619 ports
+= list(self
.dbg
.dmi
.ports())
621 ports
+= list(self
.imem
.ibus
.fields
.values())
622 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
625 for sram
in self
.sram4k
:
626 ports
+= list(sram
.bus
.fields
.values())
629 ports
+= list(self
.xics_icp
.bus
.fields
.values())
630 ports
+= list(self
.xics_ics
.bus
.fields
.values())
631 ports
.append(self
.int_level_i
)
633 ports
.append(self
.ext_irq
)
636 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
637 ports
.append(self
.gpio_o
)
646 # Fetch Finite State Machine.
647 # WARNING: there are currently DriverConflicts but it's actually working.
648 # TODO, here: everything that is global in nature, information from the
649 # main TestIssuerInternal, needs to move to either ispec() or ospec().
650 # not only that: TestIssuerInternal.imem can entirely move into here
651 # because imem is only ever accessed inside the FetchFSM.
652 class FetchFSM(ControlBase
):
653 def __init__(self
, allow_overlap
, svp64_en
, imem
, core_rst
,
655 dbg
, core
, svstate
, nia
, is_svp64_mode
):
656 self
.allow_overlap
= allow_overlap
657 self
.svp64_en
= svp64_en
659 self
.core_rst
= core_rst
660 self
.pdecode2
= pdecode2
661 self
.cur_state
= cur_state
664 self
.svstate
= svstate
666 self
.is_svp64_mode
= is_svp64_mode
668 # set up pipeline ControlBase and allocate i/o specs
669 # (unusual: normally done by the Pipeline API)
670 super().__init
__(stage
=self
)
671 self
.p
.i_data
, self
.n
.o_data
= self
.new_specs(None)
672 self
.i
, self
.o
= self
.p
.i_data
, self
.n
.o_data
674 # next 3 functions are Stage API Compliance
675 def setup(self
, m
, i
):
684 def elaborate(self
, platform
):
687 this FSM performs fetch of raw instruction data, partial-decodes
688 it 32-bit at a time to detect SVP64 prefixes, and will optionally
689 read a 2nd 32-bit quantity if that occurs.
691 m
= super().elaborate(platform
)
697 svstate
= self
.svstate
699 is_svp64_mode
= self
.is_svp64_mode
700 fetch_pc_o_ready
= self
.p
.o_ready
701 fetch_pc_i_valid
= self
.p
.i_valid
702 fetch_insn_o_valid
= self
.n
.o_valid
703 fetch_insn_i_ready
= self
.n
.i_ready
707 pdecode2
= self
.pdecode2
708 cur_state
= self
.cur_state
709 dec_opcode_o
= pdecode2
.dec
.raw_opcode_in
# raw opcode
711 # also note instruction fetch failed
712 if hasattr(core
, "icache"):
713 fetch_failed
= core
.icache
.i_out
.fetch_failed
716 fetch_failed
= Const(0, 1)
719 # set priv / virt mode on I-Cache, sigh
720 if isinstance(self
.imem
, ICache
):
721 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
722 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
724 with m
.FSM(name
='fetch_fsm'):
727 with m
.State("IDLE"):
728 # fetch allowed if not failed and stopped but not stepping
729 # (see dmi.py for how core_stop_o is generated)
730 with m
.If(~fetch_failed
& ~dbg
.core_stop_o
):
731 comb
+= fetch_pc_o_ready
.eq(1)
732 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
734 # instruction allowed to go: start by reading the PC
735 # capture the PC and also drop it into Insn Memory
736 # we have joined a pair of combinatorial memory
737 # lookups together. this is Generally Bad.
738 comb
+= self
.imem
.a_pc_i
.eq(pc
)
739 comb
+= self
.imem
.a_i_valid
.eq(1)
740 comb
+= self
.imem
.f_i_valid
.eq(1)
741 # transfer state to output
742 sync
+= cur_state
.pc
.eq(pc
)
743 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
744 sync
+= cur_state
.msr
.eq(msr
) # and msr
746 m
.next
= "INSN_READ" # move to "wait for bus" phase
748 # dummy pause to find out why simulation is not keeping up
749 with m
.State("INSN_READ"):
750 # when using "single-step" mode, checking dbg.stopping_o
751 # prevents progress. allow fetch to proceed once started
753 #if self.allow_overlap:
754 # stopping = dbg.stopping_o
756 # stopping: jump back to idle
759 with m
.If(self
.imem
.f_busy_o
&
760 ~pdecode2
.instr_fault
): # zzz...
761 # busy but not fetch failed: stay in wait-read
762 comb
+= self
.imem
.a_pc_i
.eq(pc
)
763 comb
+= self
.imem
.a_i_valid
.eq(1)
764 comb
+= self
.imem
.f_i_valid
.eq(1)
766 # not busy (or fetch failed!): instruction fetched
767 # when fetch failed, the instruction gets ignored
769 if hasattr(core
, "icache"):
770 # blech, icache returns actual instruction
771 insn
= self
.imem
.f_instr_o
773 # but these return raw memory
774 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
777 # decode the SVP64 prefix, if any
778 comb
+= svp64
.raw_opcode_in
.eq(insn
)
779 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
780 # pass the decoded prefix (if any) to PowerDecoder2
781 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
782 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
783 # remember whether this is a prefixed instruction,
784 # so the FSM can readily loop when VL==0
785 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
786 # calculate the address of the following instruction
787 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
788 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
789 with m
.If(~svp64
.is_svp64_mode
):
790 # with no prefix, store the instruction
791 # and hand it directly to the next FSM
792 sync
+= dec_opcode_o
.eq(insn
)
793 m
.next
= "INSN_READY"
795 # fetch the rest of the instruction from memory
796 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
797 comb
+= self
.imem
.a_i_valid
.eq(1)
798 comb
+= self
.imem
.f_i_valid
.eq(1)
799 m
.next
= "INSN_READ2"
801 # not SVP64 - 32-bit only
802 sync
+= nia
.eq(cur_state
.pc
+ 4)
803 sync
+= dec_opcode_o
.eq(insn
)
804 m
.next
= "INSN_READY"
806 with m
.State("INSN_READ2"):
807 with m
.If(self
.imem
.f_busy_o
): # zzz...
808 # busy: stay in wait-read
809 comb
+= self
.imem
.a_i_valid
.eq(1)
810 comb
+= self
.imem
.f_i_valid
.eq(1)
812 # not busy: instruction fetched
813 if hasattr(core
, "icache"):
814 # blech, icache returns actual instruction
815 insn
= self
.imem
.f_instr_o
817 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
818 sync
+= dec_opcode_o
.eq(insn
)
819 m
.next
= "INSN_READY"
820 # TODO: probably can start looking at pdecode2.rm_dec
821 # here or maybe even in INSN_READ state, if svp64_mode
822 # detected, in order to trigger - and wait for - the
825 pmode
= pdecode2
.rm_dec
.predmode
827 if pmode != SVP64PredMode.ALWAYS.value:
828 fire predicate loading FSM and wait before
831 sync += self.srcmask.eq(-1) # set to all 1s
832 sync += self.dstmask.eq(-1) # set to all 1s
833 m.next = "INSN_READY"
836 with m
.State("INSN_READY"):
837 # hand over the instruction, to be decoded
838 comb
+= fetch_insn_o_valid
.eq(1)
839 with m
.If(fetch_insn_i_ready
):
842 # whatever was done above, over-ride it if core reset is held
843 with m
.If(self
.core_rst
):
849 class TestIssuerInternal(TestIssuerBase
):
850 """TestIssuer - reads instructions from TestMemory and issues them
852 efficiency and speed is not the main goal here: functional correctness
853 and code clarity is. optimisations (which almost 100% interfere with
854 easy understanding) come later.
857 def fetch_predicate_fsm(self
, m
,
858 pred_insn_i_valid
, pred_insn_o_ready
,
859 pred_mask_o_valid
, pred_mask_i_ready
):
860 """fetch_predicate_fsm - obtains (constructs in the case of CR)
861 src/dest predicate masks
863 https://bugs.libre-soc.org/show_bug.cgi?id=617
864 the predicates can be read here, by using IntRegs r_ports['pred']
865 or CRRegs r_ports['pred']. in the case of CRs it will have to
866 be done through multiple reads, extracting one relevant at a time.
867 later, a faster way would be to use the 32-bit-wide CR port but
868 this is more complex decoding, here. equivalent code used in
869 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
871 note: this ENTIRE FSM is not to be called when svp64 is disabled
875 pdecode2
= self
.pdecode2
876 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
877 predmode
= rm_dec
.predmode
878 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
879 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
880 # get src/dst step, so we can skip already used mask bits
881 cur_state
= self
.cur_state
882 srcstep
= cur_state
.svstate
.srcstep
883 dststep
= cur_state
.svstate
.dststep
884 cur_vl
= cur_state
.svstate
.vl
887 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
888 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
889 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
890 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
892 # store fetched masks, for either intpred or crpred
893 # when src/dst step is not zero, the skipped mask bits need to be
894 # shifted-out, before actually storing them in src/dest mask
895 new_srcmask
= Signal(64, reset_less
=True)
896 new_dstmask
= Signal(64, reset_less
=True)
898 with m
.FSM(name
="fetch_predicate"):
900 with m
.State("FETCH_PRED_IDLE"):
901 comb
+= pred_insn_o_ready
.eq(1)
902 with m
.If(pred_insn_i_valid
):
903 with m
.If(predmode
== SVP64PredMode
.INT
):
904 # skip fetching destination mask register, when zero
906 sync
+= new_dstmask
.eq(-1)
907 # directly go to fetch source mask register
908 # guaranteed not to be zero (otherwise predmode
909 # would be SVP64PredMode.ALWAYS, not INT)
910 comb
+= int_pred
.addr
.eq(sregread
)
911 comb
+= int_pred
.ren
.eq(1)
912 m
.next
= "INT_SRC_READ"
913 # fetch destination predicate register
915 comb
+= int_pred
.addr
.eq(dregread
)
916 comb
+= int_pred
.ren
.eq(1)
917 m
.next
= "INT_DST_READ"
918 with m
.Elif(predmode
== SVP64PredMode
.CR
):
919 # go fetch masks from the CR register file
920 sync
+= new_srcmask
.eq(0)
921 sync
+= new_dstmask
.eq(0)
924 sync
+= self
.srcmask
.eq(-1)
925 sync
+= self
.dstmask
.eq(-1)
926 m
.next
= "FETCH_PRED_DONE"
928 with m
.State("INT_DST_READ"):
929 # store destination mask
930 inv
= Repl(dinvert
, 64)
932 # set selected mask bit for 1<<r3 mode
933 dst_shift
= Signal(range(64))
934 comb
+= dst_shift
.eq(self
.int_pred
.o_data
& 0b111111)
935 sync
+= new_dstmask
.eq(1 << dst_shift
)
937 # invert mask if requested
938 sync
+= new_dstmask
.eq(self
.int_pred
.o_data ^ inv
)
939 # skip fetching source mask register, when zero
941 sync
+= new_srcmask
.eq(-1)
942 m
.next
= "FETCH_PRED_SHIFT_MASK"
943 # fetch source predicate register
945 comb
+= int_pred
.addr
.eq(sregread
)
946 comb
+= int_pred
.ren
.eq(1)
947 m
.next
= "INT_SRC_READ"
949 with m
.State("INT_SRC_READ"):
951 inv
= Repl(sinvert
, 64)
953 # set selected mask bit for 1<<r3 mode
954 src_shift
= Signal(range(64))
955 comb
+= src_shift
.eq(self
.int_pred
.o_data
& 0b111111)
956 sync
+= new_srcmask
.eq(1 << src_shift
)
958 # invert mask if requested
959 sync
+= new_srcmask
.eq(self
.int_pred
.o_data ^ inv
)
960 m
.next
= "FETCH_PRED_SHIFT_MASK"
962 # fetch masks from the CR register file
963 # implements the following loop:
964 # idx, inv = get_predcr(mask)
966 # for cr_idx in range(vl):
967 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
969 # mask |= 1 << cr_idx
971 with m
.State("CR_READ"):
972 # CR index to be read, which will be ready by the next cycle
973 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
974 # submit the read operation to the regfile
975 with m
.If(cr_idx
!= cur_vl
):
976 # the CR read port is unary ...
978 # ... in MSB0 convention ...
979 # ren = 1 << (7 - cr_idx)
980 # ... and with an offset:
981 # ren = 1 << (7 - off - cr_idx)
982 idx
= SVP64CROffs
.CRPred
+ cr_idx
983 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
984 # signal data valid in the next cycle
985 cr_read
= Signal(reset_less
=True)
986 sync
+= cr_read
.eq(1)
987 # load the next index
988 sync
+= cr_idx
.eq(cr_idx
+ 1)
991 sync
+= cr_read
.eq(0)
993 m
.next
= "FETCH_PRED_SHIFT_MASK"
995 # compensate for the one cycle delay on the regfile
996 cur_cr_idx
= Signal
.like(cur_vl
)
997 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
998 # read the CR field, select the appropriate bit
1002 comb
+= cr_field
.eq(cr_pred
.o_data
)
1003 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1)
1005 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1)
1007 # set the corresponding mask bit
1008 bit_to_set
= Signal
.like(self
.srcmask
)
1009 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
1011 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
1013 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
1015 with m
.State("FETCH_PRED_SHIFT_MASK"):
1016 # shift-out skipped mask bits
1017 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
1018 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
1019 m
.next
= "FETCH_PRED_DONE"
1021 with m
.State("FETCH_PRED_DONE"):
1022 comb
+= pred_mask_o_valid
.eq(1)
1023 with m
.If(pred_mask_i_ready
):
1024 m
.next
= "FETCH_PRED_IDLE"
1026 def issue_fsm(self
, m
, core
, nia
,
1027 dbg
, core_rst
, is_svp64_mode
,
1028 fetch_pc_o_ready
, fetch_pc_i_valid
,
1029 fetch_insn_o_valid
, fetch_insn_i_ready
,
1030 pred_insn_i_valid
, pred_insn_o_ready
,
1031 pred_mask_o_valid
, pred_mask_i_ready
,
1032 exec_insn_i_valid
, exec_insn_o_ready
,
1033 exec_pc_o_valid
, exec_pc_i_ready
):
1036 decode / issue FSM. this interacts with the "fetch" FSM
1037 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
1038 (outgoing). also interacts with the "execute" FSM
1039 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
1041 SVP64 RM prefixes have already been set up by the
1042 "fetch" phase, so execute is fairly straightforward.
1047 pdecode2
= self
.pdecode2
1048 cur_state
= self
.cur_state
1049 new_svstate
= self
.new_svstate
1052 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
1054 # for updating svstate (things like srcstep etc.)
1055 comb
+= new_svstate
.eq(cur_state
.svstate
)
1057 # precalculate srcstep+1 and dststep+1
1058 cur_srcstep
= cur_state
.svstate
.srcstep
1059 cur_dststep
= cur_state
.svstate
.dststep
1060 next_srcstep
= Signal
.like(cur_srcstep
)
1061 next_dststep
= Signal
.like(cur_dststep
)
1062 comb
+= next_srcstep
.eq(cur_state
.svstate
.srcstep
+1)
1063 comb
+= next_dststep
.eq(cur_state
.svstate
.dststep
+1)
1065 # note if an exception happened. in a pipelined or OoO design
1066 # this needs to be accompanied by "shadowing" (or stalling)
1067 exc_happened
= self
.core
.o
.exc_happened
1068 # also note instruction fetch failed
1069 if hasattr(core
, "icache"):
1070 fetch_failed
= core
.icache
.i_out
.fetch_failed
1072 # set to fault in decoder
1073 # update (highest priority) instruction fault
1074 rising_fetch_failed
= rising_edge(m
, fetch_failed
)
1075 with m
.If(rising_fetch_failed
):
1076 sync
+= pdecode2
.instr_fault
.eq(1)
1078 fetch_failed
= Const(0, 1)
1079 flush_needed
= False
1081 with m
.FSM(name
="issue_fsm"):
1083 # sync with the "fetch" phase which is reading the instruction
1084 # at this point, there is no instruction running, that
1085 # could inadvertently update the PC.
1086 with m
.State("ISSUE_START"):
1087 # reset instruction fault
1088 sync
+= pdecode2
.instr_fault
.eq(0)
1089 # wait on "core stop" release, before next fetch
1090 # need to do this here, in case we are in a VL==0 loop
1091 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
1092 comb
+= fetch_pc_i_valid
.eq(1) # tell fetch to start
1093 with m
.If(fetch_pc_o_ready
): # fetch acknowledged us
1094 m
.next
= "INSN_WAIT"
1096 # tell core it's stopped, and acknowledge debug handshake
1097 comb
+= dbg
.core_stopped_i
.eq(1)
1098 # while stopped, allow updating SVSTATE
1099 with m
.If(self
.svstate_i
.ok
):
1100 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
1101 comb
+= self
.update_svstate
.eq(1)
1102 sync
+= self
.sv_changed
.eq(1)
1104 # wait for an instruction to arrive from Fetch
1105 with m
.State("INSN_WAIT"):
1106 # when using "single-step" mode, checking dbg.stopping_o
1107 # prevents progress. allow issue to proceed once started
1109 #if self.allow_overlap:
1110 # stopping = dbg.stopping_o
1111 with m
.If(stopping
):
1112 # stopping: jump back to idle
1113 m
.next
= "ISSUE_START"
1115 # request the icache to stop asserting "failed"
1116 comb
+= core
.icache
.flush_in
.eq(1)
1117 # stop instruction fault
1118 sync
+= pdecode2
.instr_fault
.eq(0)
1120 comb
+= fetch_insn_i_ready
.eq(1)
1121 with m
.If(fetch_insn_o_valid
):
1122 # loop into ISSUE_START if it's a SVP64 instruction
1123 # and VL == 0. this because VL==0 is a for-loop
1124 # from 0 to 0 i.e. always, always a NOP.
1125 cur_vl
= cur_state
.svstate
.vl
1126 with m
.If(is_svp64_mode
& (cur_vl
== 0)):
1127 # update the PC before fetching the next instruction
1128 # since we are in a VL==0 loop, no instruction was
1129 # executed that we could be overwriting
1130 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1131 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1132 comb
+= self
.insn_done
.eq(1)
1133 m
.next
= "ISSUE_START"
1136 m
.next
= "PRED_START" # fetching predicate
1138 m
.next
= "DECODE_SV" # skip predication
1140 with m
.State("PRED_START"):
1141 comb
+= pred_insn_i_valid
.eq(1) # tell fetch_pred to start
1142 with m
.If(pred_insn_o_ready
): # fetch_pred acknowledged us
1143 m
.next
= "MASK_WAIT"
1145 with m
.State("MASK_WAIT"):
1146 comb
+= pred_mask_i_ready
.eq(1) # ready to receive the masks
1147 with m
.If(pred_mask_o_valid
): # predication masks are ready
1148 m
.next
= "PRED_SKIP"
1150 # skip zeros in predicate
1151 with m
.State("PRED_SKIP"):
1152 with m
.If(~is_svp64_mode
):
1153 m
.next
= "DECODE_SV" # nothing to do
1156 pred_src_zero
= pdecode2
.rm_dec
.pred_sz
1157 pred_dst_zero
= pdecode2
.rm_dec
.pred_dz
1159 # new srcstep, after skipping zeros
1160 skip_srcstep
= Signal
.like(cur_srcstep
)
1161 # value to be added to the current srcstep
1162 src_delta
= Signal
.like(cur_srcstep
)
1163 # add leading zeros to srcstep, if not in zero mode
1164 with m
.If(~pred_src_zero
):
1165 # priority encoder (count leading zeros)
1166 # append guard bit, in case the mask is all zeros
1167 pri_enc_src
= PriorityEncoder(65)
1168 m
.submodules
.pri_enc_src
= pri_enc_src
1169 comb
+= pri_enc_src
.i
.eq(Cat(self
.srcmask
,
1171 comb
+= src_delta
.eq(pri_enc_src
.o
)
1172 # apply delta to srcstep
1173 comb
+= skip_srcstep
.eq(cur_srcstep
+ src_delta
)
1174 # shift-out all leading zeros from the mask
1175 # plus the leading "one" bit
1176 # TODO count leading zeros and shift-out the zero
1177 # bits, in the same step, in hardware
1178 sync
+= self
.srcmask
.eq(self
.srcmask
>> (src_delta
+1))
1180 # same as above, but for dststep
1181 skip_dststep
= Signal
.like(cur_dststep
)
1182 dst_delta
= Signal
.like(cur_dststep
)
1183 with m
.If(~pred_dst_zero
):
1184 pri_enc_dst
= PriorityEncoder(65)
1185 m
.submodules
.pri_enc_dst
= pri_enc_dst
1186 comb
+= pri_enc_dst
.i
.eq(Cat(self
.dstmask
,
1188 comb
+= dst_delta
.eq(pri_enc_dst
.o
)
1189 comb
+= skip_dststep
.eq(cur_dststep
+ dst_delta
)
1190 sync
+= self
.dstmask
.eq(self
.dstmask
>> (dst_delta
+1))
1192 # TODO: initialize mask[VL]=1 to avoid passing past VL
1193 with m
.If((skip_srcstep
>= cur_vl
) |
1194 (skip_dststep
>= cur_vl
)):
1195 # end of VL loop. Update PC and reset src/dst step
1196 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1197 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1198 comb
+= new_svstate
.srcstep
.eq(0)
1199 comb
+= new_svstate
.dststep
.eq(0)
1200 comb
+= self
.update_svstate
.eq(1)
1201 # synchronize with the simulator
1202 comb
+= self
.insn_done
.eq(1)
1204 m
.next
= "ISSUE_START"
1206 # update new src/dst step
1207 comb
+= new_svstate
.srcstep
.eq(skip_srcstep
)
1208 comb
+= new_svstate
.dststep
.eq(skip_dststep
)
1209 comb
+= self
.update_svstate
.eq(1)
1211 m
.next
= "DECODE_SV"
1213 # pass predicate mask bits through to satellite decoders
1214 # TODO: for SIMD this will be *multiple* bits
1215 sync
+= core
.i
.sv_pred_sm
.eq(self
.srcmask
[0])
1216 sync
+= core
.i
.sv_pred_dm
.eq(self
.dstmask
[0])
1218 # after src/dst step have been updated, we are ready
1219 # to decode the instruction
1220 with m
.State("DECODE_SV"):
1221 # decode the instruction
1222 with m
.If(~fetch_failed
):
1223 sync
+= pdecode2
.instr_fault
.eq(0)
1224 sync
+= core
.i
.e
.eq(pdecode2
.e
)
1225 sync
+= core
.i
.state
.eq(cur_state
)
1226 sync
+= core
.i
.raw_insn_i
.eq(dec_opcode_i
)
1227 sync
+= core
.i
.bigendian_i
.eq(self
.core_bigendian_i
)
1229 sync
+= core
.i
.sv_rm
.eq(pdecode2
.sv_rm
)
1230 # set RA_OR_ZERO detection in satellite decoders
1231 sync
+= core
.i
.sv_a_nz
.eq(pdecode2
.sv_a_nz
)
1232 # and svp64 detection
1233 sync
+= core
.i
.is_svp64_mode
.eq(is_svp64_mode
)
1234 # and svp64 bit-rev'd ldst mode
1235 ldst_dec
= pdecode2
.use_svp64_ldst_dec
1236 sync
+= core
.i
.use_svp64_ldst_dec
.eq(ldst_dec
)
1237 # after decoding, reset any previous exception condition,
1238 # allowing it to be set again during the next execution
1239 sync
+= pdecode2
.ldst_exc
.eq(0)
1241 m
.next
= "INSN_EXECUTE" # move to "execute"
1243 # handshake with execution FSM, move to "wait" once acknowledged
1244 with m
.State("INSN_EXECUTE"):
1245 # when using "single-step" mode, checking dbg.stopping_o
1246 # prevents progress. allow execute to proceed once started
1248 #if self.allow_overlap:
1249 # stopping = dbg.stopping_o
1250 with m
.If(stopping
):
1251 # stopping: jump back to idle
1252 m
.next
= "ISSUE_START"
1254 # request the icache to stop asserting "failed"
1255 comb
+= core
.icache
.flush_in
.eq(1)
1256 # stop instruction fault
1257 sync
+= pdecode2
.instr_fault
.eq(0)
1259 comb
+= exec_insn_i_valid
.eq(1) # trigger execute
1260 with m
.If(exec_insn_o_ready
): # execute acknowledged us
1261 m
.next
= "EXECUTE_WAIT"
1263 with m
.State("EXECUTE_WAIT"):
1264 comb
+= exec_pc_i_ready
.eq(1)
1265 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
1266 # the exception info needs to be blatted into
1267 # pdecode.ldst_exc, and the instruction "re-run".
1268 # when ldst_exc.happened is set, the PowerDecoder2
1269 # reacts very differently: it re-writes the instruction
1270 # with a "trap" (calls PowerDecoder2.trap()) which
1271 # will *overwrite* whatever was requested and jump the
1272 # PC to the exception address, as well as alter MSR.
1273 # nothing else needs to be done other than to note
1274 # the change of PC and MSR (and, later, SVSTATE)
1275 with m
.If(exc_happened
):
1276 mmu
= core
.fus
.get_exc("mmu0")
1277 ldst
= core
.fus
.get_exc("ldst0")
1279 with m
.If(fetch_failed
):
1280 # instruction fetch: exception is from MMU
1281 # reset instr_fault (highest priority)
1282 sync
+= pdecode2
.ldst_exc
.eq(mmu
)
1283 sync
+= pdecode2
.instr_fault
.eq(0)
1285 # request icache to stop asserting "failed"
1286 comb
+= core
.icache
.flush_in
.eq(1)
1287 with m
.If(~fetch_failed
):
1288 # otherwise assume it was a LDST exception
1289 sync
+= pdecode2
.ldst_exc
.eq(ldst
)
1291 with m
.If(exec_pc_o_valid
):
1293 # was this the last loop iteration?
1295 cur_vl
= cur_state
.svstate
.vl
1296 comb
+= is_last
.eq(next_srcstep
== cur_vl
)
1298 with m
.If(pdecode2
.instr_fault
):
1299 # reset instruction fault, try again
1300 sync
+= pdecode2
.instr_fault
.eq(0)
1301 m
.next
= "ISSUE_START"
1303 # return directly to Decode if Execute generated an
1305 with m
.Elif(pdecode2
.ldst_exc
.happened
):
1306 m
.next
= "DECODE_SV"
1308 # if MSR, PC or SVSTATE were changed by the previous
1309 # instruction, go directly back to Fetch, without
1310 # updating either MSR PC or SVSTATE
1311 with m
.Elif(self
.msr_changed | self
.pc_changed |
1313 m
.next
= "ISSUE_START"
1315 # also return to Fetch, when no output was a vector
1316 # (regardless of SRCSTEP and VL), or when the last
1317 # instruction was really the last one of the VL loop
1318 with m
.Elif((~pdecode2
.loop_continue
) | is_last
):
1319 # before going back to fetch, update the PC state
1320 # register with the NIA.
1321 # ok here we are not reading the branch unit.
1322 # TODO: this just blithely overwrites whatever
1323 # pipeline updated the PC
1324 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1325 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1326 # reset SRCSTEP before returning to Fetch
1328 with m
.If(pdecode2
.loop_continue
):
1329 comb
+= new_svstate
.srcstep
.eq(0)
1330 comb
+= new_svstate
.dststep
.eq(0)
1331 comb
+= self
.update_svstate
.eq(1)
1333 comb
+= new_svstate
.srcstep
.eq(0)
1334 comb
+= new_svstate
.dststep
.eq(0)
1335 comb
+= self
.update_svstate
.eq(1)
1336 m
.next
= "ISSUE_START"
1338 # returning to Execute? then, first update SRCSTEP
1340 comb
+= new_svstate
.srcstep
.eq(next_srcstep
)
1341 comb
+= new_svstate
.dststep
.eq(next_dststep
)
1342 comb
+= self
.update_svstate
.eq(1)
1343 # return to mask skip loop
1344 m
.next
= "PRED_SKIP"
1347 # check if svstate needs updating: if so, write it to State Regfile
1348 with m
.If(self
.update_svstate
):
1349 sync
+= cur_state
.svstate
.eq(self
.new_svstate
) # for next clock
1351 def execute_fsm(self
, m
, core
,
1352 exec_insn_i_valid
, exec_insn_o_ready
,
1353 exec_pc_o_valid
, exec_pc_i_ready
):
1356 execute FSM. this interacts with the "issue" FSM
1357 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
1358 (outgoing). SVP64 RM prefixes have already been set up by the
1359 "issue" phase, so execute is fairly straightforward.
1365 pdecode2
= self
.pdecode2
1368 core_busy_o
= core
.n
.o_data
.busy_o
# core is busy
1369 core_ivalid_i
= core
.p
.i_valid
# instruction is valid
1371 if hasattr(core
, "icache"):
1372 fetch_failed
= core
.icache
.i_out
.fetch_failed
1374 fetch_failed
= Const(0, 1)
1376 with m
.FSM(name
="exec_fsm"):
1378 # waiting for instruction bus (stays there until not busy)
1379 with m
.State("INSN_START"):
1380 comb
+= exec_insn_o_ready
.eq(1)
1381 with m
.If(exec_insn_i_valid
):
1382 comb
+= core_ivalid_i
.eq(1) # instruction is valid/issued
1383 sync
+= self
.sv_changed
.eq(0)
1384 sync
+= self
.pc_changed
.eq(0)
1385 sync
+= self
.msr_changed
.eq(0)
1386 with m
.If(core
.p
.o_ready
): # only move if accepted
1387 m
.next
= "INSN_ACTIVE" # move to "wait completion"
1389 # instruction started: must wait till it finishes
1390 with m
.State("INSN_ACTIVE"):
1391 # note changes to MSR, PC and SVSTATE
1392 # XXX oops, really must monitor *all* State Regfile write
1393 # ports looking for changes!
1394 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.SVSTATE
)):
1395 sync
+= self
.sv_changed
.eq(1)
1396 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.MSR
)):
1397 sync
+= self
.msr_changed
.eq(1)
1398 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.PC
)):
1399 sync
+= self
.pc_changed
.eq(1)
1400 with m
.If(~core_busy_o
): # instruction done!
1401 comb
+= exec_pc_o_valid
.eq(1)
1402 with m
.If(exec_pc_i_ready
):
1403 # when finished, indicate "done".
1404 # however, if there was an exception, the instruction
1405 # is *not* yet done. this is an implementation
1406 # detail: we choose to implement exceptions by
1407 # taking the exception information from the LDST
1408 # unit, putting that *back* into the PowerDecoder2,
1409 # and *re-running the entire instruction*.
1410 # if we erroneously indicate "done" here, it is as if
1411 # there were *TWO* instructions:
1412 # 1) the failed LDST 2) a TRAP.
1413 with m
.If(~pdecode2
.ldst_exc
.happened
&
1414 ~pdecode2
.instr_fault
):
1415 comb
+= self
.insn_done
.eq(1)
1416 m
.next
= "INSN_START" # back to fetch
1417 # terminate returns directly to INSN_START
1418 with m
.If(dbg
.terminate_i
):
1419 # comb += self.insn_done.eq(1) - no because it's not
1420 m
.next
= "INSN_START" # back to fetch
1422 def elaborate(self
, platform
):
1423 m
= super().elaborate(platform
)
1425 comb
, sync
= m
.d
.comb
, m
.d
.sync
1426 cur_state
= self
.cur_state
1427 pdecode2
= self
.pdecode2
1431 # set up peripherals and core
1432 core_rst
= self
.core_rst
1434 # indicate to outside world if any FU is still executing
1435 comb
+= self
.any_busy
.eq(core
.n
.o_data
.any_busy_o
) # any FU executing
1437 # address of the next instruction, in the absence of a branch
1438 # depends on the instruction size
1441 # connect up debug signals
1442 with m
.If(core
.o
.core_terminate_o
):
1443 comb
+= dbg
.terminate_i
.eq(1)
1445 # pass the prefix mode from Fetch to Issue, so the latter can loop
1447 is_svp64_mode
= Signal()
1449 # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
1450 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1451 # these are the handshake signals between each
1453 # fetch FSM can run as soon as the PC is valid
1454 fetch_pc_i_valid
= Signal() # Execute tells Fetch "start next read"
1455 fetch_pc_o_ready
= Signal() # Fetch Tells SVSTATE "proceed"
1457 # fetch FSM hands over the instruction to be decoded / issued
1458 fetch_insn_o_valid
= Signal()
1459 fetch_insn_i_ready
= Signal()
1461 # predicate fetch FSM decodes and fetches the predicate
1462 pred_insn_i_valid
= Signal()
1463 pred_insn_o_ready
= Signal()
1465 # predicate fetch FSM delivers the masks
1466 pred_mask_o_valid
= Signal()
1467 pred_mask_i_ready
= Signal()
1469 # issue FSM delivers the instruction to the be executed
1470 exec_insn_i_valid
= Signal()
1471 exec_insn_o_ready
= Signal()
1473 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1474 exec_pc_o_valid
= Signal()
1475 exec_pc_i_ready
= Signal()
1477 # the FSMs here are perhaps unusual in that they detect conditions
1478 # then "hold" information, combinatorially, for the core
1479 # (as opposed to using sync - which would be on a clock's delay)
1480 # this includes the actual opcode, valid flags and so on.
1482 # Fetch, then predicate fetch, then Issue, then Execute.
1483 # Issue is where the VL for-loop # lives. the ready/valid
1484 # signalling is used to communicate between the four.
1487 fetch
= FetchFSM(self
.allow_overlap
, self
.svp64_en
,
1488 self
.imem
, core_rst
, pdecode2
, cur_state
,
1490 dbg
.state
.svstate
, # combinatorially same
1492 m
.submodules
.fetch
= fetch
1493 # connect up in/out data to existing Signals
1494 comb
+= fetch
.p
.i_data
.pc
.eq(dbg
.state
.pc
) # combinatorially same
1495 comb
+= fetch
.p
.i_data
.msr
.eq(dbg
.state
.msr
) # combinatorially same
1496 # and the ready/valid signalling
1497 comb
+= fetch_pc_o_ready
.eq(fetch
.p
.o_ready
)
1498 comb
+= fetch
.p
.i_valid
.eq(fetch_pc_i_valid
)
1499 comb
+= fetch_insn_o_valid
.eq(fetch
.n
.o_valid
)
1500 comb
+= fetch
.n
.i_ready
.eq(fetch_insn_i_ready
)
1502 self
.issue_fsm(m
, core
, nia
,
1503 dbg
, core_rst
, is_svp64_mode
,
1504 fetch_pc_o_ready
, fetch_pc_i_valid
,
1505 fetch_insn_o_valid
, fetch_insn_i_ready
,
1506 pred_insn_i_valid
, pred_insn_o_ready
,
1507 pred_mask_o_valid
, pred_mask_i_ready
,
1508 exec_insn_i_valid
, exec_insn_o_ready
,
1509 exec_pc_o_valid
, exec_pc_i_ready
)
1512 self
.fetch_predicate_fsm(m
,
1513 pred_insn_i_valid
, pred_insn_o_ready
,
1514 pred_mask_o_valid
, pred_mask_i_ready
)
1516 self
.execute_fsm(m
, core
,
1517 exec_insn_i_valid
, exec_insn_o_ready
,
1518 exec_pc_o_valid
, exec_pc_i_ready
)
class TestIssuer(Elaboratable):
    """TestIssuer: wraps TestIssuerInternal with an (optional) PLL and
    the clock-selection wiring needed for ASIC builds.

    NOTE(review): reconstructed from a mangled listing; the `if
    self.pll_en:` guards and the `ports` method header were inferred
    from the embedded line-number gaps — confirm against upstream.
    """

    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # debug reset-in, passed through to the internal issuer
        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        if self.pll_en:  # reconstructed guard — confirm
            self.pll_test_o = Signal(reset_less=True)
            self.pll_vco_o = Signal(reset_less=True)
            self.clk_sel_i = Signal(2, reset_less=True)
            self.ref_clk = ClockSignal()  # can't rename it but that's ok
            self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        m = Module()  # reconstructed — confirm
        comb = m.d.comb

        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        if self.pll_en:  # reconstructed guard — confirm
            # ClockSelect runs at PLL output internal clock rate
            m.submodules.wrappll = pll = self.pll

            # add clock domains from PLL
            cd_pll = ClockDomain("pllclk")
            m.domains += cd_pll  # reconstructed — confirm

            # PLL clock established.  has the side-effect of running clklsel
            # at the PLL's speed (see DomainRenamer("pllclk") above)
            pllclk = self.pllclk_clk
            comb += pllclk.eq(pll.clk_pll_o)

            # wire up external 24mhz to PLL
            #comb += pll.clk_24_i.eq(self.ref_clk)
            # output 18 mhz PLL test signal, and analog oscillator out
            comb += self.pll_test_o.eq(pll.pll_test_o)
            comb += self.pll_vco_o.eq(pll.pll_vco_o)

            # input to pll clock selection
            comb += pll.clk_sel_i.eq(self.clk_sel_i)

            # now wire up ResetSignals.  don't mind them being in this domain
            pll_rst = ResetSignal("pllclk")
            comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        if self.pll_en:  # reconstructed guard — confirm
            comb += intclk.eq(self.ref_clk)
            assert self.ti.core_domain != 'sync', \
                "cannot set core_domain to sync and use pll at the same time"
        else:
            if self.ti.core_domain != 'sync':
                comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)

        return m  # reconstructed — confirm

    def ports(self):
        # reconstructed method header — body matches original listing
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        if self.pll_en:  # reconstructed guard — confirm
            ports.append(self.clk_sel_i)
            ports.append(self.pll.clk_24_i)
            ports.append(self.pll_test_o)
            ports.append(self.pll_vco_o)
            ports.append(self.pllclk_clk)
            ports.append(self.ref_clk)
        return ports  # reconstructed — confirm
1615 if __name__
== '__main__':
1616 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1622 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1623 imem_ifacetype
='bare_wb',
1628 dut
= TestIssuer(pspec
)
1629 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1631 if len(sys
.argv
) == 1:
1632 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1633 with
open("test_issuer.il", "w") as f
: