3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Select the 32-bit instruction word out of a fetch-port output.

    f_instr_o -- instruction-fetch data signal, either 32 or 64 bits wide
    pc        -- program counter; only bit 2 is used, to pick the 32-bit
                 word inside a 64-bit fetch

    Returns a 32-bit nmigen expression holding the instruction.
    """
    if f_instr_o.width == 32:
        # fetch port is exactly one instruction wide: use it directly
        # NOTE(review): this branch's body was lost in the source paste and
        # has been reconstructed — confirm against upstream issuer.py
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """Drive ``res`` with a piece of core state (PC/MSR/SVSTATE).

    If an external override is present (``state_i.ok``) the override data
    is used combinatorially; otherwise the value is read from the State
    regfile read port ``regfile`` at register index ``regnum``, arriving
    one clock later (regfile reads have a 1-cycle latency).

    m        -- nmigen Module being built
    res      -- destination Signal/Record to drive
    core_rst -- core reset; while asserted, nothing is read
    state_i  -- Data record: .ok (override valid) and .data (override value)
    name     -- base name used for internal helper signals
    regfile  -- State regfile read port (ren / o_data)
    regnum   -- unary-encoded register index within the State regfile

    NOTE(review): several lines of this function (the comb/sync bindings,
    the ``~core_rst`` guard and the ``Else`` branch) were lost in the
    source paste and have been reconstructed — confirm the exact nesting
    against upstream issuer.py.
    """
    comb, sync = m.d.comb, m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        # remember (one clock later) whether the override was absent
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
        # ... but on a 1-clock delay
        with m.If(res_ok_delay):
            comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert

    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3

    Returns the tuple (regread, invert, unary, all1s) of nmigen Signals.

    NOTE(review): the ``Switch`` header and the ``invert.eq(1)`` lines of
    the _N (inverted) cases were lost in the source paste and have been
    reconstructed from the docstring contract — confirm against upstream.
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)     # ~r3: bit-inverted mask
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)     # ~r10
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)     # ~r30
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status

    this is identical to _get_predcr in ISACaller

    Each CR predicate mode maps to one of the four CR field bits
    (LT/GT/EQ/SO); the complementary modes (GE/LE/NE/NS) select the same
    bit with the invert flag cleared, so the caller tests ``bit ^ invert``.

    Returns the tuple (idx, invert) of nmigen Signals.

    NOTE(review): the ``Switch`` header, every ``invert.eq(...)`` line and
    the final ``return`` were lost in the source paste and have been
    reconstructed — confirm the invert polarity against ISACaller's
    _get_predcr before relying on this.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test is SVP64 is to be enabled
169 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
171 # and if regfiles are reduced
172 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
173 (pspec
.regreduce
== True))
175 # and if overlap requested
176 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
177 (pspec
.allow_overlap
== True))
179 # and get the core domain
180 self
.core_domain
= "coresync"
181 if (hasattr(pspec
, "core_domain") and
182 isinstance(pspec
.core_domain
, str)):
183 self
.core_domain
= pspec
.core_domain
185 # JTAG interface. add this right at the start because if it's
186 # added it *modifies* the pspec, by adding enable/disable signals
187 # for parts of the rest of the core
188 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
189 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
190 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
192 # XXX MUST keep this up-to-date with litex, and
193 # soc-cocotb-sim, and err.. all needs sorting out, argh
196 'eint', 'gpio', 'mspi0',
197 # 'mspi1', - disabled for now
198 # 'pwm', 'sd0', - disabled for now
200 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
201 domain
=self
.dbg_domain
)
202 # add signals to pspec to enable/disable icache and dcache
203 # (or data and intstruction wishbone if icache/dcache not included)
204 # https://bugs.libre-soc.org/show_bug.cgi?id=520
205 # TODO: do we actually care if these are not domain-synchronised?
206 # honestly probably not.
207 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
208 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
209 self
.wb_sram_en
= self
.jtag
.wb_sram_en
211 self
.wb_sram_en
= Const(1)
213 # add 4k sram blocks?
214 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
215 pspec
.sram4x4kblock
== True)
219 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
223 # add interrupt controller?
224 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
226 self
.xics_icp
= XICS_ICP()
227 self
.xics_ics
= XICS_ICS()
228 self
.int_level_i
= self
.xics_ics
.int_level_i
230 # add GPIO peripheral?
231 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
233 self
.simple_gpio
= SimpleGPIO()
234 self
.gpio_o
= self
.simple_gpio
.gpio_o
236 # main instruction core. suitable for prototyping / demo only
237 self
.core
= core
= NonProductionCore(pspec
)
238 self
.core_rst
= ResetSignal(self
.core_domain
)
240 # instruction decoder. goes into Trap Record
241 #pdecode = create_pdecode()
242 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
243 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
244 opkls
=IssuerDecode2ToOperand
,
245 svp64_en
=self
.svp64_en
,
246 regreduce_en
=self
.regreduce_en
)
247 pdecode
= self
.pdecode2
.dec
250 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
252 self
.update_svstate
= Signal() # set this if updating svstate
253 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
255 # Test Instruction memory
256 if hasattr(core
, "icache"):
257 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
258 # truly dreadful. needs a huge reorg.
259 pspec
.icache
= core
.icache
260 self
.imem
= ConfigFetchUnit(pspec
).fu
263 self
.dbg
= CoreDebug()
264 self
.dbg_rst_i
= Signal(reset_less
=True)
266 # instruction go/monitor
267 self
.pc_o
= Signal(64, reset_less
=True)
268 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
269 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
270 self
.svstate_i
= Data(64, "svstate_i") # ditto
271 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
272 self
.busy_o
= Signal(reset_less
=True)
273 self
.memerr_o
= Signal(reset_less
=True)
275 # STATE regfile read /write ports for PC, MSR, SVSTATE
276 staterf
= self
.core
.regs
.rf
['state']
277 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
278 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
279 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
281 self
.state_w_msr
= staterf
.w_ports
['msr'] # MSR wr
282 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
283 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
285 # DMI interface access
286 intrf
= self
.core
.regs
.rf
['int']
287 crrf
= self
.core
.regs
.rf
['cr']
288 xerrf
= self
.core
.regs
.rf
['xer']
289 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
290 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
291 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
295 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
296 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
298 # hack method of keeping an eye on whether branch/trap set the PC
299 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
300 self
.state_nia
.wen
.name
= 'state_nia_wen'
302 # pulse to synchronize the simulator at instruction end
303 self
.insn_done
= Signal()
305 # indicate any instruction still outstanding, in execution
306 self
.any_busy
= Signal()
309 # store copies of predicate masks
310 self
.srcmask
= Signal(64)
311 self
.dstmask
= Signal(64)
313 def setup_peripherals(self
, m
):
314 comb
, sync
= m
.d
.comb
, m
.d
.sync
316 # okaaaay so the debug module must be in coresync clock domain
317 # but NOT its reset signal. to cope with this, set every single
318 # submodule explicitly in coresync domain, debug and JTAG
319 # in their own one but using *external* reset.
320 csd
= DomainRenamer(self
.core_domain
)
321 dbd
= DomainRenamer(self
.dbg_domain
)
323 m
.submodules
.core
= core
= csd(self
.core
)
324 # this _so_ needs sorting out. ICache is added down inside
325 # LoadStore1 and is already a submodule of LoadStore1
326 if not isinstance(self
.imem
, ICache
):
327 m
.submodules
.imem
= imem
= csd(self
.imem
)
328 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
330 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
331 # TODO: UART2GDB mux, here, from external pin
332 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
333 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
335 cur_state
= self
.cur_state
337 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
339 for i
, sram
in enumerate(self
.sram4k
):
340 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
341 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
343 # XICS interrupt handler
345 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
346 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
347 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
348 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
350 # GPIO test peripheral
352 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
354 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
355 # XXX causes litex ECP5 test to get wrong idea about input and output
356 # (but works with verilator sim *sigh*)
357 # if self.gpio and self.xics:
358 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
360 # instruction decoder
361 pdecode
= create_pdecode()
362 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
364 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
367 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
368 intrf
= self
.core
.regs
.rf
['int']
370 # clock delay power-on reset
371 cd_por
= ClockDomain(reset_less
=True)
372 cd_sync
= ClockDomain()
373 m
.domains
+= cd_por
, cd_sync
374 core_sync
= ClockDomain(self
.core_domain
)
375 if self
.core_domain
!= "sync":
376 m
.domains
+= core_sync
377 if self
.dbg_domain
!= "sync":
378 dbg_sync
= ClockDomain(self
.dbg_domain
)
379 m
.domains
+= dbg_sync
381 ti_rst
= Signal(reset_less
=True)
382 delay
= Signal(range(4), reset
=3)
383 with m
.If(delay
!= 0):
384 m
.d
.por
+= delay
.eq(delay
- 1)
385 comb
+= cd_por
.clk
.eq(ClockSignal())
387 # power-on reset delay
388 core_rst
= ResetSignal(self
.core_domain
)
389 if self
.core_domain
!= "sync":
390 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
391 comb
+= core_rst
.eq(ti_rst
)
393 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
394 comb
+= core_rst
.eq(1)
396 # connect external reset signal to DMI Reset
397 if self
.dbg_domain
!= "sync":
398 dbg_rst
= ResetSignal(self
.dbg_domain
)
399 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
401 # busy/halted signals from core
402 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
403 comb
+= self
.busy_o
.eq(core_busy_o
)
404 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
406 # temporary hack: says "go" immediately for both address gen and ST
408 ldst
= core
.fus
.fus
['ldst0']
409 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
410 # link addr-go direct to rel
411 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
412 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
414 def do_dmi(self
, m
, dbg
):
415 """deals with DMI debug requests
417 currently only provides read requests for the INT regfile, CR and XER
418 it will later also deal with *writing* to these regfiles.
422 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
423 intrf
= self
.core
.regs
.rf
['int']
425 with m
.If(d_reg
.req
): # request for regfile access being made
426 # TODO: error-check this
427 # XXX should this be combinatorial? sync better?
429 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
431 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
432 comb
+= self
.int_r
.ren
.eq(1)
433 d_reg_delay
= Signal()
434 sync
+= d_reg_delay
.eq(d_reg
.req
)
435 with m
.If(d_reg_delay
):
436 # data arrives one clock later
437 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
438 comb
+= d_reg
.ack
.eq(1)
440 # sigh same thing for CR debug
441 with m
.If(d_cr
.req
): # request for regfile access being made
442 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
443 d_cr_delay
= Signal()
444 sync
+= d_cr_delay
.eq(d_cr
.req
)
445 with m
.If(d_cr_delay
):
446 # data arrives one clock later
447 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
448 comb
+= d_cr
.ack
.eq(1)
451 with m
.If(d_xer
.req
): # request for regfile access being made
452 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
453 d_xer_delay
= Signal()
454 sync
+= d_xer_delay
.eq(d_xer
.req
)
455 with m
.If(d_xer_delay
):
456 # data arrives one clock later
457 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
458 comb
+= d_xer
.ack
.eq(1)
460 def tb_dec_fsm(self
, m
, spr_dec
):
463 this is a FSM for updating either dec or tb. it runs alternately
464 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
465 value to DEC, however the regfile has "passthrough" on it so this
468 see v3.0B p1097-1099 for Timeer Resource and p1065 and p1076
471 comb
, sync
= m
.d
.comb
, m
.d
.sync
472 fast_rf
= self
.core
.regs
.rf
['fast']
473 fast_r_dectb
= fast_rf
.r_ports
['issue'] # DEC/TB
474 fast_w_dectb
= fast_rf
.w_ports
['issue'] # DEC/TB
478 # initiates read of current DEC
479 with m
.State("DEC_READ"):
480 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.DEC
)
481 comb
+= fast_r_dectb
.ren
.eq(1)
484 # waits for DEC read to arrive (1 cycle), updates with new value
485 with m
.State("DEC_WRITE"):
487 # TODO: MSR.LPCR 32-bit decrement mode
488 comb
+= new_dec
.eq(fast_r_dectb
.o_data
- 1)
489 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.DEC
)
490 comb
+= fast_w_dectb
.wen
.eq(1)
491 comb
+= fast_w_dectb
.i_data
.eq(new_dec
)
492 sync
+= spr_dec
.eq(new_dec
) # copy into cur_state for decoder
495 # initiates read of current TB
496 with m
.State("TB_READ"):
497 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.TB
)
498 comb
+= fast_r_dectb
.ren
.eq(1)
501 # waits for read TB to arrive, initiates write of current TB
502 with m
.State("TB_WRITE"):
504 comb
+= new_tb
.eq(fast_r_dectb
.o_data
+ 1)
505 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.TB
)
506 comb
+= fast_w_dectb
.wen
.eq(1)
507 comb
+= fast_w_dectb
.i_data
.eq(new_tb
)
512 def elaborate(self
, platform
):
515 comb
, sync
= m
.d
.comb
, m
.d
.sync
516 cur_state
= self
.cur_state
517 pdecode2
= self
.pdecode2
520 # set up peripherals and core
521 core_rst
= self
.core_rst
522 self
.setup_peripherals(m
)
524 # reset current state if core reset requested
526 m
.d
.sync
+= self
.cur_state
.eq(0)
528 # check halted condition: requested PC to execute matches DMI stop addr
529 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
532 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
534 comb
+= dbg
.core_stopped_i
.eq(1)
535 comb
+= dbg
.terminate_i
.eq(1)
537 # PC and instruction from I-Memory
538 comb
+= self
.pc_o
.eq(cur_state
.pc
)
539 self
.pc_changed
= Signal() # note write to PC
540 self
.msr_changed
= Signal() # note write to MSR
541 self
.sv_changed
= Signal() # note write to SVSTATE
543 # read state either from incoming override or from regfile
544 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
545 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
547 self
.state_r_msr
, StateRegs
.MSR
)
548 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
550 self
.state_r_pc
, StateRegs
.PC
)
551 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
552 "svstate", # read SVSTATE
553 self
.state_r_sv
, StateRegs
.SVSTATE
)
555 # don't write pc every cycle
556 comb
+= self
.state_w_pc
.wen
.eq(0)
557 comb
+= self
.state_w_pc
.i_data
.eq(0)
559 # connect up debug state. note "combinatorially same" below,
560 # this is a bit naff, passing state over in the dbg class, but
561 # because it is combinatorial it achieves the desired goal
562 comb
+= dbg
.state
.eq(state
)
564 # this bit doesn't have to be in the FSM: connect up to read
565 # regfiles on demand from DMI
568 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
569 # (which uses that in PowerDecoder2 to raise 0x900 exception)
570 self
.tb_dec_fsm(m
, cur_state
.dec
)
572 # while stopped, allow updating the MSR, PC and SVSTATE.
573 # these are mainly for debugging purposes (including DMI/JTAG)
574 with m
.If(dbg
.core_stopped_i
):
575 with m
.If(self
.pc_i
.ok
):
576 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
577 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
578 sync
+= self
.pc_changed
.eq(1)
579 with m
.If(self
.msr_i
.ok
):
580 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
581 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
582 sync
+= self
.msr_changed
.eq(1)
583 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
584 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
585 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
586 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
587 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
588 sync
+= self
.sv_changed
.eq(1)
593 yield from self
.pc_i
.ports()
594 yield from self
.msr_i
.ports()
597 yield from self
.core
.ports()
598 yield from self
.imem
.ports()
599 yield self
.core_bigendian_i
605 def external_ports(self
):
606 ports
= self
.pc_i
.ports()
607 ports
= self
.msr_i
.ports()
608 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
612 ports
+= list(self
.jtag
.external_ports())
614 # don't add DMI if JTAG is enabled
615 ports
+= list(self
.dbg
.dmi
.ports())
617 ports
+= list(self
.imem
.ibus
.fields
.values())
618 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
621 for sram
in self
.sram4k
:
622 ports
+= list(sram
.bus
.fields
.values())
625 ports
+= list(self
.xics_icp
.bus
.fields
.values())
626 ports
+= list(self
.xics_ics
.bus
.fields
.values())
627 ports
.append(self
.int_level_i
)
630 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
631 ports
.append(self
.gpio_o
)
640 # Fetch Finite State Machine.
641 # WARNING: there are currently DriverConflicts but it's actually working.
642 # TODO, here: everything that is global in nature, information from the
643 # main TestIssuerInternal, needs to move to either ispec() or ospec().
644 # not only that: TestIssuerInternal.imem can entirely move into here
645 # because imem is only ever accessed inside the FetchFSM.
646 class FetchFSM(ControlBase
):
647 def __init__(self
, allow_overlap
, svp64_en
, imem
, core_rst
,
649 dbg
, core
, svstate
, nia
, is_svp64_mode
):
650 self
.allow_overlap
= allow_overlap
651 self
.svp64_en
= svp64_en
653 self
.core_rst
= core_rst
654 self
.pdecode2
= pdecode2
655 self
.cur_state
= cur_state
658 self
.svstate
= svstate
660 self
.is_svp64_mode
= is_svp64_mode
662 # set up pipeline ControlBase and allocate i/o specs
663 # (unusual: normally done by the Pipeline API)
664 super().__init
__(stage
=self
)
665 self
.p
.i_data
, self
.n
.o_data
= self
.new_specs(None)
666 self
.i
, self
.o
= self
.p
.i_data
, self
.n
.o_data
668 # next 3 functions are Stage API Compliance
669 def setup(self
, m
, i
):
678 def elaborate(self
, platform
):
681 this FSM performs fetch of raw instruction data, partial-decodes
682 it 32-bit at a time to detect SVP64 prefixes, and will optionally
683 read a 2nd 32-bit quantity if that occurs.
685 m
= super().elaborate(platform
)
691 svstate
= self
.svstate
693 is_svp64_mode
= self
.is_svp64_mode
694 fetch_pc_o_ready
= self
.p
.o_ready
695 fetch_pc_i_valid
= self
.p
.i_valid
696 fetch_insn_o_valid
= self
.n
.o_valid
697 fetch_insn_i_ready
= self
.n
.i_ready
701 pdecode2
= self
.pdecode2
702 cur_state
= self
.cur_state
703 dec_opcode_o
= pdecode2
.dec
.raw_opcode_in
# raw opcode
705 # also note instruction fetch failed
706 if hasattr(core
, "icache"):
707 fetch_failed
= core
.icache
.i_out
.fetch_failed
710 fetch_failed
= Const(0, 1)
713 # set priv / virt mode on I-Cache, sigh
714 if isinstance(self
.imem
, ICache
):
715 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
716 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
718 with m
.FSM(name
='fetch_fsm'):
721 with m
.State("IDLE"):
722 with m
.If(~dbg
.stopping_o
& ~fetch_failed
& ~dbg
.core_stop_o
):
723 comb
+= fetch_pc_o_ready
.eq(1)
724 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
726 # instruction allowed to go: start by reading the PC
727 # capture the PC and also drop it into Insn Memory
728 # we have joined a pair of combinatorial memory
729 # lookups together. this is Generally Bad.
730 comb
+= self
.imem
.a_pc_i
.eq(pc
)
731 comb
+= self
.imem
.a_i_valid
.eq(1)
732 comb
+= self
.imem
.f_i_valid
.eq(1)
733 # transfer state to output
734 sync
+= cur_state
.pc
.eq(pc
)
735 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
736 sync
+= cur_state
.msr
.eq(msr
) # and msr
738 m
.next
= "INSN_READ" # move to "wait for bus" phase
740 # dummy pause to find out why simulation is not keeping up
741 with m
.State("INSN_READ"):
742 if self
.allow_overlap
:
743 stopping
= dbg
.stopping_o
747 # stopping: jump back to idle
750 with m
.If(self
.imem
.f_busy_o
&
751 ~pdecode2
.instr_fault
): # zzz...
752 # busy but not fetch failed: stay in wait-read
753 comb
+= self
.imem
.a_pc_i
.eq(pc
)
754 comb
+= self
.imem
.a_i_valid
.eq(1)
755 comb
+= self
.imem
.f_i_valid
.eq(1)
757 # not busy (or fetch failed!): instruction fetched
758 # when fetch failed, the instruction gets ignored
760 if hasattr(core
, "icache"):
761 # blech, icache returns actual instruction
762 insn
= self
.imem
.f_instr_o
764 # but these return raw memory
765 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
768 # decode the SVP64 prefix, if any
769 comb
+= svp64
.raw_opcode_in
.eq(insn
)
770 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
771 # pass the decoded prefix (if any) to PowerDecoder2
772 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
773 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
774 # remember whether this is a prefixed instruction,
775 # so the FSM can readily loop when VL==0
776 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
777 # calculate the address of the following instruction
778 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
779 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
780 with m
.If(~svp64
.is_svp64_mode
):
781 # with no prefix, store the instruction
782 # and hand it directly to the next FSM
783 sync
+= dec_opcode_o
.eq(insn
)
784 m
.next
= "INSN_READY"
786 # fetch the rest of the instruction from memory
787 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
788 comb
+= self
.imem
.a_i_valid
.eq(1)
789 comb
+= self
.imem
.f_i_valid
.eq(1)
790 m
.next
= "INSN_READ2"
792 # not SVP64 - 32-bit only
793 sync
+= nia
.eq(cur_state
.pc
+ 4)
794 sync
+= dec_opcode_o
.eq(insn
)
795 m
.next
= "INSN_READY"
797 with m
.State("INSN_READ2"):
798 with m
.If(self
.imem
.f_busy_o
): # zzz...
799 # busy: stay in wait-read
800 comb
+= self
.imem
.a_i_valid
.eq(1)
801 comb
+= self
.imem
.f_i_valid
.eq(1)
803 # not busy: instruction fetched
804 if hasattr(core
, "icache"):
805 # blech, icache returns actual instruction
806 insn
= self
.imem
.f_instr_o
808 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
809 sync
+= dec_opcode_o
.eq(insn
)
810 m
.next
= "INSN_READY"
811 # TODO: probably can start looking at pdecode2.rm_dec
812 # here or maybe even in INSN_READ state, if svp64_mode
813 # detected, in order to trigger - and wait for - the
816 pmode
= pdecode2
.rm_dec
.predmode
818 if pmode != SVP64PredMode.ALWAYS.value:
819 fire predicate loading FSM and wait before
822 sync += self.srcmask.eq(-1) # set to all 1s
823 sync += self.dstmask.eq(-1) # set to all 1s
824 m.next = "INSN_READY"
827 with m
.State("INSN_READY"):
828 # hand over the instruction, to be decoded
829 comb
+= fetch_insn_o_valid
.eq(1)
830 with m
.If(fetch_insn_i_ready
):
833 # whatever was done above, over-ride it if core reset is held
834 with m
.If(self
.core_rst
):
840 class TestIssuerInternal(TestIssuerBase
):
841 """TestIssuer - reads instructions from TestMemory and issues them
843 efficiency and speed is not the main goal here: functional correctness
844 and code clarity is. optimisations (which almost 100% interfere with
845 easy understanding) come later.
848 def fetch_predicate_fsm(self
, m
,
849 pred_insn_i_valid
, pred_insn_o_ready
,
850 pred_mask_o_valid
, pred_mask_i_ready
):
851 """fetch_predicate_fsm - obtains (constructs in the case of CR)
852 src/dest predicate masks
854 https://bugs.libre-soc.org/show_bug.cgi?id=617
855 the predicates can be read here, by using IntRegs r_ports['pred']
856 or CRRegs r_ports['pred']. in the case of CRs it will have to
857 be done through multiple reads, extracting one relevant at a time.
858 later, a faster way would be to use the 32-bit-wide CR port but
859 this is more complex decoding, here. equivalent code used in
860 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
862 note: this ENTIRE FSM is not to be called when svp64 is disabled
866 pdecode2
= self
.pdecode2
867 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
868 predmode
= rm_dec
.predmode
869 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
870 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
871 # get src/dst step, so we can skip already used mask bits
872 cur_state
= self
.cur_state
873 srcstep
= cur_state
.svstate
.srcstep
874 dststep
= cur_state
.svstate
.dststep
875 cur_vl
= cur_state
.svstate
.vl
878 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
879 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
880 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
881 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
883 # store fetched masks, for either intpred or crpred
884 # when src/dst step is not zero, the skipped mask bits need to be
885 # shifted-out, before actually storing them in src/dest mask
886 new_srcmask
= Signal(64, reset_less
=True)
887 new_dstmask
= Signal(64, reset_less
=True)
889 with m
.FSM(name
="fetch_predicate"):
891 with m
.State("FETCH_PRED_IDLE"):
892 comb
+= pred_insn_o_ready
.eq(1)
893 with m
.If(pred_insn_i_valid
):
894 with m
.If(predmode
== SVP64PredMode
.INT
):
895 # skip fetching destination mask register, when zero
897 sync
+= new_dstmask
.eq(-1)
898 # directly go to fetch source mask register
899 # guaranteed not to be zero (otherwise predmode
900 # would be SVP64PredMode.ALWAYS, not INT)
901 comb
+= int_pred
.addr
.eq(sregread
)
902 comb
+= int_pred
.ren
.eq(1)
903 m
.next
= "INT_SRC_READ"
904 # fetch destination predicate register
906 comb
+= int_pred
.addr
.eq(dregread
)
907 comb
+= int_pred
.ren
.eq(1)
908 m
.next
= "INT_DST_READ"
909 with m
.Elif(predmode
== SVP64PredMode
.CR
):
910 # go fetch masks from the CR register file
911 sync
+= new_srcmask
.eq(0)
912 sync
+= new_dstmask
.eq(0)
915 sync
+= self
.srcmask
.eq(-1)
916 sync
+= self
.dstmask
.eq(-1)
917 m
.next
= "FETCH_PRED_DONE"
919 with m
.State("INT_DST_READ"):
920 # store destination mask
921 inv
= Repl(dinvert
, 64)
923 # set selected mask bit for 1<<r3 mode
924 dst_shift
= Signal(range(64))
925 comb
+= dst_shift
.eq(self
.int_pred
.o_data
& 0b111111)
926 sync
+= new_dstmask
.eq(1 << dst_shift
)
928 # invert mask if requested
929 sync
+= new_dstmask
.eq(self
.int_pred
.o_data ^ inv
)
930 # skip fetching source mask register, when zero
932 sync
+= new_srcmask
.eq(-1)
933 m
.next
= "FETCH_PRED_SHIFT_MASK"
934 # fetch source predicate register
936 comb
+= int_pred
.addr
.eq(sregread
)
937 comb
+= int_pred
.ren
.eq(1)
938 m
.next
= "INT_SRC_READ"
940 with m
.State("INT_SRC_READ"):
942 inv
= Repl(sinvert
, 64)
944 # set selected mask bit for 1<<r3 mode
945 src_shift
= Signal(range(64))
946 comb
+= src_shift
.eq(self
.int_pred
.o_data
& 0b111111)
947 sync
+= new_srcmask
.eq(1 << src_shift
)
949 # invert mask if requested
950 sync
+= new_srcmask
.eq(self
.int_pred
.o_data ^ inv
)
951 m
.next
= "FETCH_PRED_SHIFT_MASK"
953 # fetch masks from the CR register file
954 # implements the following loop:
955 # idx, inv = get_predcr(mask)
957 # for cr_idx in range(vl):
958 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
960 # mask |= 1 << cr_idx
962 with m
.State("CR_READ"):
963 # CR index to be read, which will be ready by the next cycle
964 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
965 # submit the read operation to the regfile
966 with m
.If(cr_idx
!= cur_vl
):
967 # the CR read port is unary ...
969 # ... in MSB0 convention ...
970 # ren = 1 << (7 - cr_idx)
971 # ... and with an offset:
972 # ren = 1 << (7 - off - cr_idx)
973 idx
= SVP64CROffs
.CRPred
+ cr_idx
974 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
975 # signal data valid in the next cycle
976 cr_read
= Signal(reset_less
=True)
977 sync
+= cr_read
.eq(1)
978 # load the next index
979 sync
+= cr_idx
.eq(cr_idx
+ 1)
982 sync
+= cr_read
.eq(0)
984 m
.next
= "FETCH_PRED_SHIFT_MASK"
986 # compensate for the one cycle delay on the regfile
987 cur_cr_idx
= Signal
.like(cur_vl
)
988 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
989 # read the CR field, select the appropriate bit
993 comb
+= cr_field
.eq(cr_pred
.o_data
)
994 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1)
996 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1)
998 # set the corresponding mask bit
999 bit_to_set
= Signal
.like(self
.srcmask
)
1000 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
1002 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
1004 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
1006 with m
.State("FETCH_PRED_SHIFT_MASK"):
1007 # shift-out skipped mask bits
1008 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
1009 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
1010 m
.next
= "FETCH_PRED_DONE"
1012 with m
.State("FETCH_PRED_DONE"):
1013 comb
+= pred_mask_o_valid
.eq(1)
1014 with m
.If(pred_mask_i_ready
):
1015 m
.next
= "FETCH_PRED_IDLE"
1017 def issue_fsm(self
, m
, core
, nia
,
1018 dbg
, core_rst
, is_svp64_mode
,
1019 fetch_pc_o_ready
, fetch_pc_i_valid
,
1020 fetch_insn_o_valid
, fetch_insn_i_ready
,
1021 pred_insn_i_valid
, pred_insn_o_ready
,
1022 pred_mask_o_valid
, pred_mask_i_ready
,
1023 exec_insn_i_valid
, exec_insn_o_ready
,
1024 exec_pc_o_valid
, exec_pc_i_ready
):
1027 decode / issue FSM. this interacts with the "fetch" FSM
1028 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
1029 (outgoing). also interacts with the "execute" FSM
1030 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
1032 SVP64 RM prefixes have already been set up by the
1033 "fetch" phase, so execute is fairly straightforward.
1038 pdecode2
= self
.pdecode2
1039 cur_state
= self
.cur_state
1040 new_svstate
= self
.new_svstate
1043 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
1045 # for updating svstate (things like srcstep etc.)
1046 comb
+= new_svstate
.eq(cur_state
.svstate
)
1048 # precalculate srcstep+1 and dststep+1
1049 cur_srcstep
= cur_state
.svstate
.srcstep
1050 cur_dststep
= cur_state
.svstate
.dststep
1051 next_srcstep
= Signal
.like(cur_srcstep
)
1052 next_dststep
= Signal
.like(cur_dststep
)
1053 comb
+= next_srcstep
.eq(cur_state
.svstate
.srcstep
+1)
1054 comb
+= next_dststep
.eq(cur_state
.svstate
.dststep
+1)
1056 # note if an exception happened. in a pipelined or OoO design
1057 # this needs to be accompanied by "shadowing" (or stalling)
1058 exc_happened
= self
.core
.o
.exc_happened
1059 # also note instruction fetch failed
1060 if hasattr(core
, "icache"):
1061 fetch_failed
= core
.icache
.i_out
.fetch_failed
1063 # set to fault in decoder
1064 # update (highest priority) instruction fault
1065 rising_fetch_failed
= rising_edge(m
, fetch_failed
)
1066 with m
.If(rising_fetch_failed
):
1067 sync
+= pdecode2
.instr_fault
.eq(1)
1069 fetch_failed
= Const(0, 1)
1070 flush_needed
= False
1072 with m
.FSM(name
="issue_fsm"):
1074 # sync with the "fetch" phase which is reading the instruction
1075 # at this point, there is no instruction running, that
1076 # could inadvertently update the PC.
1077 with m
.State("ISSUE_START"):
1078 # reset instruction fault
1079 sync
+= pdecode2
.instr_fault
.eq(0)
1080 # wait on "core stop" release, before next fetch
1081 # need to do this here, in case we are in a VL==0 loop
1082 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
1083 comb
+= fetch_pc_i_valid
.eq(1) # tell fetch to start
1084 with m
.If(fetch_pc_o_ready
): # fetch acknowledged us
1085 m
.next
= "INSN_WAIT"
1087 # tell core it's stopped, and acknowledge debug handshake
1088 comb
+= dbg
.core_stopped_i
.eq(1)
1089 # while stopped, allow updating SVSTATE
1090 with m
.If(self
.svstate_i
.ok
):
1091 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
1092 comb
+= self
.update_svstate
.eq(1)
1093 sync
+= self
.sv_changed
.eq(1)
1095 # wait for an instruction to arrive from Fetch
1096 with m
.State("INSN_WAIT"):
1097 if self
.allow_overlap
:
1098 stopping
= dbg
.stopping_o
1101 with m
.If(stopping
):
1102 # stopping: jump back to idle
1103 m
.next
= "ISSUE_START"
1105 # request the icache to stop asserting "failed"
1106 comb
+= core
.icache
.flush_in
.eq(1)
1107 # stop instruction fault
1108 sync
+= pdecode2
.instr_fault
.eq(0)
1110 comb
+= fetch_insn_i_ready
.eq(1)
1111 with m
.If(fetch_insn_o_valid
):
1112 # loop into ISSUE_START if it's a SVP64 instruction
1113 # and VL == 0. this because VL==0 is a for-loop
1114 # from 0 to 0 i.e. always, always a NOP.
1115 cur_vl
= cur_state
.svstate
.vl
1116 with m
.If(is_svp64_mode
& (cur_vl
== 0)):
1117 # update the PC before fetching the next instruction
1118 # since we are in a VL==0 loop, no instruction was
1119 # executed that we could be overwriting
1120 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1121 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1122 comb
+= self
.insn_done
.eq(1)
1123 m
.next
= "ISSUE_START"
1126 m
.next
= "PRED_START" # fetching predicate
1128 m
.next
= "DECODE_SV" # skip predication
1130 with m
.State("PRED_START"):
1131 comb
+= pred_insn_i_valid
.eq(1) # tell fetch_pred to start
1132 with m
.If(pred_insn_o_ready
): # fetch_pred acknowledged us
1133 m
.next
= "MASK_WAIT"
1135 with m
.State("MASK_WAIT"):
1136 comb
+= pred_mask_i_ready
.eq(1) # ready to receive the masks
1137 with m
.If(pred_mask_o_valid
): # predication masks are ready
1138 m
.next
= "PRED_SKIP"
1140 # skip zeros in predicate
1141 with m
.State("PRED_SKIP"):
1142 with m
.If(~is_svp64_mode
):
1143 m
.next
= "DECODE_SV" # nothing to do
1146 pred_src_zero
= pdecode2
.rm_dec
.pred_sz
1147 pred_dst_zero
= pdecode2
.rm_dec
.pred_dz
1149 # new srcstep, after skipping zeros
1150 skip_srcstep
= Signal
.like(cur_srcstep
)
1151 # value to be added to the current srcstep
1152 src_delta
= Signal
.like(cur_srcstep
)
1153 # add leading zeros to srcstep, if not in zero mode
1154 with m
.If(~pred_src_zero
):
1155 # priority encoder (count leading zeros)
1156 # append guard bit, in case the mask is all zeros
1157 pri_enc_src
= PriorityEncoder(65)
1158 m
.submodules
.pri_enc_src
= pri_enc_src
1159 comb
+= pri_enc_src
.i
.eq(Cat(self
.srcmask
,
1161 comb
+= src_delta
.eq(pri_enc_src
.o
)
1162 # apply delta to srcstep
1163 comb
+= skip_srcstep
.eq(cur_srcstep
+ src_delta
)
1164 # shift-out all leading zeros from the mask
1165 # plus the leading "one" bit
1166 # TODO count leading zeros and shift-out the zero
1167 # bits, in the same step, in hardware
1168 sync
+= self
.srcmask
.eq(self
.srcmask
>> (src_delta
+1))
1170 # same as above, but for dststep
1171 skip_dststep
= Signal
.like(cur_dststep
)
1172 dst_delta
= Signal
.like(cur_dststep
)
1173 with m
.If(~pred_dst_zero
):
1174 pri_enc_dst
= PriorityEncoder(65)
1175 m
.submodules
.pri_enc_dst
= pri_enc_dst
1176 comb
+= pri_enc_dst
.i
.eq(Cat(self
.dstmask
,
1178 comb
+= dst_delta
.eq(pri_enc_dst
.o
)
1179 comb
+= skip_dststep
.eq(cur_dststep
+ dst_delta
)
1180 sync
+= self
.dstmask
.eq(self
.dstmask
>> (dst_delta
+1))
1182 # TODO: initialize mask[VL]=1 to avoid passing past VL
1183 with m
.If((skip_srcstep
>= cur_vl
) |
1184 (skip_dststep
>= cur_vl
)):
1185 # end of VL loop. Update PC and reset src/dst step
1186 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1187 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1188 comb
+= new_svstate
.srcstep
.eq(0)
1189 comb
+= new_svstate
.dststep
.eq(0)
1190 comb
+= self
.update_svstate
.eq(1)
1191 # synchronize with the simulator
1192 comb
+= self
.insn_done
.eq(1)
1194 m
.next
= "ISSUE_START"
1196 # update new src/dst step
1197 comb
+= new_svstate
.srcstep
.eq(skip_srcstep
)
1198 comb
+= new_svstate
.dststep
.eq(skip_dststep
)
1199 comb
+= self
.update_svstate
.eq(1)
1201 m
.next
= "DECODE_SV"
1203 # pass predicate mask bits through to satellite decoders
1204 # TODO: for SIMD this will be *multiple* bits
1205 sync
+= core
.i
.sv_pred_sm
.eq(self
.srcmask
[0])
1206 sync
+= core
.i
.sv_pred_dm
.eq(self
.dstmask
[0])
1208 # after src/dst step have been updated, we are ready
1209 # to decode the instruction
1210 with m
.State("DECODE_SV"):
1211 # decode the instruction
1212 with m
.If(~fetch_failed
):
1213 sync
+= pdecode2
.instr_fault
.eq(0)
1214 sync
+= core
.i
.e
.eq(pdecode2
.e
)
1215 sync
+= core
.i
.state
.eq(cur_state
)
1216 sync
+= core
.i
.raw_insn_i
.eq(dec_opcode_i
)
1217 sync
+= core
.i
.bigendian_i
.eq(self
.core_bigendian_i
)
1219 sync
+= core
.i
.sv_rm
.eq(pdecode2
.sv_rm
)
1220 # set RA_OR_ZERO detection in satellite decoders
1221 sync
+= core
.i
.sv_a_nz
.eq(pdecode2
.sv_a_nz
)
1222 # and svp64 detection
1223 sync
+= core
.i
.is_svp64_mode
.eq(is_svp64_mode
)
1224 # and svp64 bit-rev'd ldst mode
1225 ldst_dec
= pdecode2
.use_svp64_ldst_dec
1226 sync
+= core
.i
.use_svp64_ldst_dec
.eq(ldst_dec
)
1227 # after decoding, reset any previous exception condition,
1228 # allowing it to be set again during the next execution
1229 sync
+= pdecode2
.ldst_exc
.eq(0)
1231 m
.next
= "INSN_EXECUTE" # move to "execute"
1233 # handshake with execution FSM, move to "wait" once acknowledged
1234 with m
.State("INSN_EXECUTE"):
1235 if self
.allow_overlap
:
1236 stopping
= dbg
.stopping_o
1239 with m
.If(stopping
):
1240 # stopping: jump back to idle
1241 m
.next
= "ISSUE_START"
1243 # request the icache to stop asserting "failed"
1244 comb
+= core
.icache
.flush_in
.eq(1)
1245 # stop instruction fault
1246 sync
+= pdecode2
.instr_fault
.eq(0)
1248 comb
+= exec_insn_i_valid
.eq(1) # trigger execute
1249 with m
.If(exec_insn_o_ready
): # execute acknowledged us
1250 m
.next
= "EXECUTE_WAIT"
1252 with m
.State("EXECUTE_WAIT"):
1253 # wait on "core stop" release, at instruction end
1254 # need to do this here, in case we are in a VL>1 loop
1255 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
1256 comb
+= exec_pc_i_ready
.eq(1)
1257 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
1258 # the exception info needs to be blatted into
1259 # pdecode.ldst_exc, and the instruction "re-run".
1260 # when ldst_exc.happened is set, the PowerDecoder2
1261 # reacts very differently: it re-writes the instruction
1262 # with a "trap" (calls PowerDecoder2.trap()) which
1263 # will *overwrite* whatever was requested and jump the
1264 # PC to the exception address, as well as alter MSR.
1265 # nothing else needs to be done other than to note
1266 # the change of PC and MSR (and, later, SVSTATE)
1267 with m
.If(exc_happened
):
1268 mmu
= core
.fus
.get_exc("mmu0")
1269 ldst
= core
.fus
.get_exc("ldst0")
1271 with m
.If(fetch_failed
):
1272 # instruction fetch: exception is from MMU
1273 # reset instr_fault (highest priority)
1274 sync
+= pdecode2
.ldst_exc
.eq(mmu
)
1275 sync
+= pdecode2
.instr_fault
.eq(0)
1277 # request icache to stop asserting "failed"
1278 comb
+= core
.icache
.flush_in
.eq(1)
1279 with m
.If(~fetch_failed
):
1280 # otherwise assume it was a LDST exception
1281 sync
+= pdecode2
.ldst_exc
.eq(ldst
)
1283 with m
.If(exec_pc_o_valid
):
1285 # was this the last loop iteration?
1287 cur_vl
= cur_state
.svstate
.vl
1288 comb
+= is_last
.eq(next_srcstep
== cur_vl
)
1290 with m
.If(pdecode2
.instr_fault
):
1291 # reset instruction fault, try again
1292 sync
+= pdecode2
.instr_fault
.eq(0)
1293 m
.next
= "ISSUE_START"
1295 # return directly to Decode if Execute generated an
1297 with m
.Elif(pdecode2
.ldst_exc
.happened
):
1298 m
.next
= "DECODE_SV"
1300 # if MSR, PC or SVSTATE were changed by the previous
1301 # instruction, go directly back to Fetch, without
1302 # updating either MSR PC or SVSTATE
1303 with m
.Elif(self
.msr_changed | self
.pc_changed |
1305 m
.next
= "ISSUE_START"
1307 # also return to Fetch, when no output was a vector
1308 # (regardless of SRCSTEP and VL), or when the last
1309 # instruction was really the last one of the VL loop
1310 with m
.Elif((~pdecode2
.loop_continue
) | is_last
):
1311 # before going back to fetch, update the PC state
1312 # register with the NIA.
1313 # ok here we are not reading the branch unit.
1314 # TODO: this just blithely overwrites whatever
1315 # pipeline updated the PC
1316 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
1317 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
1318 # reset SRCSTEP before returning to Fetch
1320 with m
.If(pdecode2
.loop_continue
):
1321 comb
+= new_svstate
.srcstep
.eq(0)
1322 comb
+= new_svstate
.dststep
.eq(0)
1323 comb
+= self
.update_svstate
.eq(1)
1325 comb
+= new_svstate
.srcstep
.eq(0)
1326 comb
+= new_svstate
.dststep
.eq(0)
1327 comb
+= self
.update_svstate
.eq(1)
1328 m
.next
= "ISSUE_START"
1330 # returning to Execute? then, first update SRCSTEP
1332 comb
+= new_svstate
.srcstep
.eq(next_srcstep
)
1333 comb
+= new_svstate
.dststep
.eq(next_dststep
)
1334 comb
+= self
.update_svstate
.eq(1)
1335 # return to mask skip loop
1336 m
.next
= "PRED_SKIP"
1339 comb
+= dbg
.core_stopped_i
.eq(1)
1341 # request the icache to stop asserting "failed"
1342 comb
+= core
.icache
.flush_in
.eq(1)
1343 # stop instruction fault
1344 sync
+= pdecode2
.instr_fault
.eq(0)
1345 # if terminated return to idle
1346 with m
.If(dbg
.terminate_i
):
1347 m
.next
= "ISSUE_START"
1349 # check if svstate needs updating: if so, write it to State Regfile
1350 with m
.If(self
.update_svstate
):
1351 sync
+= cur_state
.svstate
.eq(self
.new_svstate
) # for next clock
1353 def execute_fsm(self
, m
, core
,
1354 exec_insn_i_valid
, exec_insn_o_ready
,
1355 exec_pc_o_valid
, exec_pc_i_ready
):
1358 execute FSM. this interacts with the "issue" FSM
1359 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
1360 (outgoing). SVP64 RM prefixes have already been set up by the
1361 "issue" phase, so execute is fairly straightforward.
1367 pdecode2
= self
.pdecode2
1370 core_busy_o
= core
.n
.o_data
.busy_o
# core is busy
1371 core_ivalid_i
= core
.p
.i_valid
# instruction is valid
1373 if hasattr(core
, "icache"):
1374 fetch_failed
= core
.icache
.i_out
.fetch_failed
1376 fetch_failed
= Const(0, 1)
1378 with m
.FSM(name
="exec_fsm"):
1380 # waiting for instruction bus (stays there until not busy)
1381 with m
.State("INSN_START"):
1382 comb
+= exec_insn_o_ready
.eq(1)
1383 with m
.If(exec_insn_i_valid
):
1384 comb
+= core_ivalid_i
.eq(1) # instruction is valid/issued
1385 sync
+= self
.sv_changed
.eq(0)
1386 sync
+= self
.pc_changed
.eq(0)
1387 sync
+= self
.msr_changed
.eq(0)
1388 with m
.If(core
.p
.o_ready
): # only move if accepted
1389 m
.next
= "INSN_ACTIVE" # move to "wait completion"
1391 # instruction started: must wait till it finishes
1392 with m
.State("INSN_ACTIVE"):
1393 # note changes to MSR, PC and SVSTATE
1394 # XXX oops, really must monitor *all* State Regfile write
1395 # ports looking for changes!
1396 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.SVSTATE
)):
1397 sync
+= self
.sv_changed
.eq(1)
1398 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.MSR
)):
1399 sync
+= self
.msr_changed
.eq(1)
1400 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.PC
)):
1401 sync
+= self
.pc_changed
.eq(1)
1402 with m
.If(~core_busy_o
): # instruction done!
1403 comb
+= exec_pc_o_valid
.eq(1)
1404 with m
.If(exec_pc_i_ready
):
1405 # when finished, indicate "done".
1406 # however, if there was an exception, the instruction
1407 # is *not* yet done. this is an implementation
1408 # detail: we choose to implement exceptions by
1409 # taking the exception information from the LDST
1410 # unit, putting that *back* into the PowerDecoder2,
1411 # and *re-running the entire instruction*.
1412 # if we erroneously indicate "done" here, it is as if
1413 # there were *TWO* instructions:
1414 # 1) the failed LDST 2) a TRAP.
1415 with m
.If(~pdecode2
.ldst_exc
.happened
&
1416 ~pdecode2
.instr_fault
):
1417 comb
+= self
.insn_done
.eq(1)
1418 m
.next
= "INSN_START" # back to fetch
1419 # terminate returns directly to INSN_START
1420 with m
.If(dbg
.terminate_i
):
1421 # comb += self.insn_done.eq(1) - no because it's not
1422 m
.next
= "INSN_START" # back to fetch
1424 def elaborate(self
, platform
):
1425 m
= super().elaborate(platform
)
1427 comb
, sync
= m
.d
.comb
, m
.d
.sync
1428 cur_state
= self
.cur_state
1429 pdecode2
= self
.pdecode2
1433 # set up peripherals and core
1434 core_rst
= self
.core_rst
1436 # indicate to outside world if any FU is still executing
1437 comb
+= self
.any_busy
.eq(core
.n
.o_data
.any_busy_o
) # any FU executing
1439 # address of the next instruction, in the absence of a branch
1440 # depends on the instruction size
1443 # connect up debug signals
1444 with m
.If(core
.o
.core_terminate_o
):
1445 comb
+= dbg
.terminate_i
.eq(1)
1447 # pass the prefix mode from Fetch to Issue, so the latter can loop
1449 is_svp64_mode
= Signal()
1451 # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
1452 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1453 # these are the handshake signals between each
1455 # fetch FSM can run as soon as the PC is valid
1456 fetch_pc_i_valid
= Signal() # Execute tells Fetch "start next read"
1457 fetch_pc_o_ready
= Signal() # Fetch Tells SVSTATE "proceed"
1459 # fetch FSM hands over the instruction to be decoded / issued
1460 fetch_insn_o_valid
= Signal()
1461 fetch_insn_i_ready
= Signal()
1463 # predicate fetch FSM decodes and fetches the predicate
1464 pred_insn_i_valid
= Signal()
1465 pred_insn_o_ready
= Signal()
1467 # predicate fetch FSM delivers the masks
1468 pred_mask_o_valid
= Signal()
1469 pred_mask_i_ready
= Signal()
1471 # issue FSM delivers the instruction to the be executed
1472 exec_insn_i_valid
= Signal()
1473 exec_insn_o_ready
= Signal()
1475 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1476 exec_pc_o_valid
= Signal()
1477 exec_pc_i_ready
= Signal()
1479 # the FSMs here are perhaps unusual in that they detect conditions
1480 # then "hold" information, combinatorially, for the core
1481 # (as opposed to using sync - which would be on a clock's delay)
1482 # this includes the actual opcode, valid flags and so on.
1484 # Fetch, then predicate fetch, then Issue, then Execute.
1485 # Issue is where the VL for-loop # lives. the ready/valid
1486 # signalling is used to communicate between the four.
1489 fetch
= FetchFSM(self
.allow_overlap
, self
.svp64_en
,
1490 self
.imem
, core_rst
, pdecode2
, cur_state
,
1492 dbg
.state
.svstate
, # combinatorially same
1494 m
.submodules
.fetch
= fetch
1495 # connect up in/out data to existing Signals
1496 comb
+= fetch
.p
.i_data
.pc
.eq(dbg
.state
.pc
) # combinatorially same
1497 comb
+= fetch
.p
.i_data
.msr
.eq(dbg
.state
.msr
) # combinatorially same
1498 # and the ready/valid signalling
1499 comb
+= fetch_pc_o_ready
.eq(fetch
.p
.o_ready
)
1500 comb
+= fetch
.p
.i_valid
.eq(fetch_pc_i_valid
)
1501 comb
+= fetch_insn_o_valid
.eq(fetch
.n
.o_valid
)
1502 comb
+= fetch
.n
.i_ready
.eq(fetch_insn_i_ready
)
1504 self
.issue_fsm(m
, core
, nia
,
1505 dbg
, core_rst
, is_svp64_mode
,
1506 fetch_pc_o_ready
, fetch_pc_i_valid
,
1507 fetch_insn_o_valid
, fetch_insn_i_ready
,
1508 pred_insn_i_valid
, pred_insn_o_ready
,
1509 pred_mask_o_valid
, pred_mask_i_ready
,
1510 exec_insn_i_valid
, exec_insn_o_ready
,
1511 exec_pc_o_valid
, exec_pc_i_ready
)
1514 self
.fetch_predicate_fsm(m
,
1515 pred_insn_i_valid
, pred_insn_o_ready
,
1516 pred_mask_o_valid
, pred_mask_i_ready
)
1518 self
.execute_fsm(m
, core
,
1519 exec_insn_i_valid
, exec_insn_o_ready
,
1520 exec_pc_o_valid
, exec_pc_i_ready
)
class TestIssuer(Elaboratable):
    """TestIssuer top-level wrapper.

    wraps TestIssuerInternal, adding an (optional) PLL and the clock /
    reset wiring needed to run the core either directly from the external
    clock or from the PLL output.
    """
    # NOTE(review): reconstructed from a line-mangled extraction.  lines
    # that were not visible ("if self.pll_en:" guards, "m = Module()",
    # "m.domains +=", the "def ports" header and "return" statements)
    # were restored by inference, marked "(restored)" -- verify against
    # the project's history.

    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        # XXX TODO: make this a command-line selectable option from pspec
        #from soc.simple.inorder import TestIssuerInternalInOrder
        #self.ti = TestIssuerInternalInOrder(pspec)
        self.pll = DummyPLL(instance=True)

        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        if self.pll_en:  # (restored)
            self.pll_test_o = Signal(reset_less=True)
            self.pll_vco_o = Signal(reset_less=True)
            self.clk_sel_i = Signal(2, reset_less=True)
            self.ref_clk = ClockSignal()  # can't rename it but that's ok
            self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        m = Module()  # (restored)
        comb = m.d.comb  # (restored)

        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        if self.pll_en:  # (restored)
            # ClockSelect runs at PLL output internal clock rate
            m.submodules.wrappll = pll = self.pll

            # add clock domains from PLL
            cd_pll = ClockDomain("pllclk")
            m.domains += cd_pll  # (restored)

            # PLL clock established.  has the side-effect of running clklsel
            # at the PLL's speed (see DomainRenamer("pllclk") above)
            pllclk = self.pllclk_clk
            comb += pllclk.eq(pll.clk_pll_o)

            # wire up external 24mhz to PLL
            #comb += pll.clk_24_i.eq(self.ref_clk)
            # output 18 mhz PLL test signal, and analog oscillator out
            comb += self.pll_test_o.eq(pll.pll_test_o)
            comb += self.pll_vco_o.eq(pll.pll_vco_o)

            # input to pll clock selection
            comb += pll.clk_sel_i.eq(self.clk_sel_i)

            # now wire up ResetSignals.  don't mind them being in this domain
            pll_rst = ResetSignal("pllclk")
            comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
            # NOTE(review): cd_dbgsync is created but never registered via
            # m.domains in the visible code -- confirm intent.
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        if self.pll_en:  # (restored)
            comb += intclk.eq(self.ref_clk)
            assert self.ti.core_domain != 'sync', \
                "cannot set core_domain to sync and use pll at the same time"
        else:  # (restored)
            if self.ti.core_domain != 'sync':
                comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)

        return m  # (restored)

    def ports(self):  # (restored header)
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        # pll ports
        ports.append(self.clk_sel_i)
        ports.append(self.pll.clk_24_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
        ports.append(self.pllclk_clk)
        ports.append(self.ref_clk)
        return ports  # (restored)
1620 if __name__
== '__main__':
1621 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1627 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1628 imem_ifacetype
='bare_wb',
1633 dut
= TestIssuer(pspec
)
1634 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1636 if len(sys
.argv
) == 1:
1637 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1638 with
open("test_issuer.il", "w") as f
: