3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Return the 32-bit raw instruction word from the fetch-unit output.

    Parameters:
      f_instr_o -- the fetch unit's instruction-data output signal; its
                   ``.width`` is either 32 or 64 depending on the memory
                   bus configuration.
      pc        -- program counter; only bit 2 is inspected, to choose
                   which 32-bit half of a 64-bit fetch holds the insn.

    NOTE(review): the 32-bit branch (orig. lines 58-59) was lost in the
    mangled source and has been reconstructed: a 32-bit-wide bus already
    holds exactly one instruction, so it is returned unchanged.
    """
    if f_instr_o.width == 32:
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, core_rst, state_i, name, regfile, regnum):
    """Return a 64-bit Signal carrying one item of processor state.

    If the incoming override ``state_i.ok`` is asserted, the override
    value ``state_i.data`` is used combinatorially.  Otherwise a read of
    the state regfile is initiated (unary read-enable ``1 << regnum``)
    and the data is picked up one clock later: ``res_ok_delay`` is the
    registered inverse of ``state_i.ok`` and therefore marks the cycle
    on which the regfile output is valid.

    Parameters:
      m        -- nmigen Module under construction
      core_rst -- while held, no regfile read is initiated
      state_i  -- Data record (data, ok): incoming override
      name     -- base name for the generated signals
      regfile  -- state regfile read port (unary-encoded .ren)
      regnum   -- which state register to read (e.g. StateRegs.PC)

    NOTE(review): reconstructed from a mangled source in which several
    original lines (incl. the m.Else() and the return) were missing --
    verify against upstream soc/simple/issuer.py before relying on it.
    """
    comb = m.d.comb
    sync = m.d.sync
    res = Signal(64, reset_less=True, name=name)
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        # mark the *next* cycle as the one where regfile data is valid
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for PC...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.o_data)
    return res
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert

    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3

    Returns (regread, invert, unary, all1s) -- four Signals driven
    combinatorially from ``mask``.

    NOTE(review): the Switch statement and the invert.eq(1) lines for the
    _N (inverted) cases were missing from the mangled source and have been
    reconstructed to mirror ISACaller's decode table.
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)  # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status

    this is identical to _get_predcr in ISACaller

    Returns (idx, invert):
      * idx    -- 2-bit index of the CR bit to test (CR.LT/GT/EQ/SO)
      * invert -- whether the selected CR bit is to be inverted

    NOTE(review): the Switch statement and every invert.eq(...) line were
    missing from the mangled source; reconstructed from ISACaller's
    _get_predcr table (LT/GT/EQ/SO -> invert=1, GE/LE/NE/NS -> invert=0).
    Confirm the invert polarity against upstream before relying on it.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
161 # Fetch Finite State Machine.
162 # WARNING: there are currently DriverConflicts but it's actually working.
163 # TODO, here: everything that is global in nature, information from the
164 # main TestIssuerInternal, needs to move to either ispec() or ospec().
165 # not only that: TestIssuerInternal.imem can entirely move into here
166 # because imem is only ever accessed inside the FetchFSM.
# NOTE(review): this class body is a lossy, mangled extraction: original
# source lines are split across fragments (the leading integers are the
# original line numbers) and many lines are missing outright, including
# the setup/ispec/ospec bodies (orig 191-198), several Else branches,
# and the tail of elaborate() (orig 338-346).  The text below is kept
# byte-identical; recover the true code from upstream soc/simple/issuer.py
# before attempting any behavioral change.
167 class FetchFSM(ControlBase
):
168     def __init__(self
, allow_overlap
, svp64_en
, imem
, core_rst
,
170 dbg
, core
, svstate
, nia
, is_svp64_mode
):
171 self
.allow_overlap
= allow_overlap
172 self
.svp64_en
= svp64_en
174 self
.core_rst
= core_rst
175 self
.pdecode2
= pdecode2
176 self
.cur_state
= cur_state
179 self
.svstate
= svstate
181 self
.is_svp64_mode
= is_svp64_mode
183 # set up pipeline ControlBase and allocate i/o specs
184 # (unusual: normally done by the Pipeline API)
185 super().__init
__(stage
=self
)
186 self
.p
.i_data
, self
.n
.o_data
= self
.new_specs(None)
187 self
.i
, self
.o
= self
.p
.i_data
, self
.n
.o_data
189 # next 3 functions are Stage API Compliance
190 def setup(self
, m
, i
):
# NOTE(review): setup() body and the ispec()/ospec() methods
# (orig 191-198) are missing from this extraction.
199 def elaborate(self
, platform
):
202 this FSM performs fetch of raw instruction data, partial-decodes
203 it 32-bit at a time to detect SVP64 prefixes, and will optionally
204 read a 2nd 32-bit quantity if that occurs.
206 m
= super().elaborate(platform
)
# NOTE(review): local aliases (dbg, core, pc, comb, sync, svp64 at
# orig 207-210, 218-220) are missing here but referenced below.
211 svstate
= self
.svstate
213 is_svp64_mode
= self
.is_svp64_mode
214 fetch_pc_o_ready
= self
.p
.o_ready
215 fetch_pc_i_valid
= self
.p
.i_valid
216 fetch_insn_o_valid
= self
.n
.o_valid
217 fetch_insn_i_ready
= self
.n
.i_ready
221 pdecode2
= self
.pdecode2
222 cur_state
= self
.cur_state
223 dec_opcode_o
= pdecode2
.dec
.raw_opcode_in
# raw opcode
225 msr_read
= Signal(reset
=1)
227 # don't read msr every cycle
228 staterf
= self
.core
.regs
.rf
['state']
229 state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
231 comb
+= state_r_msr
.ren
.eq(0)
233 with m
.FSM(name
='fetch_fsm'):
236 with m
.State("IDLE"):
237 with m
.If(~dbg
.stopping_o
):
238 comb
+= fetch_pc_o_ready
.eq(1)
239 with m
.If(fetch_pc_i_valid
):
240 # instruction allowed to go: start by reading the PC
241 # capture the PC and also drop it into Insn Memory
242 # we have joined a pair of combinatorial memory
243 # lookups together. this is Generally Bad.
244 comb
+= self
.imem
.a_pc_i
.eq(pc
)
245 comb
+= self
.imem
.a_i_valid
.eq(1)
246 comb
+= self
.imem
.f_i_valid
.eq(1)
247 sync
+= cur_state
.pc
.eq(pc
)
248 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
250 # initiate read of MSR. arrives one clock later
251 comb
+= state_r_msr
.ren
.eq(1 << StateRegs
.MSR
)
252 sync
+= msr_read
.eq(0)
254 m
.next
= "INSN_READ" # move to "wait for bus" phase
256 # dummy pause to find out why simulation is not keeping up
257 with m
.State("INSN_READ"):
258 if self
.allow_overlap
:
259 stopping
= dbg
.stopping_o
263 # stopping: jump back to idle
266 # one cycle later, msr/sv read arrives. valid only once.
267 with m
.If(~msr_read
):
268 sync
+= msr_read
.eq(1) # yeah don't read it again
269 sync
+= cur_state
.msr
.eq(state_r_msr
.o_data
)
270 with m
.If(self
.imem
.f_busy_o
): # zzz...
271 # busy: stay in wait-read
272 comb
+= self
.imem
.a_i_valid
.eq(1)
273 comb
+= self
.imem
.f_i_valid
.eq(1)
275 # not busy: instruction fetched
276 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
279 # decode the SVP64 prefix, if any
280 comb
+= svp64
.raw_opcode_in
.eq(insn
)
281 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
282 # pass the decoded prefix (if any) to PowerDecoder2
283 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
284 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
285 # remember whether this is a prefixed instruction,
286 # so the FSM can readily loop when VL==0
287 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
288 # calculate the address of the following instruction
289 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
290 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
291 with m
.If(~svp64
.is_svp64_mode
):
292 # with no prefix, store the instruction
293 # and hand it directly to the next FSM
294 sync
+= dec_opcode_o
.eq(insn
)
295 m
.next
= "INSN_READY"
297 # fetch the rest of the instruction from memory
298 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
299 comb
+= self
.imem
.a_i_valid
.eq(1)
300 comb
+= self
.imem
.f_i_valid
.eq(1)
301 m
.next
= "INSN_READ2"
303 # not SVP64 - 32-bit only
304 sync
+= nia
.eq(cur_state
.pc
+ 4)
305 sync
+= dec_opcode_o
.eq(insn
)
306 m
.next
= "INSN_READY"
308 with m
.State("INSN_READ2"):
309 with m
.If(self
.imem
.f_busy_o
): # zzz...
310 # busy: stay in wait-read
311 comb
+= self
.imem
.a_i_valid
.eq(1)
312 comb
+= self
.imem
.f_i_valid
.eq(1)
314 # not busy: instruction fetched
315 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
316 sync
+= dec_opcode_o
.eq(insn
)
317 m
.next
= "INSN_READY"
318 # TODO: probably can start looking at pdecode2.rm_dec
319 # here or maybe even in INSN_READ state, if svp64_mode
320 # detected, in order to trigger - and wait for - the
323 pmode
= pdecode2
.rm_dec
.predmode
325 if pmode != SVP64PredMode.ALWAYS.value:
326 fire predicate loading FSM and wait before
329 sync += self.srcmask.eq(-1) # set to all 1s
330 sync += self.dstmask.eq(-1) # set to all 1s
331 m.next = "INSN_READY"
334 with m
.State("INSN_READY"):
335 # hand over the instruction, to be decoded
336 comb
+= fetch_insn_o_valid
.eq(1)
337 with m
.If(fetch_insn_i_ready
):
340 # whatever was done above, over-ride it if core reset is held
341 with m
.If(self
.core_rst
):
# NOTE(review): the reset-override body and elaborate()'s return
# (orig 342-346) are missing from this extraction.
347 class TestIssuerInternal(Elaboratable
):
348 """TestIssuer - reads instructions from TestMemory and issues them
350 efficiency and speed is not the main goal here: functional correctness
351 and code clarity is. optimisations (which almost 100% interfere with
352 easy understanding) come later.
355 def __init__(self
, pspec
):
357 # test is SVP64 is to be enabled
358 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
360 # and if regfiles are reduced
361 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
362 (pspec
.regreduce
== True))
364 # and if overlap requested
365 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
366 (pspec
.allow_overlap
== True))
368 # JTAG interface. add this right at the start because if it's
369 # added it *modifies* the pspec, by adding enable/disable signals
370 # for parts of the rest of the core
371 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
372 self
.dbg_domain
= "sync" # sigh "dbgsunc" too problematic
373 # self.dbg_domain = "dbgsync" # domain for DMI/JTAG clock
375 # XXX MUST keep this up-to-date with litex, and
376 # soc-cocotb-sim, and err.. all needs sorting out, argh
379 'eint', 'gpio', 'mspi0',
380 # 'mspi1', - disabled for now
381 # 'pwm', 'sd0', - disabled for now
383 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
384 domain
=self
.dbg_domain
)
385 # add signals to pspec to enable/disable icache and dcache
386 # (or data and intstruction wishbone if icache/dcache not included)
387 # https://bugs.libre-soc.org/show_bug.cgi?id=520
388 # TODO: do we actually care if these are not domain-synchronised?
389 # honestly probably not.
390 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
391 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
392 self
.wb_sram_en
= self
.jtag
.wb_sram_en
394 self
.wb_sram_en
= Const(1)
396 # add 4k sram blocks?
397 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
398 pspec
.sram4x4kblock
== True)
402 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
406 # add interrupt controller?
407 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
409 self
.xics_icp
= XICS_ICP()
410 self
.xics_ics
= XICS_ICS()
411 self
.int_level_i
= self
.xics_ics
.int_level_i
413 # add GPIO peripheral?
414 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
416 self
.simple_gpio
= SimpleGPIO()
417 self
.gpio_o
= self
.simple_gpio
.gpio_o
419 # main instruction core. suitable for prototyping / demo only
420 self
.core
= core
= NonProductionCore(pspec
)
421 self
.core_rst
= ResetSignal("coresync")
423 # instruction decoder. goes into Trap Record
424 #pdecode = create_pdecode()
425 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
426 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
427 opkls
=IssuerDecode2ToOperand
,
428 svp64_en
=self
.svp64_en
,
429 regreduce_en
=self
.regreduce_en
)
430 pdecode
= self
.pdecode2
.dec
433 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
435 # Test Instruction memory
436 self
.imem
= ConfigFetchUnit(pspec
).fu
439 self
.dbg
= CoreDebug()
441 # instruction go/monitor
442 self
.pc_o
= Signal(64, reset_less
=True)
443 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
444 self
.svstate_i
= Data(64, "svstate_i") # ditto
445 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
446 self
.busy_o
= Signal(reset_less
=True)
447 self
.memerr_o
= Signal(reset_less
=True)
449 # STATE regfile read /write ports for PC, MSR, SVSTATE
450 staterf
= self
.core
.regs
.rf
['state']
451 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
452 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
453 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
454 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
456 # DMI interface access
457 intrf
= self
.core
.regs
.rf
['int']
458 crrf
= self
.core
.regs
.rf
['cr']
459 xerrf
= self
.core
.regs
.rf
['xer']
460 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
461 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
462 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
466 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
467 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
469 # hack method of keeping an eye on whether branch/trap set the PC
470 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
471 self
.state_nia
.wen
.name
= 'state_nia_wen'
473 # pulse to synchronize the simulator at instruction end
474 self
.insn_done
= Signal()
476 # indicate any instruction still outstanding, in execution
477 self
.any_busy
= Signal()
480 # store copies of predicate masks
481 self
.srcmask
= Signal(64)
482 self
.dstmask
= Signal(64)
484 def fetch_predicate_fsm(self
, m
,
485 pred_insn_i_valid
, pred_insn_o_ready
,
486 pred_mask_o_valid
, pred_mask_i_ready
):
487 """fetch_predicate_fsm - obtains (constructs in the case of CR)
488 src/dest predicate masks
490 https://bugs.libre-soc.org/show_bug.cgi?id=617
491 the predicates can be read here, by using IntRegs r_ports['pred']
492 or CRRegs r_ports['pred']. in the case of CRs it will have to
493 be done through multiple reads, extracting one relevant at a time.
494 later, a faster way would be to use the 32-bit-wide CR port but
495 this is more complex decoding, here. equivalent code used in
496 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
498 note: this ENTIRE FSM is not to be called when svp64 is disabled
502 pdecode2
= self
.pdecode2
503 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
504 predmode
= rm_dec
.predmode
505 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
506 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
507 # get src/dst step, so we can skip already used mask bits
508 cur_state
= self
.cur_state
509 srcstep
= cur_state
.svstate
.srcstep
510 dststep
= cur_state
.svstate
.dststep
511 cur_vl
= cur_state
.svstate
.vl
514 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
515 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
516 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
517 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
519 # store fetched masks, for either intpred or crpred
520 # when src/dst step is not zero, the skipped mask bits need to be
521 # shifted-out, before actually storing them in src/dest mask
522 new_srcmask
= Signal(64, reset_less
=True)
523 new_dstmask
= Signal(64, reset_less
=True)
525 with m
.FSM(name
="fetch_predicate"):
527 with m
.State("FETCH_PRED_IDLE"):
528 comb
+= pred_insn_o_ready
.eq(1)
529 with m
.If(pred_insn_i_valid
):
530 with m
.If(predmode
== SVP64PredMode
.INT
):
531 # skip fetching destination mask register, when zero
533 sync
+= new_dstmask
.eq(-1)
534 # directly go to fetch source mask register
535 # guaranteed not to be zero (otherwise predmode
536 # would be SVP64PredMode.ALWAYS, not INT)
537 comb
+= int_pred
.addr
.eq(sregread
)
538 comb
+= int_pred
.ren
.eq(1)
539 m
.next
= "INT_SRC_READ"
540 # fetch destination predicate register
542 comb
+= int_pred
.addr
.eq(dregread
)
543 comb
+= int_pred
.ren
.eq(1)
544 m
.next
= "INT_DST_READ"
545 with m
.Elif(predmode
== SVP64PredMode
.CR
):
546 # go fetch masks from the CR register file
547 sync
+= new_srcmask
.eq(0)
548 sync
+= new_dstmask
.eq(0)
551 sync
+= self
.srcmask
.eq(-1)
552 sync
+= self
.dstmask
.eq(-1)
553 m
.next
= "FETCH_PRED_DONE"
555 with m
.State("INT_DST_READ"):
556 # store destination mask
557 inv
= Repl(dinvert
, 64)
559 # set selected mask bit for 1<<r3 mode
560 dst_shift
= Signal(range(64))
561 comb
+= dst_shift
.eq(self
.int_pred
.o_data
& 0b111111)
562 sync
+= new_dstmask
.eq(1 << dst_shift
)
564 # invert mask if requested
565 sync
+= new_dstmask
.eq(self
.int_pred
.o_data ^ inv
)
566 # skip fetching source mask register, when zero
568 sync
+= new_srcmask
.eq(-1)
569 m
.next
= "FETCH_PRED_SHIFT_MASK"
570 # fetch source predicate register
572 comb
+= int_pred
.addr
.eq(sregread
)
573 comb
+= int_pred
.ren
.eq(1)
574 m
.next
= "INT_SRC_READ"
576 with m
.State("INT_SRC_READ"):
578 inv
= Repl(sinvert
, 64)
580 # set selected mask bit for 1<<r3 mode
581 src_shift
= Signal(range(64))
582 comb
+= src_shift
.eq(self
.int_pred
.o_data
& 0b111111)
583 sync
+= new_srcmask
.eq(1 << src_shift
)
585 # invert mask if requested
586 sync
+= new_srcmask
.eq(self
.int_pred
.o_data ^ inv
)
587 m
.next
= "FETCH_PRED_SHIFT_MASK"
589 # fetch masks from the CR register file
590 # implements the following loop:
591 # idx, inv = get_predcr(mask)
593 # for cr_idx in range(vl):
594 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
596 # mask |= 1 << cr_idx
598 with m
.State("CR_READ"):
599 # CR index to be read, which will be ready by the next cycle
600 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
601 # submit the read operation to the regfile
602 with m
.If(cr_idx
!= cur_vl
):
603 # the CR read port is unary ...
605 # ... in MSB0 convention ...
606 # ren = 1 << (7 - cr_idx)
607 # ... and with an offset:
608 # ren = 1 << (7 - off - cr_idx)
609 idx
= SVP64CROffs
.CRPred
+ cr_idx
610 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
611 # signal data valid in the next cycle
612 cr_read
= Signal(reset_less
=True)
613 sync
+= cr_read
.eq(1)
614 # load the next index
615 sync
+= cr_idx
.eq(cr_idx
+ 1)
618 sync
+= cr_read
.eq(0)
620 m
.next
= "FETCH_PRED_SHIFT_MASK"
622 # compensate for the one cycle delay on the regfile
623 cur_cr_idx
= Signal
.like(cur_vl
)
624 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
625 # read the CR field, select the appropriate bit
629 comb
+= cr_field
.eq(cr_pred
.o_data
)
630 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1)
632 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1)
634 # set the corresponding mask bit
635 bit_to_set
= Signal
.like(self
.srcmask
)
636 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
638 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
640 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
642 with m
.State("FETCH_PRED_SHIFT_MASK"):
643 # shift-out skipped mask bits
644 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
645 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
646 m
.next
= "FETCH_PRED_DONE"
648 with m
.State("FETCH_PRED_DONE"):
649 comb
+= pred_mask_o_valid
.eq(1)
650 with m
.If(pred_mask_i_ready
):
651 m
.next
= "FETCH_PRED_IDLE"
653 def issue_fsm(self
, m
, core
, pc_changed
, sv_changed
, nia
,
654 dbg
, core_rst
, is_svp64_mode
,
655 fetch_pc_o_ready
, fetch_pc_i_valid
,
656 fetch_insn_o_valid
, fetch_insn_i_ready
,
657 pred_insn_i_valid
, pred_insn_o_ready
,
658 pred_mask_o_valid
, pred_mask_i_ready
,
659 exec_insn_i_valid
, exec_insn_o_ready
,
660 exec_pc_o_valid
, exec_pc_i_ready
):
663 decode / issue FSM. this interacts with the "fetch" FSM
664 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
665 (outgoing). also interacts with the "execute" FSM
666 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
668 SVP64 RM prefixes have already been set up by the
669 "fetch" phase, so execute is fairly straightforward.
674 pdecode2
= self
.pdecode2
675 cur_state
= self
.cur_state
678 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
680 # for updating svstate (things like srcstep etc.)
681 update_svstate
= Signal() # set this (below) if updating
682 new_svstate
= SVSTATERec("new_svstate")
683 comb
+= new_svstate
.eq(cur_state
.svstate
)
685 # precalculate srcstep+1 and dststep+1
686 cur_srcstep
= cur_state
.svstate
.srcstep
687 cur_dststep
= cur_state
.svstate
.dststep
688 next_srcstep
= Signal
.like(cur_srcstep
)
689 next_dststep
= Signal
.like(cur_dststep
)
690 comb
+= next_srcstep
.eq(cur_state
.svstate
.srcstep
+1)
691 comb
+= next_dststep
.eq(cur_state
.svstate
.dststep
+1)
693 # note if an exception happened. in a pipelined or OoO design
694 # this needs to be accompanied by "shadowing" (or stalling)
695 exc_happened
= self
.core
.o
.exc_happened
697 with m
.FSM(name
="issue_fsm"):
699 # sync with the "fetch" phase which is reading the instruction
700 # at this point, there is no instruction running, that
701 # could inadvertently update the PC.
702 with m
.State("ISSUE_START"):
703 # wait on "core stop" release, before next fetch
704 # need to do this here, in case we are in a VL==0 loop
705 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
706 comb
+= fetch_pc_i_valid
.eq(1) # tell fetch to start
707 with m
.If(fetch_pc_o_ready
): # fetch acknowledged us
710 # tell core it's stopped, and acknowledge debug handshake
711 comb
+= dbg
.core_stopped_i
.eq(1)
712 # while stopped, allow updating the PC and SVSTATE
713 with m
.If(self
.pc_i
.ok
):
714 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
715 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
716 sync
+= pc_changed
.eq(1)
717 with m
.If(self
.svstate_i
.ok
):
718 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
719 comb
+= update_svstate
.eq(1)
720 sync
+= sv_changed
.eq(1)
722 # wait for an instruction to arrive from Fetch
723 with m
.State("INSN_WAIT"):
724 if self
.allow_overlap
:
725 stopping
= dbg
.stopping_o
729 # stopping: jump back to idle
730 m
.next
= "ISSUE_START"
732 comb
+= fetch_insn_i_ready
.eq(1)
733 with m
.If(fetch_insn_o_valid
):
734 # loop into ISSUE_START if it's a SVP64 instruction
735 # and VL == 0. this because VL==0 is a for-loop
736 # from 0 to 0 i.e. always, always a NOP.
737 cur_vl
= cur_state
.svstate
.vl
738 with m
.If(is_svp64_mode
& (cur_vl
== 0)):
739 # update the PC before fetching the next instruction
740 # since we are in a VL==0 loop, no instruction was
741 # executed that we could be overwriting
742 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
743 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
744 comb
+= self
.insn_done
.eq(1)
745 m
.next
= "ISSUE_START"
748 m
.next
= "PRED_START" # fetching predicate
750 m
.next
= "DECODE_SV" # skip predication
752 with m
.State("PRED_START"):
753 comb
+= pred_insn_i_valid
.eq(1) # tell fetch_pred to start
754 with m
.If(pred_insn_o_ready
): # fetch_pred acknowledged us
757 with m
.State("MASK_WAIT"):
758 comb
+= pred_mask_i_ready
.eq(1) # ready to receive the masks
759 with m
.If(pred_mask_o_valid
): # predication masks are ready
762 # skip zeros in predicate
763 with m
.State("PRED_SKIP"):
764 with m
.If(~is_svp64_mode
):
765 m
.next
= "DECODE_SV" # nothing to do
768 pred_src_zero
= pdecode2
.rm_dec
.pred_sz
769 pred_dst_zero
= pdecode2
.rm_dec
.pred_dz
771 # new srcstep, after skipping zeros
772 skip_srcstep
= Signal
.like(cur_srcstep
)
773 # value to be added to the current srcstep
774 src_delta
= Signal
.like(cur_srcstep
)
775 # add leading zeros to srcstep, if not in zero mode
776 with m
.If(~pred_src_zero
):
777 # priority encoder (count leading zeros)
778 # append guard bit, in case the mask is all zeros
779 pri_enc_src
= PriorityEncoder(65)
780 m
.submodules
.pri_enc_src
= pri_enc_src
781 comb
+= pri_enc_src
.i
.eq(Cat(self
.srcmask
,
783 comb
+= src_delta
.eq(pri_enc_src
.o
)
784 # apply delta to srcstep
785 comb
+= skip_srcstep
.eq(cur_srcstep
+ src_delta
)
786 # shift-out all leading zeros from the mask
787 # plus the leading "one" bit
788 # TODO count leading zeros and shift-out the zero
789 # bits, in the same step, in hardware
790 sync
+= self
.srcmask
.eq(self
.srcmask
>> (src_delta
+1))
792 # same as above, but for dststep
793 skip_dststep
= Signal
.like(cur_dststep
)
794 dst_delta
= Signal
.like(cur_dststep
)
795 with m
.If(~pred_dst_zero
):
796 pri_enc_dst
= PriorityEncoder(65)
797 m
.submodules
.pri_enc_dst
= pri_enc_dst
798 comb
+= pri_enc_dst
.i
.eq(Cat(self
.dstmask
,
800 comb
+= dst_delta
.eq(pri_enc_dst
.o
)
801 comb
+= skip_dststep
.eq(cur_dststep
+ dst_delta
)
802 sync
+= self
.dstmask
.eq(self
.dstmask
>> (dst_delta
+1))
804 # TODO: initialize mask[VL]=1 to avoid passing past VL
805 with m
.If((skip_srcstep
>= cur_vl
) |
806 (skip_dststep
>= cur_vl
)):
807 # end of VL loop. Update PC and reset src/dst step
808 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
809 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
810 comb
+= new_svstate
.srcstep
.eq(0)
811 comb
+= new_svstate
.dststep
.eq(0)
812 comb
+= update_svstate
.eq(1)
813 # synchronize with the simulator
814 comb
+= self
.insn_done
.eq(1)
816 m
.next
= "ISSUE_START"
818 # update new src/dst step
819 comb
+= new_svstate
.srcstep
.eq(skip_srcstep
)
820 comb
+= new_svstate
.dststep
.eq(skip_dststep
)
821 comb
+= update_svstate
.eq(1)
825 # pass predicate mask bits through to satellite decoders
826 # TODO: for SIMD this will be *multiple* bits
827 sync
+= core
.i
.sv_pred_sm
.eq(self
.srcmask
[0])
828 sync
+= core
.i
.sv_pred_dm
.eq(self
.dstmask
[0])
830 # after src/dst step have been updated, we are ready
831 # to decode the instruction
832 with m
.State("DECODE_SV"):
833 # decode the instruction
834 sync
+= core
.i
.e
.eq(pdecode2
.e
)
835 sync
+= core
.i
.state
.eq(cur_state
)
836 sync
+= core
.i
.raw_insn_i
.eq(dec_opcode_i
)
837 sync
+= core
.i
.bigendian_i
.eq(self
.core_bigendian_i
)
839 sync
+= core
.i
.sv_rm
.eq(pdecode2
.sv_rm
)
840 # set RA_OR_ZERO detection in satellite decoders
841 sync
+= core
.i
.sv_a_nz
.eq(pdecode2
.sv_a_nz
)
842 # and svp64 detection
843 sync
+= core
.i
.is_svp64_mode
.eq(is_svp64_mode
)
844 # and svp64 bit-rev'd ldst mode
845 ldst_dec
= pdecode2
.use_svp64_ldst_dec
846 sync
+= core
.i
.use_svp64_ldst_dec
.eq(ldst_dec
)
847 # after decoding, reset any previous exception condition,
848 # allowing it to be set again during the next execution
849 sync
+= pdecode2
.ldst_exc
.eq(0)
851 m
.next
= "INSN_EXECUTE" # move to "execute"
853 # handshake with execution FSM, move to "wait" once acknowledged
854 with m
.State("INSN_EXECUTE"):
855 comb
+= exec_insn_i_valid
.eq(1) # trigger execute
856 with m
.If(exec_insn_o_ready
): # execute acknowledged us
857 m
.next
= "EXECUTE_WAIT"
859 with m
.State("EXECUTE_WAIT"):
860 # wait on "core stop" release, at instruction end
861 # need to do this here, in case we are in a VL>1 loop
862 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
863 comb
+= exec_pc_i_ready
.eq(1)
864 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
865 # the exception info needs to be blatted into
866 # pdecode.ldst_exc, and the instruction "re-run".
867 # when ldst_exc.happened is set, the PowerDecoder2
868 # reacts very differently: it re-writes the instruction
869 # with a "trap" (calls PowerDecoder2.trap()) which
870 # will *overwrite* whatever was requested and jump the
871 # PC to the exception address, as well as alter MSR.
872 # nothing else needs to be done other than to note
873 # the change of PC and MSR (and, later, SVSTATE)
874 with m
.If(exc_happened
):
875 sync
+= pdecode2
.ldst_exc
.eq(core
.fus
.get_exc("ldst0"))
877 with m
.If(exec_pc_o_valid
):
879 # was this the last loop iteration?
881 cur_vl
= cur_state
.svstate
.vl
882 comb
+= is_last
.eq(next_srcstep
== cur_vl
)
884 # return directly to Decode if Execute generated an
886 with m
.If(pdecode2
.ldst_exc
.happened
):
889 # if either PC or SVSTATE were changed by the previous
890 # instruction, go directly back to Fetch, without
891 # updating either PC or SVSTATE
892 with m
.Elif(pc_changed | sv_changed
):
893 m
.next
= "ISSUE_START"
895 # also return to Fetch, when no output was a vector
896 # (regardless of SRCSTEP and VL), or when the last
897 # instruction was really the last one of the VL loop
898 with m
.Elif((~pdecode2
.loop_continue
) | is_last
):
899 # before going back to fetch, update the PC state
900 # register with the NIA.
901 # ok here we are not reading the branch unit.
902 # TODO: this just blithely overwrites whatever
903 # pipeline updated the PC
904 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
905 comb
+= self
.state_w_pc
.i_data
.eq(nia
)
906 # reset SRCSTEP before returning to Fetch
908 with m
.If(pdecode2
.loop_continue
):
909 comb
+= new_svstate
.srcstep
.eq(0)
910 comb
+= new_svstate
.dststep
.eq(0)
911 comb
+= update_svstate
.eq(1)
913 comb
+= new_svstate
.srcstep
.eq(0)
914 comb
+= new_svstate
.dststep
.eq(0)
915 comb
+= update_svstate
.eq(1)
916 m
.next
= "ISSUE_START"
918 # returning to Execute? then, first update SRCSTEP
920 comb
+= new_svstate
.srcstep
.eq(next_srcstep
)
921 comb
+= new_svstate
.dststep
.eq(next_dststep
)
922 comb
+= update_svstate
.eq(1)
923 # return to mask skip loop
927 comb
+= dbg
.core_stopped_i
.eq(1)
928 # while stopped, allow updating the PC and SVSTATE
929 with m
.If(self
.pc_i
.ok
):
930 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
931 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
932 sync
+= pc_changed
.eq(1)
933 with m
.If(self
.svstate_i
.ok
):
934 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
935 comb
+= update_svstate
.eq(1)
936 sync
+= sv_changed
.eq(1)
938 # check if svstate needs updating: if so, write it to State Regfile
939 with m
.If(update_svstate
):
940 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
941 comb
+= self
.state_w_sv
.i_data
.eq(new_svstate
)
942 sync
+= cur_state
.svstate
.eq(new_svstate
) # for next clock
944 def execute_fsm(self
, m
, core
, pc_changed
, sv_changed
,
945 exec_insn_i_valid
, exec_insn_o_ready
,
946 exec_pc_o_valid
, exec_pc_i_ready
):
949 execute FSM. this interacts with the "issue" FSM
950 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
951 (outgoing). SVP64 RM prefixes have already been set up by the
952 "issue" phase, so execute is fairly straightforward.
957 pdecode2
= self
.pdecode2
960 core_busy_o
= core
.n
.o_data
.busy_o
# core is busy
961 core_ivalid_i
= core
.p
.i_valid
# instruction is valid
963 with m
.FSM(name
="exec_fsm"):
965 # waiting for instruction bus (stays there until not busy)
966 with m
.State("INSN_START"):
967 comb
+= exec_insn_o_ready
.eq(1)
968 with m
.If(exec_insn_i_valid
):
969 comb
+= core_ivalid_i
.eq(1) # instruction is valid/issued
970 sync
+= sv_changed
.eq(0)
971 sync
+= pc_changed
.eq(0)
972 with m
.If(core
.p
.o_ready
): # only move if accepted
973 m
.next
= "INSN_ACTIVE" # move to "wait completion"
975 # instruction started: must wait till it finishes
976 with m
.State("INSN_ACTIVE"):
977 # note changes to PC and SVSTATE
978 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.SVSTATE
)):
979 sync
+= sv_changed
.eq(1)
980 with m
.If(self
.state_nia
.wen
& (1 << StateRegs
.PC
)):
981 sync
+= pc_changed
.eq(1)
982 with m
.If(~core_busy_o
): # instruction done!
983 comb
+= exec_pc_o_valid
.eq(1)
984 with m
.If(exec_pc_i_ready
):
985 # when finished, indicate "done".
986 # however, if there was an exception, the instruction
987 # is *not* yet done. this is an implementation
988 # detail: we choose to implement exceptions by
989 # taking the exception information from the LDST
990 # unit, putting that *back* into the PowerDecoder2,
991 # and *re-running the entire instruction*.
992 # if we erroneously indicate "done" here, it is as if
993 # there were *TWO* instructions:
994 # 1) the failed LDST 2) a TRAP.
995 with m
.If(~pdecode2
.ldst_exc
.happened
):
996 comb
+= self
.insn_done
.eq(1)
997 m
.next
= "INSN_START" # back to fetch
999 def setup_peripherals(self
, m
):
1000 comb
, sync
= m
.d
.comb
, m
.d
.sync
1002 # okaaaay so the debug module must be in coresync clock domain
1003 # but NOT its reset signal. to cope with this, set every single
1004 # submodule explicitly in coresync domain, debug and JTAG
1005 # in their own one but using *external* reset.
1006 csd
= DomainRenamer("coresync")
1007 dbd
= DomainRenamer(self
.dbg_domain
)
1009 m
.submodules
.core
= core
= csd(self
.core
)
1010 m
.submodules
.imem
= imem
= csd(self
.imem
)
1011 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
1013 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
1014 # TODO: UART2GDB mux, here, from external pin
1015 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
1016 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
1018 cur_state
= self
.cur_state
1020 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
1022 for i
, sram
in enumerate(self
.sram4k
):
1023 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
1024 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
1026 # XICS interrupt handler
1028 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
1029 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
1030 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
1031 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
1033 # GPIO test peripheral
1035 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
1037 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
1038 # XXX causes litex ECP5 test to get wrong idea about input and output
1039 # (but works with verilator sim *sigh*)
1040 # if self.gpio and self.xics:
1041 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
1043 # instruction decoder
1044 pdecode
= create_pdecode()
1045 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
1047 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
1050 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
1051 intrf
= self
.core
.regs
.rf
['int']
1053 # clock delay power-on reset
1054 cd_por
= ClockDomain(reset_less
=True)
1055 cd_sync
= ClockDomain()
1056 core_sync
= ClockDomain("coresync")
1057 m
.domains
+= cd_por
, cd_sync
, core_sync
1058 if self
.dbg_domain
!= "sync":
1059 dbg_sync
= ClockDomain(self
.dbg_domain
)
1060 m
.domains
+= dbg_sync
1062 ti_rst
= Signal(reset_less
=True)
1063 delay
= Signal(range(4), reset
=3)
1064 with m
.If(delay
!= 0):
1065 m
.d
.por
+= delay
.eq(delay
- 1)
1066 comb
+= cd_por
.clk
.eq(ClockSignal())
1068 # power-on reset delay
1069 core_rst
= ResetSignal("coresync")
1070 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
1071 comb
+= core_rst
.eq(ti_rst
)
1073 # debug clock is same as coresync, but reset is *main external*
1074 if self
.dbg_domain
!= "sync":
1075 dbg_rst
= ResetSignal(self
.dbg_domain
)
1076 comb
+= dbg_rst
.eq(ResetSignal())
1078 # busy/halted signals from core
1079 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
1080 comb
+= self
.busy_o
.eq(core_busy_o
)
1081 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
1083 # temporary hack: says "go" immediately for both address gen and ST
1085 ldst
= core
.fus
.fus
['ldst0']
1086 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
1087 # link addr-go direct to rel
1088 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
1089 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
    def elaborate(self, platform):
        """top-level elaboration: instantiate peripherals, wire up debug
        state, create the inter-FSM handshake signals, and instantiate
        the fetch / predicate / issue / execute FSMs.

        NOTE(review): several lines are elided in this view of the file:
        the "m = Module()" creation, the bindings of "core", "dbg" and
        "nia", the "pc" name argument to the first state_get() call, the
        self.do_dmi() call, and the final "return m".  the body below
        reproduces only what is visible - confirm against the full file.
        """
        comb, sync = m.d.comb, m.d.sync
        cur_state = self.cur_state
        pdecode2 = self.pdecode2

        # set up peripherals and core
        core_rst = self.core_rst
        self.setup_peripherals(m)

        # reset current state if core reset requested
        with m.If(core_rst):
            m.d.sync += self.cur_state.eq(0)

        # PC and instruction from I-Memory
        comb += self.pc_o.eq(cur_state.pc)
        pc_changed = Signal()  # note write to PC
        sv_changed = Signal()  # note write to SVSTATE

        # indicate to outside world if any FU is still executing
        comb += self.any_busy.eq(core.n.o_data.any_busy_o)  # any FU executing

        # read state either from incoming override or from regfile
        # TODO: really should be doing MSR in the same way
        pc = state_get(m, core_rst, self.pc_i,
                       self.state_r_pc, StateRegs.PC)
        svstate = state_get(m, core_rst, self.svstate_i,
                            "svstate",  # read SVSTATE
                            self.state_r_sv, StateRegs.SVSTATE)

        # don't write pc every cycle
        comb += self.state_w_pc.wen.eq(0)
        comb += self.state_w_pc.i_data.eq(0)

        # address of the next instruction, in the absence of a branch
        # depends on the instruction size

        # connect up debug signals
        # TODO comb += core.icache_rst_i.eq(dbg.icache_rst_o)
        comb += dbg.terminate_i.eq(core.o.core_terminate_o)
        comb += dbg.state.pc.eq(pc)
        comb += dbg.state.svstate.eq(svstate)
        comb += dbg.state.msr.eq(cur_state.msr)

        # pass the prefix mode from Fetch to Issue, so the latter can loop
        is_svp64_mode = Signal()

        # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
        # issue, decode/execute, now joined by "Predicate fetch/calculate".
        # these are the handshake signals between each

        # fetch FSM can run as soon as the PC is valid
        fetch_pc_i_valid = Signal()  # Execute tells Fetch "start next read"
        fetch_pc_o_ready = Signal()  # Fetch Tells SVSTATE "proceed"

        # fetch FSM hands over the instruction to be decoded / issued
        fetch_insn_o_valid = Signal()
        fetch_insn_i_ready = Signal()

        # predicate fetch FSM decodes and fetches the predicate
        pred_insn_i_valid = Signal()
        pred_insn_o_ready = Signal()

        # predicate fetch FSM delivers the masks
        pred_mask_o_valid = Signal()
        pred_mask_i_ready = Signal()

        # issue FSM delivers the instruction to the be executed
        exec_insn_i_valid = Signal()
        exec_insn_o_ready = Signal()

        # execute FSM, hands over the PC/SVSTATE back to the issue FSM
        exec_pc_o_valid = Signal()
        exec_pc_i_ready = Signal()

        # the FSMs here are perhaps unusual in that they detect conditions
        # then "hold" information, combinatorially, for the core
        # (as opposed to using sync - which would be on a clock's delay)
        # this includes the actual opcode, valid flags and so on.

        # Fetch, then predicate fetch, then Issue, then Execute.
        # Issue is where the VL for-loop # lives.  the ready/valid
        # signalling is used to communicate between the four.

        fetch = FetchFSM(self.allow_overlap, self.svp64_en,
                         self.imem, core_rst, pdecode2, cur_state,
                         dbg, core, svstate, nia, is_svp64_mode)
        m.submodules.fetch = fetch
        # connect up in/out data to existing Signals
        comb += fetch.p.i_data.pc.eq(pc)
        # and the ready/valid signalling
        comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
        comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
        comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
        comb += fetch.n.i_ready.eq(fetch_insn_i_ready)

        self.issue_fsm(m, core, pc_changed, sv_changed, nia,
                       dbg, core_rst, is_svp64_mode,
                       fetch_pc_o_ready, fetch_pc_i_valid,
                       fetch_insn_o_valid, fetch_insn_i_ready,
                       pred_insn_i_valid, pred_insn_o_ready,
                       pred_mask_o_valid, pred_mask_i_ready,
                       exec_insn_i_valid, exec_insn_o_ready,
                       exec_pc_o_valid, exec_pc_i_ready)

        self.fetch_predicate_fsm(m,
                                 pred_insn_i_valid, pred_insn_o_ready,
                                 pred_mask_o_valid, pred_mask_i_ready)

        self.execute_fsm(m, core, pc_changed, sv_changed,
                         exec_insn_i_valid, exec_insn_o_ready,
                         exec_pc_o_valid, exec_pc_i_ready)

        # this bit doesn't have to be in the FSM: connect up to read
        # regfiles on demand from DMI

        # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
        # (which uses that in PowerDecoder2 to raise 0x900 exception)
        self.tb_dec_fsm(m, cur_state.dec)
1222 def do_dmi(self
, m
, dbg
):
1223 """deals with DMI debug requests
1225 currently only provides read requests for the INT regfile, CR and XER
1226 it will later also deal with *writing* to these regfiles.
1230 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
1231 intrf
= self
.core
.regs
.rf
['int']
1233 with m
.If(d_reg
.req
): # request for regfile access being made
1234 # TODO: error-check this
1235 # XXX should this be combinatorial? sync better?
1237 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
1239 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
1240 comb
+= self
.int_r
.ren
.eq(1)
1241 d_reg_delay
= Signal()
1242 sync
+= d_reg_delay
.eq(d_reg
.req
)
1243 with m
.If(d_reg_delay
):
1244 # data arrives one clock later
1245 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
1246 comb
+= d_reg
.ack
.eq(1)
1248 # sigh same thing for CR debug
1249 with m
.If(d_cr
.req
): # request for regfile access being made
1250 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
1251 d_cr_delay
= Signal()
1252 sync
+= d_cr_delay
.eq(d_cr
.req
)
1253 with m
.If(d_cr_delay
):
1254 # data arrives one clock later
1255 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
1256 comb
+= d_cr
.ack
.eq(1)
1259 with m
.If(d_xer
.req
): # request for regfile access being made
1260 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
1261 d_xer_delay
= Signal()
1262 sync
+= d_xer_delay
.eq(d_xer
.req
)
1263 with m
.If(d_xer_delay
):
1264 # data arrives one clock later
1265 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
1266 comb
+= d_xer
.ack
.eq(1)
    def tb_dec_fsm(self, m, spr_dec):
        """tb_dec_fsm

        this is a FSM for updating either dec or tb.  it runs alternately
        DEC, TB, DEC, TB.  note that SPR pipeline could have written a new
        value to DEC, however the regfile has "passthrough" on it so this
        is picked up.

        see v3.0B p1097-1099 for Timer Resource and p1065 and p1076

        NOTE(review): the "m.next" state transitions out of DEC_WRITE,
        TB_READ and TB_WRITE, and the "new_tb = Signal(64)" declaration,
        are elided in this view - confirm against the full file.
        """
        comb, sync = m.d.comb, m.d.sync
        fast_rf = self.core.regs.rf['fast']
        # one shared read and one shared write port serve both DEC and TB
        fast_r_dectb = fast_rf.r_ports['issue']  # DEC/TB
        fast_w_dectb = fast_rf.w_ports['issue']  # DEC/TB

        with m.FSM() as fsm:

            # initiates read of current DEC
            with m.State("DEC_READ"):
                comb += fast_r_dectb.addr.eq(FastRegs.DEC)
                comb += fast_r_dectb.ren.eq(1)
                m.next = "DEC_WRITE"

            # waits for DEC read to arrive (1 cycle), updates with new value
            with m.State("DEC_WRITE"):
                new_dec = Signal(64)
                # TODO: MSR.LPCR 32-bit decrement mode
                comb += new_dec.eq(fast_r_dectb.o_data - 1)
                comb += fast_w_dectb.addr.eq(FastRegs.DEC)
                comb += fast_w_dectb.wen.eq(1)
                comb += fast_w_dectb.i_data.eq(new_dec)
                sync += spr_dec.eq(new_dec)  # copy into cur_state for decoder
                # NOTE(review): transition to "TB_READ" elided in this view

            # initiates read of current TB
            with m.State("TB_READ"):
                comb += fast_r_dectb.addr.eq(FastRegs.TB)
                comb += fast_r_dectb.ren.eq(1)
                # NOTE(review): transition to "TB_WRITE" elided in this view

            # waits for read TB to arrive, initiates write of current TB
            with m.State("TB_WRITE"):
                # NOTE(review): "new_tb = Signal(64)" appears elided from
                # this view - new_tb is referenced below
                comb += new_tb.eq(fast_r_dectb.o_data + 1)
                comb += fast_w_dectb.addr.eq(FastRegs.TB)
                comb += fast_w_dectb.wen.eq(1)
                comb += fast_w_dectb.i_data.eq(new_tb)
                # NOTE(review): transition back to "DEC_READ" elided
    def ports(self):
        # NOTE(review): this generator's "def" header and some of its
        # yields are elided in the reviewed view; header reconstructed
        # minimally so the file parses - confirm against the full file.
        # yields the signals of interest for simulation/linting
        yield from self.pc_i.ports()
        yield from self.core.ports()
        yield from self.imem.ports()
        yield self.core_bigendian_i
    def external_ports(self):
        """collect the signals to be brought out of the module
        (for litex integration / ASIC top-level).

        NOTE(review): this view elides the remainder of the first list
        literal (closing bracket restored here so the file parses), the
        if/else guards normally surrounding the JTAG-vs-DMI, SRAM, XICS
        and GPIO sections, and the final "return ports" - confirm
        against the full file.
        """
        ports = self.pc_i.ports()
        ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
                  ]
        ports += list(self.jtag.external_ports())
        # don't add DMI if JTAG is enabled
        ports += list(self.dbg.dmi.ports())

        # instruction-fetch and load/store wishbone buses
        ports += list(self.imem.ibus.fields.values())
        ports += list(self.core.l0.cmpi.wb_bus().fields.values())

        # 4k SRAM block buses
        for sram in self.sram4k:
            ports += list(sram.bus.fields.values())

        # XICS interrupt controller buses and external interrupt lines
        ports += list(self.xics_icp.bus.fields.values())
        ports += list(self.xics_ics.bus.fields.values())
        ports.append(self.int_level_i)

        # GPIO peripheral bus and pins
        ports += list(self.simple_gpio.bus.fields.values())
        ports.append(self.gpio_o)
class TestIssuer(Elaboratable):
    """TestIssuer - wraps TestIssuerInternal together with a (dummy) PLL
    and clock-selection logic, exposing PLL test/VCO outputs and the
    clock-select input at the top level.
    """
    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        self.pll_test_o = Signal(reset_less=True)
        self.pll_vco_o = Signal(reset_less=True)
        self.clk_sel_i = Signal(2, reset_less=True)
        self.ref_clk = ClockSignal()  # can't rename it but that's ok
        self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        # NOTE(review): "m = Module()", the comb binding, the
        # "m.domains += cd_pll" registration, the if/else on self.pll_en
        # around the two intclk assignments, and the final "return m"
        # are elided in this view - confirm against the full file.
        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        # ClockSelect runs at PLL output internal clock rate
        m.submodules.wrappll = pll = self.pll

        # add clock domains from PLL
        cd_pll = ClockDomain("pllclk")

        # PLL clock established. has the side-effect of running clklsel
        # at the PLL's speed (see DomainRenamer("pllclk") above)
        pllclk = self.pllclk_clk
        comb += pllclk.eq(pll.clk_pll_o)

        # wire up external 24mhz to PLL
        #comb += pll.clk_24_i.eq(self.ref_clk)
        # output 18 mhz PLL test signal, and analog oscillator out
        comb += self.pll_test_o.eq(pll.pll_test_o)
        comb += self.pll_vco_o.eq(pll.pll_vco_o)

        # input to pll clock selection
        comb += pll.clk_sel_i.eq(self.clk_sel_i)

        # now wire up ResetSignals. don't mind them being in this domain
        pll_rst = ResetSignal("pllclk")
        comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out. has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        cd_coresync = ClockDomain("coresync")
        #m.domains += cd_coresync
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
            #m.domains += cd_dbgsync
        intclk = ClockSignal("coresync")
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # NOTE(review): these two assignments are presumably the two arms
        # of an elided "if self.pll_en:" / "else:" - as written the
        # second overrides the first
        comb += intclk.eq(self.ref_clk)
        comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)

    def ports(self):
        # NOTE(review): the "def" header of this method is elided in the
        # reviewed view; reconstructed minimally - confirm with full file.
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        """all top-level ports: the internal issuer's externals plus
        clock/reset and the PLL-related pins."""
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())

        ports.append(self.clk_sel_i)
        ports.append(self.pll.clk_24_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
        ports.append(self.pllclk_clk)
        ports.append(self.ref_clk)
        # NOTE(review): the "return ports" is elided in this view
1454 if __name__
== '__main__':
1455 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1461 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1462 imem_ifacetype
='bare_wb',
1467 dut
= TestIssuer(pspec
)
1468 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1470 if len(sys
.argv
) == 1:
1471 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1472 with
open("test_issuer.il", "w") as f
: