3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmigen
.lib
.coding
import PriorityEncoder
26 from openpower
.decoder
.power_decoder
import create_pdecode
27 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
28 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
29 from openpower
.decoder
.decode2execute1
import Data
30 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
32 from openpower
.state
import CoreState
33 from openpower
.consts
import (CR
, SVP64CROffs
)
34 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
35 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
36 from soc
.simple
.core
import NonProductionCore
37 from soc
.config
.test
.test_loadstore
import TestMemPspec
38 from soc
.config
.ifetch
import ConfigFetchUnit
39 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
40 from soc
.debug
.jtag
import JTAG
41 from soc
.config
.pinouts
import get_pinspecs
42 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
43 from soc
.bus
.simple_gpio
import SimpleGPIO
44 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
45 from soc
.clock
.select
import ClockSelect
46 from soc
.clock
.dummypll
import DummyPLL
47 from openpower
.sv
.svstate
import SVSTATERec
50 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Select the 32-bit instruction word from the fetch-unit output.

    :param f_instr_o: instruction-fetch output (32 or 64 bits wide)
    :param pc: program counter; bit 2 selects the word on a 64-bit bus
    :return: the 32-bit instruction at ``pc``

    NOTE(review): the 32-bit ``return`` statement was missing from the
    garbled extraction and has been restored here.
    """
    if f_instr_o.width == 32:
        # fetch bus is exactly one instruction wide: pass straight through
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, core_rst, state_i, name, regfile, regnum):
    """Return a 64-bit Signal holding either an incoming override value
    (when ``state_i.ok`` is set) or the contents of ``regfile`` entry
    ``regnum`` (read on a 1-clock delay, because regfile reads take a
    cycle to arrive).

    NOTE(review): the ``comb``/``sync`` bindings, ``with m.If(~core_rst)``
    wrapper, ``with m.Else()`` arm and ``return res`` were missing from
    the garbled extraction and have been reconstructed — confirm against
    upstream soc.simple.issuer.
    """
    comb = m.d.comb
    sync = m.d.sync
    # the returned state value
    res = Signal(64, reset_less=True, name=name)
    # registered copy of "override not requested", one clock behind
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for PC...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.data_o)
    return res
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert
    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3

    NOTE(review): the ``with m.Switch(mask):`` wrapper and the
    ``invert.eq(1)`` lines for the _N (inverted) cases were missing from
    the garbled extraction and have been reconstructed.
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status
    this is identical to _get_predcr in ISACaller

    returns (idx, invert): idx selects the CR field bit (LT/GT/EQ/SO),
    invert indicates the selected bit must be XORed before use.

    NOTE(review): the ``with m.Switch(mask):`` wrapper and all of the
    ``invert.eq(...)`` lines were missing from the garbled extraction;
    the polarity (1 for LT/GT/EQ/SO, 0 for the complementary GE/LE/NE/NS
    cases) is reconstructed to match _get_predcr in ISACaller — confirm
    against upstream soc.simple.issuer before relying on it.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
153 class TestIssuerInternal(Elaboratable
):
154 """TestIssuer - reads instructions from TestMemory and issues them
156 efficiency and speed is not the main goal here: functional correctness
157 and code clarity is. optimisations (which almost 100% interfere with
158 easy understanding) come later.
160 def __init__(self
, pspec
):
162 # test is SVP64 is to be enabled
163 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
165 # and if regfiles are reduced
166 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
167 (pspec
.regreduce
== True))
169 # JTAG interface. add this right at the start because if it's
170 # added it *modifies* the pspec, by adding enable/disable signals
171 # for parts of the rest of the core
172 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
174 # XXX MUST keep this up-to-date with litex, and
175 # soc-cocotb-sim, and err.. all needs sorting out, argh
178 'eint', 'gpio', 'mspi0',
179 # 'mspi1', - disabled for now
180 # 'pwm', 'sd0', - disabled for now
182 self
.jtag
= JTAG(get_pinspecs(subset
=subset
))
183 # add signals to pspec to enable/disable icache and dcache
184 # (or data and intstruction wishbone if icache/dcache not included)
185 # https://bugs.libre-soc.org/show_bug.cgi?id=520
186 # TODO: do we actually care if these are not domain-synchronised?
187 # honestly probably not.
188 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
189 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
190 self
.wb_sram_en
= self
.jtag
.wb_sram_en
192 self
.wb_sram_en
= Const(1)
194 # add 4k sram blocks?
195 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
196 pspec
.sram4x4kblock
== True)
200 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
204 # add interrupt controller?
205 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
207 self
.xics_icp
= XICS_ICP()
208 self
.xics_ics
= XICS_ICS()
209 self
.int_level_i
= self
.xics_ics
.int_level_i
211 # add GPIO peripheral?
212 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
214 self
.simple_gpio
= SimpleGPIO()
215 self
.gpio_o
= self
.simple_gpio
.gpio_o
217 # main instruction core. suitable for prototyping / demo only
218 self
.core
= core
= NonProductionCore(pspec
)
219 self
.core_rst
= ResetSignal("coresync")
221 # instruction decoder. goes into Trap Record
222 pdecode
= create_pdecode()
223 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
224 self
.pdecode2
= PowerDecode2(pdecode
, state
=self
.cur_state
,
225 opkls
=IssuerDecode2ToOperand
,
226 svp64_en
=self
.svp64_en
,
227 regreduce_en
=self
.regreduce_en
)
229 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
231 # Test Instruction memory
232 self
.imem
= ConfigFetchUnit(pspec
).fu
235 self
.dbg
= CoreDebug()
237 # instruction go/monitor
238 self
.pc_o
= Signal(64, reset_less
=True)
239 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
240 self
.svstate_i
= Data(32, "svstate_i") # ditto
241 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
242 self
.busy_o
= Signal(reset_less
=True)
243 self
.memerr_o
= Signal(reset_less
=True)
245 # STATE regfile read /write ports for PC, MSR, SVSTATE
246 staterf
= self
.core
.regs
.rf
['state']
247 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
248 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
249 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
250 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
251 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
253 # DMI interface access
254 intrf
= self
.core
.regs
.rf
['int']
255 crrf
= self
.core
.regs
.rf
['cr']
256 xerrf
= self
.core
.regs
.rf
['xer']
257 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
258 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
259 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
263 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
264 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
266 # hack method of keeping an eye on whether branch/trap set the PC
267 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
268 self
.state_nia
.wen
.name
= 'state_nia_wen'
270 # pulse to synchronize the simulator at instruction end
271 self
.insn_done
= Signal()
274 # store copies of predicate masks
275 self
.srcmask
= Signal(64)
276 self
.dstmask
= Signal(64)
278 def fetch_fsm(self
, m
, core
, pc
, svstate
, nia
, is_svp64_mode
,
279 fetch_pc_ready_o
, fetch_pc_valid_i
,
280 fetch_insn_valid_o
, fetch_insn_ready_i
):
283 this FSM performs fetch of raw instruction data, partial-decodes
284 it 32-bit at a time to detect SVP64 prefixes, and will optionally
285 read a 2nd 32-bit quantity if that occurs.
289 pdecode2
= self
.pdecode2
290 cur_state
= self
.cur_state
291 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
293 msr_read
= Signal(reset
=1)
295 with m
.FSM(name
='fetch_fsm'):
298 with m
.State("IDLE"):
299 comb
+= fetch_pc_ready_o
.eq(1)
300 with m
.If(fetch_pc_valid_i
):
301 # instruction allowed to go: start by reading the PC
302 # capture the PC and also drop it into Insn Memory
303 # we have joined a pair of combinatorial memory
304 # lookups together. this is Generally Bad.
305 comb
+= self
.imem
.a_pc_i
.eq(pc
)
306 comb
+= self
.imem
.a_valid_i
.eq(1)
307 comb
+= self
.imem
.f_valid_i
.eq(1)
308 sync
+= cur_state
.pc
.eq(pc
)
309 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
311 # initiate read of MSR. arrives one clock later
312 comb
+= self
.state_r_msr
.ren
.eq(1 << StateRegs
.MSR
)
313 sync
+= msr_read
.eq(0)
315 m
.next
= "INSN_READ" # move to "wait for bus" phase
317 # dummy pause to find out why simulation is not keeping up
318 with m
.State("INSN_READ"):
319 # one cycle later, msr/sv read arrives. valid only once.
320 with m
.If(~msr_read
):
321 sync
+= msr_read
.eq(1) # yeah don't read it again
322 sync
+= cur_state
.msr
.eq(self
.state_r_msr
.data_o
)
323 with m
.If(self
.imem
.f_busy_o
): # zzz...
324 # busy: stay in wait-read
325 comb
+= self
.imem
.a_valid_i
.eq(1)
326 comb
+= self
.imem
.f_valid_i
.eq(1)
328 # not busy: instruction fetched
329 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
332 # decode the SVP64 prefix, if any
333 comb
+= svp64
.raw_opcode_in
.eq(insn
)
334 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
335 # pass the decoded prefix (if any) to PowerDecoder2
336 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
337 # remember whether this is a prefixed instruction, so
338 # the FSM can readily loop when VL==0
339 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
340 # calculate the address of the following instruction
341 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
342 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
343 with m
.If(~svp64
.is_svp64_mode
):
344 # with no prefix, store the instruction
345 # and hand it directly to the next FSM
346 sync
+= dec_opcode_i
.eq(insn
)
347 m
.next
= "INSN_READY"
349 # fetch the rest of the instruction from memory
350 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
351 comb
+= self
.imem
.a_valid_i
.eq(1)
352 comb
+= self
.imem
.f_valid_i
.eq(1)
353 m
.next
= "INSN_READ2"
355 # not SVP64 - 32-bit only
356 sync
+= nia
.eq(cur_state
.pc
+ 4)
357 sync
+= dec_opcode_i
.eq(insn
)
358 m
.next
= "INSN_READY"
360 with m
.State("INSN_READ2"):
361 with m
.If(self
.imem
.f_busy_o
): # zzz...
362 # busy: stay in wait-read
363 comb
+= self
.imem
.a_valid_i
.eq(1)
364 comb
+= self
.imem
.f_valid_i
.eq(1)
366 # not busy: instruction fetched
367 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
368 sync
+= dec_opcode_i
.eq(insn
)
369 m
.next
= "INSN_READY"
370 # TODO: probably can start looking at pdecode2.rm_dec
371 # here or maybe even in INSN_READ state, if svp64_mode
372 # detected, in order to trigger - and wait for - the
375 pmode
= pdecode2
.rm_dec
.predmode
377 if pmode != SVP64PredMode.ALWAYS.value:
378 fire predicate loading FSM and wait before
381 sync += self.srcmask.eq(-1) # set to all 1s
382 sync += self.dstmask.eq(-1) # set to all 1s
383 m.next = "INSN_READY"
386 with m
.State("INSN_READY"):
387 # hand over the instruction, to be decoded
388 comb
+= fetch_insn_valid_o
.eq(1)
389 with m
.If(fetch_insn_ready_i
):
392 def fetch_predicate_fsm(self
, m
,
393 pred_insn_valid_i
, pred_insn_ready_o
,
394 pred_mask_valid_o
, pred_mask_ready_i
):
395 """fetch_predicate_fsm - obtains (constructs in the case of CR)
396 src/dest predicate masks
398 https://bugs.libre-soc.org/show_bug.cgi?id=617
399 the predicates can be read here, by using IntRegs r_ports['pred']
400 or CRRegs r_ports['pred']. in the case of CRs it will have to
401 be done through multiple reads, extracting one relevant at a time.
402 later, a faster way would be to use the 32-bit-wide CR port but
403 this is more complex decoding, here. equivalent code used in
404 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
406 note: this ENTIRE FSM is not to be called when svp64 is disabled
410 pdecode2
= self
.pdecode2
411 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
412 predmode
= rm_dec
.predmode
413 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
414 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
415 # get src/dst step, so we can skip already used mask bits
416 cur_state
= self
.cur_state
417 srcstep
= cur_state
.svstate
.srcstep
418 dststep
= cur_state
.svstate
.dststep
419 cur_vl
= cur_state
.svstate
.vl
422 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
423 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
424 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
425 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
427 # store fetched masks, for either intpred or crpred
428 # when src/dst step is not zero, the skipped mask bits need to be
429 # shifted-out, before actually storing them in src/dest mask
430 new_srcmask
= Signal(64, reset_less
=True)
431 new_dstmask
= Signal(64, reset_less
=True)
433 with m
.FSM(name
="fetch_predicate"):
435 with m
.State("FETCH_PRED_IDLE"):
436 comb
+= pred_insn_ready_o
.eq(1)
437 with m
.If(pred_insn_valid_i
):
438 with m
.If(predmode
== SVP64PredMode
.INT
):
439 # skip fetching destination mask register, when zero
441 sync
+= new_dstmask
.eq(-1)
442 # directly go to fetch source mask register
443 # guaranteed not to be zero (otherwise predmode
444 # would be SVP64PredMode.ALWAYS, not INT)
445 comb
+= int_pred
.addr
.eq(sregread
)
446 comb
+= int_pred
.ren
.eq(1)
447 m
.next
= "INT_SRC_READ"
448 # fetch destination predicate register
450 comb
+= int_pred
.addr
.eq(dregread
)
451 comb
+= int_pred
.ren
.eq(1)
452 m
.next
= "INT_DST_READ"
453 with m
.Elif(predmode
== SVP64PredMode
.CR
):
454 # go fetch masks from the CR register file
455 sync
+= new_srcmask
.eq(0)
456 sync
+= new_dstmask
.eq(0)
459 sync
+= self
.srcmask
.eq(-1)
460 sync
+= self
.dstmask
.eq(-1)
461 m
.next
= "FETCH_PRED_DONE"
463 with m
.State("INT_DST_READ"):
464 # store destination mask
465 inv
= Repl(dinvert
, 64)
467 # set selected mask bit for 1<<r3 mode
468 dst_shift
= Signal(range(64))
469 comb
+= dst_shift
.eq(self
.int_pred
.data_o
& 0b111111)
470 sync
+= new_dstmask
.eq(1 << dst_shift
)
472 # invert mask if requested
473 sync
+= new_dstmask
.eq(self
.int_pred
.data_o ^ inv
)
474 # skip fetching source mask register, when zero
476 sync
+= new_srcmask
.eq(-1)
477 m
.next
= "FETCH_PRED_SHIFT_MASK"
478 # fetch source predicate register
480 comb
+= int_pred
.addr
.eq(sregread
)
481 comb
+= int_pred
.ren
.eq(1)
482 m
.next
= "INT_SRC_READ"
484 with m
.State("INT_SRC_READ"):
486 inv
= Repl(sinvert
, 64)
488 # set selected mask bit for 1<<r3 mode
489 src_shift
= Signal(range(64))
490 comb
+= src_shift
.eq(self
.int_pred
.data_o
& 0b111111)
491 sync
+= new_srcmask
.eq(1 << src_shift
)
493 # invert mask if requested
494 sync
+= new_srcmask
.eq(self
.int_pred
.data_o ^ inv
)
495 m
.next
= "FETCH_PRED_SHIFT_MASK"
497 # fetch masks from the CR register file
498 # implements the following loop:
499 # idx, inv = get_predcr(mask)
501 # for cr_idx in range(vl):
502 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
504 # mask |= 1 << cr_idx
506 with m
.State("CR_READ"):
507 # CR index to be read, which will be ready by the next cycle
508 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
509 # submit the read operation to the regfile
510 with m
.If(cr_idx
!= cur_vl
):
511 # the CR read port is unary ...
513 # ... in MSB0 convention ...
514 # ren = 1 << (7 - cr_idx)
515 # ... and with an offset:
516 # ren = 1 << (7 - off - cr_idx)
517 idx
= SVP64CROffs
.CRPred
+ cr_idx
518 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
519 # signal data valid in the next cycle
520 cr_read
= Signal(reset_less
=True)
521 sync
+= cr_read
.eq(1)
522 # load the next index
523 sync
+= cr_idx
.eq(cr_idx
+ 1)
526 sync
+= cr_read
.eq(0)
528 m
.next
= "FETCH_PRED_SHIFT_MASK"
530 # compensate for the one cycle delay on the regfile
531 cur_cr_idx
= Signal
.like(cur_vl
)
532 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
533 # read the CR field, select the appropriate bit
537 comb
+= cr_field
.eq(cr_pred
.data_o
)
538 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1) ^ scrinvert
)
539 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1) ^ dcrinvert
)
540 # set the corresponding mask bit
541 bit_to_set
= Signal
.like(self
.srcmask
)
542 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
544 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
546 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
548 with m
.State("FETCH_PRED_SHIFT_MASK"):
549 # shift-out skipped mask bits
550 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
551 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
552 m
.next
= "FETCH_PRED_DONE"
554 with m
.State("FETCH_PRED_DONE"):
555 comb
+= pred_mask_valid_o
.eq(1)
556 with m
.If(pred_mask_ready_i
):
557 m
.next
= "FETCH_PRED_IDLE"
559 def issue_fsm(self
, m
, core
, pc_changed
, sv_changed
, nia
,
560 dbg
, core_rst
, is_svp64_mode
,
561 fetch_pc_ready_o
, fetch_pc_valid_i
,
562 fetch_insn_valid_o
, fetch_insn_ready_i
,
563 pred_insn_valid_i
, pred_insn_ready_o
,
564 pred_mask_valid_o
, pred_mask_ready_i
,
565 exec_insn_valid_i
, exec_insn_ready_o
,
566 exec_pc_valid_o
, exec_pc_ready_i
):
569 decode / issue FSM. this interacts with the "fetch" FSM
570 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
571 (outgoing). also interacts with the "execute" FSM
572 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
574 SVP64 RM prefixes have already been set up by the
575 "fetch" phase, so execute is fairly straightforward.
580 pdecode2
= self
.pdecode2
581 cur_state
= self
.cur_state
584 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
586 # for updating svstate (things like srcstep etc.)
587 update_svstate
= Signal() # set this (below) if updating
588 new_svstate
= SVSTATERec("new_svstate")
589 comb
+= new_svstate
.eq(cur_state
.svstate
)
591 # precalculate srcstep+1 and dststep+1
592 cur_srcstep
= cur_state
.svstate
.srcstep
593 cur_dststep
= cur_state
.svstate
.dststep
594 next_srcstep
= Signal
.like(cur_srcstep
)
595 next_dststep
= Signal
.like(cur_dststep
)
596 comb
+= next_srcstep
.eq(cur_state
.svstate
.srcstep
+1)
597 comb
+= next_dststep
.eq(cur_state
.svstate
.dststep
+1)
599 # note if an exception happened. in a pipelined or OoO design
600 # this needs to be accompanied by "shadowing" (or stalling)
602 for exc
in core
.fus
.excs
.values():
603 el
.append(exc
.happened
)
604 exc_happened
= Signal()
605 if len(el
) > 0: # at least one exception
606 comb
+= exc_happened
.eq(Cat(*el
).bool())
608 with m
.FSM(name
="issue_fsm"):
610 # sync with the "fetch" phase which is reading the instruction
611 # at this point, there is no instruction running, that
612 # could inadvertently update the PC.
613 with m
.State("ISSUE_START"):
614 # wait on "core stop" release, before next fetch
615 # need to do this here, in case we are in a VL==0 loop
616 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
617 comb
+= fetch_pc_valid_i
.eq(1) # tell fetch to start
618 with m
.If(fetch_pc_ready_o
): # fetch acknowledged us
621 # tell core it's stopped, and acknowledge debug handshake
622 comb
+= dbg
.core_stopped_i
.eq(1)
623 # while stopped, allow updating the PC and SVSTATE
624 with m
.If(self
.pc_i
.ok
):
625 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
626 comb
+= self
.state_w_pc
.data_i
.eq(self
.pc_i
.data
)
627 sync
+= pc_changed
.eq(1)
628 with m
.If(self
.svstate_i
.ok
):
629 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
630 comb
+= update_svstate
.eq(1)
631 sync
+= sv_changed
.eq(1)
633 # wait for an instruction to arrive from Fetch
634 with m
.State("INSN_WAIT"):
635 comb
+= fetch_insn_ready_i
.eq(1)
636 with m
.If(fetch_insn_valid_o
):
637 # loop into ISSUE_START if it's a SVP64 instruction
638 # and VL == 0. this because VL==0 is a for-loop
639 # from 0 to 0 i.e. always, always a NOP.
640 cur_vl
= cur_state
.svstate
.vl
641 with m
.If(is_svp64_mode
& (cur_vl
== 0)):
642 # update the PC before fetching the next instruction
643 # since we are in a VL==0 loop, no instruction was
644 # executed that we could be overwriting
645 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
646 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
647 comb
+= self
.insn_done
.eq(1)
648 m
.next
= "ISSUE_START"
651 m
.next
= "PRED_START" # start fetching predicate
653 m
.next
= "DECODE_SV" # skip predication
655 with m
.State("PRED_START"):
656 comb
+= pred_insn_valid_i
.eq(1) # tell fetch_pred to start
657 with m
.If(pred_insn_ready_o
): # fetch_pred acknowledged us
660 with m
.State("MASK_WAIT"):
661 comb
+= pred_mask_ready_i
.eq(1) # ready to receive the masks
662 with m
.If(pred_mask_valid_o
): # predication masks are ready
665 # skip zeros in predicate
666 with m
.State("PRED_SKIP"):
667 with m
.If(~is_svp64_mode
):
668 m
.next
= "DECODE_SV" # nothing to do
671 pred_src_zero
= pdecode2
.rm_dec
.pred_sz
672 pred_dst_zero
= pdecode2
.rm_dec
.pred_dz
674 # new srcstep, after skipping zeros
675 skip_srcstep
= Signal
.like(cur_srcstep
)
676 # value to be added to the current srcstep
677 src_delta
= Signal
.like(cur_srcstep
)
678 # add leading zeros to srcstep, if not in zero mode
679 with m
.If(~pred_src_zero
):
680 # priority encoder (count leading zeros)
681 # append guard bit, in case the mask is all zeros
682 pri_enc_src
= PriorityEncoder(65)
683 m
.submodules
.pri_enc_src
= pri_enc_src
684 comb
+= pri_enc_src
.i
.eq(Cat(self
.srcmask
,
686 comb
+= src_delta
.eq(pri_enc_src
.o
)
687 # apply delta to srcstep
688 comb
+= skip_srcstep
.eq(cur_srcstep
+ src_delta
)
689 # shift-out all leading zeros from the mask
690 # plus the leading "one" bit
691 # TODO count leading zeros and shift-out the zero
692 # bits, in the same step, in hardware
693 sync
+= self
.srcmask
.eq(self
.srcmask
>> (src_delta
+1))
695 # same as above, but for dststep
696 skip_dststep
= Signal
.like(cur_dststep
)
697 dst_delta
= Signal
.like(cur_dststep
)
698 with m
.If(~pred_dst_zero
):
699 pri_enc_dst
= PriorityEncoder(65)
700 m
.submodules
.pri_enc_dst
= pri_enc_dst
701 comb
+= pri_enc_dst
.i
.eq(Cat(self
.dstmask
,
703 comb
+= dst_delta
.eq(pri_enc_dst
.o
)
704 comb
+= skip_dststep
.eq(cur_dststep
+ dst_delta
)
705 sync
+= self
.dstmask
.eq(self
.dstmask
>> (dst_delta
+1))
707 # TODO: initialize mask[VL]=1 to avoid passing past VL
708 with m
.If((skip_srcstep
>= cur_vl
) |
709 (skip_dststep
>= cur_vl
)):
710 # end of VL loop. Update PC and reset src/dst step
711 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
712 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
713 comb
+= new_svstate
.srcstep
.eq(0)
714 comb
+= new_svstate
.dststep
.eq(0)
715 comb
+= update_svstate
.eq(1)
716 # synchronize with the simulator
717 comb
+= self
.insn_done
.eq(1)
719 m
.next
= "ISSUE_START"
721 # update new src/dst step
722 comb
+= new_svstate
.srcstep
.eq(skip_srcstep
)
723 comb
+= new_svstate
.dststep
.eq(skip_dststep
)
724 comb
+= update_svstate
.eq(1)
728 # pass predicate mask bits through to satellite decoders
729 # TODO: for SIMD this will be *multiple* bits
730 sync
+= core
.sv_pred_sm
.eq(self
.srcmask
[0])
731 sync
+= core
.sv_pred_dm
.eq(self
.dstmask
[0])
733 # after src/dst step have been updated, we are ready
734 # to decode the instruction
735 with m
.State("DECODE_SV"):
736 # decode the instruction
737 sync
+= core
.e
.eq(pdecode2
.e
)
738 sync
+= core
.state
.eq(cur_state
)
739 sync
+= core
.raw_insn_i
.eq(dec_opcode_i
)
740 sync
+= core
.bigendian_i
.eq(self
.core_bigendian_i
)
742 sync
+= core
.sv_rm
.eq(pdecode2
.sv_rm
)
743 # set RA_OR_ZERO detection in satellite decoders
744 sync
+= core
.sv_a_nz
.eq(pdecode2
.sv_a_nz
)
746 m
.next
= "INSN_EXECUTE" # move to "execute"
748 # handshake with execution FSM, move to "wait" once acknowledged
749 with m
.State("INSN_EXECUTE"):
750 comb
+= exec_insn_valid_i
.eq(1) # trigger execute
751 with m
.If(exec_insn_ready_o
): # execute acknowledged us
752 m
.next
= "EXECUTE_WAIT"
754 with m
.State("EXECUTE_WAIT"):
755 # wait on "core stop" release, at instruction end
756 # need to do this here, in case we are in a VL>1 loop
757 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
758 comb
+= exec_pc_ready_i
.eq(1)
759 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
760 #with m.If(exec_pc_valid_o & exc_happened):
761 # probably something like this:
762 # sync += pdecode2.ldst_exc.eq(core.fus.get_exc("ldst0")
763 # TODO: the exception info needs to be blatted
764 # into pdecode.ldst_exc, and the instruction "re-run".
765 # when ldst_exc.happened is set, the PowerDecoder2
766 # reacts very differently: it re-writes the instruction
767 # with a "trap" (calls PowerDecoder2.trap()) which
768 # will *overwrite* whatever was requested and jump the
769 # PC to the exception address, as well as alter MSR.
770 # nothing else needs to be done other than to note
771 # the change of PC and MSR (and, later, SVSTATE)
772 #with m.Elif(exec_pc_valid_o):
773 with m
.If(exec_pc_valid_o
): # replace with Elif (above)
775 # was this the last loop iteration?
777 cur_vl
= cur_state
.svstate
.vl
778 comb
+= is_last
.eq(next_srcstep
== cur_vl
)
780 # if either PC or SVSTATE were changed by the previous
781 # instruction, go directly back to Fetch, without
782 # updating either PC or SVSTATE
783 with m
.If(pc_changed | sv_changed
):
784 m
.next
= "ISSUE_START"
786 # also return to Fetch, when no output was a vector
787 # (regardless of SRCSTEP and VL), or when the last
788 # instruction was really the last one of the VL loop
789 with m
.Elif((~pdecode2
.loop_continue
) | is_last
):
790 # before going back to fetch, update the PC state
791 # register with the NIA.
792 # ok here we are not reading the branch unit.
793 # TODO: this just blithely overwrites whatever
794 # pipeline updated the PC
795 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
796 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
797 # reset SRCSTEP before returning to Fetch
799 with m
.If(pdecode2
.loop_continue
):
800 comb
+= new_svstate
.srcstep
.eq(0)
801 comb
+= new_svstate
.dststep
.eq(0)
802 comb
+= update_svstate
.eq(1)
804 comb
+= new_svstate
.srcstep
.eq(0)
805 comb
+= new_svstate
.dststep
.eq(0)
806 comb
+= update_svstate
.eq(1)
807 m
.next
= "ISSUE_START"
809 # returning to Execute? then, first update SRCSTEP
811 comb
+= new_svstate
.srcstep
.eq(next_srcstep
)
812 comb
+= new_svstate
.dststep
.eq(next_dststep
)
813 comb
+= update_svstate
.eq(1)
814 # return to mask skip loop
818 comb
+= dbg
.core_stopped_i
.eq(1)
819 # while stopped, allow updating the PC and SVSTATE
820 with m
.If(self
.pc_i
.ok
):
821 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
822 comb
+= self
.state_w_pc
.data_i
.eq(self
.pc_i
.data
)
823 sync
+= pc_changed
.eq(1)
824 with m
.If(self
.svstate_i
.ok
):
825 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
826 comb
+= update_svstate
.eq(1)
827 sync
+= sv_changed
.eq(1)
829 # check if svstate needs updating: if so, write it to State Regfile
830 with m
.If(update_svstate
):
831 comb
+= self
.state_w_sv
.wen
.eq(1<<StateRegs
.SVSTATE
)
832 comb
+= self
.state_w_sv
.data_i
.eq(new_svstate
)
833 sync
+= cur_state
.svstate
.eq(new_svstate
) # for next clock
835 def execute_fsm(self
, m
, core
, pc_changed
, sv_changed
,
836 exec_insn_valid_i
, exec_insn_ready_o
,
837 exec_pc_valid_o
, exec_pc_ready_i
):
840 execute FSM. this interacts with the "issue" FSM
841 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
842 (outgoing). SVP64 RM prefixes have already been set up by the
843 "issue" phase, so execute is fairly straightforward.
848 pdecode2
= self
.pdecode2
851 core_busy_o
= core
.busy_o
# core is busy
852 core_ivalid_i
= core
.ivalid_i
# instruction is valid
853 core_issue_i
= core
.issue_i
# instruction is issued
854 insn_type
= core
.e
.do
.insn_type
# instruction MicroOp type
856 with m
.FSM(name
="exec_fsm"):
858 # waiting for instruction bus (stays there until not busy)
859 with m
.State("INSN_START"):
860 comb
+= exec_insn_ready_o
.eq(1)
861 with m
.If(exec_insn_valid_i
):
862 comb
+= core_ivalid_i
.eq(1) # instruction is valid
863 comb
+= core_issue_i
.eq(1) # and issued
864 sync
+= sv_changed
.eq(0)
865 sync
+= pc_changed
.eq(0)
866 m
.next
= "INSN_ACTIVE" # move to "wait completion"
868 # instruction started: must wait till it finishes
869 with m
.State("INSN_ACTIVE"):
870 with m
.If(insn_type
!= MicrOp
.OP_NOP
):
871 comb
+= core_ivalid_i
.eq(1) # instruction is valid
872 # note changes to PC and SVSTATE
873 with m
.If(self
.state_nia
.wen
& (1<<StateRegs
.SVSTATE
)):
874 sync
+= sv_changed
.eq(1)
875 with m
.If(self
.state_nia
.wen
& (1<<StateRegs
.PC
)):
876 sync
+= pc_changed
.eq(1)
877 with m
.If(~core_busy_o
): # instruction done!
878 comb
+= exec_pc_valid_o
.eq(1)
879 with m
.If(exec_pc_ready_i
):
880 comb
+= self
.insn_done
.eq(1)
881 m
.next
= "INSN_START" # back to fetch
def setup_peripherals(self, m):
    """Instantiate and wire up all non-core peripherals.

    Registers the core (renamed into the "coresync" clock domain),
    instruction memory, debug/JTAG, 4x SRAM blocks, XICS interrupt
    controller, GPIO, decoder submodules, and builds the power-on-reset
    clock-domain plumbing.  Mutates *m* in place; returns nothing.
    """
    comb, sync = m.d.comb, m.d.sync

    # core runs on its own (selectable) clock domain
    m.submodules.core = core = DomainRenamer("coresync")(self.core)
    m.submodules.imem = imem = self.imem
    m.submodules.dbg = dbg = self.dbg
    m.submodules.jtag = jtag = self.jtag
    # TODO: UART2GDB mux, here, from external pin
    # see https://bugs.libre-soc.org/show_bug.cgi?id=499
    sync += dbg.dmi.connect_to(jtag.dmi)

    cur_state = self.cur_state

    # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
    for i, sram in enumerate(self.sram4k):
        m.submodules["sram4k_%d" % i] = sram
        comb += sram.enable.eq(self.wb_sram_en)

    # XICS interrupt handler
    m.submodules.xics_icp = icp = self.xics_icp
    m.submodules.xics_ics = ics = self.xics_ics
    comb += icp.ics_i.eq(ics.icp_o)            # connect ICS to ICP
    sync += cur_state.eint.eq(icp.core_irq_o)  # connect ICP to core

    # GPIO test peripheral
    m.submodules.simple_gpio = simple_gpio = self.simple_gpio

    # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
    # XXX causes litex ECP5 test to get wrong idea about input and output
    # (but works with verilator sim *sigh*)
    #if self.gpio and self.xics:
    #   comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])

    # instruction decoder
    # NOTE(review): `pdecode`, `imem`, `svp64`, `dmi`/`d_reg`/`d_cr`/
    # `d_xer` and `intrf` are bound here but not used in this method;
    # presumably kept for symmetry / later use — confirm.
    pdecode = create_pdecode()
    m.submodules.dec2 = pdecode2 = self.pdecode2
    m.submodules.svp64 = svp64 = self.svp64

    dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
    intrf = self.core.regs.rf['int']

    # clock delay power-on reset
    cd_por = ClockDomain(reset_less=True)
    cd_sync = ClockDomain()
    core_sync = ClockDomain("coresync")
    m.domains += cd_por, cd_sync, core_sync

    # hold the internal reset for a few cycles after power-on
    ti_rst = Signal(reset_less=True)
    delay = Signal(range(4), reset=3)
    with m.If(delay != 0):
        m.d.por += delay.eq(delay - 1)
    comb += cd_por.clk.eq(ClockSignal())

    # power-on reset delay: reset is asserted while the POR countdown is
    # still running, OR when the debug interface requests a core reset,
    # OR when the external reset is asserted.
    # BUGFIX: was `delay != 0 | dbg.core_rst_o | ResetSignal()` — Python
    # `|` binds tighter than `!=`, so that parsed as
    # `delay != (0 | dbg.core_rst_o | ResetSignal())`, which de-asserts
    # reset when a reset source is active and delay == 1.  Parenthesise
    # the comparison so each term is OR-ed as intended.
    core_rst = ResetSignal("coresync")
    comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
    comb += core_rst.eq(ti_rst)

    # busy/halted signals from core
    comb += self.busy_o.eq(core.busy_o)
    comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)

    # temporary hack: says "go" immediately for both address gen and ST
    ldst = core.fus.fus['ldst0']
    st_go_edge = rising_edge(m, ldst.st.rel_o)
    m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)  # link addr-go direct to rel
    m.d.comb += ldst.st.go_i.eq(st_go_edge)     # link store-go to rising rel
def elaborate(self, platform):
    """Build the top-level issuer: peripherals, state plumbing and the
    four cooperating FSMs (fetch / predicate-fetch / issue / execute).

    NOTE(review): this extract is lossy.  `m` (normally `m = Module()`),
    `core`, `dbg` and `nia` are referenced below but their defining
    lines are not visible here — confirm against the upstream file.
    """
    comb, sync = m.d.comb, m.d.sync
    cur_state = self.cur_state
    pdecode2 = self.pdecode2

    # set up peripherals and core
    core_rst = self.core_rst
    self.setup_peripherals(m)

    # reset current state if core reset requested
    # NOTE(review): the comment above implies this assignment was guarded
    # by a `with m.If(core_rst):` whose line is not visible here — confirm.
    m.d.sync += self.cur_state.eq(0)

    # PC and instruction from I-Memory
    comb += self.pc_o.eq(cur_state.pc)
    pc_changed = Signal()  # note write to PC
    sv_changed = Signal()  # note write to SVSTATE

    # read state either from incoming override or from regfile
    # TODO: really should be doing MSR in the same way
    # NOTE(review): by symmetry with the svstate call below, a "pc" name
    # argument appears to be missing from this call — confirm.
    pc = state_get(m, core_rst, self.pc_i,
                   self.state_r_pc, StateRegs.PC)
    svstate = state_get(m, core_rst, self.svstate_i,
                        "svstate",  # read SVSTATE
                        self.state_r_sv, StateRegs.SVSTATE)

    # don't write pc every cycle
    comb += self.state_w_pc.wen.eq(0)
    comb += self.state_w_pc.data_i.eq(0)

    # don't read msr every cycle
    comb += self.state_r_msr.ren.eq(0)

    # address of the next instruction, in the absence of a branch
    # depends on the instruction size
    # NOTE(review): the definition of `nia` belongs here but is not
    # visible in this extract — presumed lost.

    # connect up debug signals
    # TODO comb += core.icache_rst_i.eq(dbg.icache_rst_o)
    comb += dbg.terminate_i.eq(core.core_terminate_o)
    comb += dbg.state.pc.eq(pc)
    comb += dbg.state.svstate.eq(svstate)
    comb += dbg.state.msr.eq(cur_state.msr)

    # pass the prefix mode from Fetch to Issue, so the latter can loop
    is_svp64_mode = Signal()

    # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
    # issue, decode/execute, now joined by "Predicate fetch/calculate".
    # these are the handshake signals between each

    # fetch FSM can run as soon as the PC is valid
    fetch_pc_valid_i = Signal()  # Execute tells Fetch "start next read"
    fetch_pc_ready_o = Signal()  # Fetch Tells SVSTATE "proceed"

    # fetch FSM hands over the instruction to be decoded / issued
    fetch_insn_valid_o = Signal()
    fetch_insn_ready_i = Signal()

    # predicate fetch FSM decodes and fetches the predicate
    pred_insn_valid_i = Signal()
    pred_insn_ready_o = Signal()

    # predicate fetch FSM delivers the masks
    pred_mask_valid_o = Signal()
    pred_mask_ready_i = Signal()

    # issue FSM delivers the instruction to the be executed
    exec_insn_valid_i = Signal()
    exec_insn_ready_o = Signal()

    # execute FSM, hands over the PC/SVSTATE back to the issue FSM
    exec_pc_valid_o = Signal()
    exec_pc_ready_i = Signal()

    # the FSMs here are perhaps unusual in that they detect conditions
    # then "hold" information, combinatorially, for the core
    # (as opposed to using sync - which would be on a clock's delay)
    # this includes the actual opcode, valid flags and so on.

    # Fetch, then predicate fetch, then Issue, then Execute.
    # Issue is where the VL for-loop # lives. the ready/valid
    # signalling is used to communicate between the four.
    self.fetch_fsm(m, core, pc, svstate, nia, is_svp64_mode,
                   fetch_pc_ready_o, fetch_pc_valid_i,
                   fetch_insn_valid_o, fetch_insn_ready_i)

    self.issue_fsm(m, core, pc_changed, sv_changed, nia,
                   dbg, core_rst, is_svp64_mode,
                   fetch_pc_ready_o, fetch_pc_valid_i,
                   fetch_insn_valid_o, fetch_insn_ready_i,
                   pred_insn_valid_i, pred_insn_ready_o,
                   pred_mask_valid_o, pred_mask_ready_i,
                   exec_insn_valid_i, exec_insn_ready_o,
                   exec_pc_valid_o, exec_pc_ready_i)

    self.fetch_predicate_fsm(m,
                             pred_insn_valid_i, pred_insn_ready_o,
                             pred_mask_valid_o, pred_mask_ready_i)

    self.execute_fsm(m, core, pc_changed, sv_changed,
                     exec_insn_valid_i, exec_insn_ready_o,
                     exec_pc_valid_o, exec_pc_ready_i)

    # whatever was done above, over-ride it if core reset is held
    with m.If(core_rst):
        # NOTE(review): the body of this override block is not visible
        # in this extract — presumed lost.

    # this bit doesn't have to be in the FSM: connect up to read
    # regfiles on demand from DMI
    # NOTE(review): a call such as `self.do_dmi(m, dbg)` appears to be
    # missing here (the comment above refers to it) — confirm.

    # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
    # (which uses that in PowerDecoder2 to raise 0x900 exception)
    self.tb_dec_fsm(m, cur_state.dec)

    # NOTE(review): `return m` is not visible at the end of this
    # method — presumed lost.
def do_dmi(self, m, dbg):
    """deals with DMI debug requests

    currently only provides read requests for the INT regfile, CR and XER
    it will later also deal with *writing* to these regfiles.
    """
    # NOTE(review): `comb` and `sync` are used below but the customary
    # `comb, sync = m.d.comb, m.d.sync` binding is not visible in this
    # extract — presumed lost.
    dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
    intrf = self.core.regs.rf['int']

    # ---- INT (GPR) regfile read via DMI ----
    with m.If(d_reg.req):  # request for regfile access being made
        # TODO: error-check this
        # XXX should this be combinatorial? sync better?
        # NOTE(review): `ren` is driven twice below (one-hot `1<<addr`,
        # then constant 1); with nmigen comb the last assignment wins,
        # and `intrf` is bound above but never used.  this looks like a
        # lost Python-level conditional selecting unary (one-hot) vs
        # binary regfile addressing — confirm against upstream.
        comb += self.int_r.ren.eq(1<<d_reg.addr)
        comb += self.int_r.addr.eq(d_reg.addr)
        comb += self.int_r.ren.eq(1)
    d_reg_delay = Signal()
    sync += d_reg_delay.eq(d_reg.req)  # regfile read latency is 1 cycle
    with m.If(d_reg_delay):
        # data arrives one clock later
        comb += d_reg.data.eq(self.int_r.data_o)
        comb += d_reg.ack.eq(1)

    # sigh same thing for CR debug
    with m.If(d_cr.req):  # request for regfile access being made
        comb += self.cr_r.ren.eq(0b11111111)  # enable all
    d_cr_delay = Signal()
    sync += d_cr_delay.eq(d_cr.req)
    with m.If(d_cr_delay):
        # data arrives one clock later
        comb += d_cr.data.eq(self.cr_r.data_o)
        comb += d_cr.ack.eq(1)

    # ---- XER regfile read via DMI (same 1-cycle-latency pattern) ----
    with m.If(d_xer.req):  # request for regfile access being made
        comb += self.xer_r.ren.eq(0b111111)  # enable all
    d_xer_delay = Signal()
    sync += d_xer_delay.eq(d_xer.req)
    with m.If(d_xer_delay):
        # data arrives one clock later
        comb += d_xer.data.eq(self.xer_r.data_o)
        comb += d_xer.ack.eq(1)
def tb_dec_fsm(self, m, spr_dec):
    """DEC (decrementer) and TB (timebase) update FSM.

    this is a FSM for updating either dec or tb. it runs alternately
    DEC, TB, DEC, TB. note that SPR pipeline could have written a new
    value to DEC, however the regfile has "passthrough" on it so this
    sees the latest value.

    see v3.0B p1097-1099 for Timer Resource and p1065 and p1076

    NOTE(review): this extract is lossy — the `new_tb` Signal
    definition and the state transitions out of DEC_WRITE / TB_READ /
    TB_WRITE are not visible; see inline notes.
    """
    comb, sync = m.d.comb, m.d.sync
    fast_rf = self.core.regs.rf['fast']
    fast_r_dectb = fast_rf.r_ports['issue']  # DEC/TB
    fast_w_dectb = fast_rf.w_ports['issue']  # DEC/TB

    with m.FSM() as fsm:

        # initiates read of current DEC
        with m.State("DEC_READ"):
            comb += fast_r_dectb.addr.eq(FastRegs.DEC)
            comb += fast_r_dectb.ren.eq(1)
            m.next = "DEC_WRITE"

        # waits for DEC read to arrive (1 cycle), updates with new value
        with m.State("DEC_WRITE"):
            new_dec = Signal(64)
            # TODO: MSR.LPCR 32-bit decrement mode
            comb += new_dec.eq(fast_r_dectb.data_o - 1)
            comb += fast_w_dectb.addr.eq(FastRegs.DEC)
            comb += fast_w_dectb.wen.eq(1)
            comb += fast_w_dectb.data_i.eq(new_dec)
            sync += spr_dec.eq(new_dec)  # copy into cur_state for decoder
            # NOTE(review): an `m.next = "TB_READ"` transition appears
            # to have been lost here — confirm.

        # initiates read of current TB
        with m.State("TB_READ"):
            comb += fast_r_dectb.addr.eq(FastRegs.TB)
            comb += fast_r_dectb.ren.eq(1)
            # NOTE(review): transition to "TB_WRITE" not visible —
            # presumed lost.

        # waits for read TB to arrive, initiates write of current TB
        with m.State("TB_WRITE"):
            # NOTE(review): `new_tb = Signal(64)` is not visible in this
            # extract but is required by the line below — presumed lost.
            comb += new_tb.eq(fast_r_dectb.data_o + 1)
            comb += fast_w_dectb.addr.eq(FastRegs.TB)
            comb += fast_w_dectb.wen.eq(1)
            comb += fast_w_dectb.data_i.eq(new_tb)
            # NOTE(review): transition back to "DEC_READ" not visible —
            # presumed lost.
1185 yield from self
.pc_i
.ports()
1188 yield from self
.core
.ports()
1189 yield from self
.imem
.ports()
1190 yield self
.core_bigendian_i
def external_ports(self):
    """Collect every signal that must appear at the module boundary:
    PC in/out, status flags, JTAG/DMI, instruction/data wishbone buses,
    SRAM buses, XICS buses and GPIO.

    NOTE(review): this extract is lossy — the tail of the first list
    literal, the guarding conditionals (e.g. jtag/xics/gpio enables
    suggested by the comments) and the final `return ports` are not
    visible; see inline notes.
    """
    ports = self.pc_i.ports()
    ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
              # NOTE(review): remainder of this list (and its closing
              # bracket) is not visible in this extract — presumed lost.
    ports += list(self.jtag.external_ports())

    # don't add DMI if JTAG is enabled
    # NOTE(review): the comment above implies this was the `else:`
    # branch of a JTAG-enable conditional — the `if`/`else` lines are
    # not visible here; confirm.
    ports += list(self.dbg.dmi.ports())

    ports += list(self.imem.ibus.fields.values())
    ports += list(self.core.l0.cmpi.wb_bus().fields.values())

    # one wishbone bus per 4k SRAM block
    for sram in self.sram4k:
        ports += list(sram.bus.fields.values())

    # XICS interrupt controller buses + external interrupt lines
    ports += list(self.xics_icp.bus.fields.values())
    ports += list(self.xics_ics.bus.fields.values())
    ports.append(self.int_level_i)

    # GPIO bus and output pins
    ports += list(self.simple_gpio.bus.fields.values())
    ports.append(self.gpio_o)

    # NOTE(review): `return ports` is not visible — presumed lost.
class TestIssuer(Elaboratable):
    """Top-level wrapper: TestIssuerInternal plus a (dummy) PLL and the
    clock-selection / reset plumbing needed to run the core either from
    the direct external clock or from the PLL output.

    NOTE(review): this extract of the class is lossy — several lines
    (e.g. `m = Module()` / `comb = m.d.comb`, domain registrations,
    the apparent `if self.pll_en:` selection, `return m`, and the
    `def ports(self):` header) are not visible; see inline notes.
    """
    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)  # the actual issuer/core
        self.pll = DummyPLL(instance=True)   # stand-in PLL

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        # PLL monitoring / control pins brought out to the top level
        self.pll_test_o = Signal(reset_less=True)
        self.pll_vco_o = Signal(reset_less=True)
        self.clk_sel_i = Signal(2, reset_less=True)

    def elaborate(self, platform):
        # NOTE(review): `m` and `comb` are used below but their defining
        # lines are not visible in this extract — presumed lost.

        # TestIssuer runs at direct clock
        m.submodules.ti = ti = self.ti
        cd_int = ClockDomain("coresync")

        # ClockSelect runs at PLL output internal clock rate
        m.submodules.wrappll = pll = self.pll

        # add clock domains from PLL
        cd_pll = ClockDomain("pllclk")
        # NOTE(review): the `m.domains += ...` registration of cd_pll /
        # cd_int is not visible here — presumed lost.

        # PLL clock established. has the side-effect of running clklsel
        # at the PLL's speed (see DomainRenamer("pllclk") above)
        pllclk = ClockSignal("pllclk")
        comb += pllclk.eq(pll.clk_pll_o)

        # wire up external 24mhz to PLL
        comb += pll.clk_24_i.eq(ClockSignal())

        # output 18 mhz PLL test signal, and analog oscillator out
        comb += self.pll_test_o.eq(pll.pll_test_o)
        comb += self.pll_vco_o.eq(pll.pll_vco_o)

        # input to pll clock selection
        comb += pll.clk_sel_i.eq(self.clk_sel_i)

        # now wire up ResetSignals. don't mind them being in this domain
        pll_rst = ResetSignal("pllclk")
        comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out. has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        intclk = ClockSignal("coresync")
        # NOTE(review): `intclk` is driven twice below; with comb
        # assignment the later one wins.  `self.pll_en` is computed in
        # __init__ but never used here, so these two drives look like
        # the branches of a lost `if self.pll_en: ... else: ...` —
        # confirm against upstream.
        comb += intclk.eq(pllclk)
        comb += intclk.eq(ClockSignal())

        # NOTE(review): `return m` is not visible at the end of this
        # method — presumed lost.

    # NOTE(review): a `def ports(self):` header appears to have been
    # lost here; the following return statement belongs to it.
        return list(self.ti.ports()) + list(self.pll.ports()) + \
                [ClockSignal(), ResetSignal()]

    def external_ports(self):
        # all internal ports, plus top-level clock/reset and PLL pins
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        ports.append(self.clk_sel_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
        # NOTE(review): `return ports` is not visible — presumed lost.
1302 if __name__
== '__main__':
1303 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1309 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1310 imem_ifacetype
='bare_wb',
1315 dut
= TestIssuer(pspec
)
1316 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1318 if len(sys
.argv
) == 1:
1319 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1320 with
open("test_issuer.il", "w") as f
: