1 """simple core issuer
2
3 not in any way intended for production use. this runs a FSM that:
4
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
9 * increments the PC
10 * does it all over again
11
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
15 improved.
16 """
17
18 from nmigen import (Elaboratable, Module, Signal, ClockSignal, ResetSignal,
19 ClockDomain, DomainRenamer, Mux, Const, Repl, Cat)
20 from nmigen.cli import rtlil
21 from nmigen.cli import main
22 import sys
23
24 from nmutil.singlepipe import ControlBase
25 from soc.simple.core_data import FetchOutput, FetchInput
26
27 from nmigen.lib.coding import PriorityEncoder
28
29 from openpower.decoder.power_decoder import create_pdecode
30 from openpower.decoder.power_decoder2 import PowerDecode2, SVP64PrefixDecoder
31 from openpower.decoder.decode2execute1 import IssuerDecode2ToOperand
32 from openpower.decoder.decode2execute1 import Data
33 from openpower.decoder.power_enums import (MicrOp, SVP64PredInt, SVP64PredCR,
34 SVP64PredMode)
35 from openpower.state import CoreState
36 from openpower.consts import (CR, SVP64CROffs, MSR)
37 from soc.experiment.testmem import TestMemory # test only for instructions
38 from soc.regfile.regfiles import StateRegs, FastRegs
39 from soc.simple.core import NonProductionCore
40 from soc.config.test.test_loadstore import TestMemPspec
41 from soc.config.ifetch import ConfigFetchUnit
42 from soc.debug.dmi import CoreDebug, DMIInterface
43 from soc.debug.jtag import JTAG
44 from soc.config.pinouts import get_pinspecs
45 from soc.interrupts.xics import XICS_ICP, XICS_ICS
46 from soc.bus.simple_gpio import SimpleGPIO
47 from soc.bus.SPBlock512W64B8W import SPBlock512W64B8W
48 from soc.clock.select import ClockSelect
49 from soc.clock.dummypll import DummyPLL
50 from openpower.sv.svstate import SVSTATERec
51 from soc.experiment.icache import ICache
52
53 from nmutil.util import rising_edge
54
55
56 def get_insn(f_instr_o, pc):
57 if f_instr_o.width == 32:
58 return f_instr_o
59 else:
60 # 64-bit: bit 2 of pc decides which word to select
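# (e.g. with pc=0x4, pc[2]=1 selects the upper 32-bit word of the
#  64-bit TestMemory read; with pc=0x0 the lower word is returned)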
61 return f_instr_o.word_select(pc[2], 32)
62
63 # gets state input or reads from state regfile
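# (when the incoming Data record's "ok" flag is set the override value is
#  used directly; otherwise the StateRegs read port is enabled and the
#  result is taken one clock later, matching the registered read port)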
64
65
66 def state_get(m, res, core_rst, state_i, name, regfile, regnum):
67 comb = m.d.comb
68 sync = m.d.sync
69 # read the {insert state variable here}
70 res_ok_delay = Signal(name="%s_ok_delay" % name)
71 with m.If(~core_rst):
72 sync += res_ok_delay.eq(~state_i.ok)
73 with m.If(state_i.ok):
74 # incoming override (start from pc_i)
75 comb += res.eq(state_i.data)
76 with m.Else():
77 # otherwise read StateRegs regfile for {insert state here}...
78 comb += regfile.ren.eq(1 << regnum)
79 # ... but on a 1-clock delay
80 with m.If(res_ok_delay):
81 comb += res.eq(regfile.o_data)
82
83
84 def get_predint(m, mask, name):
85 """decode SVP64 predicate integer mask field to reg number and invert
86 this is identical to the equivalent function in ISACaller except that
87 it doesn't read the INT directly, it just decodes "what needs to be done"
88 i.e. which INT reg, whether it is shifted and whether it is bit-inverted.
89
90 * all1s is set to indicate that no mask is to be applied.
91 * regread indicates the GPR register number to be read
92 * invert is set to indicate that the register value is to be inverted
93 * unary indicates that the register contents are used as a shift amount, giving a single-bit mask (1<<r3)
94 """
95 comb = m.d.comb
96 regread = Signal(5, name=name+"regread")
97 invert = Signal(name=name+"invert")
98 unary = Signal(name=name+"unary")
99 all1s = Signal(name=name+"all1s")
100 with m.Switch(mask):
101 with m.Case(SVP64PredInt.ALWAYS.value):
102 comb += all1s.eq(1) # use 0b1111 (all ones)
103 with m.Case(SVP64PredInt.R3_UNARY.value):
104 comb += regread.eq(3)
105 comb += unary.eq(1) # 1<<r3 - shift r3 (single bit)
106 with m.Case(SVP64PredInt.R3.value):
107 comb += regread.eq(3)
108 with m.Case(SVP64PredInt.R3_N.value):
109 comb += regread.eq(3)
110 comb += invert.eq(1)
111 with m.Case(SVP64PredInt.R10.value):
112 comb += regread.eq(10)
113 with m.Case(SVP64PredInt.R10_N.value):
114 comb += regread.eq(10)
115 comb += invert.eq(1)
116 with m.Case(SVP64PredInt.R30.value):
117 comb += regread.eq(30)
118 with m.Case(SVP64PredInt.R30_N.value):
119 comb += regread.eq(30)
120 comb += invert.eq(1)
121 return regread, invert, unary, all1s
122
123
124 def get_predcr(m, mask, name):
125 """decode SVP64 predicate CR to reg number field and invert status
126 this is identical to _get_predcr in ISACaller
127 """
128 comb = m.d.comb
129 idx = Signal(2, name=name+"idx")
130 invert = Signal(name=name+"crinvert")
131 with m.Switch(mask):
132 with m.Case(SVP64PredCR.LT.value):
133 comb += idx.eq(CR.LT)
134 comb += invert.eq(0)
135 with m.Case(SVP64PredCR.GE.value):
136 comb += idx.eq(CR.LT)
137 comb += invert.eq(1)
138 with m.Case(SVP64PredCR.GT.value):
139 comb += idx.eq(CR.GT)
140 comb += invert.eq(0)
141 with m.Case(SVP64PredCR.LE.value):
142 comb += idx.eq(CR.GT)
143 comb += invert.eq(1)
144 with m.Case(SVP64PredCR.EQ.value):
145 comb += idx.eq(CR.EQ)
146 comb += invert.eq(0)
147 with m.Case(SVP64PredCR.NE.value):
148 comb += idx.eq(CR.EQ)
149 comb += invert.eq(1)
150 with m.Case(SVP64PredCR.SO.value):
151 comb += idx.eq(CR.SO)
152 comb += invert.eq(0)
153 with m.Case(SVP64PredCR.NS.value):
154 comb += idx.eq(CR.SO)
155 comb += invert.eq(1)
156 return idx, invert
157
158
159 class TestIssuerBase(Elaboratable):
160 """TestIssuerBase - common base class for Issuers
161
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
164 """
165
166 def __init__(self, pspec):
167
168 # test if SVP64 is to be enabled
169 self.svp64_en = hasattr(pspec, "svp64") and (pspec.svp64 == True)
170
171 # and if regfiles are reduced
172 self.regreduce_en = (hasattr(pspec, "regreduce") and
173 (pspec.regreduce == True))
174
175 # and if overlap requested
176 self.allow_overlap = (hasattr(pspec, "allow_overlap") and
177 (pspec.allow_overlap == True))
178
179 # and get the core domain
180 self.core_domain = "coresync"
181 if (hasattr(pspec, "core_domain") and
182 isinstance(pspec.core_domain, str)):
183 self.core_domain = pspec.core_domain
184
185 # JTAG interface. add this right at the start because if it's
186 # added it *modifies* the pspec, by adding enable/disable signals
187 # for parts of the rest of the core
188 self.jtag_en = hasattr(pspec, "debug") and pspec.debug == 'jtag'
189 #self.dbg_domain = "sync" # sigh "dbgsync" too problematic
190 self.dbg_domain = "dbgsync" # domain for DMI/JTAG clock
191 if self.jtag_en:
192 # XXX MUST keep this up-to-date with litex, and
193 # soc-cocotb-sim, and err.. all needs sorting out, argh
194 subset = ['uart',
195 'mtwi',
196 'eint', 'gpio', 'mspi0',
197 # 'mspi1', - disabled for now
198 # 'pwm', 'sd0', - disabled for now
199 'sdr']
200 self.jtag = JTAG(get_pinspecs(subset=subset),
201 domain=self.dbg_domain)
202 # add signals to pspec to enable/disable icache and dcache
203 # (or data and instruction wishbone if icache/dcache not included)
204 # https://bugs.libre-soc.org/show_bug.cgi?id=520
205 # TODO: do we actually care if these are not domain-synchronised?
206 # honestly probably not.
207 pspec.wb_icache_en = self.jtag.wb_icache_en
208 pspec.wb_dcache_en = self.jtag.wb_dcache_en
209 self.wb_sram_en = self.jtag.wb_sram_en
210 else:
211 self.wb_sram_en = Const(1)
212
213 # add 4k sram blocks?
214 self.sram4x4k = (hasattr(pspec, "sram4x4kblock") and
215 pspec.sram4x4kblock == True)
216 if self.sram4x4k:
217 self.sram4k = []
218 for i in range(4):
219 self.sram4k.append(SPBlock512W64B8W(name="sram4k_%d" % i,
220 # features={'err'}
221 ))
222
223 # add interrupt controller?
224 self.xics = hasattr(pspec, "xics") and pspec.xics == True
225 if self.xics:
226 self.xics_icp = XICS_ICP()
227 self.xics_ics = XICS_ICS()
228 self.int_level_i = self.xics_ics.int_level_i
229 else:
230 self.ext_irq = Signal()
231
232 # add GPIO peripheral?
233 self.gpio = hasattr(pspec, "gpio") and pspec.gpio == True
234 if self.gpio:
235 self.simple_gpio = SimpleGPIO()
236 self.gpio_o = self.simple_gpio.gpio_o
237
238 # main instruction core. suitable for prototyping / demo only
239 self.core = core = NonProductionCore(pspec)
240 self.core_rst = ResetSignal(self.core_domain)
241
242 # instruction decoder. goes into Trap Record
243 #pdecode = create_pdecode()
244 self.cur_state = CoreState("cur") # current state (MSR/PC/SVSTATE)
245 self.pdecode2 = PowerDecode2(None, state=self.cur_state,
246 opkls=IssuerDecode2ToOperand,
247 svp64_en=self.svp64_en,
248 regreduce_en=self.regreduce_en)
249 pdecode = self.pdecode2.dec
250
251 if self.svp64_en:
252 self.svp64 = SVP64PrefixDecoder() # for decoding SVP64 prefix
253
254 self.update_svstate = Signal() # set this if updating svstate
255 self.new_svstate = new_svstate = SVSTATERec("new_svstate")
256
257 # Test Instruction memory
258 if hasattr(core, "icache"):
259 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
260 # truly dreadful. needs a huge reorg.
261 pspec.icache = core.icache
262 self.imem = ConfigFetchUnit(pspec).fu
263
264 # DMI interface
265 self.dbg = CoreDebug()
266 self.dbg_rst_i = Signal(reset_less=True)
267
268 # instruction go/monitor
269 self.pc_o = Signal(64, reset_less=True)
270 self.pc_i = Data(64, "pc_i") # set "ok" to indicate "please change me"
271 self.msr_i = Data(64, "msr_i") # set "ok" to indicate "please change me"
272 self.svstate_i = Data(64, "svstate_i") # ditto
273 self.core_bigendian_i = Signal() # TODO: set based on MSR.LE
274 self.busy_o = Signal(reset_less=True)
275 self.memerr_o = Signal(reset_less=True)
276
277 # STATE regfile read /write ports for PC, MSR, SVSTATE
278 staterf = self.core.regs.rf['state']
279 self.state_r_msr = staterf.r_ports['msr'] # MSR rd
280 self.state_r_pc = staterf.r_ports['cia'] # PC rd
281 self.state_r_sv = staterf.r_ports['sv'] # SVSTATE rd
282
283 self.state_w_msr = staterf.w_ports['msr'] # MSR wr
284 self.state_w_pc = staterf.w_ports['d_wr1'] # PC wr
285 self.state_w_sv = staterf.w_ports['sv'] # SVSTATE wr
286
287 # DMI interface access
288 intrf = self.core.regs.rf['int']
289 crrf = self.core.regs.rf['cr']
290 xerrf = self.core.regs.rf['xer']
291 self.int_r = intrf.r_ports['dmi'] # INT read
292 self.cr_r = crrf.r_ports['full_cr_dbg'] # CR read
293 self.xer_r = xerrf.r_ports['full_xer'] # XER read
294
295 if self.svp64_en:
296 # for predication
297 self.int_pred = intrf.r_ports['pred'] # INT predicate read
298 self.cr_pred = crrf.r_ports['cr_pred'] # CR predicate read
299
300 # hack method of keeping an eye on whether branch/trap set the PC
301 self.state_nia = self.core.regs.rf['state'].w_ports['nia']
302 self.state_nia.wen.name = 'state_nia_wen'
303
304 # pulse to synchronize the simulator at instruction end
305 self.insn_done = Signal()
306
307 # indicate any instruction still outstanding, in execution
308 self.any_busy = Signal()
309
310 if self.svp64_en:
311 # store copies of predicate masks
312 self.srcmask = Signal(64)
313 self.dstmask = Signal(64)
314
315 def setup_peripherals(self, m):
316 comb, sync = m.d.comb, m.d.sync
317
318 # okaaaay so the debug module must be in coresync clock domain
319 # but NOT its reset signal. to cope with this, set every single
320 # submodule explicitly in coresync domain, debug and JTAG
321 # in their own one but using *external* reset.
322 csd = DomainRenamer(self.core_domain)
323 dbd = DomainRenamer(self.dbg_domain)
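# (DomainRenamer rewrites each wrapped submodule's "sync" statements into
#  the named clock domain: core submodules land in core_domain, debug and
#  JTAG in dbg_domain)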
324
325 m.submodules.core = core = csd(self.core)
326 # this _so_ needs sorting out. ICache is added down inside
327 # LoadStore1 and is already a submodule of LoadStore1
328 if not isinstance(self.imem, ICache):
329 m.submodules.imem = imem = csd(self.imem)
330 m.submodules.dbg = dbg = dbd(self.dbg)
331 if self.jtag_en:
332 m.submodules.jtag = jtag = dbd(self.jtag)
333 # TODO: UART2GDB mux, here, from external pin
334 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
335 sync += dbg.dmi.connect_to(jtag.dmi)
336
337 cur_state = self.cur_state
338
339 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
340 if self.sram4x4k:
341 for i, sram in enumerate(self.sram4k):
342 m.submodules["sram4k_%d" % i] = csd(sram)
343 comb += sram.enable.eq(self.wb_sram_en)
344
345 # XICS interrupt handler
346 if self.xics:
347 m.submodules.xics_icp = icp = csd(self.xics_icp)
348 m.submodules.xics_ics = ics = csd(self.xics_ics)
349 comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
350 sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
351 else:
352 sync += cur_state.eint.eq(self.ext_irq) # connect externally
353
354 # GPIO test peripheral
355 if self.gpio:
356 m.submodules.simple_gpio = simple_gpio = csd(self.simple_gpio)
357
358 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
359 # XXX causes litex ECP5 test to get wrong idea about input and output
360 # (but works with verilator sim *sigh*)
361 # if self.gpio and self.xics:
362 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
363
364 # instruction decoder
365 pdecode = create_pdecode()
366 m.submodules.dec2 = pdecode2 = csd(self.pdecode2)
367 if self.svp64_en:
368 m.submodules.svp64 = svp64 = csd(self.svp64)
369
370 # convenience
371 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
372 intrf = self.core.regs.rf['int']
373
374 # clock delay power-on reset
375 cd_por = ClockDomain(reset_less=True)
376 cd_sync = ClockDomain()
377 m.domains += cd_por, cd_sync
378 core_sync = ClockDomain(self.core_domain)
379 if self.core_domain != "sync":
380 m.domains += core_sync
381 if self.dbg_domain != "sync":
382 dbg_sync = ClockDomain(self.dbg_domain)
383 m.domains += dbg_sync
384
385 ti_rst = Signal(reset_less=True)
386 delay = Signal(range(4), reset=3)
387 with m.If(delay != 0):
388 m.d.por += delay.eq(delay - 1)
389 comb += cd_por.clk.eq(ClockSignal())
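# (the "por" domain is reset-less, so this counter starts at 3 straight
#  out of configuration and counts down; the core reset below is held
#  asserted until it reaches zero)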
390
391 # power-on reset delay
392 core_rst = ResetSignal(self.core_domain)
393 if self.core_domain != "sync":
394 comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
395 comb += core_rst.eq(ti_rst)
396 else:
397 with m.If((delay != 0) | dbg.core_rst_o):
398 comb += core_rst.eq(1)
399
400 # connect external reset signal to DMI Reset
401 if self.dbg_domain != "sync":
402 dbg_rst = ResetSignal(self.dbg_domain)
403 comb += dbg_rst.eq(self.dbg_rst_i)
404
405 # busy/halted signals from core
406 core_busy_o = ~core.p.o_ready | core.n.o_data.busy_o # core is busy
407 comb += self.busy_o.eq(core_busy_o)
408 comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
409
410 # temporary hack: says "go" immediately for both address gen and ST
411 l0 = core.l0
412 ldst = core.fus.fus['ldst0']
413 st_go_edge = rising_edge(m, ldst.st.rel_o)
414 # link addr-go direct to rel
415 m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)
416 m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
417
418 def do_dmi(self, m, dbg):
419 """deals with DMI debug requests
420
421 currently only provides read requests for the INT regfile, CR and XER
422 it will later also deal with *writing* to these regfiles.
423 """
424 comb = m.d.comb
425 sync = m.d.sync
426 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
427 intrf = self.core.regs.rf['int']
428
429 with m.If(d_reg.req): # request for regfile access being made
430 # TODO: error-check this
431 # XXX should this be combinatorial? sync better?
432 if intrf.unary:
433 comb += self.int_r.ren.eq(1 << d_reg.addr)
434 else:
435 comb += self.int_r.addr.eq(d_reg.addr)
436 comb += self.int_r.ren.eq(1)
437 d_reg_delay = Signal()
438 sync += d_reg_delay.eq(d_reg.req)
439 with m.If(d_reg_delay):
440 # data arrives one clock later
441 comb += d_reg.data.eq(self.int_r.o_data)
442 comb += d_reg.ack.eq(1)
443
444 # sigh same thing for CR debug
445 with m.If(d_cr.req): # request for regfile access being made
446 comb += self.cr_r.ren.eq(0b11111111) # enable all
447 d_cr_delay = Signal()
448 sync += d_cr_delay.eq(d_cr.req)
449 with m.If(d_cr_delay):
450 # data arrives one clock later
451 comb += d_cr.data.eq(self.cr_r.o_data)
452 comb += d_cr.ack.eq(1)
453
454 # aaand XER...
455 with m.If(d_xer.req): # request for regfile access being made
456 comb += self.xer_r.ren.eq(0b111111) # enable all
457 d_xer_delay = Signal()
458 sync += d_xer_delay.eq(d_xer.req)
459 with m.If(d_xer_delay):
460 # data arrives one clock later
461 comb += d_xer.data.eq(self.xer_r.o_data)
462 comb += d_xer.ack.eq(1)
463
464 def tb_dec_fsm(self, m, spr_dec):
465 """tb_dec_fsm
466
467 this is a FSM for updating either dec or tb. it runs alternately
468 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
469 value to DEC, however the regfile has "passthrough" on it so this
470 *should* be ok.
471
472 see v3.0B p1097-1099 for Timer Resources and p1065 and p1076
473 """
474
475 comb, sync = m.d.comb, m.d.sync
476 fast_rf = self.core.regs.rf['fast']
477 fast_r_dectb = fast_rf.r_ports['issue'] # DEC/TB
478 fast_w_dectb = fast_rf.w_ports['issue'] # DEC/TB
479
480 with m.FSM() as fsm:
481
482 # initiates read of current DEC
483 with m.State("DEC_READ"):
484 comb += fast_r_dectb.addr.eq(FastRegs.DEC)
485 comb += fast_r_dectb.ren.eq(1)
486 m.next = "DEC_WRITE"
487
488 # waits for DEC read to arrive (1 cycle), updates with new value
489 with m.State("DEC_WRITE"):
490 new_dec = Signal(64)
491 # TODO: MSR.LPCR 32-bit decrement mode
492 comb += new_dec.eq(fast_r_dectb.o_data - 1)
493 comb += fast_w_dectb.addr.eq(FastRegs.DEC)
494 comb += fast_w_dectb.wen.eq(1)
495 comb += fast_w_dectb.i_data.eq(new_dec)
496 sync += spr_dec.eq(new_dec) # copy into cur_state for decoder
497 m.next = "TB_READ"
498
499 # initiates read of current TB
500 with m.State("TB_READ"):
501 comb += fast_r_dectb.addr.eq(FastRegs.TB)
502 comb += fast_r_dectb.ren.eq(1)
503 m.next = "TB_WRITE"
504
505 # waits for read TB to arrive, initiates write of current TB
506 with m.State("TB_WRITE"):
507 new_tb = Signal(64)
508 comb += new_tb.eq(fast_r_dectb.o_data + 1)
509 comb += fast_w_dectb.addr.eq(FastRegs.TB)
510 comb += fast_w_dectb.wen.eq(1)
511 comb += fast_w_dectb.i_data.eq(new_tb)
512 m.next = "DEC_READ"
513
514 return m
515
516 def elaborate(self, platform):
517 m = Module()
518 # convenience
519 comb, sync = m.d.comb, m.d.sync
520 cur_state = self.cur_state
521 pdecode2 = self.pdecode2
522 dbg = self.dbg
523
524 # set up peripherals and core
525 core_rst = self.core_rst
526 self.setup_peripherals(m)
527
528 # reset current state if core reset requested
529 with m.If(core_rst):
530 m.d.sync += self.cur_state.eq(0)
531
532 # check halted condition: if the PC about to be executed matches the
533 # DMI stop address, stop immediately. a stop address of
534 # 0xffff_ffff_ffff_ffff can never match
535 halted = Signal()
536 comb += halted.eq(dbg.stop_addr_o == dbg.state.pc)
537 with m.If(halted):
538 comb += dbg.core_stopped_i.eq(1)
539 comb += dbg.terminate_i.eq(1)
540
541 # PC and instruction from I-Memory
542 comb += self.pc_o.eq(cur_state.pc)
543 self.pc_changed = Signal() # note write to PC
544 self.msr_changed = Signal() # note write to MSR
545 self.sv_changed = Signal() # note write to SVSTATE
546
547 # read state either from incoming override or from regfile
548 state = CoreState("get") # current state (MSR/PC/SVSTATE)
549 state_get(m, state.msr, core_rst, self.msr_i,
550 "msr", # read MSR
551 self.state_r_msr, StateRegs.MSR)
552 state_get(m, state.pc, core_rst, self.pc_i,
553 "pc", # read PC
554 self.state_r_pc, StateRegs.PC)
555 state_get(m, state.svstate, core_rst, self.svstate_i,
556 "svstate", # read SVSTATE
557 self.state_r_sv, StateRegs.SVSTATE)
558
559 # don't write pc every cycle
560 comb += self.state_w_pc.wen.eq(0)
561 comb += self.state_w_pc.i_data.eq(0)
562
563 # connect up debug state. note "combinatorially same" below,
564 # this is a bit naff, passing state over in the dbg class, but
565 # because it is combinatorial it achieves the desired goal
566 comb += dbg.state.eq(state)
567
568 # this bit doesn't have to be in the FSM: connect up to read
569 # regfiles on demand from DMI
570 self.do_dmi(m, dbg)
571
572 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
573 # (which uses that in PowerDecoder2 to raise 0x900 exception)
574 self.tb_dec_fsm(m, cur_state.dec)
575
576 # while stopped, allow updating the MSR, PC and SVSTATE.
577 # these are mainly for debugging purposes (including DMI/JTAG)
578 with m.If(dbg.core_stopped_i):
579 with m.If(self.pc_i.ok):
580 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
581 comb += self.state_w_pc.i_data.eq(self.pc_i.data)
582 sync += self.pc_changed.eq(1)
583 with m.If(self.msr_i.ok):
584 comb += self.state_w_msr.wen.eq(1 << StateRegs.MSR)
585 comb += self.state_w_msr.i_data.eq(self.msr_i.data)
586 sync += self.msr_changed.eq(1)
587 with m.If(self.svstate_i.ok | self.update_svstate):
588 with m.If(self.svstate_i.ok): # over-ride from external source
589 comb += self.new_svstate.eq(self.svstate_i.data)
590 comb += self.state_w_sv.wen.eq(1 << StateRegs.SVSTATE)
591 comb += self.state_w_sv.i_data.eq(self.new_svstate)
592 sync += self.sv_changed.eq(1)
593
594 return m
595
596 def __iter__(self):
597 yield from self.pc_i.ports()
598 yield from self.msr_i.ports()
599 yield self.pc_o
600 yield self.memerr_o
601 yield from self.core.ports()
602 yield from self.imem.ports()
603 yield self.core_bigendian_i
604 yield self.busy_o
605
606 def ports(self):
607 return list(self)
608
609 def external_ports(self):
610 ports = self.pc_i.ports()
611 ports += self.msr_i.ports()
612 ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
613 ]
614
615 if self.jtag_en:
616 ports += list(self.jtag.external_ports())
617 else:
618 # JTAG disabled: bring the DMI interface out directly instead
619 ports += list(self.dbg.dmi.ports())
620
621 ports += list(self.imem.ibus.fields.values())
622 ports += list(self.core.l0.cmpi.wb_bus().fields.values())
623
624 if self.sram4x4k:
625 for sram in self.sram4k:
626 ports += list(sram.bus.fields.values())
627
628 if self.xics:
629 ports += list(self.xics_icp.bus.fields.values())
630 ports += list(self.xics_ics.bus.fields.values())
631 ports.append(self.int_level_i)
632 else:
633 ports.append(self.ext_irq)
634
635 if self.gpio:
636 ports += list(self.simple_gpio.bus.fields.values())
637 ports.append(self.gpio_o)
638
639 return ports
640
643
644
645
646 # Fetch Finite State Machine.
647 # WARNING: there are currently DriverConflicts but it's actually working.
648 # TODO, here: everything that is global in nature, information from the
649 # main TestIssuerInternal, needs to move to either ispec() or ospec().
650 # not only that: TestIssuerInternal.imem can entirely move into here
651 # because imem is only ever accessed inside the FetchFSM.
652 class FetchFSM(ControlBase):
653 def __init__(self, allow_overlap, svp64_en, imem, core_rst,
654 pdecode2, cur_state,
655 dbg, core, svstate, nia, is_svp64_mode):
656 self.allow_overlap = allow_overlap
657 self.svp64_en = svp64_en
658 self.imem = imem
659 self.core_rst = core_rst
660 self.pdecode2 = pdecode2
661 self.cur_state = cur_state
662 self.dbg = dbg
663 self.core = core
664 self.svstate = svstate
665 self.nia = nia
666 self.is_svp64_mode = is_svp64_mode
667
668 # set up pipeline ControlBase and allocate i/o specs
669 # (unusual: normally done by the Pipeline API)
670 super().__init__(stage=self)
671 self.p.i_data, self.n.o_data = self.new_specs(None)
672 self.i, self.o = self.p.i_data, self.n.o_data
673
674 # next 3 functions are Stage API Compliance
675 def setup(self, m, i):
676 pass
677
678 def ispec(self):
679 return FetchInput()
680
681 def ospec(self):
682 return FetchOutput()
683
684 def elaborate(self, platform):
685 """fetch FSM
686
687 this FSM performs fetch of raw instruction data, partial-decodes
688 it 32 bits at a time to detect SVP64 prefixes, and will optionally
689 read a 2nd 32-bit quantity if that occurs.
690 """
691 m = super().elaborate(platform)
692
693 dbg = self.dbg
694 core = self.core
695 pc = self.i.pc
696 msr = self.i.msr
697 svstate = self.svstate
698 nia = self.nia
699 is_svp64_mode = self.is_svp64_mode
700 fetch_pc_o_ready = self.p.o_ready
701 fetch_pc_i_valid = self.p.i_valid
702 fetch_insn_o_valid = self.n.o_valid
703 fetch_insn_i_ready = self.n.i_ready
704
705 comb = m.d.comb
706 sync = m.d.sync
707 pdecode2 = self.pdecode2
708 cur_state = self.cur_state
709 dec_opcode_o = pdecode2.dec.raw_opcode_in # raw opcode
710
711 # also note instruction fetch failed
712 if hasattr(core, "icache"):
713 fetch_failed = core.icache.i_out.fetch_failed
714 flush_needed = True
715 else:
716 fetch_failed = Const(0, 1)
717 flush_needed = False
718
719 # set priv / virt mode on I-Cache, sigh
720 if isinstance(self.imem, ICache):
721 comb += self.imem.i_in.priv_mode.eq(~msr[MSR.PR])
722 comb += self.imem.i_in.virt_mode.eq(msr[MSR.IR]) # Instr. Redir (VM)
723
724 with m.FSM(name='fetch_fsm'):
725
726 # waiting (zzz)
727 with m.State("IDLE"):
728 # fetch allowed if fetch has not failed and the core is not stopped
729 # (stepping temporarily releases core_stop_o - see dmi.py for how
730 # core_stop_o is generated)
730 with m.If(~fetch_failed & ~dbg.core_stop_o):
731 comb += fetch_pc_o_ready.eq(1)
732 with m.If(fetch_pc_i_valid & ~pdecode2.instr_fault
733 & ~dbg.core_stop_o):
734 # instruction allowed to go: start by reading the PC
735 # capture the PC and also drop it into Insn Memory
736 # we have joined a pair of combinatorial memory
737 # lookups together. this is Generally Bad.
738 comb += self.imem.a_pc_i.eq(pc)
739 comb += self.imem.a_i_valid.eq(1)
740 comb += self.imem.f_i_valid.eq(1)
741 # transfer state to output
742 sync += cur_state.pc.eq(pc)
743 sync += cur_state.svstate.eq(svstate) # and svstate
744 sync += cur_state.msr.eq(msr) # and msr
745
746 m.next = "INSN_READ" # move to "wait for bus" phase
747
748 # dummy pause to find out why simulation is not keeping up
749 with m.State("INSN_READ"):
750 # when using "single-step" mode, checking dbg.stopping_o
751 # prevents progress. allow fetch to proceed once started
752 stopping = Const(0)
753 #if self.allow_overlap:
754 # stopping = dbg.stopping_o
755 with m.If(stopping):
756 # stopping: jump back to idle
757 m.next = "IDLE"
758 with m.Else():
759 with m.If(self.imem.f_busy_o &
760 ~pdecode2.instr_fault): # zzz...
761 # busy but not fetch failed: stay in wait-read
762 comb += self.imem.a_pc_i.eq(pc)
763 comb += self.imem.a_i_valid.eq(1)
764 comb += self.imem.f_i_valid.eq(1)
765 with m.Else():
766 # not busy (or fetch failed!): instruction fetched
767 # when fetch failed, the instruction gets ignored
768 # by the decoder
769 if hasattr(core, "icache"):
770 # blech, icache returns actual instruction
771 insn = self.imem.f_instr_o
772 else:
773 # but these return raw memory
774 insn = get_insn(self.imem.f_instr_o, cur_state.pc)
775 if self.svp64_en:
776 svp64 = self.svp64
777 # decode the SVP64 prefix, if any
778 comb += svp64.raw_opcode_in.eq(insn)
779 comb += svp64.bigendian.eq(self.core_bigendian_i)
780 # pass the decoded prefix (if any) to PowerDecoder2
781 sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
782 sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
783 # remember whether this is a prefixed instruction,
784 # so the FSM can readily loop when VL==0
785 sync += is_svp64_mode.eq(svp64.is_svp64_mode)
786 # calculate the address of the following instruction
787 insn_size = Mux(svp64.is_svp64_mode, 8, 4)
788 sync += nia.eq(cur_state.pc + insn_size)
789 with m.If(~svp64.is_svp64_mode):
790 # with no prefix, store the instruction
791 # and hand it directly to the next FSM
792 sync += dec_opcode_o.eq(insn)
793 m.next = "INSN_READY"
794 with m.Else():
795 # fetch the rest of the instruction from memory
796 comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
797 comb += self.imem.a_i_valid.eq(1)
798 comb += self.imem.f_i_valid.eq(1)
799 m.next = "INSN_READ2"
800 else:
801 # not SVP64 - 32-bit only
802 sync += nia.eq(cur_state.pc + 4)
803 sync += dec_opcode_o.eq(insn)
804 m.next = "INSN_READY"
805
806 with m.State("INSN_READ2"):
807 with m.If(self.imem.f_busy_o): # zzz...
808 # busy: stay in wait-read
809 comb += self.imem.a_i_valid.eq(1)
810 comb += self.imem.f_i_valid.eq(1)
811 with m.Else():
812 # not busy: instruction fetched
813 if hasattr(core, "icache"):
814 # blech, icache returns actual instruction
815 insn = self.imem.f_instr_o
816 else:
817 insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
818 sync += dec_opcode_o.eq(insn)
819 m.next = "INSN_READY"
820 # TODO: probably can start looking at pdecode2.rm_dec
821 # here or maybe even in INSN_READ state, if svp64_mode
822 # detected, in order to trigger - and wait for - the
823 # predicate reading.
824 if self.svp64_en:
825 pmode = pdecode2.rm_dec.predmode
826 """
827 if pmode != SVP64PredMode.ALWAYS.value:
828 fire predicate loading FSM and wait before
829 moving to INSN_READY
830 else:
831 sync += self.srcmask.eq(-1) # set to all 1s
832 sync += self.dstmask.eq(-1) # set to all 1s
833 m.next = "INSN_READY"
834 """
835
836 with m.State("INSN_READY"):
837 # hand over the instruction, to be decoded
838 comb += fetch_insn_o_valid.eq(1)
839 with m.If(fetch_insn_i_ready):
840 m.next = "IDLE"
841
842 # whatever was done above, over-ride it if core reset is held
843 with m.If(self.core_rst):
844 sync += nia.eq(0)
845
846 return m
847
848
849 class TestIssuerInternal(TestIssuerBase):
850 """TestIssuer - reads instructions from TestMemory and issues them
851
852 efficiency and speed are not the main goal here: functional correctness
853 and code clarity are. optimisations (which almost 100% interfere with
854 easy understanding) come later.
855 """
856
857 def fetch_predicate_fsm(self, m,
858 pred_insn_i_valid, pred_insn_o_ready,
859 pred_mask_o_valid, pred_mask_i_ready):
860 """fetch_predicate_fsm - obtains (constructs in the case of CR)
861 src/dest predicate masks
862
863 https://bugs.libre-soc.org/show_bug.cgi?id=617
864 the predicates can be read here, by using IntRegs r_ports['pred']
865 or CRRegs r_ports['pred']. in the case of CRs it will have to
866 be done through multiple reads, extracting one relevant at a time.
867 later, a faster way would be to use the 32-bit-wide CR port but
868 this is more complex decoding, here. equivalent code used in
869 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
870
871 note: this ENTIRE FSM is not to be called when svp64 is disabled
872 """
873 comb = m.d.comb
874 sync = m.d.sync
875 pdecode2 = self.pdecode2
876 rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
877 predmode = rm_dec.predmode
878 srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
879 cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
880 # get src/dst step, so we can skip already used mask bits
881 cur_state = self.cur_state
882 srcstep = cur_state.svstate.srcstep
883 dststep = cur_state.svstate.dststep
884 cur_vl = cur_state.svstate.vl
885
886 # decode predicates
887 sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
888 dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
889 sidx, scrinvert = get_predcr(m, srcpred, 's')
890 didx, dcrinvert = get_predcr(m, dstpred, 'd')
891
892 # store fetched masks, for either intpred or crpred
893 # when src/dst step is not zero, the skipped mask bits need to be
894 # shifted-out, before actually storing them in src/dest mask
895 new_srcmask = Signal(64, reset_less=True)
896 new_dstmask = Signal(64, reset_less=True)
897
898 with m.FSM(name="fetch_predicate"):
899
900 with m.State("FETCH_PRED_IDLE"):
901 comb += pred_insn_o_ready.eq(1)
902 with m.If(pred_insn_i_valid):
903 with m.If(predmode == SVP64PredMode.INT):
904 # skip fetching the destination mask register when not needed (all-1s)
905 with m.If(dall1s):
906 sync += new_dstmask.eq(-1)
907 # directly go to fetch source mask register
908 # guaranteed not to be zero (otherwise predmode
909 # would be SVP64PredMode.ALWAYS, not INT)
910 comb += int_pred.addr.eq(sregread)
911 comb += int_pred.ren.eq(1)
912 m.next = "INT_SRC_READ"
913 # fetch destination predicate register
914 with m.Else():
915 comb += int_pred.addr.eq(dregread)
916 comb += int_pred.ren.eq(1)
917 m.next = "INT_DST_READ"
918 with m.Elif(predmode == SVP64PredMode.CR):
919 # go fetch masks from the CR register file
920 sync += new_srcmask.eq(0)
921 sync += new_dstmask.eq(0)
922 m.next = "CR_READ"
923 with m.Else():
924 sync += self.srcmask.eq(-1)
925 sync += self.dstmask.eq(-1)
926 m.next = "FETCH_PRED_DONE"
927
928 with m.State("INT_DST_READ"):
929 # store destination mask
930 inv = Repl(dinvert, 64)
931 with m.If(dunary):
932 # set selected mask bit for 1<<r3 mode
933 dst_shift = Signal(range(64))
934 comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
935 sync += new_dstmask.eq(1 << dst_shift)
936 with m.Else():
937 # invert mask if requested
938 sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
939 # skip fetching the source mask register when not needed (all-1s)
940 with m.If(sall1s):
941 sync += new_srcmask.eq(-1)
942 m.next = "FETCH_PRED_SHIFT_MASK"
943 # fetch source predicate register
944 with m.Else():
945 comb += int_pred.addr.eq(sregread)
946 comb += int_pred.ren.eq(1)
947 m.next = "INT_SRC_READ"
948
949 with m.State("INT_SRC_READ"):
950 # store source mask
951 inv = Repl(sinvert, 64)
952 with m.If(sunary):
953 # set selected mask bit for 1<<r3 mode
954 src_shift = Signal(range(64))
955 comb += src_shift.eq(self.int_pred.o_data & 0b111111)
956 sync += new_srcmask.eq(1 << src_shift)
957 with m.Else():
958 # invert mask if requested
959 sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
960 m.next = "FETCH_PRED_SHIFT_MASK"
961
962 # fetch masks from the CR register file
963 # implements the following loop:
964 # idx, inv = get_predcr(mask)
965 # mask = 0
966 # for cr_idx in range(vl):
967 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
968 # if cr[idx] ^ inv:
969 # mask |= 1 << cr_idx
970 # return mask
971 with m.State("CR_READ"):
972 # CR index to be read, which will be ready by the next cycle
973 cr_idx = Signal.like(cur_vl, reset_less=True)
974 # submit the read operation to the regfile
975 with m.If(cr_idx != cur_vl):
976 # the CR read port is unary ...
977 # ren = 1 << cr_idx
978 # ... in MSB0 convention ...
979 # ren = 1 << (7 - cr_idx)
980 # ... and with an offset:
981 # ren = 1 << (7 - off - cr_idx)
982 idx = SVP64CROffs.CRPred + cr_idx
983 comb += cr_pred.ren.eq(1 << (7 - idx))
984 # signal data valid in the next cycle
985 cr_read = Signal(reset_less=True)
986 sync += cr_read.eq(1)
987 # load the next index
988 sync += cr_idx.eq(cr_idx + 1)
989 with m.Else():
990 # exit on loop end
991 sync += cr_read.eq(0)
992 sync += cr_idx.eq(0)
993 m.next = "FETCH_PRED_SHIFT_MASK"
994 with m.If(cr_read):
995 # compensate for the one cycle delay on the regfile
996 cur_cr_idx = Signal.like(cur_vl)
997 comb += cur_cr_idx.eq(cr_idx - 1)
998 # read the CR field, select the appropriate bit
999 cr_field = Signal(4)
1000 scr_bit = Signal()
1001 dcr_bit = Signal()
1002 comb += cr_field.eq(cr_pred.o_data)
1003 comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
1004 ^ scrinvert)
1005 comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
1006 ^ dcrinvert)
1007 # set the corresponding mask bit
1008 bit_to_set = Signal.like(self.srcmask)
1009 comb += bit_to_set.eq(1 << cur_cr_idx)
1010 with m.If(scr_bit):
1011 sync += new_srcmask.eq(new_srcmask | bit_to_set)
1012 with m.If(dcr_bit):
1013 sync += new_dstmask.eq(new_dstmask | bit_to_set)
1014
1015 with m.State("FETCH_PRED_SHIFT_MASK"):
1016 # shift-out skipped mask bits
1017 sync += self.srcmask.eq(new_srcmask >> srcstep)
1018 sync += self.dstmask.eq(new_dstmask >> dststep)
1019 m.next = "FETCH_PRED_DONE"
1020
1021 with m.State("FETCH_PRED_DONE"):
1022 comb += pred_mask_o_valid.eq(1)
1023 with m.If(pred_mask_i_ready):
1024 m.next = "FETCH_PRED_IDLE"
1025
1026 def issue_fsm(self, m, core, nia,
1027 dbg, core_rst, is_svp64_mode,
1028 fetch_pc_o_ready, fetch_pc_i_valid,
1029 fetch_insn_o_valid, fetch_insn_i_ready,
1030 pred_insn_i_valid, pred_insn_o_ready,
1031 pred_mask_o_valid, pred_mask_i_ready,
1032 exec_insn_i_valid, exec_insn_o_ready,
1033 exec_pc_o_valid, exec_pc_i_ready):
1034 """issue FSM
1035
1036 decode / issue FSM. this interacts with the "fetch" FSM
1037 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
1038 (outgoing). also interacts with the "execute" FSM
1039 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
1040 (incoming).
1041 SVP64 RM prefixes have already been set up by the
1042 "fetch" phase, so execute is fairly straightforward.
1043 """
1044
1045 comb = m.d.comb
1046 sync = m.d.sync
1047 pdecode2 = self.pdecode2
1048 cur_state = self.cur_state
1049 new_svstate = self.new_svstate
1050
1051 # temporaries
1052 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
1053
1054 # for updating svstate (things like srcstep etc.)
1055 comb += new_svstate.eq(cur_state.svstate)
1056
1057 # precalculate srcstep+1 and dststep+1
1058 cur_srcstep = cur_state.svstate.srcstep
1059 cur_dststep = cur_state.svstate.dststep
1060 next_srcstep = Signal.like(cur_srcstep)
1061 next_dststep = Signal.like(cur_dststep)
1062 comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
1063 comb += next_dststep.eq(cur_state.svstate.dststep+1)
1064
1065 # note if an exception happened. in a pipelined or OoO design
1066 # this needs to be accompanied by "shadowing" (or stalling)
1067 exc_happened = self.core.o.exc_happened
1068 # also note instruction fetch failed
1069 if hasattr(core, "icache"):
1070 fetch_failed = core.icache.i_out.fetch_failed
1071 flush_needed = True
1072 # set to fault in decoder
1073 # update (highest priority) instruction fault
1074 rising_fetch_failed = rising_edge(m, fetch_failed)
1075 with m.If(rising_fetch_failed):
1076 sync += pdecode2.instr_fault.eq(1)
1077 else:
1078 fetch_failed = Const(0, 1)
1079 flush_needed = False
1080
1081 with m.FSM(name="issue_fsm"):
1082
1083 # sync with the "fetch" phase which is reading the instruction
1084 # at this point, there is no instruction running, that
1085 # could inadvertently update the PC.
1086 with m.State("ISSUE_START"):
1087 # reset instruction fault
1088 sync += pdecode2.instr_fault.eq(0)
1089 # wait on "core stop" release, before next fetch
1090 # need to do this here, in case we are in a VL==0 loop
1091 with m.If(~dbg.core_stop_o & ~core_rst):
1092 comb += fetch_pc_i_valid.eq(1) # tell fetch to start
1093 with m.If(fetch_pc_o_ready): # fetch acknowledged us
1094 m.next = "INSN_WAIT"
1095 with m.Else():
1096 # tell core it's stopped, and acknowledge debug handshake
1097 comb += dbg.core_stopped_i.eq(1)
1098 # while stopped, allow updating SVSTATE
1099 with m.If(self.svstate_i.ok):
1100 comb += new_svstate.eq(self.svstate_i.data)
1101 comb += self.update_svstate.eq(1)
1102 sync += self.sv_changed.eq(1)
1103
1104 # wait for an instruction to arrive from Fetch
1105 with m.State("INSN_WAIT"):
1106 # when using "single-step" mode, checking dbg.stopping_o
1107 # prevents progress. allow issue to proceed once started
1108 stopping = Const(0)
1109 #if self.allow_overlap:
1110 # stopping = dbg.stopping_o
1111 with m.If(stopping):
1112 # stopping: jump back to idle
1113 m.next = "ISSUE_START"
1114 if flush_needed:
1115 # request the icache to stop asserting "failed"
1116 comb += core.icache.flush_in.eq(1)
1117 # stop instruction fault
1118 sync += pdecode2.instr_fault.eq(0)
1119 with m.Else():
1120 comb += fetch_insn_i_ready.eq(1)
1121 with m.If(fetch_insn_o_valid):
1122 # loop into ISSUE_START if it's a SVP64 instruction
1123 # and VL == 0. this is because VL==0 is a for-loop
1124 # from 0 to 0, i.e. the instruction is always a NOP.
1125 cur_vl = cur_state.svstate.vl
1126 with m.If(is_svp64_mode & (cur_vl == 0)):
1127 # update the PC before fetching the next instruction
1128 # since we are in a VL==0 loop, no instruction was
1129 # executed that we could be overwriting
1130 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1131 comb += self.state_w_pc.i_data.eq(nia)
1132 comb += self.insn_done.eq(1)
1133 m.next = "ISSUE_START"
1134 with m.Else():
1135 if self.svp64_en:
1136 m.next = "PRED_START" # fetching predicate
1137 else:
1138 m.next = "DECODE_SV" # skip predication
1139
1140 with m.State("PRED_START"):
1141 comb += pred_insn_i_valid.eq(1) # tell fetch_pred to start
1142 with m.If(pred_insn_o_ready): # fetch_pred acknowledged us
1143 m.next = "MASK_WAIT"
1144
1145 with m.State("MASK_WAIT"):
1146 comb += pred_mask_i_ready.eq(1) # ready to receive the masks
1147 with m.If(pred_mask_o_valid): # predication masks are ready
1148 m.next = "PRED_SKIP"
1149
1150 # skip zeros in predicate
1151 with m.State("PRED_SKIP"):
1152 with m.If(~is_svp64_mode):
1153 m.next = "DECODE_SV" # nothing to do
1154 with m.Else():
1155 if self.svp64_en:
1156 pred_src_zero = pdecode2.rm_dec.pred_sz
1157 pred_dst_zero = pdecode2.rm_dec.pred_dz
1158
1159 # new srcstep, after skipping zeros
1160 skip_srcstep = Signal.like(cur_srcstep)
1161 # value to be added to the current srcstep
1162 src_delta = Signal.like(cur_srcstep)
1163 # add the number of leading zeros to srcstep, if not in zero mode
1164 with m.If(~pred_src_zero):
1165 # priority encoder (count leading zeros)
1166 # append guard bit, in case the mask is all zeros
1167 pri_enc_src = PriorityEncoder(65)
1168 m.submodules.pri_enc_src = pri_enc_src
1169 comb += pri_enc_src.i.eq(Cat(self.srcmask,
1170 Const(1, 1)))
1171 comb += src_delta.eq(pri_enc_src.o)
1172 # apply delta to srcstep
1173 comb += skip_srcstep.eq(cur_srcstep + src_delta)
1174 # shift-out all leading zeros from the mask
1175 # plus the leading "one" bit
1176 # TODO count leading zeros and shift-out the zero
1177 # bits, in the same step, in hardware
1178 sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
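# example: with srcmask = 0b1011000 and srcstep = 0, the priority
# encoder reports 3 low-order zero bits, so srcstep skips ahead to
# element 3 and the mask is shifted right by src_delta+1 = 4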
1179
1180 # same as above, but for dststep
1181 skip_dststep = Signal.like(cur_dststep)
1182 dst_delta = Signal.like(cur_dststep)
1183 with m.If(~pred_dst_zero):
1184 pri_enc_dst = PriorityEncoder(65)
1185 m.submodules.pri_enc_dst = pri_enc_dst
1186 comb += pri_enc_dst.i.eq(Cat(self.dstmask,
1187 Const(1, 1)))
1188 comb += dst_delta.eq(pri_enc_dst.o)
1189 comb += skip_dststep.eq(cur_dststep + dst_delta)
1190 sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
1191
1192 # TODO: initialize mask[VL]=1 to avoid passing past VL
1193 with m.If((skip_srcstep >= cur_vl) |
1194 (skip_dststep >= cur_vl)):
1195 # end of VL loop. Update PC and reset src/dst step
1196 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1197 comb += self.state_w_pc.i_data.eq(nia)
1198 comb += new_svstate.srcstep.eq(0)
1199 comb += new_svstate.dststep.eq(0)
1200 comb += self.update_svstate.eq(1)
1201 # synchronize with the simulator
1202 comb += self.insn_done.eq(1)
1203 # go back to Issue
1204 m.next = "ISSUE_START"
1205 with m.Else():
1206 # update new src/dst step
1207 comb += new_svstate.srcstep.eq(skip_srcstep)
1208 comb += new_svstate.dststep.eq(skip_dststep)
1209 comb += self.update_svstate.eq(1)
1210 # proceed to Decode
1211 m.next = "DECODE_SV"
1212
1213 # pass predicate mask bits through to satellite decoders
1214 # TODO: for SIMD this will be *multiple* bits
1215 sync += core.i.sv_pred_sm.eq(self.srcmask[0])
1216 sync += core.i.sv_pred_dm.eq(self.dstmask[0])
1217
1218 # after src/dst step have been updated, we are ready
1219 # to decode the instruction
1220 with m.State("DECODE_SV"):
1221 # decode the instruction
1222 with m.If(~fetch_failed):
1223 sync += pdecode2.instr_fault.eq(0)
1224 sync += core.i.e.eq(pdecode2.e)
1225 sync += core.i.state.eq(cur_state)
1226 sync += core.i.raw_insn_i.eq(dec_opcode_i)
1227 sync += core.i.bigendian_i.eq(self.core_bigendian_i)
1228 if self.svp64_en:
1229 sync += core.i.sv_rm.eq(pdecode2.sv_rm)
1230 # set RA_OR_ZERO detection in satellite decoders
1231 sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
1232 # and svp64 detection
1233 sync += core.i.is_svp64_mode.eq(is_svp64_mode)
1234 # and svp64 bit-rev'd ldst mode
1235 ldst_dec = pdecode2.use_svp64_ldst_dec
1236 sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
1237 # after decoding, reset any previous exception condition,
1238 # allowing it to be set again during the next execution
1239 sync += pdecode2.ldst_exc.eq(0)
1240
1241 m.next = "INSN_EXECUTE" # move to "execute"
1242
1243 # handshake with execution FSM, move to "wait" once acknowledged
1244 with m.State("INSN_EXECUTE"):
1245 # when using "single-step" mode, checking dbg.stopping_o
1246 # prevents progress. allow execute to proceed once started
1247 stopping = Const(0)
1248 #if self.allow_overlap:
1249 # stopping = dbg.stopping_o
1250 with m.If(stopping):
1251 # stopping: jump back to idle
1252 m.next = "ISSUE_START"
1253 if flush_needed:
1254 # request the icache to stop asserting "failed"
1255 comb += core.icache.flush_in.eq(1)
1256 # stop instruction fault
1257 sync += pdecode2.instr_fault.eq(0)
1258 with m.Else():
1259 comb += exec_insn_i_valid.eq(1) # trigger execute
1260 with m.If(exec_insn_o_ready): # execute acknowledged us
1261 m.next = "EXECUTE_WAIT"
1262
1263 with m.State("EXECUTE_WAIT"):
1264 comb += exec_pc_i_ready.eq(1)
1265 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
1266 # the exception info needs to be blatted into
1267 # pdecode.ldst_exc, and the instruction "re-run".
1268 # when ldst_exc.happened is set, the PowerDecoder2
1269 # reacts very differently: it re-writes the instruction
1270 # with a "trap" (calls PowerDecoder2.trap()) which
1271 # will *overwrite* whatever was requested and jump the
1272 # PC to the exception address, as well as alter MSR.
1273 # nothing else needs to be done other than to note
1274 # the change of PC and MSR (and, later, SVSTATE)
1275 with m.If(exc_happened):
1276 mmu = core.fus.get_exc("mmu0")
1277 ldst = core.fus.get_exc("ldst0")
1278 if mmu is not None:
1279 with m.If(fetch_failed):
1280 # instruction fetch: exception is from MMU
1281 # reset instr_fault (highest priority)
1282 sync += pdecode2.ldst_exc.eq(mmu)
1283 sync += pdecode2.instr_fault.eq(0)
1284 if flush_needed:
1285 # request icache to stop asserting "failed"
1286 comb += core.icache.flush_in.eq(1)
1287 with m.If(~fetch_failed):
1288 # otherwise assume it was a LDST exception
1289 sync += pdecode2.ldst_exc.eq(ldst)
1290
1291 with m.If(exec_pc_o_valid):
1292
1293 # was this the last loop iteration?
1294 is_last = Signal()
1295 cur_vl = cur_state.svstate.vl
1296 comb += is_last.eq(next_srcstep == cur_vl)
1297
1298 with m.If(pdecode2.instr_fault):
1299 # reset instruction fault, try again
1300 sync += pdecode2.instr_fault.eq(0)
1301 m.next = "ISSUE_START"
1302
1303 # return directly to Decode if Execute generated an
1304 # exception.
1305 with m.Elif(pdecode2.ldst_exc.happened):
1306 m.next = "DECODE_SV"
1307
1308 # if MSR, PC or SVSTATE were changed by the previous
1309 # instruction, go directly back to Fetch, without
1310 # updating MSR, PC or SVSTATE
1311 with m.Elif(self.msr_changed | self.pc_changed |
1312 self.sv_changed):
1313 m.next = "ISSUE_START"
1314
1315 # also return to Fetch, when no output was a vector
1316 # (regardless of SRCSTEP and VL), or when the last
1317 # instruction was really the last one of the VL loop
1318 with m.Elif((~pdecode2.loop_continue) | is_last):
1319 # before going back to fetch, update the PC state
1320 # register with the NIA.
1321 # ok here we are not reading the branch unit.
1322 # TODO: this just blithely overwrites whatever
1323 # pipeline updated the PC
1324 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1325 comb += self.state_w_pc.i_data.eq(nia)
1326 # reset SRCSTEP before returning to Fetch
1327 if self.svp64_en:
1328 with m.If(pdecode2.loop_continue):
1329 comb += new_svstate.srcstep.eq(0)
1330 comb += new_svstate.dststep.eq(0)
1331 comb += self.update_svstate.eq(1)
1332 else:
1333 comb += new_svstate.srcstep.eq(0)
1334 comb += new_svstate.dststep.eq(0)
1335 comb += self.update_svstate.eq(1)
1336 m.next = "ISSUE_START"
1337
1338 # returning to Execute? then, first update SRCSTEP
1339 with m.Else():
1340 comb += new_svstate.srcstep.eq(next_srcstep)
1341 comb += new_svstate.dststep.eq(next_dststep)
1342 comb += self.update_svstate.eq(1)
1343 # return to mask skip loop
1344 m.next = "PRED_SKIP"
1345
1346
1347 # check if svstate needs updating: if so, write it to State Regfile
1348 with m.If(self.update_svstate):
1349 sync += cur_state.svstate.eq(self.new_svstate) # for next clock
1350
1351 def execute_fsm(self, m, core,
1352 exec_insn_i_valid, exec_insn_o_ready,
1353 exec_pc_o_valid, exec_pc_i_ready):
1354 """execute FSM
1355
1356 execute FSM. this interacts with the "issue" FSM
1357 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
1358 (outgoing). SVP64 RM prefixes have already been set up by the
1359 "issue" phase, so execute is fairly straightforward.
1360 """
1361
1362 comb = m.d.comb
1363 sync = m.d.sync
1364 dbg = self.dbg
1365 pdecode2 = self.pdecode2
1366
1367 # temporaries
1368 core_busy_o = core.n.o_data.busy_o # core is busy
1369 core_ivalid_i = core.p.i_valid # instruction is valid
1370
1371 if hasattr(core, "icache"):
1372 fetch_failed = core.icache.i_out.fetch_failed
1373 else:
1374 fetch_failed = Const(0, 1)
1375
1376 with m.FSM(name="exec_fsm"):
1377
1378 # waiting for instruction bus (stays there until not busy)
1379 with m.State("INSN_START"):
1380 comb += exec_insn_o_ready.eq(1)
1381 with m.If(exec_insn_i_valid):
1382 comb += core_ivalid_i.eq(1) # instruction is valid/issued
1383 sync += self.sv_changed.eq(0)
1384 sync += self.pc_changed.eq(0)
1385 sync += self.msr_changed.eq(0)
1386 with m.If(core.p.o_ready): # only move if accepted
1387 m.next = "INSN_ACTIVE" # move to "wait completion"
1388
1389 # instruction started: must wait till it finishes
1390 with m.State("INSN_ACTIVE"):
1391 # note changes to MSR, PC and SVSTATE
1392 # XXX oops, really must monitor *all* State Regfile write
1393 # ports looking for changes!
1394 with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
1395 sync += self.sv_changed.eq(1)
1396 with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
1397 sync += self.msr_changed.eq(1)
1398 with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
1399 sync += self.pc_changed.eq(1)
1400 with m.If(~core_busy_o): # instruction done!
1401 comb += exec_pc_o_valid.eq(1)
1402 with m.If(exec_pc_i_ready):
1403 # when finished, indicate "done".
1404 # however, if there was an exception, the instruction
1405 # is *not* yet done. this is an implementation
1406 # detail: we choose to implement exceptions by
1407 # taking the exception information from the LDST
1408 # unit, putting that *back* into the PowerDecoder2,
1409 # and *re-running the entire instruction*.
1410 # if we erroneously indicate "done" here, it is as if
1411 # there were *TWO* instructions:
1412 # 1) the failed LDST 2) a TRAP.
1413 with m.If(~pdecode2.ldst_exc.happened &
1414 ~pdecode2.instr_fault):
1415 comb += self.insn_done.eq(1)
1416 m.next = "INSN_START" # back to fetch
1417 # terminate returns directly to INSN_START
1418 with m.If(dbg.terminate_i):
1419 # comb += self.insn_done.eq(1) - no because it's not
1420 m.next = "INSN_START" # back to fetch
1421
1422 def elaborate(self, platform):
1423 m = super().elaborate(platform)
1424 # convenience
1425 comb, sync = m.d.comb, m.d.sync
1426 cur_state = self.cur_state
1427 pdecode2 = self.pdecode2
1428 dbg = self.dbg
1429 core = self.core
1430
1431 # set up peripherals and core
1432 core_rst = self.core_rst
1433
1434 # indicate to outside world if any FU is still executing
1435 comb += self.any_busy.eq(core.n.o_data.any_busy_o) # any FU executing
1436
1437 # address of the next instruction, in the absence of a branch
1438 # depends on the instruction size
1439 nia = Signal(64)
1440
1441 # connect up debug signals
1442 with m.If(core.o.core_terminate_o):
1443 comb += dbg.terminate_i.eq(1)
1444
1445 # pass the prefix mode from Fetch to Issue, so the latter can loop
1446 # on VL==0
1447 is_svp64_mode = Signal()
1448
1449 # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
1450 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1451 # these are the handshake signals between each
1452
1453 # fetch FSM can run as soon as the PC is valid
1454 fetch_pc_i_valid = Signal() # Execute tells Fetch "start next read"
1455 fetch_pc_o_ready = Signal() # Fetch Tells SVSTATE "proceed"
1456
1457 # fetch FSM hands over the instruction to be decoded / issued
1458 fetch_insn_o_valid = Signal()
1459 fetch_insn_i_ready = Signal()
1460
1461 # predicate fetch FSM decodes and fetches the predicate
1462 pred_insn_i_valid = Signal()
1463 pred_insn_o_ready = Signal()
1464
1465 # predicate fetch FSM delivers the masks
1466 pred_mask_o_valid = Signal()
1467 pred_mask_i_ready = Signal()
1468
1469 # issue FSM delivers the instruction to be executed
1470 exec_insn_i_valid = Signal()
1471 exec_insn_o_ready = Signal()
1472
1473 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1474 exec_pc_o_valid = Signal()
1475 exec_pc_i_ready = Signal()
1476
1477 # the FSMs here are perhaps unusual in that they detect conditions
1478 # then "hold" information, combinatorially, for the core
1479 # (as opposed to using sync - which would be on a clock's delay)
1480 # this includes the actual opcode, valid flags and so on.
1481
1482 # Fetch, then predicate fetch, then Issue, then Execute.
1483 # Issue is where the VL for-loop lives. the ready/valid
1484 # signalling is used to communicate between the four.
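# (the issue FSM is the hub: fetch_pc_* / fetch_insn_* handshake with the
#  Fetch FSM, pred_insn_* / pred_mask_* with the predicate FSM, and
#  exec_insn_* / exec_pc_* with the execute FSM)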
1485
1486 # set up Fetch FSM
1487 fetch = FetchFSM(self.allow_overlap, self.svp64_en,
1488 self.imem, core_rst, pdecode2, cur_state,
1489 dbg, core,
1490 dbg.state.svstate, # combinatorially same
1491 nia, is_svp64_mode)
1492 m.submodules.fetch = fetch
1493 # connect up in/out data to existing Signals
1494 comb += fetch.p.i_data.pc.eq(dbg.state.pc) # combinatorially same
1495 comb += fetch.p.i_data.msr.eq(dbg.state.msr) # combinatorially same
1496 # and the ready/valid signalling
1497 comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
1498 comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
1499 comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
1500 comb += fetch.n.i_ready.eq(fetch_insn_i_ready)
1501
1502 self.issue_fsm(m, core, nia,
1503 dbg, core_rst, is_svp64_mode,
1504 fetch_pc_o_ready, fetch_pc_i_valid,
1505 fetch_insn_o_valid, fetch_insn_i_ready,
1506 pred_insn_i_valid, pred_insn_o_ready,
1507 pred_mask_o_valid, pred_mask_i_ready,
1508 exec_insn_i_valid, exec_insn_o_ready,
1509 exec_pc_o_valid, exec_pc_i_ready)
1510
1511 if self.svp64_en:
1512 self.fetch_predicate_fsm(m,
1513 pred_insn_i_valid, pred_insn_o_ready,
1514 pred_mask_o_valid, pred_mask_i_ready)
1515
1516 self.execute_fsm(m, core,
1517 exec_insn_i_valid, exec_insn_o_ready,
1518 exec_pc_o_valid, exec_pc_i_ready)
1519
1520 return m
1521
1522
1523 class TestIssuer(Elaboratable):
1524 def __init__(self, pspec):
1525 self.ti = TestIssuerInternal(pspec)
1526 self.pll = DummyPLL(instance=True)
1527
1528 self.dbg_rst_i = Signal(reset_less=True)
1529
1530 # PLL direct clock or not
1531 self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
1532 if self.pll_en:
1533 self.pll_test_o = Signal(reset_less=True)
1534 self.pll_vco_o = Signal(reset_less=True)
1535 self.clk_sel_i = Signal(2, reset_less=True)
1536 self.ref_clk = ClockSignal() # can't rename it but that's ok
1537 self.pllclk_clk = ClockSignal("pllclk")
1538
1539 def elaborate(self, platform):
1540 m = Module()
1541 comb = m.d.comb
1542
1543 # TestIssuer nominally runs at main clock, actually it is
1544 # all combinatorial internally except for coresync'd components
1545 m.submodules.ti = ti = self.ti
1546
1547 if self.pll_en:
1548 # ClockSelect runs at PLL output internal clock rate
1549 m.submodules.wrappll = pll = self.pll
1550
1551 # add clock domains from PLL
1552 cd_pll = ClockDomain("pllclk")
1553 m.domains += cd_pll
1554
1555 # PLL clock established. has the side-effect of running clksel
1556 # at the PLL's speed (see DomainRenamer("pllclk") above)
1557 pllclk = self.pllclk_clk
1558 comb += pllclk.eq(pll.clk_pll_o)
1559
1560 # wire up external 24mhz to PLL
1561 #comb += pll.clk_24_i.eq(self.ref_clk)
1562 # output 18 mhz PLL test signal, and analog oscillator out
1563 comb += self.pll_test_o.eq(pll.pll_test_o)
1564 comb += self.pll_vco_o.eq(pll.pll_vco_o)
1565
1566 # input to pll clock selection
1567 comb += pll.clk_sel_i.eq(self.clk_sel_i)
1568
1569 # now wire up ResetSignals. don't mind them being in this domain
1570 pll_rst = ResetSignal("pllclk")
1571 comb += pll_rst.eq(ResetSignal())
1572
1573 # internal clock is set to selector clock-out. has the side-effect of
1574 # running TestIssuer at this speed (see DomainRenamer("intclk") above)
1575 # debug clock runs at coresync internal clock
1576 if self.ti.dbg_domain != 'sync':
1577 cd_dbgsync = ClockDomain("dbgsync")
1578 intclk = ClockSignal(self.ti.core_domain)
1579 dbgclk = ClockSignal(self.ti.dbg_domain)
1580 # XXX BYPASS PLL XXX
1581 # XXX BYPASS PLL XXX
1582 # XXX BYPASS PLL XXX
1583 if self.pll_en:
1584 comb += intclk.eq(self.ref_clk)
1585 assert self.ti.core_domain != 'sync', \
1586 "cannot set core_domain to sync and use pll at the same time"
1587 else:
1588 if self.ti.core_domain != 'sync':
1589 comb += intclk.eq(ClockSignal())
1590 if self.ti.dbg_domain != 'sync':
1591 dbgclk = ClockSignal(self.ti.dbg_domain)
1592 comb += dbgclk.eq(intclk)
1593 comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)
1594
1595 return m
1596
1597 def ports(self):
1598 return list(self.ti.ports()) + list(self.pll.ports()) + \
1599 [ClockSignal(), ResetSignal()]
1600
1601 def external_ports(self):
1602 ports = self.ti.external_ports()
1603 ports.append(ClockSignal())
1604 ports.append(ResetSignal())
1605 if self.pll_en:
1606 ports.append(self.clk_sel_i)
1607 ports.append(self.pll.clk_24_i)
1608 ports.append(self.pll_test_o)
1609 ports.append(self.pll_vco_o)
1610 ports.append(self.pllclk_clk)
1611 ports.append(self.ref_clk)
1612 return ports
1613
1614
1615 if __name__ == '__main__':
1616 units = {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1617 'spr': 1,
1618 'div': 1,
1619 'mul': 1,
1620 'shiftrot': 1
1621 }
1622 pspec = TestMemPspec(ldst_ifacetype='bare_wb',
1623 imem_ifacetype='bare_wb',
1624 addr_wid=48,
1625 mask_wid=8,
1626 reg_wid=64,
1627 units=units)
1628 dut = TestIssuer(pspec)
1629 vl = main(dut, ports=dut.ports(), name="test_issuer")
1630
1631 if len(sys.argv) == 1:
1632 vl = rtlil.convert(dut, ports=dut.external_ports(), name="test_issuer")
1633 with open("test_issuer.il", "w") as f:
1634 f.write(vl)