pass SVP64 ReMap field through to core and then on to FU decoders
[soc.git] / src / soc / simple / issuer.py
1 """simple core issuer
2
3 not in any way intended for production use. this runs an FSM that:
4
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
9 * increments the PC
10 * does it all over again
11
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
15 improved.
16 """
17
18 from nmigen import (Elaboratable, Module, Signal, ClockSignal, ResetSignal,
19 ClockDomain, DomainRenamer, Mux, Const, Repl, Cat)
20 from nmigen.cli import rtlil
21 from nmigen.cli import main
22 import sys
23
24 from nmigen.lib.coding import PriorityEncoder
25
26 from openpower.decoder.power_decoder import create_pdecode
27 from openpower.decoder.power_decoder2 import PowerDecode2, SVP64PrefixDecoder
28 from openpower.decoder.decode2execute1 import IssuerDecode2ToOperand
29 from openpower.decoder.decode2execute1 import Data
30 from openpower.decoder.power_enums import (MicrOp, SVP64PredInt, SVP64PredCR,
31 SVP64PredMode)
32 from openpower.state import CoreState
33 from openpower.consts import (CR, SVP64CROffs)
34 from soc.experiment.testmem import TestMemory # test only for instructions
35 from soc.regfile.regfiles import StateRegs, FastRegs
36 from soc.simple.core import NonProductionCore
37 from soc.config.test.test_loadstore import TestMemPspec
38 from soc.config.ifetch import ConfigFetchUnit
39 from soc.debug.dmi import CoreDebug, DMIInterface
40 from soc.debug.jtag import JTAG
41 from soc.config.pinouts import get_pinspecs
42 from soc.interrupts.xics import XICS_ICP, XICS_ICS
43 from soc.bus.simple_gpio import SimpleGPIO
44 from soc.bus.SPBlock512W64B8W import SPBlock512W64B8W
45 from soc.clock.select import ClockSelect
46 from soc.clock.dummypll import DummyPLL
47 from openpower.sv.svstate import SVSTATERec
48
49
50 from nmutil.util import rising_edge
51
52 def get_insn(f_instr_o, pc):
53 if f_instr_o.width == 32:
54 return f_instr_o
55 else:
56 # 64-bit: bit 2 of pc decides which word to select
57 return f_instr_o.word_select(pc[2], 32)
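# illustrative example (not executed here): with a 64-bit f_instr_o and
# pc=0x4, pc[2] is 1, so word_select() returns bits [63:32] - the second
# 32-bit word of the fetched doubleword; pc=0x0 would return bits [31:0].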
58
59 # gets state input or reads from state regfile
60 def state_get(m, core_rst, state_i, name, regfile, regnum):
61 comb = m.d.comb
62 sync = m.d.sync
63 # read the PC
64 res = Signal(64, reset_less=True, name=name)
65 res_ok_delay = Signal(name="%s_ok_delay" % name)
66 with m.If(~core_rst):
67 sync += res_ok_delay.eq(~state_i.ok)
68 with m.If(state_i.ok):
69 # incoming override (start from pc_i)
70 comb += res.eq(state_i.data)
71 with m.Else():
72 # otherwise read StateRegs regfile for PC...
73 comb += regfile.ren.eq(1<<regnum)
74 # ... but on a 1-clock delay
75 with m.If(res_ok_delay):
76 comb += res.eq(regfile.data_o)
77 return res
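# usage sketch (mirrors the calls made in elaborate(), below):
#   pc = state_get(m, core_rst, self.pc_i, "pc", self.state_r_pc, StateRegs.PC)
# when state_i.ok is low, the regfile read-enable is raised in one cycle and
# the data is only captured on the next (hence the res_ok_delay register).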
78
79 def get_predint(m, mask, name):
80 """decode SVP64 predicate integer mask field to reg number and invert
81 this is identical to the equivalent function in ISACaller except that
82 it doesn't read the INT directly, it just decodes "what needs to be done"
83 i.e. which INT reg, whether it is shifted and whether it is bit-inverted.
84
85 * all1s is set to indicate that no mask is to be applied.
86 * regread indicates the GPR register number to be read
87 * invert is set to indicate that the register value is to be inverted
88 * unary indicates that the mask is a single bit, 1<<(contents of r3)
89 """
90 comb = m.d.comb
91 regread = Signal(5, name=name+"regread")
92 invert = Signal(name=name+"invert")
93 unary = Signal(name=name+"unary")
94 all1s = Signal(name=name+"all1s")
95 with m.Switch(mask):
96 with m.Case(SVP64PredInt.ALWAYS.value):
97 comb += all1s.eq(1) # use 0b1111 (all ones)
98 with m.Case(SVP64PredInt.R3_UNARY.value):
99 comb += regread.eq(3)
100 comb += unary.eq(1) # 1<<r3 - shift r3 (single bit)
101 with m.Case(SVP64PredInt.R3.value):
102 comb += regread.eq(3)
103 with m.Case(SVP64PredInt.R3_N.value):
104 comb += regread.eq(3)
105 comb += invert.eq(1)
106 with m.Case(SVP64PredInt.R10.value):
107 comb += regread.eq(10)
108 with m.Case(SVP64PredInt.R10_N.value):
109 comb += regread.eq(10)
110 comb += invert.eq(1)
111 with m.Case(SVP64PredInt.R30.value):
112 comb += regread.eq(30)
113 with m.Case(SVP64PredInt.R30_N.value):
114 comb += regread.eq(30)
115 comb += invert.eq(1)
116 return regread, invert, unary, all1s
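# worked example (illustrative only): mask == SVP64PredInt.R10_N.value falls
# into the R10_N case above, giving regread=10, invert=1, unary=0, all1s=0,
# i.e. "read GPR r10 and bit-invert it to form the predicate mask".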
117
118 def get_predcr(m, mask, name):
119 """decode SVP64 predicate CR to reg number field and invert status
120 this is identical to _get_predcr in ISACaller
121 """
122 comb = m.d.comb
123 idx = Signal(2, name=name+"idx")
124 invert = Signal(name=name+"crinvert")
125 with m.Switch(mask):
126 with m.Case(SVP64PredCR.LT.value):
127 comb += idx.eq(CR.LT)
128 comb += invert.eq(0)
129 with m.Case(SVP64PredCR.GE.value):
130 comb += idx.eq(CR.LT)
131 comb += invert.eq(1)
132 with m.Case(SVP64PredCR.GT.value):
133 comb += idx.eq(CR.GT)
134 comb += invert.eq(0)
135 with m.Case(SVP64PredCR.LE.value):
136 comb += idx.eq(CR.GT)
137 comb += invert.eq(1)
138 with m.Case(SVP64PredCR.EQ.value):
139 comb += idx.eq(CR.EQ)
140 comb += invert.eq(0)
141 with m.Case(SVP64PredCR.NE.value):
142 comb += idx.eq(CR.EQ)
143 comb += invert.eq(1)
144 with m.Case(SVP64PredCR.SO.value):
145 comb += idx.eq(CR.SO)
146 comb += invert.eq(0)
147 with m.Case(SVP64PredCR.NS.value):
148 comb += idx.eq(CR.SO)
149 comb += invert.eq(1)
150 return idx, invert
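# worked example (illustrative only): SVP64PredCR.GE selects idx=CR.LT with
# invert=1, because "greater than or equal" is the complement of the LT bit
# in each CR field.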
151
152
153 class TestIssuerInternal(Elaboratable):
154 """TestIssuer - reads instructions from TestMemory and issues them
155
156 efficiency and speed are not the main goals here: functional correctness
157 and code clarity are. optimisations (which almost 100% interfere with
158 easy understanding) come later.
159 """
160 def __init__(self, pspec):
161
162 # test if SVP64 is to be enabled
163 self.svp64_en = hasattr(pspec, "svp64") and (pspec.svp64 == True)
164
165 # and if regfiles are reduced
166 self.regreduce_en = (hasattr(pspec, "regreduce") and
167 (pspec.regreduce == True))
168
169 # JTAG interface. add this right at the start because if it's
170 # added it *modifies* the pspec, by adding enable/disable signals
171 # for parts of the rest of the core
172 self.jtag_en = hasattr(pspec, "debug") and pspec.debug == 'jtag'
173 if self.jtag_en:
174 # XXX MUST keep this up-to-date with litex, and
175 # soc-cocotb-sim, and err.. all needs sorting out, argh
176 subset = ['uart',
177 'mtwi',
178 'eint', 'gpio', 'mspi0',
179 # 'mspi1', - disabled for now
180 # 'pwm', 'sd0', - disabled for now
181 'sdr']
182 self.jtag = JTAG(get_pinspecs(subset=subset))
183 # add signals to pspec to enable/disable icache and dcache
184 # (or data and instruction wishbone if icache/dcache not included)
185 # https://bugs.libre-soc.org/show_bug.cgi?id=520
186 # TODO: do we actually care if these are not domain-synchronised?
187 # honestly probably not.
188 pspec.wb_icache_en = self.jtag.wb_icache_en
189 pspec.wb_dcache_en = self.jtag.wb_dcache_en
190 self.wb_sram_en = self.jtag.wb_sram_en
191 else:
192 self.wb_sram_en = Const(1)
193
194 # add 4k sram blocks?
195 self.sram4x4k = (hasattr(pspec, "sram4x4kblock") and
196 pspec.sram4x4kblock == True)
197 if self.sram4x4k:
198 self.sram4k = []
199 for i in range(4):
200 self.sram4k.append(SPBlock512W64B8W(name="sram4k_%d" % i,
201 features={'err'}))
202
203 # add interrupt controller?
204 self.xics = hasattr(pspec, "xics") and pspec.xics == True
205 if self.xics:
206 self.xics_icp = XICS_ICP()
207 self.xics_ics = XICS_ICS()
208 self.int_level_i = self.xics_ics.int_level_i
209
210 # add GPIO peripheral?
211 self.gpio = hasattr(pspec, "gpio") and pspec.gpio == True
212 if self.gpio:
213 self.simple_gpio = SimpleGPIO()
214 self.gpio_o = self.simple_gpio.gpio_o
215
216 # main instruction core. suitable for prototyping / demo only
217 self.core = core = NonProductionCore(pspec)
218
219 # instruction decoder. goes into Trap Record
220 pdecode = create_pdecode()
221 self.cur_state = CoreState("cur") # current state (MSR/PC/SVSTATE)
222 self.pdecode2 = PowerDecode2(pdecode, state=self.cur_state,
223 opkls=IssuerDecode2ToOperand,
224 svp64_en=self.svp64_en,
225 regreduce_en=self.regreduce_en)
226 if self.svp64_en:
227 self.svp64 = SVP64PrefixDecoder() # for decoding SVP64 prefix
228
229 # Test Instruction memory
230 self.imem = ConfigFetchUnit(pspec).fu
231
232 # DMI interface
233 self.dbg = CoreDebug()
234
235 # instruction go/monitor
236 self.pc_o = Signal(64, reset_less=True)
237 self.pc_i = Data(64, "pc_i") # set "ok" to indicate "please change me"
238 self.svstate_i = Data(32, "svstate_i") # ditto
239 self.core_bigendian_i = Signal() # TODO: set based on MSR.LE
240 self.busy_o = Signal(reset_less=True)
241 self.memerr_o = Signal(reset_less=True)
242
243 # STATE regfile read /write ports for PC, MSR, SVSTATE
244 staterf = self.core.regs.rf['state']
245 self.state_r_pc = staterf.r_ports['cia'] # PC rd
246 self.state_w_pc = staterf.w_ports['d_wr1'] # PC wr
247 self.state_r_msr = staterf.r_ports['msr'] # MSR rd
248 self.state_r_sv = staterf.r_ports['sv'] # SVSTATE rd
249 self.state_w_sv = staterf.w_ports['sv'] # SVSTATE wr
250
251 # DMI interface access
252 intrf = self.core.regs.rf['int']
253 crrf = self.core.regs.rf['cr']
254 xerrf = self.core.regs.rf['xer']
255 self.int_r = intrf.r_ports['dmi'] # INT read
256 self.cr_r = crrf.r_ports['full_cr_dbg'] # CR read
257 self.xer_r = xerrf.r_ports['full_xer'] # XER read
258
259 if self.svp64_en:
260 # for predication
261 self.int_pred = intrf.r_ports['pred'] # INT predicate read
262 self.cr_pred = crrf.r_ports['cr_pred'] # CR predicate read
263
264 # hack method of keeping an eye on whether branch/trap set the PC
265 self.state_nia = self.core.regs.rf['state'].w_ports['nia']
266 self.state_nia.wen.name = 'state_nia_wen'
267
268 # pulse to synchronize the simulator at instruction end
269 self.insn_done = Signal()
270
271 if self.svp64_en:
272 # store copies of predicate masks
273 self.srcmask = Signal(64)
274 self.dstmask = Signal(64)
275
276 def fetch_fsm(self, m, core, pc, svstate, nia, is_svp64_mode,
277 fetch_pc_ready_o, fetch_pc_valid_i,
278 fetch_insn_valid_o, fetch_insn_ready_i):
279 """fetch FSM
280
281 this FSM performs fetch of raw instruction data, partial-decodes
282 it 32-bit at a time to detect SVP64 prefixes, and will optionally
283 read a 2nd 32-bit quantity if that occurs.
284 """
285 comb = m.d.comb
286 sync = m.d.sync
287 pdecode2 = self.pdecode2
288 cur_state = self.cur_state
289 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
290
291 msr_read = Signal(reset=1)
292
293 with m.FSM(name='fetch_fsm'):
294
295 # waiting (zzz)
296 with m.State("IDLE"):
297 comb += fetch_pc_ready_o.eq(1)
298 with m.If(fetch_pc_valid_i):
299 # instruction allowed to go: start by reading the PC
300 # capture the PC and also drop it into Insn Memory
301 # we have joined a pair of combinatorial memory
302 # lookups together. this is Generally Bad.
303 comb += self.imem.a_pc_i.eq(pc)
304 comb += self.imem.a_valid_i.eq(1)
305 comb += self.imem.f_valid_i.eq(1)
306 sync += cur_state.pc.eq(pc)
307 sync += cur_state.svstate.eq(svstate) # and svstate
308
309 # initiate read of MSR. arrives one clock later
310 comb += self.state_r_msr.ren.eq(1 << StateRegs.MSR)
311 sync += msr_read.eq(0)
312
313 m.next = "INSN_READ" # move to "wait for bus" phase
314
315 # wait here for the instruction read (and MSR/SV read) to complete
316 with m.State("INSN_READ"):
317 # one cycle later, msr/sv read arrives. valid only once.
318 with m.If(~msr_read):
319 sync += msr_read.eq(1) # yeah don't read it again
320 sync += cur_state.msr.eq(self.state_r_msr.data_o)
321 with m.If(self.imem.f_busy_o): # zzz...
322 # busy: stay in wait-read
323 comb += self.imem.a_valid_i.eq(1)
324 comb += self.imem.f_valid_i.eq(1)
325 with m.Else():
326 # not busy: instruction fetched
327 insn = get_insn(self.imem.f_instr_o, cur_state.pc)
328 if self.svp64_en:
329 svp64 = self.svp64
330 # decode the SVP64 prefix, if any
331 comb += svp64.raw_opcode_in.eq(insn)
332 comb += svp64.bigendian.eq(self.core_bigendian_i)
333 # pass the decoded prefix (if any) to PowerDecoder2
334 sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
335 # remember whether this is a prefixed instruction, so
336 # the FSM can readily loop when VL==0
337 sync += is_svp64_mode.eq(svp64.is_svp64_mode)
338 # calculate the address of the following instruction
339 insn_size = Mux(svp64.is_svp64_mode, 8, 4)
340 sync += nia.eq(cur_state.pc + insn_size)
341 with m.If(~svp64.is_svp64_mode):
342 # with no prefix, store the instruction
343 # and hand it directly to the next FSM
344 sync += dec_opcode_i.eq(insn)
345 m.next = "INSN_READY"
346 with m.Else():
347 # fetch the rest of the instruction from memory
348 comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
349 comb += self.imem.a_valid_i.eq(1)
350 comb += self.imem.f_valid_i.eq(1)
351 m.next = "INSN_READ2"
352 else:
353 # not SVP64 - 32-bit only
354 sync += nia.eq(cur_state.pc + 4)
355 sync += dec_opcode_i.eq(insn)
356 m.next = "INSN_READY"
357
358 with m.State("INSN_READ2"):
359 with m.If(self.imem.f_busy_o): # zzz...
360 # busy: stay in wait-read
361 comb += self.imem.a_valid_i.eq(1)
362 comb += self.imem.f_valid_i.eq(1)
363 with m.Else():
364 # not busy: instruction fetched
365 insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
366 sync += dec_opcode_i.eq(insn)
367 m.next = "INSN_READY"
368 # TODO: probably can start looking at pdecode2.rm_dec
369 # here or maybe even in INSN_READ state, if svp64_mode
370 # detected, in order to trigger - and wait for - the
371 # predicate reading.
372 if self.svp64_en:
373 pmode = pdecode2.rm_dec.predmode
374 """
375 if pmode != SVP64PredMode.ALWAYS.value:
376 fire predicate loading FSM and wait before
377 moving to INSN_READY
378 else:
379 sync += self.srcmask.eq(-1) # set to all 1s
380 sync += self.dstmask.eq(-1) # set to all 1s
381 m.next = "INSN_READY"
382 """
383
384 with m.State("INSN_READY"):
385 # hand over the instruction, to be decoded
386 comb += fetch_insn_valid_o.eq(1)
387 with m.If(fetch_insn_ready_i):
388 m.next = "IDLE"
389
390 def fetch_predicate_fsm(self, m,
391 pred_insn_valid_i, pred_insn_ready_o,
392 pred_mask_valid_o, pred_mask_ready_i):
393 """fetch_predicate_fsm - obtains (constructs in the case of CR)
394 src/dest predicate masks
395
396 https://bugs.libre-soc.org/show_bug.cgi?id=617
397 the predicates can be read here, by using IntRegs r_ports['pred']
398 or CRRegs r_ports['pred']. in the case of CRs it will have to
399 be done through multiple reads, extracting one relevant at a time.
400 later, a faster way would be to use the 32-bit-wide CR port but
401 this is more complex decoding, here. equivalent code used in
402 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
403
404 note: this ENTIRE FSM is not to be called when svp64 is disabled
405 """
406 comb = m.d.comb
407 sync = m.d.sync
408 pdecode2 = self.pdecode2
409 rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
410 predmode = rm_dec.predmode
411 srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
412 cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
413 # get src/dst step, so we can skip already used mask bits
414 cur_state = self.cur_state
415 srcstep = cur_state.svstate.srcstep
416 dststep = cur_state.svstate.dststep
417 cur_vl = cur_state.svstate.vl
418
419 # decode predicates
420 sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
421 dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
422 sidx, scrinvert = get_predcr(m, srcpred, 's')
423 didx, dcrinvert = get_predcr(m, dstpred, 'd')
424
425 # store fetched masks, for either intpred or crpred
426 # when src/dst step is not zero, the skipped mask bits need to be
427 # shifted-out, before actually storing them in src/dest mask
428 new_srcmask = Signal(64, reset_less=True)
429 new_dstmask = Signal(64, reset_less=True)
430
431 with m.FSM(name="fetch_predicate"):
432
433 with m.State("FETCH_PRED_IDLE"):
434 comb += pred_insn_ready_o.eq(1)
435 with m.If(pred_insn_valid_i):
436 with m.If(predmode == SVP64PredMode.INT):
437 # skip fetching destination mask register, when zero
438 with m.If(dall1s):
439 sync += new_dstmask.eq(-1)
440 # directly go to fetch source mask register
441 # guaranteed not to be zero (otherwise predmode
442 # would be SVP64PredMode.ALWAYS, not INT)
443 comb += int_pred.addr.eq(sregread)
444 comb += int_pred.ren.eq(1)
445 m.next = "INT_SRC_READ"
446 # fetch destination predicate register
447 with m.Else():
448 comb += int_pred.addr.eq(dregread)
449 comb += int_pred.ren.eq(1)
450 m.next = "INT_DST_READ"
451 with m.Elif(predmode == SVP64PredMode.CR):
452 # go fetch masks from the CR register file
453 sync += new_srcmask.eq(0)
454 sync += new_dstmask.eq(0)
455 m.next = "CR_READ"
456 with m.Else():
457 sync += self.srcmask.eq(-1)
458 sync += self.dstmask.eq(-1)
459 m.next = "FETCH_PRED_DONE"
460
461 with m.State("INT_DST_READ"):
462 # store destination mask
463 inv = Repl(dinvert, 64)
464 with m.If(dunary):
465 # set selected mask bit for 1<<r3 mode
466 dst_shift = Signal(range(64))
467 comb += dst_shift.eq(self.int_pred.data_o & 0b111111)
468 sync += new_dstmask.eq(1 << dst_shift)
469 with m.Else():
470 # invert mask if requested
471 sync += new_dstmask.eq(self.int_pred.data_o ^ inv)
472 # skip fetching source mask register, when zero
473 with m.If(sall1s):
474 sync += new_srcmask.eq(-1)
475 m.next = "FETCH_PRED_SHIFT_MASK"
476 # fetch source predicate register
477 with m.Else():
478 comb += int_pred.addr.eq(sregread)
479 comb += int_pred.ren.eq(1)
480 m.next = "INT_SRC_READ"
481
482 with m.State("INT_SRC_READ"):
483 # store source mask
484 inv = Repl(sinvert, 64)
485 with m.If(sunary):
486 # set selected mask bit for 1<<r3 mode
487 src_shift = Signal(range(64))
488 comb += src_shift.eq(self.int_pred.data_o & 0b111111)
489 sync += new_srcmask.eq(1 << src_shift)
490 with m.Else():
491 # invert mask if requested
492 sync += new_srcmask.eq(self.int_pred.data_o ^ inv)
493 m.next = "FETCH_PRED_SHIFT_MASK"
494
495 # fetch masks from the CR register file
496 # implements the following loop:
497 # idx, inv = get_predcr(mask)
498 # mask = 0
499 # for cr_idx in range(vl):
500 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
501 # if cr[idx] ^ inv:
502 # mask |= 1 << cr_idx
503 # return mask
504 with m.State("CR_READ"):
505 # CR index to be read, which will be ready by the next cycle
506 cr_idx = Signal.like(cur_vl, reset_less=True)
507 # submit the read operation to the regfile
508 with m.If(cr_idx != cur_vl):
509 # the CR read port is unary ...
510 # ren = 1 << cr_idx
511 # ... in MSB0 convention ...
512 # ren = 1 << (7 - cr_idx)
513 # ... and with an offset:
514 # ren = 1 << (7 - off - cr_idx)
515 idx = SVP64CROffs.CRPred + cr_idx
516 comb += cr_pred.ren.eq(1 << (7 - idx))
517 # signal data valid in the next cycle
518 cr_read = Signal(reset_less=True)
519 sync += cr_read.eq(1)
520 # load the next index
521 sync += cr_idx.eq(cr_idx + 1)
522 with m.Else():
523 # exit on loop end
524 sync += cr_read.eq(0)
525 sync += cr_idx.eq(0)
526 m.next = "FETCH_PRED_SHIFT_MASK"
527 with m.If(cr_read):
528 # compensate for the one cycle delay on the regfile
529 cur_cr_idx = Signal.like(cur_vl)
530 comb += cur_cr_idx.eq(cr_idx - 1)
531 # read the CR field, select the appropriate bit
532 cr_field = Signal(4)
533 scr_bit = Signal()
534 dcr_bit = Signal()
535 comb += cr_field.eq(cr_pred.data_o)
536 comb += scr_bit.eq(cr_field.bit_select(sidx, 1) ^ scrinvert)
537 comb += dcr_bit.eq(cr_field.bit_select(didx, 1) ^ dcrinvert)
538 # set the corresponding mask bit
539 bit_to_set = Signal.like(self.srcmask)
540 comb += bit_to_set.eq(1 << cur_cr_idx)
541 with m.If(scr_bit):
542 sync += new_srcmask.eq(new_srcmask | bit_to_set)
543 with m.If(dcr_bit):
544 sync += new_dstmask.eq(new_dstmask | bit_to_set)
545
546 with m.State("FETCH_PRED_SHIFT_MASK"):
547 # shift-out skipped mask bits
548 sync += self.srcmask.eq(new_srcmask >> srcstep)
549 sync += self.dstmask.eq(new_dstmask >> dststep)
550 m.next = "FETCH_PRED_DONE"
551
552 with m.State("FETCH_PRED_DONE"):
553 comb += pred_mask_valid_o.eq(1)
554 with m.If(pred_mask_ready_i):
555 m.next = "FETCH_PRED_IDLE"
556
557 def issue_fsm(self, m, core, pc_changed, sv_changed, nia,
558 dbg, core_rst, is_svp64_mode,
559 fetch_pc_ready_o, fetch_pc_valid_i,
560 fetch_insn_valid_o, fetch_insn_ready_i,
561 pred_insn_valid_i, pred_insn_ready_o,
562 pred_mask_valid_o, pred_mask_ready_i,
563 exec_insn_valid_i, exec_insn_ready_o,
564 exec_pc_valid_o, exec_pc_ready_i):
565 """issue FSM
566
567 decode / issue FSM. this interacts with the "fetch" FSM
568 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
569 (outgoing). also interacts with the "execute" FSM
570 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
571 (incoming).
572 SVP64 RM prefixes have already been set up by the
573 "fetch" phase, so execute is fairly straightforward.
574 """
575
576 comb = m.d.comb
577 sync = m.d.sync
578 pdecode2 = self.pdecode2
579 cur_state = self.cur_state
580
581 # temporaries
582 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
583
584 # for updating svstate (things like srcstep etc.)
585 update_svstate = Signal() # set this (below) if updating
586 new_svstate = SVSTATERec("new_svstate")
587 comb += new_svstate.eq(cur_state.svstate)
588
589 # precalculate srcstep+1 and dststep+1
590 cur_srcstep = cur_state.svstate.srcstep
591 cur_dststep = cur_state.svstate.dststep
592 next_srcstep = Signal.like(cur_srcstep)
593 next_dststep = Signal.like(cur_dststep)
594 comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
595 comb += next_dststep.eq(cur_state.svstate.dststep+1)
596
597 # note if an exception happened. in a pipelined or OoO design
598 # this needs to be accompanied by "shadowing" (or stalling)
599 el = []
600 for exc in core.fus.excs.values():
601 el.append(exc.happened)
602 exc_happened = Signal()
603 if len(el) > 0: # at least one exception
604 comb += exc_happened.eq(Cat(*el).bool())
605
606 with m.FSM(name="issue_fsm"):
607
608 # sync with the "fetch" phase which is reading the instruction
609 # at this point, there is no instruction running, that
610 # could inadvertently update the PC.
611 with m.State("ISSUE_START"):
612 # wait on "core stop" release, before next fetch
613 # need to do this here, in case we are in a VL==0 loop
614 with m.If(~dbg.core_stop_o & ~core_rst):
615 comb += fetch_pc_valid_i.eq(1) # tell fetch to start
616 with m.If(fetch_pc_ready_o): # fetch acknowledged us
617 m.next = "INSN_WAIT"
618 with m.Else():
619 # tell core it's stopped, and acknowledge debug handshake
620 comb += dbg.core_stopped_i.eq(1)
621 # while stopped, allow updating the PC and SVSTATE
622 with m.If(self.pc_i.ok):
623 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
624 comb += self.state_w_pc.data_i.eq(self.pc_i.data)
625 sync += pc_changed.eq(1)
626 with m.If(self.svstate_i.ok):
627 comb += new_svstate.eq(self.svstate_i.data)
628 comb += update_svstate.eq(1)
629 sync += sv_changed.eq(1)
630
631 # wait for an instruction to arrive from Fetch
632 with m.State("INSN_WAIT"):
633 comb += fetch_insn_ready_i.eq(1)
634 with m.If(fetch_insn_valid_o):
635 # loop into ISSUE_START if it's a SVP64 instruction
636 # and VL == 0. this is because VL==0 is a for-loop
637 # from 0 to 0, i.e. always a NOP.
638 cur_vl = cur_state.svstate.vl
639 with m.If(is_svp64_mode & (cur_vl == 0)):
640 # update the PC before fetching the next instruction
641 # since we are in a VL==0 loop, no instruction was
642 # executed that we could be overwriting
643 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
644 comb += self.state_w_pc.data_i.eq(nia)
645 comb += self.insn_done.eq(1)
646 m.next = "ISSUE_START"
647 with m.Else():
648 if self.svp64_en:
649 m.next = "PRED_START" # start fetching predicate
650 else:
651 m.next = "DECODE_SV" # skip predication
652
653 with m.State("PRED_START"):
654 comb += pred_insn_valid_i.eq(1) # tell fetch_pred to start
655 with m.If(pred_insn_ready_o): # fetch_pred acknowledged us
656 m.next = "MASK_WAIT"
657
658 with m.State("MASK_WAIT"):
659 comb += pred_mask_ready_i.eq(1) # ready to receive the masks
660 with m.If(pred_mask_valid_o): # predication masks are ready
661 m.next = "PRED_SKIP"
662
663 # skip zeros in predicate
664 with m.State("PRED_SKIP"):
665 with m.If(~is_svp64_mode):
666 m.next = "DECODE_SV" # nothing to do
667 with m.Else():
668 if self.svp64_en:
669 pred_src_zero = pdecode2.rm_dec.pred_sz
670 pred_dst_zero = pdecode2.rm_dec.pred_dz
671
672 # new srcstep, after skipping zeros
673 skip_srcstep = Signal.like(cur_srcstep)
674 # value to be added to the current srcstep
675 src_delta = Signal.like(cur_srcstep)
676 # add leading zeros to srcstep, if not in zero mode
677 with m.If(~pred_src_zero):
678 # priority encoder (count leading zeros)
679 # append guard bit, in case the mask is all zeros
680 pri_enc_src = PriorityEncoder(65)
681 m.submodules.pri_enc_src = pri_enc_src
682 comb += pri_enc_src.i.eq(Cat(self.srcmask,
683 Const(1, 1)))
684 comb += src_delta.eq(pri_enc_src.o)
685 # apply delta to srcstep
686 comb += skip_srcstep.eq(cur_srcstep + src_delta)
687 # shift-out all leading zeros from the mask
688 # plus the leading "one" bit
689 # TODO count leading zeros and shift-out the zero
690 # bits, in the same step, in hardware
691 sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
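# worked example (illustrative): srcmask=0b01100 with cur_srcstep=0 gives
# src_delta=2 from the priority encoder, so skip_srcstep=2 and the mask
# becomes 0b001 after the shift - elements 0 and 1 are never issued.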
692
693 # same as above, but for dststep
694 skip_dststep = Signal.like(cur_dststep)
695 dst_delta = Signal.like(cur_dststep)
696 with m.If(~pred_dst_zero):
697 pri_enc_dst = PriorityEncoder(65)
698 m.submodules.pri_enc_dst = pri_enc_dst
699 comb += pri_enc_dst.i.eq(Cat(self.dstmask,
700 Const(1, 1)))
701 comb += dst_delta.eq(pri_enc_dst.o)
702 comb += skip_dststep.eq(cur_dststep + dst_delta)
703 sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
704
705 # TODO: initialize mask[VL]=1 to avoid passing past VL
706 with m.If((skip_srcstep >= cur_vl) |
707 (skip_dststep >= cur_vl)):
708 # end of VL loop. Update PC and reset src/dst step
709 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
710 comb += self.state_w_pc.data_i.eq(nia)
711 comb += new_svstate.srcstep.eq(0)
712 comb += new_svstate.dststep.eq(0)
713 comb += update_svstate.eq(1)
714 # synchronize with the simulator
715 comb += self.insn_done.eq(1)
716 # go back to Issue
717 m.next = "ISSUE_START"
718 with m.Else():
719 # update new src/dst step
720 comb += new_svstate.srcstep.eq(skip_srcstep)
721 comb += new_svstate.dststep.eq(skip_dststep)
722 comb += update_svstate.eq(1)
723 # proceed to Decode
724 m.next = "DECODE_SV"
725
726 # after src/dst step have been updated, we are ready
727 # to decode the instruction
728 with m.State("DECODE_SV"):
729 # decode the instruction
730 sync += core.e.eq(pdecode2.e)
731 sync += core.state.eq(cur_state)
732 sync += core.raw_insn_i.eq(dec_opcode_i)
733 sync += core.bigendian_i.eq(self.core_bigendian_i)
734 sync += core.sv_rm.eq(pdecode2.sv_rm)
735 # set RA_OR_ZERO detection in satellite decoders
736 sync += core.sv_a_nz.eq(pdecode2.sv_a_nz)
737 m.next = "INSN_EXECUTE" # move to "execute"
738
739 # handshake with execution FSM, move to "wait" once acknowledged
740 with m.State("INSN_EXECUTE"):
741 comb += exec_insn_valid_i.eq(1) # trigger execute
742 with m.If(exec_insn_ready_o): # execute acknowledged us
743 m.next = "EXECUTE_WAIT"
744
745 with m.State("EXECUTE_WAIT"):
746 # wait on "core stop" release, at instruction end
747 # need to do this here, in case we are in a VL>1 loop
748 with m.If(~dbg.core_stop_o & ~core_rst):
749 comb += exec_pc_ready_i.eq(1)
750 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
751 #with m.If(exec_pc_valid_o & exc_happened):
752 # TODO: the exception info needs to be blatted
753 # into pdecode.ldst_exc, and the instruction "re-run".
754 # when ldst_exc.happened is set, the PowerDecoder2
755 # reacts very differently: it re-writes the instruction
756 # with a "trap" (calls PowerDecoder2.trap()) which
757 # will *overwrite* whatever was requested and jump the
758 # PC to the exception address, as well as alter MSR.
759 # nothing else needs to be done other than to note
760 # the change of PC and MSR (and, later, SVSTATE)
761 #with m.Elif(exec_pc_valid_o):
762 with m.If(exec_pc_valid_o): # replace with Elif (above)
763
764 # was this the last loop iteration?
765 is_last = Signal()
766 cur_vl = cur_state.svstate.vl
767 comb += is_last.eq(next_srcstep == cur_vl)
768
769 # if either PC or SVSTATE were changed by the previous
770 # instruction, go directly back to Fetch, without
771 # updating either PC or SVSTATE
772 with m.If(pc_changed | sv_changed):
773 m.next = "ISSUE_START"
774
775 # also return to Fetch, when no output was a vector
776 # (regardless of SRCSTEP and VL), or when the last
777 # instruction was really the last one of the VL loop
778 with m.Elif((~pdecode2.loop_continue) | is_last):
779 # before going back to fetch, update the PC state
780 # register with the NIA.
781 # ok here we are not reading the branch unit.
782 # TODO: this just blithely overwrites whatever
783 # pipeline updated the PC
784 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
785 comb += self.state_w_pc.data_i.eq(nia)
786 # reset SRCSTEP before returning to Fetch
787 if self.svp64_en:
788 with m.If(pdecode2.loop_continue):
789 comb += new_svstate.srcstep.eq(0)
790 comb += new_svstate.dststep.eq(0)
791 comb += update_svstate.eq(1)
792 else:
793 comb += new_svstate.srcstep.eq(0)
794 comb += new_svstate.dststep.eq(0)
795 comb += update_svstate.eq(1)
796 m.next = "ISSUE_START"
797
798 # returning to Execute? then, first update SRCSTEP
799 with m.Else():
800 comb += new_svstate.srcstep.eq(next_srcstep)
801 comb += new_svstate.dststep.eq(next_dststep)
802 comb += update_svstate.eq(1)
803 # return to mask skip loop
804 m.next = "PRED_SKIP"
805
806 with m.Else():
807 comb += dbg.core_stopped_i.eq(1)
808 # while stopped, allow updating the PC and SVSTATE
809 with m.If(self.pc_i.ok):
810 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
811 comb += self.state_w_pc.data_i.eq(self.pc_i.data)
812 sync += pc_changed.eq(1)
813 with m.If(self.svstate_i.ok):
814 comb += new_svstate.eq(self.svstate_i.data)
815 comb += update_svstate.eq(1)
816 sync += sv_changed.eq(1)
817
818 # check if svstate needs updating: if so, write it to State Regfile
819 with m.If(update_svstate):
820 comb += self.state_w_sv.wen.eq(1<<StateRegs.SVSTATE)
821 comb += self.state_w_sv.data_i.eq(new_svstate)
822 sync += cur_state.svstate.eq(new_svstate) # for next clock
823
824 def execute_fsm(self, m, core, pc_changed, sv_changed,
825 exec_insn_valid_i, exec_insn_ready_o,
826 exec_pc_valid_o, exec_pc_ready_i):
827 """execute FSM
828
829 execute FSM. this interacts with the "issue" FSM
830 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
831 (outgoing). SVP64 RM prefixes have already been set up by the
832 "issue" phase, so execute is fairly straightforward.
833 """
834
835 comb = m.d.comb
836 sync = m.d.sync
837 pdecode2 = self.pdecode2
838
839 # temporaries
840 core_busy_o = core.busy_o # core is busy
841 core_ivalid_i = core.ivalid_i # instruction is valid
842 core_issue_i = core.issue_i # instruction is issued
843 insn_type = core.e.do.insn_type # instruction MicroOp type
844
845 with m.FSM(name="exec_fsm"):
846
847 # waiting for instruction bus (stays there until not busy)
848 with m.State("INSN_START"):
849 comb += exec_insn_ready_o.eq(1)
850 with m.If(exec_insn_valid_i):
851 comb += core_ivalid_i.eq(1) # instruction is valid
852 comb += core_issue_i.eq(1) # and issued
853 sync += sv_changed.eq(0)
854 sync += pc_changed.eq(0)
855 m.next = "INSN_ACTIVE" # move to "wait completion"
856
857 # instruction started: must wait till it finishes
858 with m.State("INSN_ACTIVE"):
859 with m.If(insn_type != MicrOp.OP_NOP):
860 comb += core_ivalid_i.eq(1) # instruction is valid
861 # note changes to PC and SVSTATE
862 with m.If(self.state_nia.wen & (1<<StateRegs.SVSTATE)):
863 sync += sv_changed.eq(1)
864 with m.If(self.state_nia.wen & (1<<StateRegs.PC)):
865 sync += pc_changed.eq(1)
866 with m.If(~core_busy_o): # instruction done!
867 comb += exec_pc_valid_o.eq(1)
868 with m.If(exec_pc_ready_i):
869 comb += self.insn_done.eq(1)
870 m.next = "INSN_START" # back to fetch
871
872 def setup_peripherals(self, m):
873 comb, sync = m.d.comb, m.d.sync
874
875 m.submodules.core = core = DomainRenamer("coresync")(self.core)
876 m.submodules.imem = imem = self.imem
877 m.submodules.dbg = dbg = self.dbg
878 if self.jtag_en:
879 m.submodules.jtag = jtag = self.jtag
880 # TODO: UART2GDB mux, here, from external pin
881 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
882 sync += dbg.dmi.connect_to(jtag.dmi)
883
884 cur_state = self.cur_state
885
886 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
887 if self.sram4x4k:
888 for i, sram in enumerate(self.sram4k):
889 m.submodules["sram4k_%d" % i] = sram
890 comb += sram.enable.eq(self.wb_sram_en)
891
892 # XICS interrupt handler
893 if self.xics:
894 m.submodules.xics_icp = icp = self.xics_icp
895 m.submodules.xics_ics = ics = self.xics_ics
896 comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
897 sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
898
899 # GPIO test peripheral
900 if self.gpio:
901 m.submodules.simple_gpio = simple_gpio = self.simple_gpio
902
903 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
904 # XXX causes litex ECP5 test to get wrong idea about input and output
905 # (but works with verilator sim *sigh*)
906 #if self.gpio and self.xics:
907 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
908
909 # instruction decoder
910 pdecode = create_pdecode()
911 m.submodules.dec2 = pdecode2 = self.pdecode2
912 if self.svp64_en:
913 m.submodules.svp64 = svp64 = self.svp64
914
915 # convenience
916 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
917 intrf = self.core.regs.rf['int']
918
919 # clock delay power-on reset
920 cd_por = ClockDomain(reset_less=True)
921 cd_sync = ClockDomain()
922 core_sync = ClockDomain("coresync")
923 m.domains += cd_por, cd_sync, core_sync
924
925 ti_rst = Signal(reset_less=True)
926 delay = Signal(range(4), reset=3)
927 with m.If(delay != 0):
928 m.d.por += delay.eq(delay - 1)
929 comb += cd_por.clk.eq(ClockSignal())
930
931 # power-on reset delay
932 core_rst = ResetSignal("coresync")
933 comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
934 comb += core_rst.eq(ti_rst)
935
936 # busy/halted signals from core
937 comb += self.busy_o.eq(core.busy_o)
938 comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
939
940 # temporary hack: says "go" immediately for both address gen and ST
941 l0 = core.l0
942 ldst = core.fus.fus['ldst0']
943 st_go_edge = rising_edge(m, ldst.st.rel_o)
944 m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o) # link addr-go direct to rel
945 m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
946
947 return core_rst
948
949 def elaborate(self, platform):
950 m = Module()
951 # convenience
952 comb, sync = m.d.comb, m.d.sync
953 cur_state = self.cur_state
954 pdecode2 = self.pdecode2
955 dbg = self.dbg
956 core = self.core
957
958 # set up peripherals and core
959 core_rst = self.setup_peripherals(m)
960
961 # reset current state if core reset requested
962 with m.If(core_rst):
963 m.d.sync += self.cur_state.eq(0)
964
965 # PC and instruction from I-Memory
966 comb += self.pc_o.eq(cur_state.pc)
967 pc_changed = Signal() # note write to PC
968 sv_changed = Signal() # note write to SVSTATE
969
970 # read state either from incoming override or from regfile
971 # TODO: really should be doing MSR in the same way
972 pc = state_get(m, core_rst, self.pc_i,
973 "pc", # read PC
974 self.state_r_pc, StateRegs.PC)
975 svstate = state_get(m, core_rst, self.svstate_i,
976 "svstate", # read SVSTATE
977 self.state_r_sv, StateRegs.SVSTATE)
978
979 # don't write pc every cycle
980 comb += self.state_w_pc.wen.eq(0)
981 comb += self.state_w_pc.data_i.eq(0)
982
983 # don't read msr every cycle
984 comb += self.state_r_msr.ren.eq(0)
985
986 # address of the next instruction, in the absence of a branch
987 # depends on the instruction size
988 nia = Signal(64)
989
990 # connect up debug signals
991 # TODO comb += core.icache_rst_i.eq(dbg.icache_rst_o)
992 comb += dbg.terminate_i.eq(core.core_terminate_o)
993 comb += dbg.state.pc.eq(pc)
994 comb += dbg.state.svstate.eq(svstate)
995 comb += dbg.state.msr.eq(cur_state.msr)
996
997 # pass the prefix mode from Fetch to Issue, so the latter can loop
998 # on VL==0
999 is_svp64_mode = Signal()
1000
1001 # there are *FOUR* FSMs: fetch (32/64-bit), predicate fetch, issue and
1002 # execute. these are the handshake signals between them
1003
1004 # fetch FSM can run as soon as the PC is valid
1005 fetch_pc_valid_i = Signal() # Issue tells Fetch "start next read"
1006 fetch_pc_ready_o = Signal() # Fetch tells Issue "proceed"
1007
1008 # fetch FSM hands over the instruction to be decoded / issued
1009 fetch_insn_valid_o = Signal()
1010 fetch_insn_ready_i = Signal()
1011
1012 # predicate fetch FSM decodes and fetches the predicate
1013 pred_insn_valid_i = Signal()
1014 pred_insn_ready_o = Signal()
1015
1016 # predicate fetch FSM delivers the masks
1017 pred_mask_valid_o = Signal()
1018 pred_mask_ready_i = Signal()
1019
1020 # issue FSM delivers the instruction to be executed
1021 exec_insn_valid_i = Signal()
1022 exec_insn_ready_o = Signal()
1023
1024 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1025 exec_pc_valid_o = Signal()
1026 exec_pc_ready_i = Signal()
1027
1028 # the FSMs here are perhaps unusual in that they detect conditions
1029 # then "hold" information, combinatorially, for the core
1030 # (as opposed to using sync - which would be on a clock's delay)
1031 # this includes the actual opcode, valid flags and so on.
1032
1033 # Fetch, then predicate fetch, then Issue, then Execute.
1034 # Issue is where the VL for-loop lives. the ready/valid
1035 # signalling is used to communicate between the four.
1036
1037 self.fetch_fsm(m, core, pc, svstate, nia, is_svp64_mode,
1038 fetch_pc_ready_o, fetch_pc_valid_i,
1039 fetch_insn_valid_o, fetch_insn_ready_i)
1040
1041 self.issue_fsm(m, core, pc_changed, sv_changed, nia,
1042 dbg, core_rst, is_svp64_mode,
1043 fetch_pc_ready_o, fetch_pc_valid_i,
1044 fetch_insn_valid_o, fetch_insn_ready_i,
1045 pred_insn_valid_i, pred_insn_ready_o,
1046 pred_mask_valid_o, pred_mask_ready_i,
1047 exec_insn_valid_i, exec_insn_ready_o,
1048 exec_pc_valid_o, exec_pc_ready_i)
1049
1050 if self.svp64_en:
1051 self.fetch_predicate_fsm(m,
1052 pred_insn_valid_i, pred_insn_ready_o,
1053 pred_mask_valid_o, pred_mask_ready_i)
1054
1055 self.execute_fsm(m, core, pc_changed, sv_changed,
1056 exec_insn_valid_i, exec_insn_ready_o,
1057 exec_pc_valid_o, exec_pc_ready_i)
1058
1059 # whatever was done above, over-ride it if core reset is held
1060 with m.If(core_rst):
1061 sync += nia.eq(0)
1062
1063 # this bit doesn't have to be in the FSM: connect up to read
1064 # regfiles on demand from DMI
1065 self.do_dmi(m, dbg)
1066
1067 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
1068 # (which uses that in PowerDecoder2 to raise 0x900 exception)
1069 self.tb_dec_fsm(m, cur_state.dec)
1070
1071 return m
1072
1073 def do_dmi(self, m, dbg):
1074 """deals with DMI debug requests
1075
1076 currently only provides read requests for the INT regfile, CR and XER
1077 it will later also deal with *writing* to these regfiles.
1078 """
1079 comb = m.d.comb
1080 sync = m.d.sync
1081 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
1082 intrf = self.core.regs.rf['int']
1083
1084 with m.If(d_reg.req): # request for regfile access being made
1085 # TODO: error-check this
1086 # XXX should this be combinatorial? sync better?
1087 if intrf.unary:
1088 comb += self.int_r.ren.eq(1<<d_reg.addr)
1089 else:
1090 comb += self.int_r.addr.eq(d_reg.addr)
1091 comb += self.int_r.ren.eq(1)
1092 d_reg_delay = Signal()
1093 sync += d_reg_delay.eq(d_reg.req)
1094 with m.If(d_reg_delay):
1095 # data arrives one clock later
1096 comb += d_reg.data.eq(self.int_r.data_o)
1097 comb += d_reg.ack.eq(1)
1098
1099 # sigh same thing for CR debug
1100 with m.If(d_cr.req): # request for regfile access being made
1101 comb += self.cr_r.ren.eq(0b11111111) # enable all
1102 d_cr_delay = Signal()
1103 sync += d_cr_delay.eq(d_cr.req)
1104 with m.If(d_cr_delay):
1105 # data arrives one clock later
1106 comb += d_cr.data.eq(self.cr_r.data_o)
1107 comb += d_cr.ack.eq(1)
1108
1109 # aaand XER...
1110 with m.If(d_xer.req): # request for regfile access being made
1111 comb += self.xer_r.ren.eq(0b111111) # enable all
1112 d_xer_delay = Signal()
1113 sync += d_xer_delay.eq(d_xer.req)
1114 with m.If(d_xer_delay):
1115 # data arrives one clock later
1116 comb += d_xer.data.eq(self.xer_r.data_o)
1117 comb += d_xer.ack.eq(1)
1118
1119 def tb_dec_fsm(self, m, spr_dec):
1120 """tb_dec_fsm
1121
1122 this is an FSM for updating either DEC or TB. it runs alternately
1123 DEC, TB, DEC, TB. note that the SPR pipeline could have written a new
1124 value to DEC, however the regfile has "passthrough" on it so this
1125 *should* be ok.
1126
1127 see v3.0B p1097-1099 for Timer Resource and p1065 and p1076
1128 """
1129
1130 comb, sync = m.d.comb, m.d.sync
1131 fast_rf = self.core.regs.rf['fast']
1132 fast_r_dectb = fast_rf.r_ports['issue'] # DEC/TB
1133 fast_w_dectb = fast_rf.w_ports['issue'] # DEC/TB
1134
1135 with m.FSM() as fsm:
1136
1137 # initiates read of current DEC
1138 with m.State("DEC_READ"):
1139 comb += fast_r_dectb.addr.eq(FastRegs.DEC)
1140 comb += fast_r_dectb.ren.eq(1)
1141 m.next = "DEC_WRITE"
1142
1143 # waits for DEC read to arrive (1 cycle), updates with new value
1144 with m.State("DEC_WRITE"):
1145 new_dec = Signal(64)
1146 # TODO: MSR.LPCR 32-bit decrement mode
1147 comb += new_dec.eq(fast_r_dectb.data_o - 1)
1148 comb += fast_w_dectb.addr.eq(FastRegs.DEC)
1149 comb += fast_w_dectb.wen.eq(1)
1150 comb += fast_w_dectb.data_i.eq(new_dec)
1151 sync += spr_dec.eq(new_dec) # copy into cur_state for decoder
1152 m.next = "TB_READ"
1153
1154 # initiates read of current TB
1155 with m.State("TB_READ"):
1156 comb += fast_r_dectb.addr.eq(FastRegs.TB)
1157 comb += fast_r_dectb.ren.eq(1)
1158 m.next = "TB_WRITE"
1159
1160 # waits for read TB to arrive, initiates write of current TB
1161 with m.State("TB_WRITE"):
1162 new_tb = Signal(64)
1163 comb += new_tb.eq(fast_r_dectb.data_o + 1)
1164 comb += fast_w_dectb.addr.eq(FastRegs.TB)
1165 comb += fast_w_dectb.wen.eq(1)
1166 comb += fast_w_dectb.data_i.eq(new_tb)
1167 m.next = "DEC_READ"
1168
1169 return m
1170
1171 def __iter__(self):
1172 yield from self.pc_i.ports()
1173 yield self.pc_o
1174 yield self.memerr_o
1175 yield from self.core.ports()
1176 yield from self.imem.ports()
1177 yield self.core_bigendian_i
1178 yield self.busy_o
1179
1180 def ports(self):
1181 return list(self)
1182
1183 def external_ports(self):
1184 ports = self.pc_i.ports()
1185 ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
1186 ]
1187
1188 if self.jtag_en:
1189 ports += list(self.jtag.external_ports())
1190 else:
1191 # JTAG brings out its own DMI, so only expose DMI directly when JTAG is disabled
1192 ports += list(self.dbg.dmi.ports())
1193
1194 ports += list(self.imem.ibus.fields.values())
1195 ports += list(self.core.l0.cmpi.wb_bus().fields.values())
1196
1197 if self.sram4x4k:
1198 for sram in self.sram4k:
1199 ports += list(sram.bus.fields.values())
1200
1201 if self.xics:
1202 ports += list(self.xics_icp.bus.fields.values())
1203 ports += list(self.xics_ics.bus.fields.values())
1204 ports.append(self.int_level_i)
1205
1206 if self.gpio:
1207 ports += list(self.simple_gpio.bus.fields.values())
1208 ports.append(self.gpio_o)
1209
1210 return ports
1211
1214
1215
1216 class TestIssuer(Elaboratable):
1217 def __init__(self, pspec):
1218 self.ti = TestIssuerInternal(pspec)
1219
1220 self.pll = DummyPLL()
1221
1222 # PLL direct clock or not
1223 self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
1224 if self.pll_en:
1225 self.pll_18_o = Signal(reset_less=True)
1226 self.clk_sel_i = Signal(reset_less=True)
1227
1228 def elaborate(self, platform):
1229 m = Module()
1230 comb = m.d.comb
1231
1232 # TestIssuer runs at direct clock
1233 m.submodules.ti = ti = self.ti
1234 cd_int = ClockDomain("coresync")
1235
1236 if self.pll_en:
1237 # ClockSelect runs at PLL output internal clock rate
1238 m.submodules.pll = pll = self.pll
1239
1240 # add clock domains from PLL
1241 cd_pll = ClockDomain("pllclk")
1242 m.domains += cd_pll
1243
1244 # PLL clock established (see the "pllclk" ClockDomain above): anything
1245 # placed in that domain runs at the PLL's output speed
1246 pllclk = ClockSignal("pllclk")
1247 comb += pllclk.eq(pll.clk_pll_o)
1248
1249 # wire up external 24mhz to PLL
1250 comb += pll.clk_24_i.eq(ClockSignal())
1251
1252 # output 18 mhz PLL test signal
1253 comb += self.pll_18_o.eq(pll.pll_18_o)
1254
1255 # input to pll clock selection
1256 comb += Cat(pll.sel_a0_i, pll.sel_a1_i).eq(self.clk_sel_i)
1257
1258 # now wire up ResetSignals. don't mind them being in this domain
1259 pll_rst = ResetSignal("pllclk")
1260 comb += pll_rst.eq(ResetSignal())
1261
1262 # internal clock is set to the selected clock. has the side-effect of
1263 # running the core at this speed (see DomainRenamer("coresync") above)
1264 intclk = ClockSignal("coresync")
1265 if self.pll_en:
1266 comb += intclk.eq(pll.clk_pll_o)
1267 else:
1268 comb += intclk.eq(ClockSignal())
1269
1270 return m
1271
1272 def ports(self):
1273 return list(self.ti.ports()) + list(self.pll.ports()) + \
1274 [ClockSignal(), ResetSignal()]
1275
1276 def external_ports(self):
1277 ports = self.ti.external_ports()
1278 ports.append(ClockSignal())
1279 ports.append(ResetSignal())
1280 if self.pll_en:
1281 ports.append(self.clk_sel_i)
1282 ports.append(self.pll_18_o)
1283 ports.append(self.pll.pll_ana_o)
1284 return ports
1285
1286
1287 if __name__ == '__main__':
1288 units = {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1289 'spr': 1,
1290 'div': 1,
1291 'mul': 1,
1292 'shiftrot': 1
1293 }
1294 pspec = TestMemPspec(ldst_ifacetype='bare_wb',
1295 imem_ifacetype='bare_wb',
1296 addr_wid=48,
1297 mask_wid=8,
1298 reg_wid=64,
1299 units=units)
1300 dut = TestIssuer(pspec)
1301 vl = main(dut, ports=dut.ports(), name="test_issuer")
1302
1303 if len(sys.argv) == 1:
1304 vl = rtlil.convert(dut, ports=dut.external_ports(), name="test_issuer")
1305 with open("test_issuer.il", "w") as f:
1306 f.write(vl)