1 """ Pipeline and BufferedPipeline implementation, conforming to the same API.
6 a strategically very important function that is identical in function
7 to nmigen's Signal.eq function, except it may take objects, or a list
8 of objects, or a tuple of objects, and where objects may also be
14 stage requires compliance with a strict API that may be
15 implemented in several ways, including as a static class.
16 the methods of a stage instance must be as follows:
18 * ispec() - Input data format specification
19 returns an object or a list or tuple of objects, or
20 a Record, each object having an "eq" function which
21 takes responsibility for copying by assignment all
23 * ospec() - Output data format specification
24 requirements as for ispec
25 * process(m, i) - Processes an ispec-formatted object
26 returns a combinatorial block of a result that
27 may be assigned to the output, by way of the "eq"
29 * setup(m, i) - Optional function for setting up submodules
30 may be used for more complex stages, to link
31 the input (i) to submodules. must take responsibility
32 for adding those submodules to the module (m).
33 the submodules must be combinatorial blocks and
34 must have their inputs and output linked combinatorially.
39 A useful combinatorial wrapper around stages that chains them together
40 and then presents a Stage-API-conformant interface.
45 A simple stalling clock-synchronised pipeline that has no buffering
46 (unlike BufferedPipeline). A stall anywhere along the line will
47 result in a stall back-propagating down the entire chain.
49 The BufferedPipeline by contrast will buffer incoming data, allowing
50 previous stages one clock cycle's grace before also having to stall.
55 nmigen implementation of buffered pipeline stage, based on zipcpu:
56 https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
58 this module requires quite a bit of thought to understand how it works
59 (and why it is needed in the first place). reading the above is
60 *strongly* recommended.
62 unlike john dawson's IEEE754 FPU STB/ACK signalling, which requires
63 the STB / ACK signals to raise and lower (on separate clocks) before
64 data may proceed (thus only allowing one piece of data to proceed
65 on *ALTERNATE* cycles), the signalling here is a true pipeline
66 where data will flow on *every* clock when the conditions are right.
68 input acceptance conditions are when:
69 * incoming previous-stage strobe (p.i_valid) is HIGH
70 * outgoing previous-stage ready (p.o_ready) is LOW
72 output transmission conditions are when:
73 * outgoing next-stage strobe (n.o_valid) is HIGH
74 * outgoing next-stage ready (n.i_ready) is LOW
76 the tricky bit is when the input has valid data and the output is not
77 ready to accept it. if it wasn't for the clock synchronisation, it
78 would be possible to tell the input "hey don't send that data, we're
79 not ready". unfortunately, it's not possible to "change the past":
80 the previous stage *has no choice* but to pass on its data.
82 therefore, the incoming data *must* be accepted - and stored: that
83 is the responsibility / contract that this stage *must* accept.
84 on the same clock, it's possible to tell the input that it must
85 not send any more data. this is the "stall" condition.
87 we now effectively have *two* possible pieces of data to "choose" from:
88 the buffered data, and the incoming data. the decision as to which
89 to process and output is based on whether we are in "stall" or not.
90 i.e. when the next stage is no longer ready, the output comes from
91 the buffer if a stall had previously occurred, otherwise it comes
92 direct from processing the input.
94 this allows us to respect a synchronous "travelling STB" with what
95 dan calls a "buffered handshake".
97 it's quite a complex state machine!
from collections.abc import Sequence

from nmigen import Signal, Cat, Const, Mux, Module, Array
from nmigen.cli import verilog, rtlil
from nmigen.hdl.rec import Record, Layout
class PrevControl:
    """ contains signals that come *from* the previous stage (both in and out)

        * i_valid: previous stage indicating all incoming data is valid.
                   may be a multi-bit signal, where all bits are required
                   to be asserted to indicate "valid".
        * o_ready: output to next stage indicating readiness to accept data
        * i_data : an input - added by the user of this class
    """
    def __init__(self, i_width=1):
        self.i_valid = Signal(i_width, name="p_i_valid")  # prev   >>in  self
        self.o_ready = Signal(name="p_o_ready")           # prev   <<out self

    def connect_in(self, prev):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!

            returns a list of assignments suitable for m.d.comb +=
        """
        return [self.i_valid.eq(prev.i_valid),
                prev.o_ready.eq(self.o_ready),
                eq(self.i_data, prev.i_data),
               ]

    def i_valid_logic(self):
        """ returns an expression that is true when the incoming data is
            fully valid: in the multi-bit case, *all* bits of i_valid must
            be asserted; in the single-bit case, i_valid itself is returned.
        """
        vlen = len(self.i_valid)
        if vlen > 1:
            # multi-bit case: valid only when i_valid is all 1s
            all1s = Const(-1, (vlen, False))  # reuse vlen, avoid second len()
            return self.i_valid == all1s
        # single-bit i_valid case
        return self.i_valid
class NextControl:
    """ contains the signals that go *to* the next stage (both in and out)

        * o_valid: output indicating to next stage that data is valid
        * i_ready: input from next stage indicating that it can accept data
        * o_data : an output - added by the user of this class
    """
    def __init__(self):
        self.o_valid = Signal(name="n_o_valid")  # self out>>  next
        self.i_ready = Signal(name="n_i_ready")  # self <<in   next

    def connect_to_next(self, nxt):
        """ helper function to connect to the next stage data/valid/ready.
            data/valid is passed *TO* nxt, and ready comes *IN* from nxt.

            returns a list of assignments suitable for m.d.comb +=
        """
        return [nxt.i_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.o_ready),
                eq(nxt.i_data, self.o_data),
               ]

    def connect_out(self, nxt):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!

            returns a list of assignments suitable for m.d.comb +=
        """
        return [nxt.o_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.i_ready),
                eq(nxt.o_data, self.o_data),
               ]
def eq(o, i):
    """ makes signals equal: a helper routine which identifies if it is being
        passed a list (or tuple) of objects, or signals, or Records, and calls
        the objects' eq function.

        complex objects (classes) can be used: they must follow the
        convention of having an eq member function, which takes the
        responsibility of further calling eq and returning a list of
        assignments.

        Record is a special (unusual, recursive) case, where the input may be
        specified as a dictionary (which may contain further dictionaries,
        recursively), where the field names of the dictionary must match
        the Record's field spec.  Alternatively, an object with the same
        member names as the Record may be assigned: it does not have to
        *be* a Record.

        returns a flat list of assignments suitable for m.d.comb/sync +=
    """
    if not isinstance(o, Sequence):
        # normalise single objects to one-element lists so zip() works below
        o, i = [o], [i]
    res = []
    for (ao, ai) in zip(o, i):
        #print ("eq", ao, ai)
        if isinstance(ao, Record):
            for idx, (field_name, field_shape, _) in enumerate(ao.layout):
                if isinstance(field_shape, Layout):
                    # recursive case: descend into the sub-record's fields
                    val = ai.fields
                else:
                    val = ai
                if hasattr(val, field_name):  # check for attribute
                    val = getattr(val, field_name)
                else:
                    val = val[field_name]  # dictionary-style specification
                # recurse: eq already returns a flat list of assignments
                res += eq(ao.fields[field_name], val)
            continue
        rres = ao.eq(ai)
        if not isinstance(rres, Sequence):
            rres = [rres]
        res += rres
    return res
class StageChain:
    """ pass in a list of stages, and they will automatically be
        chained together via their input and output specs into a
        combinatorial chain.

        * input to this class will be the input of the first stage
        * output of first stage goes into input of second
        * output of second goes into input into third (etc. etc.)
        * the output of this class will be the output of the last stage

        conforms to the Stage API (ispec/ospec/setup/process), so a chain
        may itself be used anywhere a single stage is accepted.
    """
    def __init__(self, chain):
        self.chain = chain

    def ispec(self):
        """ input spec of the chain is the input spec of the first stage """
        return self.chain[0].ispec()

    def ospec(self):
        """ output spec of the chain is the output spec of the last stage """
        return self.chain[-1].ospec()

    def setup(self, m, i):
        """ links the stages combinatorially: each stage's processed output
            is assigned to the next stage's input.  the final output is
            stored in self.o for process() to return.
        """
        for (idx, c) in enumerate(self.chain):
            if hasattr(c, "setup"):
                c.setup(m, i)  # stage may have some module stuff
            o = self.chain[idx].ospec()  # only the last assignment survives
            m.d.comb += eq(o, c.process(i))  # process input into "o"
            if idx != len(self.chain) - 1:
                ni = self.chain[idx + 1].ispec()  # becomes new input...
                m.d.comb += eq(ni, o)  # assign output to next input
                i = ni  # ...on the next loop iteration
        self.o = o  # last loop is the output

    def process(self, i):
        # output is the last stage's output, already linked up in setup()
        return self.o
class PipelineBase:
    """ Common functions for Pipeline API
    """
    def __init__(self, stage, in_multi=None, p_len=1, n_len=1):
        """ pass in a "stage" which may be either a static class or a class
            instance, which has four functions (one optional):

            * ispec: returns input signals according to the input specification
            * ospec: returns output signals to the output specification
            * process: takes an input instance and returns processed data
            * setup: performs any module linkage if the stage uses one.

            User must also:

            * add i_data member to PrevControl and
            * add o_data member to NextControl
        """
        self.stage = stage

        # set up input and output IO ACK (prev/next ready/valid).
        # stored as plain lists; elements are accessed by index below.
        p = []
        n = []
        for i in range(p_len):
            p.append(PrevControl(in_multi))
        for i in range(n_len):
            n.append(NextControl())
        self.p = p
        self.n = n

    def connect_to_next(self, nxt, p_idx=0, n_idx=0):
        """ helper function to connect to the next stage data/valid/ready.
        """
        return self.n[n_idx].connect_to_next(nxt.p[p_idx])

    def connect_in(self, prev, idx=0, prev_idx=None):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!
        """
        if prev_idx is None:
            return self.p[idx].connect_in(prev.p)
        return self.p[idx].connect_in(prev.p[prev_idx])

    def connect_out(self, nxt, idx=0, nxt_idx=None):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!
        """
        if nxt_idx is None:
            return self.n[idx].connect_out(nxt.n)
        # bugfix: was nxt.n[nxt+idx] (adds the object to an int); the
        # nxt_idx parameter is clearly what was intended, mirroring
        # connect_in's use of prev_idx.
        return self.n[idx].connect_out(nxt.n[nxt_idx])

    def set_input(self, i, idx=0):
        """ helper function to set the input data
        """
        return eq(self.p[idx].i_data, i)

    def ports(self):
        """ returns a flat list of all prev/next control and data signals """
        res = []
        for i in range(len(self.p)):
            res += [self.p[i].i_valid, self.p[i].o_ready,
                    self.p[i].i_data]  # XXX need flattening!
        for i in range(len(self.n)):
            # bugfix: o_data was read from self.n (the list) instead of
            # self.n[i] (the NextControl element).
            res += [self.n[i].i_ready, self.n[i].o_valid,
                    self.n[i].o_data]  # XXX need flattening!
        return res
class BufferedPipeline(PipelineBase):
    """ buffered pipeline stage.  data and strobe signals travel in sync.
        if ever the input is ready and the output is not, processed data
        is stored in a temporary register.

        Argument: stage.  see Stage API above

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        input data p.i_data is read (only), is processed and goes into an
        intermediate result store [process()].  this is updated
        combinatorially.

        in a non-stall condition, the intermediate result will go into the
        output (update_output).  however if ever there is a stall, it goes
        into r_data instead [update_buffer()].

        when the non-stall condition is released, r_data is the first
        to be transferred to the output [flush_buffer()], and the stall
        condition cleared.

        on the next cycle (as long as stall is not raised again) the
        input may begin to be processed and transferred directly to output.
    """
    def __init__(self, stage, n_len=1, p_len=1, p_mux=None, n_mux=None):
        # bugfix: p_len/n_len were not forwarded to the base class, so
        # self.p/self.n only ever had one element while the loops below
        # (and elaborate) index up to p_len/n_len.
        PipelineBase.__init__(self, stage, p_len=p_len, n_len=n_len)
        self.p_mux = p_mux
        self.n_mux = n_mux

        # set up the input and output data
        for i in range(p_len):
            self.p[i].i_data = stage.ispec()  # input type
        for i in range(n_len):
            self.n[i].o_data = stage.ospec()  # output type

    def elaborate(self, platform):
        m = Module()

        result = self.stage.ospec()
        r_data = self.stage.ospec()
        if hasattr(self.stage, "setup"):
            # bugfix: was range(p_len) - an undefined name in this scope
            for i in range(len(self.p)):
                self.stage.setup(m, self.p[i].i_data)

        pi = 0  # TODO: use p_mux to decide which to select
        ni = 0  # TODO: use n_mux to decide which to select

        # establish some combinatorial temporaries
        o_n_validn = Signal(reset_less=True)
        i_p_valid_o_p_ready = Signal(reset_less=True)
        p_i_valid = Signal(reset_less=True)
        m.d.comb += [p_i_valid.eq(self.p[pi].i_valid_logic()),
                     o_n_validn.eq(~self.n[ni].o_valid),
                     i_p_valid_o_p_ready.eq(p_i_valid & self.p[pi].o_ready),
                    ]

        # store result of processing in combinatorial temporary
        m.d.comb += eq(result, self.stage.process(self.p[pi].i_data))

        # if not in stall condition, update the temporary register
        with m.If(self.p[pi].o_ready):  # not stalled
            m.d.sync += eq(r_data, result)  # update buffer

        with m.If(self.n[ni].i_ready):  # next stage is ready
            with m.If(self.p[pi].o_ready):  # not stalled
                # nothing in buffer: send (processed) input direct to output
                m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
                             eq(self.n[ni].o_data, result),  # update output
                            ]
            with m.Else():  # p.o_ready is false, and something is in buffer.
                # Flush the [already processed] buffer to the output port.
                m.d.sync += [self.n[ni].o_valid.eq(1),  # reg empty next cycle
                             eq(self.n[ni].o_data, r_data),  # flush buffer
                             self.p[pi].o_ready.eq(1),  # clear stall
                            ]
                # ignore input, since p.o_ready is also false.

        # (n.i_ready) is false here: next stage is NOT ready
        with m.Elif(o_n_validn):  # next stage being told "ready"
            m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
                         self.p[pi].o_ready.eq(1),  # Keep the buffer empty
                         eq(self.n[ni].o_data, result),  # set output data
                        ]

        # (n.i_ready) false and (n.o_valid) true:
        with m.Elif(i_p_valid_o_p_ready):
            # If next stage *is* ready, and not stalled yet, accept input
            m.d.sync += self.p[pi].o_ready.eq(
                            ~(p_i_valid & self.n[ni].o_valid))

        return m
class ExampleAddStage:
    """ an example of how to use the buffered pipeline, as a class instance
    """
    def ispec(self):
        """ returns a tuple of input signals which will be the incoming data
        """
        return (Signal(16), Signal(16))

    def ospec(self):
        """ returns an output signal which will happen to contain the sum
            of the two inputs
        """
        return Signal(16)

    def process(self, i):
        """ process the input data (sums the values in the tuple) and
            returns it
        """
        return i[0] + i[1]
class ExampleBufPipeAdd(BufferedPipeline):
    """ an example of how to use the buffered pipeline, using a class
        instance (ExampleAddStage) as the stage
    """
    def __init__(self):
        addstage = ExampleAddStage()
        BufferedPipeline.__init__(self, addstage)
class ExampleStage:
    """ an example of how to use the buffered pipeline, in a static class
        fashion (no instance is created: functions take no self)
    """
    def ispec():
        return Signal(16, name="example_input_signal")

    def ospec():
        return Signal(16, name="example_output_signal")

    def process(i):
        """ process the input data and returns it (adds 1)
        """
        return i + 1
class ExampleStageCls:
    """ an example of how to use the buffered pipeline, as a class instance
    """
    def ispec(self):
        return Signal(16, name="example_input_signal")

    def ospec(self):
        return Signal(16, name="example_output_signal")

    def process(self, i):
        """ process the input data and returns it (adds 1)
        """
        return i + 1
class ExampleBufPipe(BufferedPipeline):
    """ an example of how to use the buffered pipeline, with the
        static-class ExampleStage as the stage
    """
    def __init__(self):
        BufferedPipeline.__init__(self, ExampleStage)
class UnbufferedPipeline(PipelineBase):
    """ A simple pipeline stage with single-clock synchronisation
        and two-way valid/ready synchronised signalling.

        Note that a stall in one stage will result in the entire pipeline
        chain stalling.

        Also that unlike BufferedPipeline, the valid/ready signalling does NOT
        travel synchronously with the data: the valid/ready signalling
        combines in a *combinatorial* fashion.  Therefore, a long pipeline
        chain will lengthen propagation delays.

        Argument: stage.  see Stage API, above

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        Attributes:
        -----------
        p.i_data : StageInput, shaped according to ispec
            the pipeline input
        n.o_data : StageOutput, shaped according to ospec
            the pipeline output
        r_data : input_shape according to ispec
            A temporary (buffered) copy of a prior (valid) input.
            This is HELD if the output is not ready.  It is updated
            when the input is valid and this stage is not stalled.
        result: output_shape according to ospec
            The output of the combinatorial logic.  it is updated
            COMBINATORIALLY (no clock dependence).
    """

    def __init__(self, stage):
        PipelineBase.__init__(self, stage)
        self._data_valid = Signal()

        # set up the input and output data.
        # bugfix: PipelineBase stores self.p/self.n as *lists* of
        # PrevControl/NextControl; index element 0 rather than setting
        # attributes on the list objects themselves.
        self.p[0].i_data = stage.ispec()  # input type
        self.n[0].o_data = stage.ospec()  # output type

    def elaborate(self, platform):
        m = Module()

        p = self.p[0]  # bugfix: single-stage variant uses element 0
        n = self.n[0]

        r_data = self.stage.ispec()  # input type
        result = self.stage.ospec()  # output data
        if hasattr(self.stage, "setup"):
            self.stage.setup(m, r_data)

        p_i_valid = Signal(reset_less=True)
        m.d.comb += p_i_valid.eq(p.i_valid_logic())
        m.d.comb += eq(result, self.stage.process(r_data))
        m.d.comb += n.o_valid.eq(self._data_valid)
        m.d.comb += p.o_ready.eq(~self._data_valid | n.i_ready)
        # held data remains "valid" until the next stage accepts it
        m.d.sync += self._data_valid.eq(p_i_valid |
                                        (~n.i_ready & self._data_valid))
        with m.If(p.i_valid & p.o_ready):
            # capture the input only on a successful handshake
            m.d.sync += eq(r_data, p.i_data)
        m.d.comb += eq(n.o_data, result)

        return m
class ExamplePipeline(UnbufferedPipeline):
    """ an example of how to use the combinatorial pipeline, with the
        static-class ExampleStage as the stage
    """
    def __init__(self):
        UnbufferedPipeline.__init__(self, ExampleStage)
if __name__ == '__main__':
    # convert each example pipeline to RTLIL and write it out, so it can
    # be inspected / simulated with yosys
    dut = ExampleBufPipe()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_bufpipe.il", "w") as f:
        f.write(vl)

    dut = ExamplePipeline()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_combpipe.il", "w") as f:
        f.write(vl)