/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic, and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <iostream>
#include <algorithm>
#include <memory>
#include <map>
#include <vector>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must always be inlined for best performance.
#ifndef __has_attribute
#	define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

namespace cxxrtl {

// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and the platform register size, because when arithmetic on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;

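// For illustration only (not part of the original header): a value<40> (defined
// below) is stored as two chunks, least significant chunk first, and the 24 unused
// high bits of the topmost chunk are kept at zero as an invariant.
//
//   value<40> v;
//   static_assert(value<40>::chunks == 2, "40 bits occupy two 32-bit chunks");
//   // v.data[0] holds bits [31:0]; v.data[1] holds bits [39:32] in its low bits.
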
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename ...Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits   = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits   = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}

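	// For illustration only (not part of the original header): extracting bits [11:4]
	// of a 16-bit value composes rtrunc (drop the 4 least significant bits) with
	// trunc (keep 8 of the remaining bits), which is exactly how slice_expr below
	// lowers a slice.
	//
	//   value<16> v { 0xabcdu };
	//   value<8> field = v.rtrunc<12>()  // 0xabc: bits [15:4]
	//                     .trunc<8>();   // 0xbc:  bits [11:4]
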
	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}

	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}

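	// For illustration only (not part of the original header): zcast()/scast() pick
	// the right widening or narrowing operation at compile time, so generated code
	// can resize a value without caring which direction the resize goes.
	//
	//   value<4> small { 0x9u };        // 0b1001, negative if interpreted as signed
	//   value<8> z = small.zcast<8>();  // 0x09: zero-extended
	//   value<8> s = small.scast<8>();  // 0xf9: sign-extended
	//   value<2> t = small.zcast<2>();  // 0b01: truncated
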
	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(1 << offset_bits);
		data[offset_chunks] |= value ? 1 << offset_bits : 0;
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_neg() const {
		return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

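	// For illustration only (not part of the original header): update() implements a
	// masked write, which is how memory write ports with byte enables are expressed.
	//
	//   value<8> old { 0xabu };
	//   value<8> wr  { 0x12u };
	//   value<8> en  { 0x0fu };            // only the low nibble is enabled
	//   value<8> res = old.update(wr, en); // res is 0xa2
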
	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			for (size_t n = chunks - shift_chunks; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_bits != 0)
				result.data[chunks - shift_chunks - 1] |= chunk::mask << (chunk::bits - shift_bits);
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}

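	// For illustration only (not part of the original header): shr() is a logical
	// shift; sshr() replicates the sign bit into the vacated positions.
	//
	//   value<8> v { 0x90u };                 // 0b1001'0000, negative as signed
	//   value<8> l = v.shr(value<3> { 2u });  // 0x24: zero fill
	//   value<8> a = v.sshr(value<3> { 2u }); // 0xe4: sign fill
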
	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			if (x == 0) {
				count += (n == 0 ? Bits % chunk::bits : chunk::bits);
			} else {
				// This loop implements the find first set idiom as recognized by LLVM.
				for (; x != 0; count++)
					x >>= 1;
				break;
			}
		}
		return count;
	}

	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return {result, carry};
	}

	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}

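	// For illustration only (not part of the original header): the same alu()
	// primitive yields addition, subtraction (as ~b plus carry-in), and both
	// comparison flavors.
	//
	//   value<8> a { 0x05u }, b { 0x07u };
	//   value<8> s = a.sub(b); // 0xfe, i.e. -2 as a signed 8-bit value
	//   bool ult = a.ucmp(b);  // true: 5 u< 7
	//   bool slt = s.scmp(a);  // true: -2 s< 5
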
	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};

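// For illustration only (not part of the original header): mul() is a schoolbook
// chunk-by-chunk multiply that accumulates partial products in 64-bit wide_chunk_t,
// so a chunk-by-chunk product never overflows the accumulator.
//
//   value<16> a { 0x1234u };
//   value<16> b { 0x0100u };
//   value<32> p = a.mul<32>(b); // 0x00123400: the full 32-bit product
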
// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets perform use-after-free:
//
// const auto &a = val.slice<7,0>().slice<1>();
// value<1> b = a;
//
// auto &&c = val.slice<7,0>().slice<1>();
// c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
// * Never explicitly name any type except `value<W>` or `const value<W> &`.
// * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};

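// For illustration only (not part of the original header): following the two rules
// above, slices and concatenations are consumed immediately, never captured.
//
//   value<8> v { 0xa5u };
//   value<4> hi = v.slice<7, 4>().val();                              // 0xa
//   v.slice<3, 0>() = value<4> { 0x6u };                              // v becomes 0xa6
//   value<8> swapped = v.slice<3, 0>().concat(v.slice<7, 4>()).val(); // 0x6a
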
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill  = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}

template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename ...Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};

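// For illustration only (not part of the original header): a wire is double-buffered
// so that all storage appears to update simultaneously. Generated eval() code writes
// `next`; commit() makes it visible in `curr`.
//
//   wire<8> w { 0x00u };
//   w.next = w.curr.add(value<8> { 1u }); // computed during eval()
//   bool changed = w.commit();            // curr becomes 0x01, returns true
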
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	// The only way to get the compiler to put the initializer in .rodata and not copy it on the stack is to stuff it
	// into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
	// construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
	// first copied on the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t ...InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write &b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};

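// For illustration only (not part of the original header): two write ports hitting
// the same address resolve by priority at commit time, not by update() call order.
//
//   memory<8> mem(16);
//   value<8> ones { 0xffu };
//   mem.update(3, value<8> { 0x11u }, ones, /*priority=*/1);
//   mem.update(3, value<8> { 0x22u }, ones, /*priority=*/0);
//   mem.commit(); // mem[3] is 0x11: the priority-1 write is applied last
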
struct metadata {
	const enum {
		MISSING = 0,
		UINT    = 1,
		SINT    = 2,
		STRING  = 3,
		DOUBLE  = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned uint_value = 0;
	const signed sint_value = 0;
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	enum : uint32_t {
		VALUE  = CXXRTL_VALUE,
		WIRE   = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS  = CXXRTL_ALIAS,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.data;
		next    = item.data;
	}

	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.data);
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = WIRE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.curr.data;
		next    = item.next.data;
	}

	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type    = MEMORY;
		width   = Width;
		lsb_at  = 0;
		depth   = item.data.size();
		zero_at = zero_offset;
		curr    = item.data.empty() ? nullptr : item.data[0].data;
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.data);
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.curr.data);
		next    = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}

	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};

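// For illustration only (not part of the original header): debug_items lets an
// external harness or FFI consumer locate design storage by name. The item name
// "counter" below is hypothetical.
//
//   debug_items items;
//   top.debug_info(items); // `top` is a design class generated by `write_cxxrtl`
//   if (items.count("counter") != 0) {
//     const debug_item &it = items.at("counter");
//     // it.curr points at the chunks holding the current value of \counter.
//   }
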
struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};

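// For illustration only (not part of the original header): a typical simulation
// loop built on step(), which re-evaluates until the design converges within a
// delta cycle. The design and port names below are hypothetical.
//
//   cxxrtl_design::p_top top; // a design compiled by `write_cxxrtl`
//   for (int cycle = 0; cycle < 1000; cycle++) {
//     top.p_clk.next = value<1> { 0u }; top.step();
//     top.p_clk.next = value<1> { 1u }; top.step();
//   }
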
} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
namespace cxxrtl_yosys {

using namespace cxxrtl;

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

, size_t BitsA
, size_t BitsB
>
1130 CXXRTL_ALWAYS_INLINE
1131 value
<BitsY
> shift_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1132 return b
.is_neg() ? shl_uu
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_uu
<BitsY
>(a
, b
);
1135 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1136 CXXRTL_ALWAYS_INLINE
1137 value
<BitsY
> shift_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1138 return b
.is_neg() ? shl_su
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_su
<BitsY
>(a
, b
);
1141 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1142 CXXRTL_ALWAYS_INLINE
1143 value
<BitsY
> shiftx_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1144 return shift_uu
<BitsY
>(a
, b
);
1147 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1148 CXXRTL_ALWAYS_INLINE
1149 value
<BitsY
> shiftx_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1150 return shift_su
<BitsY
>(a
, b
);
1153 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1154 CXXRTL_ALWAYS_INLINE
1155 value
<BitsY
> shiftx_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1156 return shift_us
<BitsY
>(a
, b
);
1159 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1160 CXXRTL_ALWAYS_INLINE
1161 value
<BitsY
> shiftx_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1162 return shift_ss
<BitsY
>(a
, b
);
// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}

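// For illustration only (not part of the original header): the shift-and-subtract
// loop above is plain binary long division.
//
//   auto qr = divmod_uu<8>(value<8> { 23u }, value<8> { 5u });
//   // qr.first  is value<8> { 4u } (quotient)
//   // qr.second is value<8> { 3u } (remainder)
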
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

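// For illustration only (not part of the original header): the $mod cell follows
// the sign of the dividend, matching Verilog semantics for %.
//
//   value<8> m = mod_ss<8>(value<8> { 0xf9u } /* -7 */, value<8> { 3u });
//   // m is 0xff, i.e. -1: (-7) % 3 == -1 in Verilog
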
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};

} // namespace cxxrtl_yosys