/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2019-2020  whitequark <whitequark@whitequark.org>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile time specialized arbitrary width arithmetic, and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.
#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>
// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must be always inlined for best performance.
#ifndef __has_attribute
#	define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

namespace cxxrtl {
// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and platform register size, because when arithmetic on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;
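
// For example, under these definitions a value<48> is stored as two 32-bit chunks: chunk 0 holds bits 31..0,
// and chunk 1 holds bits 47..32 in its low half, with the 16 unused high bits kept cleared. (An illustrative
// sketch of the layout, not part of the library itself:)
//
//    value<48> v;   // v.chunks == 2
//    v.data[1];     // always masked by v.msb_mask == 0xffff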
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;
template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;
	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}
	// Conversion operations.
	//
	// These functions ensure that a conversion is never out of range, and should always be used, if at all
	// possible, instead of direct manipulation of the `data` member. For very large types, .slice() and
	// .concat() can be used to split them into more manageable parts.
	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "get<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "get<T>() requires T to be at least as wide as the value is");
		IntegerT result = 0;
		for (size_t n = 0; n < chunks; n++)
			result |= IntegerT(data[n]) << (n * chunk::bits);
		return result;
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "set<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "set<T>() requires the value to be at least as wide as T is");
		for (size_t n = 0; n < chunks; n++)
			data[n] = (other >> (n * chunk::bits)) & chunk::mask;
	}
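
	// For example (an illustrative sketch, not part of the library itself):
	//
	//    value<16> v;
	//    v.set<uint16_t>(0xabcd);          // fills v.data from the integer
	//    uint16_t x = v.get<uint16_t>();   // x == 0xabcd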
	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}
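
	// For example, for a 4-bit value with the sign bit set (an illustrative sketch):
	//
	//    value<4> v { 0xau };   // 0b1010
	//    v.zext<8>();           // == value<8> { 0x0au }
	//    v.sext<8>();           // == value<8> { 0xfau }, since bit 3 is set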
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits   = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits   = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}
	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}
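
	// For example, blitting 0b01 into bits 2..1 of 0b1111 (an illustrative sketch):
	//
	//    value<4> v { 0xfu };
	//    v.blit<2, 1>(value<2> { 0x1u });   // == value<4> { 0xbu }, i.e. 0b1011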
	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}
	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
	}
	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(1 << offset_bits);
		data[offset_chunks] |= value ? 1 << offset_bits : 0;
	}
	explicit operator bool() const {
		return !is_zero();
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}
	bool is_neg() const {
		return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
	}
	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}
	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}
	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}
	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}
	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		// The topmost chunk may have been clobbered by the shift; restore the invariant.
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			// Fill the bits vacated by the shift with copies of the sign bit.
			size_t top_chunk_idx  = (Bits - shift_bits) / chunk::bits;
			size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
			for (size_t n = top_chunk_idx + 1; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_bits != 0)
				result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}
	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}
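
	// For example (an illustrative sketch):
	//
	//    value<8> v { 0x90u };
	//    v.shl(value<2> { 1u });    // == value<8> { 0x20u }
	//    v.shr(value<2> { 1u });    // == value<8> { 0x48u }
	//    v.sshr(value<2> { 1u });   // == value<8> { 0xc8u }, since bit 7 is set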
	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}
	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			// The most significant chunk may hold fewer than chunk::bits value bits.
			size_t chunk_bits = (n == 0 && Bits % chunk::bits != 0) ? Bits % chunk::bits : chunk::bits;
			if (x == 0) {
				count += chunk_bits;
			} else {
				// This loop implements the find first set idiom as recognized by LLVM.
				size_t first_set = 0;
				for (; x != 0; first_set++)
					x >>= 1;
				count += chunk_bits - first_set;
				break;
			}
		}
		return count;
	}
	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		// Handle full chunks first.
		for (size_t n = 0; n < result.chunks - 1; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		// Handle last chunk (mask before updating carry).
		constexpr size_t last = result.chunks - 1;
		result.data[last] = data[last] + (Invert ? ~other.data[last] : other.data[last]) + carry;
		result.data[last] &= result.msb_mask;
		carry = (result.data[last] < data[last]) ||
		        (result.data[last] == data[last] && carry);
		return {result, carry};
	}
	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}
	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}
	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};
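
// For example, the arithmetic helpers compose in the obvious way (an illustrative sketch):
//
//    value<8> a { 0x05u }, b { 0x03u };
//    a.add(b);       // == value<8> { 0x08u }
//    a.sub(b);       // == value<8> { 0x02u }, computed as a + ~b + 1 via alu<>
//    a.mul<16>(b);   // == value<16> { 0x0fu }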
// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};
// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};
// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets perform use-after-free:
//
//    const auto &a = val.slice<7,0>().slice<1>();
//    value<1> b = a;
//
//    auto &&c = val.slice<7,0>().slice<1>();
//    c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};
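
// For example, following the rules above (an illustrative sketch):
//
//    value<8> v { 0xabu };
//    value<4> hi = v.slice<7,4>().val();                        // == value<4> { 0xau }
//    v.slice<3,0>() = value<4> { 0xcu };                        // v == value<8> { 0xacu }
//    value<8> w = v.slice<3,0>().concat(v.slice<7,4>()).val();  // == value<8> { 0xcau }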
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill  = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}
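
// For example, a value is printed as its width, a tick, and its chunks in zero-padded hexadecimal
// (an illustrative sketch):
//
//    std::cout << value<8> { 0x2au };   // prints "8'2a"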
template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;
	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		return curr.template get<IntegerT>();
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		next.template set<IntegerT>(other);
	}

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};
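
// For example, a wire only changes its observable state at a commit (an illustrative sketch):
//
//    wire<8> w { 0x01u };
//    w.set<uint8_t>(0x02u);   // w.curr is still 0x01; w.next is 0x02
//    w.commit();              // returns true; w.curr is now 0x02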
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}
template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;
	// The only way to get the compiler to put the initializer in .rodata and not copy it on the stack is to stuff it
	// into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
	// construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
	// first copied on the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}
	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}
	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;
	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write &b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}
	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};
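
// For example, queued writes are applied at a commit, and on overlap the higher priority write wins
// because it is applied later (an illustrative sketch):
//
//    memory<8> mem(16);
//    mem.update(3, value<8> { 0xaau }, value<8> { 0xffu }, /*priority=*/0);
//    mem.update(3, value<8> { 0x55u }, value<8> { 0xffu }, /*priority=*/1);
//    mem.commit();   // mem[3] == value<8> { 0x55u }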
struct metadata {
	const enum {
		MISSING = 0,
		UINT    = 1,
		SINT    = 2,
		STRING  = 3,
		DOUBLE  = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned    uint_value   = 0;
	const signed      sint_value   = 0;
	const std::string string_value = "";
	const double      double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;
	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;
// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	enum : uint32_t {
		VALUE  = CXXRTL_VALUE,
		WIRE   = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS  = CXXRTL_ALIAS,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}
	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.data;
		next    = item.data;
	}
	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.data);
		next    = nullptr;
	}
	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = WIRE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.curr.data;
		next    = item.next.data;
	}
	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type    = MEMORY;
		width   = Width;
		lsb_at  = 0;
		depth   = item.data.size();
		zero_at = zero_offset;
		curr    = item.data.empty() ? nullptr : item.data[0].data;
		next    = nullptr;
	}
	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.data);
		next    = nullptr;
	}
	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t *>(item.curr.data);
		next    = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");
struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}
	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};
struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};
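
// For example, a generated design is typically advanced one clock cycle at a time by toggling its clock
// input and calling step(), which iterates eval() and commit() to a fixed point (an illustrative sketch;
// `p_top` and `p_clk` are hypothetical generated names):
//
//    cxxrtl_design::p_top top;
//    top.p_clk.set<bool>(false);
//    top.step();
//    top.p_clk.set<bool>(true);
//    top.step();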
} // namespace cxxrtl
// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;
// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
namespace cxxrtl_yosys {

using namespace cxxrtl;
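
// For example, a `$add` cell with both A_SIGNED and B_SIGNED set and a 9-bit output port would be
// translated to a call like the following (an illustrative sketch of generated code):
//
//    value<9> y = add_ss<9>(a, b);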
// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}
// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) && bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) || bool(b)) ? 1u : 0u };
}
// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}
// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}
1215 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1216 CXXRTL_ALWAYS_INLINE
1217 value
<BitsY
> eq_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1218 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1219 return value
<BitsY
>{ a
.template zext
<BitsExt
>() == b
.template zext
<BitsExt
>() ? 1u : 0u };
1222 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1223 CXXRTL_ALWAYS_INLINE
1224 value
<BitsY
> eq_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1225 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1226 return value
<BitsY
>{ a
.template sext
<BitsExt
>() == b
.template sext
<BitsExt
>() ? 1u : 0u };
1229 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1230 CXXRTL_ALWAYS_INLINE
1231 value
<BitsY
> ne_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1232 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1233 return value
<BitsY
>{ a
.template zext
<BitsExt
>() != b
.template zext
<BitsExt
>() ? 1u : 0u };
1236 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1237 CXXRTL_ALWAYS_INLINE
1238 value
<BitsY
> ne_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1239 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1240 return value
<BitsY
>{ a
.template sext
<BitsExt
>() != b
.template sext
<BitsExt
>() ? 1u : 0u };
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}
// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	// Align the most significant set bit of the divisor with that of the dividend. Past the early return
	// the dividend is not less than the divisor, so the divisor has at least as many leading zeros.
	uint32_t divisor_shift = divisor.ctlz() - dividend.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}
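
// For example, this restoring long division computes 7 divided by 2 as follows (an illustrative sketch):
//
//    divmod_uu<4>(value<4> { 7u }, value<4> { 2u });   // == { value<4> { 3u }, value<4> { 1u } }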
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}
// A wrapper that lets a memory index be checked for validity before the memory is accessed.
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};
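
// For example, a memory read port with a 4-bit address accessing a 10-entry memory could be guarded
// like the following in generated code (an illustrative sketch):
//
//    memory_index idx(addr, /*offset=*/0, /*depth=*/10);
//    if (idx.valid)
//        data = mem[idx.index];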
} // namespace cxxrtl_yosys