/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile time specialized arbitrary width arithmetics, as well as provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.
#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <string>
#include <sstream>
#include <iostream>
#include <map>
#include <vector>
#include <algorithm>
#include <memory>
#include <utility>

#include <backends/cxxrtl/cxxrtl_capi.h>
// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must be always inlined for best performance.
#ifndef __has_attribute
#	define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif
// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and platform register size because when arithmetics on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
typedef uint32_t chunk_t;

// Compile-time properties of a chunk type: its width in bits and its all-ones mask.
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};
83 struct value
: public expr_base
<value
<Bits
>> {
84 static constexpr size_t bits
= Bits
;
86 using chunk
= chunk_traits
<chunk_t
>;
87 static constexpr chunk::type msb_mask
= (Bits
% chunk::bits
== 0) ? chunk::mask
88 : chunk::mask
>> (chunk::bits
- (Bits
% chunk::bits
));
90 static constexpr size_t chunks
= (Bits
+ chunk::bits
- 1) / chunk::bits
;
91 chunk::type data
[chunks
] = {};
94 template<typename
... Init
>
95 explicit constexpr value(Init
...init
) : data
{init
...} {}
97 value(const value
<Bits
> &) = default;
98 value(value
<Bits
> &&) = default;
99 value
<Bits
> &operator=(const value
<Bits
> &) = default;
101 // A (no-op) helper that forces the cast to value<>.
103 const value
<Bits
> &val() const {
107 std::string
str() const {
108 std::stringstream ss
;
113 // Operations with compile-time parameters.
115 // These operations are used to implement slicing, concatenation, and blitting.
116 // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
117 // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
118 template<size_t NewBits
>
120 value
<NewBits
> trunc() const {
121 static_assert(NewBits
<= Bits
, "trunc() may not increase width");
122 value
<NewBits
> result
;
123 for (size_t n
= 0; n
< result
.chunks
; n
++)
124 result
.data
[n
] = data
[n
];
125 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
129 template<size_t NewBits
>
131 value
<NewBits
> zext() const {
132 static_assert(NewBits
>= Bits
, "zext() may not decrease width");
133 value
<NewBits
> result
;
134 for (size_t n
= 0; n
< chunks
; n
++)
135 result
.data
[n
] = data
[n
];
139 template<size_t NewBits
>
141 value
<NewBits
> sext() const {
142 static_assert(NewBits
>= Bits
, "sext() may not decrease width");
143 value
<NewBits
> result
;
144 for (size_t n
= 0; n
< chunks
; n
++)
145 result
.data
[n
] = data
[n
];
147 result
.data
[chunks
- 1] |= ~msb_mask
;
148 for (size_t n
= chunks
; n
< result
.chunks
; n
++)
149 result
.data
[n
] = chunk::mask
;
150 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
155 template<size_t NewBits
>
157 value
<NewBits
> rtrunc() const {
158 static_assert(NewBits
<= Bits
, "rtrunc() may not increase width");
159 value
<NewBits
> result
;
160 constexpr size_t shift_chunks
= (Bits
- NewBits
) / chunk::bits
;
161 constexpr size_t shift_bits
= (Bits
- NewBits
) % chunk::bits
;
162 chunk::type carry
= 0;
163 if (shift_chunks
+ result
.chunks
< chunks
) {
164 carry
= (shift_bits
== 0) ? 0
165 : data
[shift_chunks
+ result
.chunks
] << (chunk::bits
- shift_bits
);
167 for (size_t n
= result
.chunks
; n
> 0; n
--) {
168 result
.data
[n
- 1] = carry
| (data
[shift_chunks
+ n
- 1] >> shift_bits
);
169 carry
= (shift_bits
== 0) ? 0
170 : data
[shift_chunks
+ n
- 1] << (chunk::bits
- shift_bits
);
175 template<size_t NewBits
>
177 value
<NewBits
> rzext() const {
178 static_assert(NewBits
>= Bits
, "rzext() may not decrease width");
179 value
<NewBits
> result
;
180 constexpr size_t shift_chunks
= (NewBits
- Bits
) / chunk::bits
;
181 constexpr size_t shift_bits
= (NewBits
- Bits
) % chunk::bits
;
182 chunk::type carry
= 0;
183 for (size_t n
= 0; n
< chunks
; n
++) {
184 result
.data
[shift_chunks
+ n
] = (data
[n
] << shift_bits
) | carry
;
185 carry
= (shift_bits
== 0) ? 0
186 : data
[n
] >> (chunk::bits
- shift_bits
);
188 if (shift_chunks
+ chunks
< result
.chunks
)
189 result
.data
[shift_chunks
+ chunks
] = carry
;
193 // Bit blit operation, i.e. a partial read-modify-write.
194 template<size_t Stop
, size_t Start
>
196 value
<Bits
> blit(const value
<Stop
- Start
+ 1> &source
) const {
197 static_assert(Stop
>= Start
, "blit() may not reverse bit order");
198 constexpr chunk::type start_mask
= ~(chunk::mask
<< (Start
% chunk::bits
));
199 constexpr chunk::type stop_mask
= (Stop
% chunk::bits
+ 1 == chunk::bits
) ? 0
200 : (chunk::mask
<< (Stop
% chunk::bits
+ 1));
201 value
<Bits
> masked
= *this;
202 if (Start
/ chunk::bits
== Stop
/ chunk::bits
) {
203 masked
.data
[Start
/ chunk::bits
] &= stop_mask
| start_mask
;
205 masked
.data
[Start
/ chunk::bits
] &= start_mask
;
206 for (size_t n
= Start
/ chunk::bits
+ 1; n
< Stop
/ chunk::bits
; n
++)
208 masked
.data
[Stop
/ chunk::bits
] &= stop_mask
;
210 value
<Bits
> shifted
= source
211 .template rzext
<Stop
+ 1>()
212 .template zext
<Bits
>();
213 return masked
.bit_or(shifted
);
216 // Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
217 // than the operand. In C++17 these can be replaced with `if constexpr`.
218 template<size_t NewBits
, typename
= void>
221 value
<NewBits
> operator()(const value
<Bits
> &val
) {
222 return val
.template zext
<NewBits
>();
226 template<size_t NewBits
>
227 struct zext_cast
<NewBits
, typename
std::enable_if
<(NewBits
< Bits
)>::type
> {
229 value
<NewBits
> operator()(const value
<Bits
> &val
) {
230 return val
.template trunc
<NewBits
>();
234 template<size_t NewBits
, typename
= void>
237 value
<NewBits
> operator()(const value
<Bits
> &val
) {
238 return val
.template sext
<NewBits
>();
242 template<size_t NewBits
>
243 struct sext_cast
<NewBits
, typename
std::enable_if
<(NewBits
< Bits
)>::type
> {
245 value
<NewBits
> operator()(const value
<Bits
> &val
) {
246 return val
.template trunc
<NewBits
>();
250 template<size_t NewBits
>
252 value
<NewBits
> zcast() const {
253 return zext_cast
<NewBits
>()(*this);
256 template<size_t NewBits
>
258 value
<NewBits
> scast() const {
259 return sext_cast
<NewBits
>()(*this);
262 // Operations with run-time parameters (offsets, amounts, etc).
264 // These operations are used for computations.
265 bool bit(size_t offset
) const {
266 return data
[offset
/ chunk::bits
] & (1 << (offset
% chunk::bits
));
269 void set_bit(size_t offset
, bool value
= true) {
270 size_t offset_chunks
= offset
/ chunk::bits
;
271 size_t offset_bits
= offset
% chunk::bits
;
272 data
[offset_chunks
] &= ~(1 << offset_bits
);
273 data
[offset_chunks
] |= value
? 1 << offset_bits
: 0;
276 bool is_zero() const {
277 for (size_t n
= 0; n
< chunks
; n
++)
283 explicit operator bool() const {
287 bool is_neg() const {
288 return data
[chunks
- 1] & (1 << ((Bits
- 1) % chunk::bits
));
291 bool operator ==(const value
<Bits
> &other
) const {
292 for (size_t n
= 0; n
< chunks
; n
++)
293 if (data
[n
] != other
.data
[n
])
298 bool operator !=(const value
<Bits
> &other
) const {
299 return !(*this == other
);
302 value
<Bits
> bit_not() const {
304 for (size_t n
= 0; n
< chunks
; n
++)
305 result
.data
[n
] = ~data
[n
];
306 result
.data
[chunks
- 1] &= msb_mask
;
310 value
<Bits
> bit_and(const value
<Bits
> &other
) const {
312 for (size_t n
= 0; n
< chunks
; n
++)
313 result
.data
[n
] = data
[n
] & other
.data
[n
];
317 value
<Bits
> bit_or(const value
<Bits
> &other
) const {
319 for (size_t n
= 0; n
< chunks
; n
++)
320 result
.data
[n
] = data
[n
] | other
.data
[n
];
324 value
<Bits
> bit_xor(const value
<Bits
> &other
) const {
326 for (size_t n
= 0; n
< chunks
; n
++)
327 result
.data
[n
] = data
[n
] ^ other
.data
[n
];
331 value
<Bits
> update(const value
<Bits
> &val
, const value
<Bits
> &mask
) const {
332 return bit_and(mask
.bit_not()).bit_or(val
.bit_and(mask
));
335 template<size_t AmountBits
>
336 value
<Bits
> shl(const value
<AmountBits
> &amount
) const {
337 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
338 static_assert(Bits
<= chunk::mask
, "shl() of unreasonably large values is not supported");
339 // Detect shifts definitely large than Bits early.
340 for (size_t n
= 1; n
< amount
.chunks
; n
++)
341 if (amount
.data
[n
] != 0)
343 // Past this point we can use the least significant chunk as the shift size.
344 size_t shift_chunks
= amount
.data
[0] / chunk::bits
;
345 size_t shift_bits
= amount
.data
[0] % chunk::bits
;
346 if (shift_chunks
>= chunks
)
349 chunk::type carry
= 0;
350 for (size_t n
= 0; n
< chunks
- shift_chunks
; n
++) {
351 result
.data
[shift_chunks
+ n
] = (data
[n
] << shift_bits
) | carry
;
352 carry
= (shift_bits
== 0) ? 0
353 : data
[n
] >> (chunk::bits
- shift_bits
);
358 template<size_t AmountBits
, bool Signed
= false>
359 value
<Bits
> shr(const value
<AmountBits
> &amount
) const {
360 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
361 static_assert(Bits
<= chunk::mask
, "shr() of unreasonably large values is not supported");
362 // Detect shifts definitely large than Bits early.
363 for (size_t n
= 1; n
< amount
.chunks
; n
++)
364 if (amount
.data
[n
] != 0)
366 // Past this point we can use the least significant chunk as the shift size.
367 size_t shift_chunks
= amount
.data
[0] / chunk::bits
;
368 size_t shift_bits
= amount
.data
[0] % chunk::bits
;
369 if (shift_chunks
>= chunks
)
372 chunk::type carry
= 0;
373 for (size_t n
= 0; n
< chunks
- shift_chunks
; n
++) {
374 result
.data
[chunks
- shift_chunks
- 1 - n
] = carry
| (data
[chunks
- 1 - n
] >> shift_bits
);
375 carry
= (shift_bits
== 0) ? 0
376 : data
[chunks
- 1 - n
] << (chunk::bits
- shift_bits
);
378 if (Signed
&& is_neg()) {
379 for (size_t n
= chunks
- shift_chunks
; n
< chunks
; n
++)
380 result
.data
[n
] = chunk::mask
;
382 result
.data
[chunks
- shift_chunks
] |= chunk::mask
<< (chunk::bits
- shift_bits
);
387 template<size_t AmountBits
>
388 value
<Bits
> sshr(const value
<AmountBits
> &amount
) const {
389 return shr
<AmountBits
, /*Signed=*/true>(amount
);
392 size_t ctpop() const {
394 for (size_t n
= 0; n
< chunks
; n
++) {
395 // This loop implements the population count idiom as recognized by LLVM and GCC.
396 for (chunk::type x
= data
[n
]; x
!= 0; count
++)
402 size_t ctlz() const {
404 for (size_t n
= 0; n
< chunks
; n
++) {
405 chunk::type x
= data
[chunks
- 1 - n
];
407 count
+= (n
== 0 ? Bits
% chunk::bits
: chunk::bits
);
409 // This loop implements the find first set idiom as recognized by LLVM.
410 for (; x
!= 0; count
++)
417 template<bool Invert
, bool CarryIn
>
418 std::pair
<value
<Bits
>, bool /*CarryOut*/> alu(const value
<Bits
> &other
) const {
420 bool carry
= CarryIn
;
421 for (size_t n
= 0; n
< result
.chunks
; n
++) {
422 result
.data
[n
] = data
[n
] + (Invert
? ~other
.data
[n
] : other
.data
[n
]) + carry
;
423 carry
= (result
.data
[n
] < data
[n
]) ||
424 (result
.data
[n
] == data
[n
] && carry
);
426 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
427 return {result
, carry
};
430 value
<Bits
> add(const value
<Bits
> &other
) const {
431 return alu
</*Invert=*/false, /*CarryIn=*/false>(other
).first
;
434 value
<Bits
> sub(const value
<Bits
> &other
) const {
435 return alu
</*Invert=*/true, /*CarryIn=*/true>(other
).first
;
438 value
<Bits
> neg() const {
439 return value
<Bits
> { 0u }.sub(*this);
442 bool ucmp(const value
<Bits
> &other
) const {
444 std::tie(std::ignore
, carry
) = alu
</*Invert=*/true, /*CarryIn=*/true>(other
);
445 return !carry
; // a.ucmp(b) ≡ a u< b
448 bool scmp(const value
<Bits
> &other
) const {
451 std::tie(result
, carry
) = alu
</*Invert=*/true, /*CarryIn=*/true>(other
);
452 bool overflow
= (is_neg() == !other
.is_neg()) && (is_neg() != result
.is_neg());
453 return result
.is_neg() ^ overflow
; // a.scmp(b) ≡ a s< b
457 // Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
458 template<class T
, size_t Stop
, size_t Start
>
459 struct slice_expr
: public expr_base
<slice_expr
<T
, Stop
, Start
>> {
460 static_assert(Stop
>= Start
, "slice_expr() may not reverse bit order");
461 static_assert(Start
< T::bits
&& Stop
< T::bits
, "slice_expr() must be within bounds");
462 static constexpr size_t bits
= Stop
- Start
+ 1;
466 slice_expr(T
&expr
) : expr(expr
) {}
467 slice_expr(const slice_expr
<T
, Stop
, Start
> &) = delete;
470 operator value
<bits
>() const {
471 return static_cast<const value
<T::bits
> &>(expr
)
472 .template rtrunc
<T::bits
- Start
>()
473 .template trunc
<bits
>();
477 slice_expr
<T
, Stop
, Start
> &operator=(const value
<bits
> &rhs
) {
478 // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
479 expr
= static_cast<const value
<T::bits
> &>(expr
)
480 .template blit
<Stop
, Start
>(rhs
);
484 // A helper that forces the cast to value<>, which allows deduction to work.
486 value
<bits
> val() const {
487 return static_cast<const value
<bits
> &>(*this);
491 // Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
492 template<class T
, class U
>
493 struct concat_expr
: public expr_base
<concat_expr
<T
, U
>> {
494 static constexpr size_t bits
= T::bits
+ U::bits
;
499 concat_expr(T
&ms_expr
, U
&ls_expr
) : ms_expr(ms_expr
), ls_expr(ls_expr
) {}
500 concat_expr(const concat_expr
<T
, U
> &) = delete;
503 operator value
<bits
>() const {
504 value
<bits
> ms_shifted
= static_cast<const value
<T::bits
> &>(ms_expr
)
505 .template rzext
<bits
>();
506 value
<bits
> ls_extended
= static_cast<const value
<U::bits
> &>(ls_expr
)
507 .template zext
<bits
>();
508 return ms_shifted
.bit_or(ls_extended
);
512 concat_expr
<T
, U
> &operator=(const value
<bits
> &rhs
) {
513 ms_expr
= rhs
.template rtrunc
<T::bits
>();
514 ls_expr
= rhs
.template trunc
<U::bits
>();
518 // A helper that forces the cast to value<>, which allows deduction to work.
520 value
<bits
> val() const {
521 return static_cast<const value
<bits
> &>(*this);
525 // Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
527 // Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
528 // they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
529 // these snippets perform use-after-free:
531 // const auto &a = val.slice<7,0>().slice<1>();
534 // auto &&c = val.slice<7,0>().slice<1>();
537 // An easy way to write code using slices and concatenations safely is to follow two simple rules:
538 // * Never explicitly name any type except `value<W>` or `const value<W> &`.
539 // * Never use a `const auto &` or `auto &&` in any such expression.
540 // Then, any code that compiles will be well-defined.
543 template<size_t Stop
, size_t Start
= Stop
>
545 slice_expr
<const T
, Stop
, Start
> slice() const {
546 return {*static_cast<const T
*>(this)};
549 template<size_t Stop
, size_t Start
= Stop
>
551 slice_expr
<T
, Stop
, Start
> slice() {
552 return {*static_cast<T
*>(this)};
557 concat_expr
<const T
, typename
std::remove_reference
<const U
>::type
> concat(const U
&other
) const {
558 return {*static_cast<const T
*>(this), other
};
563 concat_expr
<T
, typename
std::remove_reference
<U
>::type
> concat(U
&&other
) {
564 return {*static_cast<T
*>(this), other
};
568 template<size_t Bits
>
569 std::ostream
&operator<<(std::ostream
&os
, const value
<Bits
> &val
) {
570 auto old_flags
= os
.flags(std::ios::right
);
571 auto old_width
= os
.width(0);
572 auto old_fill
= os
.fill('0');
573 os
<< val
.bits
<< '\'' << std::hex
;
574 for (size_t n
= val
.chunks
- 1; n
!= (size_t)-1; n
--) {
575 if (n
== val
.chunks
- 1 && Bits
% value
<Bits
>::chunk::bits
!= 0)
576 os
.width((Bits
% value
<Bits
>::chunk::bits
+ 3) / 4);
578 os
.width((value
<Bits
>::chunk::bits
+ 3) / 4);
587 template<size_t Bits
>
589 static constexpr size_t bits
= Bits
;
595 constexpr wire(const value
<Bits
> &init
) : curr(init
), next(init
) {}
596 template<typename
... Init
>
597 explicit constexpr wire(Init
...init
) : curr
{init
...}, next
{init
...} {}
599 wire(const wire
<Bits
> &) = delete;
600 wire(wire
<Bits
> &&) = default;
601 wire
<Bits
> &operator=(const wire
<Bits
> &) = delete;
612 template<size_t Bits
>
613 std::ostream
&operator<<(std::ostream
&os
, const wire
<Bits
> &val
) {
618 template<size_t Width
>
620 std::vector
<value
<Width
>> data
;
622 size_t depth() const {
627 explicit memory(size_t depth
) : data(depth
) {}
629 memory(const memory
<Width
> &) = delete;
630 memory
<Width
> &operator=(const memory
<Width
> &) = delete;
632 // The only way to get the compiler to put the initializer in .rodata and do not copy it on stack is to stuff it
633 // into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
634 // construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
635 // first copied on the stack (probably overflowing it) and then again into `data`.
636 template<size_t Size
>
639 value
<Width
> data
[Size
];
642 template<size_t... InitSize
>
643 explicit memory(size_t depth
, const init
<InitSize
> &...init
) : data(depth
) {
645 // This utterly reprehensible construct is the most reasonable way to apply a function to every element
646 // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
647 auto _
= {std::move(std::begin(init
.data
), std::end(init
.data
), data
.begin() + init
.offset
)...};
651 // An operator for direct memory reads. May be used at any time during the simulation.
652 const value
<Width
> &operator [](size_t index
) const {
653 assert(index
< data
.size());
657 // An operator for direct memory writes. May only be used before the simulation is started. If used
658 // after the simulation is started, the design may malfunction.
659 value
<Width
> &operator [](size_t index
) {
660 assert(index
< data
.size());
664 // A simple way to make a writable memory would be to use an array of wires instead of an array of values.
665 // However, there are two significant downsides to this approach: first, it has large overhead (2× space
666 // overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
667 // priorities. Although in principle write ports could be ordered or conditionally enabled in generated
668 // code based on their priorities and selected addresses, the feedback arc set problem is computationally
669 // expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
670 // a particular write port evaluation order.
672 // The approach used here instead is to queue writes into a buffer during the eval phase, then perform
673 // the writes during the commit phase in the priority order. This approach has low overhead, with both space
674 // and time proportional to the amount of write ports. Because virtually every memory in a practical design
675 // has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
682 std::vector
<write
> write_queue
;
684 void update(size_t index
, const value
<Width
> &val
, const value
<Width
> &mask
, int priority
= 0) {
685 assert(index
< data
.size());
686 // Queue up the write while keeping the queue sorted by priority.
688 std::upper_bound(write_queue
.begin(), write_queue
.end(), priority
,
689 [](const int a
, const write
& b
) { return a
< b
.priority
; }),
690 write
{ index
, val
, mask
, priority
});
694 bool changed
= false;
695 for (const write
&entry
: write_queue
) {
696 value
<Width
> elem
= data
[entry
.index
];
697 elem
= elem
.update(entry
.val
, entry
.mask
);
698 changed
|= (data
[entry
.index
] != elem
);
699 data
[entry
.index
] = elem
;
// A tagged union of the metadata kinds RTLIL attributes can carry.
struct metadata {
	const enum {
		MISSING = 0,
		UINT    = 1,
		SINT    = 2,
		STRING  = 3,
		DOUBLE  = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned    uint_value   = 0;
	const signed      sint_value   = 0;
	const std::string string_value = "";
	const double      double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;
// Tag type used to disambiguate the debug_item constructors that register an alias
// of a value or wire from the ones that register the value or wire itself.
struct debug_alias {};
758 // This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
759 // Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
761 // To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
762 // in the C API, or it would not be possible to cast between the pointers to these.
763 struct debug_item
: ::cxxrtl_object
{
765 VALUE
= CXXRTL_VALUE
,
767 MEMORY
= CXXRTL_MEMORY
,
768 ALIAS
= CXXRTL_ALIAS
,
771 debug_item(const ::cxxrtl_object
&object
) : cxxrtl_object(object
) {}
773 template<size_t Bits
>
774 debug_item(value
<Bits
> &item
, size_t lsb_offset
= 0) {
775 static_assert(sizeof(item
) == value
<Bits
>::chunks
* sizeof(chunk_t
),
776 "value<Bits> is not compatible with C layout");
786 template<size_t Bits
>
787 debug_item(const value
<Bits
> &item
, size_t lsb_offset
= 0) {
788 static_assert(sizeof(item
) == value
<Bits
>::chunks
* sizeof(chunk_t
),
789 "value<Bits> is not compatible with C layout");
795 curr
= const_cast<chunk_t
*>(item
.data
);
799 template<size_t Bits
>
800 debug_item(wire
<Bits
> &item
, size_t lsb_offset
= 0) {
801 static_assert(sizeof(item
.curr
) == value
<Bits
>::chunks
* sizeof(chunk_t
) &&
802 sizeof(item
.next
) == value
<Bits
>::chunks
* sizeof(chunk_t
),
803 "wire<Bits> is not compatible with C layout");
809 curr
= item
.curr
.data
;
810 next
= item
.next
.data
;
813 template<size_t Width
>
814 debug_item(memory
<Width
> &item
, size_t zero_offset
= 0) {
815 static_assert(sizeof(item
.data
[0]) == value
<Width
>::chunks
* sizeof(chunk_t
),
816 "memory<Width> is not compatible with C layout");
820 depth
= item
.data
.size();
821 zero_at
= zero_offset
;
822 curr
= item
.data
.empty() ? nullptr : item
.data
[0].data
;
826 template<size_t Bits
>
827 debug_item(debug_alias
, const value
<Bits
> &item
, size_t lsb_offset
= 0) {
828 static_assert(sizeof(item
) == value
<Bits
>::chunks
* sizeof(chunk_t
),
829 "value<Bits> is not compatible with C layout");
835 curr
= const_cast<chunk_t
*>(item
.data
);
839 template<size_t Bits
>
840 debug_item(debug_alias
, const wire
<Bits
> &item
, size_t lsb_offset
= 0) {
841 static_assert(sizeof(item
.curr
) == value
<Bits
>::chunks
* sizeof(chunk_t
) &&
842 sizeof(item
.next
) == value
<Bits
>::chunks
* sizeof(chunk_t
),
843 "wire<Bits> is not compatible with C layout");
849 curr
= const_cast<chunk_t
*>(item
.curr
.data
);
853 static_assert(std::is_standard_layout
<debug_item
>::value
, "debug_item is not compatible with C layout");
856 std::map
<std::string
, std::vector
<debug_item
>> table
;
858 void add(const std::string
&name
, debug_item
&&item
) {
859 std::vector
<debug_item
> &parts
= table
[name
];
860 parts
.emplace_back(item
);
861 std::sort(parts
.begin(), parts
.end(),
862 [](const debug_item
&a
, const debug_item
&b
) {
863 return a
.lsb_at
< b
.lsb_at
;
867 size_t count(const std::string
&name
) const {
868 if (table
.count(name
) == 0)
870 return table
.at(name
).size();
873 const std::vector
<debug_item
> &parts_at(const std::string
&name
) const {
874 return table
.at(name
);
877 const debug_item
&at(const std::string
&name
) const {
878 const std::vector
<debug_item
> &parts
= table
.at(name
);
879 assert(parts
.size() == 1);
883 const debug_item
&operator [](const std::string
&name
) const {
892 module(const module
&) = delete;
893 module
&operator=(const module
&) = delete;
895 virtual bool eval() = 0;
896 virtual bool commit() = 0;
900 bool converged
= false;
904 } while (commit() && !converged
);
908 virtual void debug_info(debug_items
&items
, std::string path
= "") {
909 (void)items
, (void)path
;
913 } // namespace cxxrtl
915 // Internal structure used to communicate with the implementation of the C interface.
916 typedef struct _cxxrtl_toplevel
{
917 std::unique_ptr
<cxxrtl::module
> module
;
920 // Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
921 // and indepenent of Yosys implementation details.
923 // The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
924 // functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
925 // whereas basic operations on arbitrary width values require operands to be of the same width. These functions
926 // bridge the gap by performing the necessary casts. They are named similar to `cell_A[B]`, where A and B are `u`
927 // if the corresponding operand is unsigned, and `s` if it is signed.
928 namespace cxxrtl_yosys
{
930 using namespace cxxrtl
;
932 // std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
935 constexpr T
max(const T
&a
, const T
&b
) {
936 return a
> b
? a
: b
;
940 template<size_t BitsY
, size_t BitsA
>
942 value
<BitsY
> logic_not(const value
<BitsA
> &a
) {
943 return value
<BitsY
> { a
? 0u : 1u };
946 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
948 value
<BitsY
> logic_and(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
949 return value
<BitsY
> { (bool(a
) & bool(b
)) ? 1u : 0u };
952 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
954 value
<BitsY
> logic_or(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
955 return value
<BitsY
> { (bool(a
) | bool(b
)) ? 1u : 0u };
958 // Reduction operations
959 template<size_t BitsY
, size_t BitsA
>
961 value
<BitsY
> reduce_and(const value
<BitsA
> &a
) {
962 return value
<BitsY
> { a
.bit_not().is_zero() ? 1u : 0u };
965 template<size_t BitsY
, size_t BitsA
>
967 value
<BitsY
> reduce_or(const value
<BitsA
> &a
) {
968 return value
<BitsY
> { a
? 1u : 0u };
971 template<size_t BitsY
, size_t BitsA
>
973 value
<BitsY
> reduce_xor(const value
<BitsA
> &a
) {
974 return value
<BitsY
> { (a
.ctpop() % 2) ? 1u : 0u };
977 template<size_t BitsY
, size_t BitsA
>
979 value
<BitsY
> reduce_xnor(const value
<BitsA
> &a
) {
980 return value
<BitsY
> { (a
.ctpop() % 2) ? 0u : 1u };
983 template<size_t BitsY
, size_t BitsA
>
985 value
<BitsY
> reduce_bool(const value
<BitsA
> &a
) {
986 return value
<BitsY
> { a
? 1u : 0u };
989 // Bitwise operations
990 template<size_t BitsY
, size_t BitsA
>
992 value
<BitsY
> not_u(const value
<BitsA
> &a
) {
993 return a
.template zcast
<BitsY
>().bit_not();
996 template<size_t BitsY
, size_t BitsA
>
998 value
<BitsY
> not_s(const value
<BitsA
> &a
) {
999 return a
.template scast
<BitsY
>().bit_not();
1002 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1003 CXXRTL_ALWAYS_INLINE
1004 value
<BitsY
> and_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1005 return a
.template zcast
<BitsY
>().bit_and(b
.template zcast
<BitsY
>());
1008 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1009 CXXRTL_ALWAYS_INLINE
1010 value
<BitsY
> and_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1011 return a
.template scast
<BitsY
>().bit_and(b
.template scast
<BitsY
>());
1014 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1015 CXXRTL_ALWAYS_INLINE
1016 value
<BitsY
> or_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1017 return a
.template zcast
<BitsY
>().bit_or(b
.template zcast
<BitsY
>());
1020 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1021 CXXRTL_ALWAYS_INLINE
1022 value
<BitsY
> or_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1023 return a
.template scast
<BitsY
>().bit_or(b
.template scast
<BitsY
>());
1026 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1027 CXXRTL_ALWAYS_INLINE
1028 value
<BitsY
> xor_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1029 return a
.template zcast
<BitsY
>().bit_xor(b
.template zcast
<BitsY
>());
1032 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1033 CXXRTL_ALWAYS_INLINE
1034 value
<BitsY
> xor_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1035 return a
.template scast
<BitsY
>().bit_xor(b
.template scast
<BitsY
>());
1038 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1039 CXXRTL_ALWAYS_INLINE
1040 value
<BitsY
> xnor_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1041 return a
.template zcast
<BitsY
>().bit_xor(b
.template zcast
<BitsY
>()).bit_not();
1044 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1045 CXXRTL_ALWAYS_INLINE
1046 value
<BitsY
> xnor_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1047 return a
.template scast
<BitsY
>().bit_xor(b
.template scast
<BitsY
>()).bit_not();
1050 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1051 CXXRTL_ALWAYS_INLINE
1052 value
<BitsY
> shl_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1053 return a
.template zcast
<BitsY
>().template shl(b
);
1056 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1057 CXXRTL_ALWAYS_INLINE
1058 value
<BitsY
> shl_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1059 return a
.template scast
<BitsY
>().template shl(b
);
1062 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1063 CXXRTL_ALWAYS_INLINE
1064 value
<BitsY
> sshl_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1065 return a
.template zcast
<BitsY
>().template shl(b
);
1068 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1069 CXXRTL_ALWAYS_INLINE
1070 value
<BitsY
> sshl_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1071 return a
.template scast
<BitsY
>().template shl(b
);
1074 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1075 CXXRTL_ALWAYS_INLINE
1076 value
<BitsY
> shr_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1077 return a
.template shr(b
).template zcast
<BitsY
>();
1080 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1081 CXXRTL_ALWAYS_INLINE
1082 value
<BitsY
> shr_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1083 return a
.template shr(b
).template scast
<BitsY
>();
1086 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1087 CXXRTL_ALWAYS_INLINE
1088 value
<BitsY
> sshr_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1089 return a
.template shr(b
).template zcast
<BitsY
>();
1092 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1093 CXXRTL_ALWAYS_INLINE
1094 value
<BitsY
> sshr_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1095 return a
.template sshr(b
).template scast
<BitsY
>();
1098 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1099 CXXRTL_ALWAYS_INLINE
1100 value
<BitsY
> shift_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1101 return shr_uu
<BitsY
>(a
, b
);
1104 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1105 CXXRTL_ALWAYS_INLINE
1106 value
<BitsY
> shift_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1107 return shr_su
<BitsY
>(a
, b
);
1110 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1111 CXXRTL_ALWAYS_INLINE
1112 value
<BitsY
> shift_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1113 return b
.is_neg() ? shl_uu
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_uu
<BitsY
>(a
, b
);
1116 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1117 CXXRTL_ALWAYS_INLINE
1118 value
<BitsY
> shift_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1119 return b
.is_neg() ? shl_su
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_su
<BitsY
>(a
, b
);
1122 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1123 CXXRTL_ALWAYS_INLINE
1124 value
<BitsY
> shiftx_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1125 return shift_uu
<BitsY
>(a
, b
);
1128 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1129 CXXRTL_ALWAYS_INLINE
1130 value
<BitsY
> shiftx_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1131 return shift_su
<BitsY
>(a
, b
);
1134 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1135 CXXRTL_ALWAYS_INLINE
1136 value
<BitsY
> shiftx_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1137 return shift_us
<BitsY
>(a
, b
);
1140 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1141 CXXRTL_ALWAYS_INLINE
1142 value
<BitsY
> shiftx_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1143 return shift_ss
<BitsY
>(a
, b
);
1146 // Comparison operations
1147 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1148 CXXRTL_ALWAYS_INLINE
1149 value
<BitsY
> eq_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1150 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1151 return value
<BitsY
>{ a
.template zext
<BitsExt
>() == b
.template zext
<BitsExt
>() ? 1u : 0u };
1154 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1155 CXXRTL_ALWAYS_INLINE
1156 value
<BitsY
> eq_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1157 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1158 return value
<BitsY
>{ a
.template sext
<BitsExt
>() == b
.template sext
<BitsExt
>() ? 1u : 0u };
1161 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1162 CXXRTL_ALWAYS_INLINE
1163 value
<BitsY
> ne_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1164 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1165 return value
<BitsY
>{ a
.template zext
<BitsExt
>() != b
.template zext
<BitsExt
>() ? 1u : 0u };
1168 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1169 CXXRTL_ALWAYS_INLINE
1170 value
<BitsY
> ne_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1171 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1172 return value
<BitsY
>{ a
.template sext
<BitsExt
>() != b
.template sext
<BitsExt
>() ? 1u : 0u };
1175 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1176 CXXRTL_ALWAYS_INLINE
1177 value
<BitsY
> eqx_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1178 return eq_uu
<BitsY
>(a
, b
);
1181 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1182 CXXRTL_ALWAYS_INLINE
1183 value
<BitsY
> eqx_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1184 return eq_ss
<BitsY
>(a
, b
);
1187 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1188 CXXRTL_ALWAYS_INLINE
1189 value
<BitsY
> nex_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1190 return ne_uu
<BitsY
>(a
, b
);
1193 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1194 CXXRTL_ALWAYS_INLINE
1195 value
<BitsY
> nex_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1196 return ne_ss
<BitsY
>(a
, b
);
1199 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1200 CXXRTL_ALWAYS_INLINE
1201 value
<BitsY
> gt_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1202 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1203 return value
<BitsY
> { b
.template zext
<BitsExt
>().ucmp(a
.template zext
<BitsExt
>()) ? 1u : 0u };
1206 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1207 CXXRTL_ALWAYS_INLINE
1208 value
<BitsY
> gt_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1209 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1210 return value
<BitsY
> { b
.template sext
<BitsExt
>().scmp(a
.template sext
<BitsExt
>()) ? 1u : 0u };
1213 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1214 CXXRTL_ALWAYS_INLINE
1215 value
<BitsY
> ge_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1216 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1217 return value
<BitsY
> { !a
.template zext
<BitsExt
>().ucmp(b
.template zext
<BitsExt
>()) ? 1u : 0u };
1220 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1221 CXXRTL_ALWAYS_INLINE
1222 value
<BitsY
> ge_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1223 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1224 return value
<BitsY
> { !a
.template sext
<BitsExt
>().scmp(b
.template sext
<BitsExt
>()) ? 1u : 0u };
1227 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1228 CXXRTL_ALWAYS_INLINE
1229 value
<BitsY
> lt_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1230 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1231 return value
<BitsY
> { a
.template zext
<BitsExt
>().ucmp(b
.template zext
<BitsExt
>()) ? 1u : 0u };
1234 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1235 CXXRTL_ALWAYS_INLINE
1236 value
<BitsY
> lt_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1237 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1238 return value
<BitsY
> { a
.template sext
<BitsExt
>().scmp(b
.template sext
<BitsExt
>()) ? 1u : 0u };
1241 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1242 CXXRTL_ALWAYS_INLINE
1243 value
<BitsY
> le_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1244 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1245 return value
<BitsY
> { !b
.template zext
<BitsExt
>().ucmp(a
.template zext
<BitsExt
>()) ? 1u : 0u };
1248 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1249 CXXRTL_ALWAYS_INLINE
1250 value
<BitsY
> le_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1251 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1252 return value
<BitsY
> { !b
.template sext
<BitsExt
>().scmp(a
.template sext
<BitsExt
>()) ? 1u : 0u };
1255 // Arithmetic operations
1256 template<size_t BitsY
, size_t BitsA
>
1257 CXXRTL_ALWAYS_INLINE
1258 value
<BitsY
> pos_u(const value
<BitsA
> &a
) {
1259 return a
.template zcast
<BitsY
>();
1262 template<size_t BitsY
, size_t BitsA
>
1263 CXXRTL_ALWAYS_INLINE
1264 value
<BitsY
> pos_s(const value
<BitsA
> &a
) {
1265 return a
.template scast
<BitsY
>();
1268 template<size_t BitsY
, size_t BitsA
>
1269 CXXRTL_ALWAYS_INLINE
1270 value
<BitsY
> neg_u(const value
<BitsA
> &a
) {
1271 return a
.template zcast
<BitsY
>().neg();
1274 template<size_t BitsY
, size_t BitsA
>
1275 CXXRTL_ALWAYS_INLINE
1276 value
<BitsY
> neg_s(const value
<BitsA
> &a
) {
1277 return a
.template scast
<BitsY
>().neg();
1280 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1281 CXXRTL_ALWAYS_INLINE
1282 value
<BitsY
> add_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1283 return a
.template zcast
<BitsY
>().add(b
.template zcast
<BitsY
>());
1286 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1287 CXXRTL_ALWAYS_INLINE
1288 value
<BitsY
> add_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1289 return a
.template scast
<BitsY
>().add(b
.template scast
<BitsY
>());
1292 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1293 CXXRTL_ALWAYS_INLINE
1294 value
<BitsY
> sub_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1295 return a
.template zcast
<BitsY
>().sub(b
.template zcast
<BitsY
>());
1298 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1299 CXXRTL_ALWAYS_INLINE
1300 value
<BitsY
> sub_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1301 return a
.template scast
<BitsY
>().sub(b
.template scast
<BitsY
>());
1304 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1305 CXXRTL_ALWAYS_INLINE
1306 value
<BitsY
> mul_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1307 value
<BitsY
> product
;
1308 value
<BitsY
> multiplicand
= a
.template zcast
<BitsY
>();
1309 const value
<BitsB
> &multiplier
= b
;
1310 uint32_t multiplicand_shift
= 0;
1311 for (size_t step
= 0; step
< BitsB
; step
++) {
1312 if (multiplier
.bit(step
)) {
1313 multiplicand
= multiplicand
.shl(value
<32> { multiplicand_shift
});
1314 product
= product
.add(multiplicand
);
1315 multiplicand_shift
= 0;
1317 multiplicand_shift
++;
1322 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1323 CXXRTL_ALWAYS_INLINE
1324 value
<BitsY
> mul_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1325 value
<BitsB
+ 1> ub
= b
.template sext
<BitsB
+ 1>();
1326 if (ub
.is_neg()) ub
= ub
.neg();
1327 value
<BitsY
> y
= mul_uu
<BitsY
>(a
.template scast
<BitsY
>(), ub
);
1328 return b
.is_neg() ? y
.neg() : y
;
1331 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1332 CXXRTL_ALWAYS_INLINE
1333 std::pair
<value
<BitsY
>, value
<BitsY
>> divmod_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1334 constexpr size_t Bits
= max(BitsY
, max(BitsA
, BitsB
));
1335 value
<Bits
> quotient
;
1336 value
<Bits
> dividend
= a
.template zext
<Bits
>();
1337 value
<Bits
> divisor
= b
.template zext
<Bits
>();
1338 if (dividend
.ucmp(divisor
))
1339 return {/*quotient=*/value
<BitsY
> { 0u }, /*remainder=*/dividend
.template trunc
<BitsY
>()};
1340 uint32_t divisor_shift
= dividend
.ctlz() - divisor
.ctlz();
1341 divisor
= divisor
.shl(value
<32> { divisor_shift
});
1342 for (size_t step
= 0; step
<= divisor_shift
; step
++) {
1343 quotient
= quotient
.shl(value
<1> { 1u });
1344 if (!dividend
.ucmp(divisor
)) {
1345 dividend
= dividend
.sub(divisor
);
1346 quotient
.set_bit(0, true);
1348 divisor
= divisor
.shr(value
<1> { 1u });
1350 return {quotient
.template trunc
<BitsY
>(), /*remainder=*/dividend
.template trunc
<BitsY
>()};
1353 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1354 CXXRTL_ALWAYS_INLINE
1355 std::pair
<value
<BitsY
>, value
<BitsY
>> divmod_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1356 value
<BitsA
+ 1> ua
= a
.template sext
<BitsA
+ 1>();
1357 value
<BitsB
+ 1> ub
= b
.template sext
<BitsB
+ 1>();
1358 if (ua
.is_neg()) ua
= ua
.neg();
1359 if (ub
.is_neg()) ub
= ub
.neg();
1361 std::tie(y
, r
) = divmod_uu
<BitsY
>(ua
, ub
);
1362 if (a
.is_neg() != b
.is_neg()) y
= y
.neg();
1363 if (a
.is_neg()) r
= r
.neg();
1367 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1368 CXXRTL_ALWAYS_INLINE
1369 value
<BitsY
> div_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1370 return divmod_uu
<BitsY
>(a
, b
).first
;
1373 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1374 CXXRTL_ALWAYS_INLINE
1375 value
<BitsY
> div_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1376 return divmod_ss
<BitsY
>(a
, b
).first
;
1379 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1380 CXXRTL_ALWAYS_INLINE
1381 value
<BitsY
> mod_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1382 return divmod_uu
<BitsY
>(a
, b
).second
;
1385 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1386 CXXRTL_ALWAYS_INLINE
1387 value
<BitsY
> mod_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1388 return divmod_ss
<BitsY
>(a
, b
).second
;
1392 struct memory_index
{
1396 template<size_t BitsAddr
>
1397 memory_index(const value
<BitsAddr
> &addr
, size_t offset
, size_t depth
) {
1398 static_assert(value
<BitsAddr
>::chunks
<= 1, "memory address is too wide");
1399 size_t offset_index
= addr
.data
[0];
1401 valid
= (offset_index
>= offset
&& offset_index
< offset
+ depth
);
1402 index
= offset_index
- offset
;
1406 } // namespace cxxrtl_yosys