2 * yosys -- Yosys Open SYnthesis Suite
4 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 // This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
28 #include <type_traits>
36 // The cxxrtl support library implements compile time specialized arbitrary width arithmetics, as well as provides
37 // composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
38 // to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
39 // to unwrap the abstraction and generate efficient code.
42 // All arbitrary-width values in cxxrtl are backed by arrays of unsigned integers called chunks. The chunk size
43 // is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
44 // and introspecting the simulation in Python.
46 // It is practical to use chunk sizes between 32 bits and platform register size because when arithmetics on
47 // narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
48 // However, (a) most of our operations do not change those bits in the first place because of invariants that are
49 // invisible to the compiler, (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
50 // Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
51 // clobbered results in simpler generated code.
54 static_assert(std::is_integral
<T
>::value
&& std::is_unsigned
<T
>::value
,
55 "chunk type must be an unsigned integral type");
57 static constexpr size_t bits
= std::numeric_limits
<T
>::digits
;
58 static constexpr T mask
= std::numeric_limits
<T
>::max();
65 struct value
: public expr_base
<value
<Bits
>> {
66 static constexpr size_t bits
= Bits
;
68 using chunk
= chunk_traits
<uint32_t>;
69 static constexpr chunk::type msb_mask
= (Bits
% chunk::bits
== 0) ? chunk::mask
70 : chunk::mask
>> (chunk::bits
- (Bits
% chunk::bits
));
72 static constexpr size_t chunks
= (Bits
+ chunk::bits
- 1) / chunk::bits
;
73 chunk::type data
[chunks
] = {};
76 template<typename
... Init
>
77 explicit constexpr value(Init
...init
) : data
{init
...} {}
79 value(const value
<Bits
> &) = default;
80 value(value
<Bits
> &&) = default;
81 value
<Bits
> &operator=(const value
<Bits
> &) = default;
83 // A (no-op) helper that forces the cast to value<>.
84 const value
<Bits
> &val() const {
88 std::string
str() const {
94 // Operations with compile-time parameters.
96 // These operations are used to implement slicing, concatenation, and blitting.
97 // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
98 // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
99 template<size_t NewBits
>
100 value
<NewBits
> trunc() const {
101 static_assert(NewBits
<= Bits
, "trunc() may not increase width");
102 value
<NewBits
> result
;
103 for (size_t n
= 0; n
< result
.chunks
; n
++)
104 result
.data
[n
] = data
[n
];
105 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
109 template<size_t NewBits
>
110 value
<NewBits
> zext() const {
111 static_assert(NewBits
>= Bits
, "zext() may not decrease width");
112 value
<NewBits
> result
;
113 for (size_t n
= 0; n
< chunks
; n
++)
114 result
.data
[n
] = data
[n
];
118 template<size_t NewBits
>
119 value
<NewBits
> sext() const {
120 static_assert(NewBits
>= Bits
, "sext() may not decrease width");
121 value
<NewBits
> result
;
122 for (size_t n
= 0; n
< chunks
; n
++)
123 result
.data
[n
] = data
[n
];
125 result
.data
[chunks
- 1] |= ~msb_mask
;
126 for (size_t n
= chunks
; n
< result
.chunks
; n
++)
127 result
.data
[n
] = chunk::mask
;
128 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
133 template<size_t NewBits
>
134 value
<NewBits
> rtrunc() const {
135 static_assert(NewBits
<= Bits
, "rtrunc() may not increase width");
136 value
<NewBits
> result
;
137 constexpr size_t shift_chunks
= (Bits
- NewBits
) / chunk::bits
;
138 constexpr size_t shift_bits
= (Bits
- NewBits
) % chunk::bits
;
139 chunk::type carry
= 0;
140 if (shift_chunks
+ result
.chunks
< chunks
) {
141 carry
= (shift_bits
== 0) ? 0
142 : data
[shift_chunks
+ result
.chunks
] << (chunk::bits
- shift_bits
);
144 for (size_t n
= result
.chunks
; n
> 0; n
--) {
145 result
.data
[n
- 1] = carry
| (data
[shift_chunks
+ n
- 1] >> shift_bits
);
146 carry
= (shift_bits
== 0) ? 0
147 : data
[shift_chunks
+ n
- 1] << (chunk::bits
- shift_bits
);
152 template<size_t NewBits
>
153 value
<NewBits
> rzext() const {
154 static_assert(NewBits
>= Bits
, "rzext() may not decrease width");
155 value
<NewBits
> result
;
156 constexpr size_t shift_chunks
= (NewBits
- Bits
) / chunk::bits
;
157 constexpr size_t shift_bits
= (NewBits
- Bits
) % chunk::bits
;
158 chunk::type carry
= 0;
159 for (size_t n
= 0; n
< chunks
; n
++) {
160 result
.data
[shift_chunks
+ n
] = (data
[n
] << shift_bits
) | carry
;
161 carry
= (shift_bits
== 0) ? 0
162 : data
[n
] >> (chunk::bits
- shift_bits
);
165 result
.data
[result
.chunks
- 1] = carry
;
169 // Bit blit operation, i.e. a partial read-modify-write.
170 template<size_t Stop
, size_t Start
>
171 value
<Bits
> blit(const value
<Stop
- Start
+ 1> &source
) const {
172 static_assert(Stop
>= Start
, "blit() may not reverse bit order");
173 constexpr chunk::type start_mask
= ~(chunk::mask
<< (Start
% chunk::bits
));
174 constexpr chunk::type stop_mask
= (Stop
% chunk::bits
+ 1 == chunk::bits
) ? 0
175 : (chunk::mask
<< (Stop
% chunk::bits
+ 1));
176 value
<Bits
> masked
= *this;
177 if (Start
/ chunk::bits
== Stop
/ chunk::bits
) {
178 masked
.data
[Start
/ chunk::bits
] &= stop_mask
| start_mask
;
180 masked
.data
[Start
/ chunk::bits
] &= start_mask
;
181 for (size_t n
= Start
/ chunk::bits
+ 1; n
< Stop
/ chunk::bits
; n
++)
183 masked
.data
[Stop
/ chunk::bits
] &= stop_mask
;
185 value
<Bits
> shifted
= source
186 .template rzext
<Stop
+ 1>()
187 .template zext
<Bits
>();
188 return masked
.bit_or(shifted
);
191 // Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
192 // than the operand. In C++17 these can be replaced with `if constexpr`.
193 template<size_t NewBits
, typename
= void>
195 value
<NewBits
> operator()(const value
<Bits
> &val
) {
196 return val
.template zext
<NewBits
>();
200 template<size_t NewBits
>
201 struct zext_cast
<NewBits
, typename
std::enable_if
<(NewBits
< Bits
)>::type
> {
202 value
<NewBits
> operator()(const value
<Bits
> &val
) {
203 return val
.template trunc
<NewBits
>();
207 template<size_t NewBits
, typename
= void>
209 value
<NewBits
> operator()(const value
<Bits
> &val
) {
210 return val
.template sext
<NewBits
>();
214 template<size_t NewBits
>
215 struct sext_cast
<NewBits
, typename
std::enable_if
<(NewBits
< Bits
)>::type
> {
216 value
<NewBits
> operator()(const value
<Bits
> &val
) {
217 return val
.template trunc
<NewBits
>();
221 template<size_t NewBits
>
222 value
<NewBits
> zcast() const {
223 return zext_cast
<NewBits
>()(*this);
226 template<size_t NewBits
>
227 value
<NewBits
> scast() const {
228 return sext_cast
<NewBits
>()(*this);
231 // Operations with run-time parameters (offsets, amounts, etc).
233 // These operations are used for computations.
234 bool bit(size_t offset
) const {
235 return data
[offset
/ chunk::bits
] & (1 << (offset
% chunk::bits
));
238 void set_bit(size_t offset
, bool value
= true) {
239 size_t offset_chunks
= offset
/ chunk::bits
;
240 size_t offset_bits
= offset
% chunk::bits
;
241 data
[offset_chunks
] &= ~(1 << offset_bits
);
242 data
[offset_chunks
] |= value
? 1 << offset_bits
: 0;
245 bool is_zero() const {
246 for (size_t n
= 0; n
< chunks
; n
++)
252 explicit operator bool() const {
256 bool is_neg() const {
257 return data
[chunks
- 1] & (1 << ((Bits
- 1) % chunk::bits
));
260 bool operator ==(const value
<Bits
> &other
) const {
261 for (size_t n
= 0; n
< chunks
; n
++)
262 if (data
[n
] != other
.data
[n
])
267 bool operator !=(const value
<Bits
> &other
) const {
268 return !(*this == other
);
271 value
<Bits
> bit_not() const {
273 for (size_t n
= 0; n
< chunks
; n
++)
274 result
.data
[n
] = ~data
[n
];
275 result
.data
[chunks
- 1] &= msb_mask
;
279 value
<Bits
> bit_and(const value
<Bits
> &other
) const {
281 for (size_t n
= 0; n
< chunks
; n
++)
282 result
.data
[n
] = data
[n
] & other
.data
[n
];
286 value
<Bits
> bit_or(const value
<Bits
> &other
) const {
288 for (size_t n
= 0; n
< chunks
; n
++)
289 result
.data
[n
] = data
[n
] | other
.data
[n
];
293 value
<Bits
> bit_xor(const value
<Bits
> &other
) const {
295 for (size_t n
= 0; n
< chunks
; n
++)
296 result
.data
[n
] = data
[n
] ^ other
.data
[n
];
300 value
<Bits
> update(const value
<Bits
> &val
, const value
<Bits
> &mask
) const {
301 return bit_and(mask
.bit_not()).bit_or(val
.bit_and(mask
));
304 template<size_t AmountBits
>
305 value
<Bits
> shl(const value
<AmountBits
> &amount
) const {
306 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
307 static_assert(Bits
<= chunk::mask
, "shl() of unreasonably large values is not supported");
308 // Detect shifts definitely large than Bits early.
309 for (size_t n
= 1; n
< amount
.chunks
; n
++)
310 if (amount
.data
[n
] != 0)
312 // Past this point we can use the least significant chunk as the shift size.
313 size_t shift_chunks
= amount
.data
[0] / chunk::bits
;
314 size_t shift_bits
= amount
.data
[0] % chunk::bits
;
315 if (shift_chunks
>= chunks
)
318 chunk::type carry
= 0;
319 for (size_t n
= 0; n
< chunks
- shift_chunks
; n
++) {
320 result
.data
[shift_chunks
+ n
] = (data
[n
] << shift_bits
) | carry
;
321 carry
= (shift_bits
== 0) ? 0
322 : data
[n
] >> (chunk::bits
- shift_bits
);
327 template<size_t AmountBits
, bool Signed
= false>
328 value
<Bits
> shr(const value
<AmountBits
> &amount
) const {
329 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
330 static_assert(Bits
<= chunk::mask
, "shr() of unreasonably large values is not supported");
331 // Detect shifts definitely large than Bits early.
332 for (size_t n
= 1; n
< amount
.chunks
; n
++)
333 if (amount
.data
[n
] != 0)
335 // Past this point we can use the least significant chunk as the shift size.
336 size_t shift_chunks
= amount
.data
[0] / chunk::bits
;
337 size_t shift_bits
= amount
.data
[0] % chunk::bits
;
338 if (shift_chunks
>= chunks
)
341 chunk::type carry
= 0;
342 for (size_t n
= 0; n
< chunks
- shift_chunks
; n
++) {
343 result
.data
[chunks
- shift_chunks
- 1 - n
] = carry
| (data
[chunks
- 1 - n
] >> shift_bits
);
344 carry
= (shift_bits
== 0) ? 0
345 : data
[chunks
- 1 - n
] << (chunk::bits
- shift_bits
);
347 if (Signed
&& is_neg()) {
348 for (size_t n
= chunks
- shift_chunks
; n
< chunks
; n
++)
349 result
.data
[n
] = chunk::mask
;
351 result
.data
[chunks
- shift_chunks
] |= chunk::mask
<< (chunk::bits
- shift_bits
);
356 template<size_t AmountBits
>
357 value
<Bits
> sshr(const value
<AmountBits
> &amount
) const {
358 return shr
<AmountBits
, /*Signed=*/true>(amount
);
361 size_t ctpop() const {
363 for (size_t n
= 0; n
< chunks
; n
++) {
364 // This loop implements the population count idiom as recognized by LLVM and GCC.
365 for (chunk::type x
= data
[n
]; x
!= 0; count
++)
// NOTE(review): ctlz() — presumably counts leading zero bits, scanning chunks
// from most significant downward (`data[chunks - 1 - n]`). Several interior
// statements of this function (the `count` accumulator declaration, the branch
// structure around the zero-chunk case, and the body/footer of the inner loop)
// were lost in this copy, so the function does not parse as written — restore
// it from the upstream cxxrtl support header before relying on it.
371 size_t ctlz() const {
373 for (size_t n
= 0; n
< chunks
; n
++) {
374 chunk::type x
= data
[chunks
- 1 - n
];
376 count
+= (n
== 0 ? Bits
% chunk::bits
: chunk::bits
);
378 // This loop implements the find first set idiom as recognized by LLVM.
379 for (; x
!= 0; count
++)
386 template<bool Invert
, bool CarryIn
>
387 std::pair
<value
<Bits
>, bool /*CarryOut*/> alu(const value
<Bits
> &other
) const {
389 bool carry
= CarryIn
;
390 for (size_t n
= 0; n
< result
.chunks
; n
++) {
391 result
.data
[n
] = data
[n
] + (Invert
? ~other
.data
[n
] : other
.data
[n
]) + carry
;
392 carry
= (result
.data
[n
] < data
[n
]) ||
393 (result
.data
[n
] == data
[n
] && carry
);
395 result
.data
[result
.chunks
- 1] &= result
.msb_mask
;
396 return {result
, carry
};
399 value
<Bits
> add(const value
<Bits
> &other
) const {
400 return alu
</*Invert=*/false, /*CarryIn=*/false>(other
).first
;
403 value
<Bits
> sub(const value
<Bits
> &other
) const {
404 return alu
</*Invert=*/true, /*CarryIn=*/true>(other
).first
;
407 value
<Bits
> neg() const {
408 return value
<Bits
> { 0u }.sub(*this);
411 bool ucmp(const value
<Bits
> &other
) const {
413 std::tie(std::ignore
, carry
) = alu
</*Invert=*/true, /*CarryIn=*/true>(other
);
414 return !carry
; // a.ucmp(b) ≡ a u< b
417 bool scmp(const value
<Bits
> &other
) const {
420 std::tie(result
, carry
) = alu
</*Invert=*/true, /*CarryIn=*/true>(other
);
421 bool overflow
= (is_neg() == !other
.is_neg()) && (is_neg() != result
.is_neg());
422 return result
.is_neg() ^ overflow
; // a.scmp(b) ≡ a s< b
426 // Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
427 template<class T
, size_t Stop
, size_t Start
>
428 struct slice_expr
: public expr_base
<slice_expr
<T
, Stop
, Start
>> {
429 static_assert(Stop
>= Start
, "slice_expr() may not reverse bit order");
430 static_assert(Start
< T::bits
&& Stop
< T::bits
, "slice_expr() must be within bounds");
431 static constexpr size_t bits
= Stop
- Start
+ 1;
435 slice_expr(T
&expr
) : expr(expr
) {}
436 slice_expr(const slice_expr
<T
, Stop
, Start
> &) = delete;
438 operator value
<bits
>() const {
439 return static_cast<const value
<T::bits
> &>(expr
)
440 .template rtrunc
<T::bits
- Start
>()
441 .template trunc
<bits
>();
444 slice_expr
<T
, Stop
, Start
> &operator=(const value
<bits
> &rhs
) {
445 // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
446 expr
= static_cast<const value
<T::bits
> &>(expr
)
447 .template blit
<Stop
, Start
>(rhs
);
451 // A helper that forces the cast to value<>, which allows deduction to work.
452 value
<bits
> val() const {
453 return static_cast<const value
<bits
> &>(*this);
457 // Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
458 template<class T
, class U
>
459 struct concat_expr
: public expr_base
<concat_expr
<T
, U
>> {
460 static constexpr size_t bits
= T::bits
+ U::bits
;
465 concat_expr(T
&ms_expr
, U
&ls_expr
) : ms_expr(ms_expr
), ls_expr(ls_expr
) {}
466 concat_expr(const concat_expr
<T
, U
> &) = delete;
468 operator value
<bits
>() const {
469 value
<bits
> ms_shifted
= static_cast<const value
<T::bits
> &>(ms_expr
)
470 .template rzext
<bits
>();
471 value
<bits
> ls_extended
= static_cast<const value
<U::bits
> &>(ls_expr
)
472 .template zext
<bits
>();
473 return ms_shifted
.bit_or(ls_extended
);
476 concat_expr
<T
, U
> &operator=(const value
<bits
> &rhs
) {
477 ms_expr
= rhs
.template rtrunc
<T::bits
>();
478 ls_expr
= rhs
.template trunc
<U::bits
>();
482 // A helper that forces the cast to value<>, which allows deduction to work.
483 value
<bits
> val() const {
484 return static_cast<const value
<bits
> &>(*this);
488 // Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
490 // Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
491 // they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
492 // these snippets perform use-after-free:
494 // const auto &a = val.slice<7,0>().slice<1>();
497 // auto &&c = val.slice<7,0>().slice<1>();
500 // An easy way to write code using slices and concatenations safely is to follow two simple rules:
501 // * Never explicitly name any type except `value<W>` or `const value<W> &`.
502 // * Never use a `const auto &` or `auto &&` in any such expression.
503 // Then, any code that compiles will be well-defined.
506 template<size_t Stop
, size_t Start
= Stop
>
507 slice_expr
<const T
, Stop
, Start
> slice() const {
508 return {*static_cast<const T
*>(this)};
511 template<size_t Stop
, size_t Start
= Stop
>
512 slice_expr
<T
, Stop
, Start
> slice() {
513 return {*static_cast<T
*>(this)};
517 concat_expr
<const T
, typename
std::remove_reference
<const U
>::type
> concat(const U
&other
) const {
518 return {*static_cast<const T
*>(this), other
};
522 concat_expr
<T
, typename
std::remove_reference
<U
>::type
> concat(U
&&other
) {
523 return {*static_cast<T
*>(this), other
};
527 template<size_t Bits
>
528 std::ostream
&operator<<(std::ostream
&os
, const value
<Bits
> &val
) {
529 auto old_flags
= os
.flags(std::ios::right
);
530 auto old_width
= os
.width(0);
531 auto old_fill
= os
.fill('0');
532 os
<< val
.bits
<< '\'' << std::hex
;
533 for (size_t n
= val
.chunks
- 1; n
!= (size_t)-1; n
--) {
534 if (n
== val
.chunks
- 1 && Bits
% value
<Bits
>::chunk::bits
!= 0)
535 os
.width((Bits
% value
<Bits
>::chunk::bits
+ 3) / 4);
537 os
.width((value
<Bits
>::chunk::bits
+ 3) / 4);
546 template<size_t Bits
>
548 static constexpr size_t bits
= Bits
;
554 constexpr wire(const value
<Bits
> &init
) : curr(init
), next(init
) {}
555 template<typename
... Init
>
556 explicit constexpr wire(Init
...init
) : curr
{init
...}, next
{init
...} {}
558 wire(const wire
<Bits
> &) = delete;
559 wire(wire
<Bits
> &&) = default;
560 wire
<Bits
> &operator=(const wire
<Bits
> &) = delete;
571 template<size_t Bits
>
572 std::ostream
&operator<<(std::ostream
&os
, const wire
<Bits
> &val
) {
577 template<size_t Width
>
579 std::vector
<value
<Width
>> data
;
581 size_t depth() const {
586 explicit memory(size_t depth
) : data(depth
) {}
588 memory(const memory
<Width
> &) = delete;
589 memory
<Width
> &operator=(const memory
<Width
> &) = delete;
591 // The only way to get the compiler to put the initializer in .rodata and do not copy it on stack is to stuff it
592 // into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
593 // construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
594 // first copied on the stack (probably overflowing it) and then again into `data`.
595 template<size_t Size
>
598 value
<Width
> data
[Size
];
601 template<size_t... InitSize
>
602 explicit memory(size_t depth
, const init
<InitSize
> &...init
) : data(depth
) {
604 // This utterly reprehensible construct is the most reasonable way to apply a function to every element
605 // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
606 auto _
= {std::move(std::begin(init
.data
), std::end(init
.data
), data
.begin() + init
.offset
)...};
609 // An operator for direct memory reads. May be used at any time during the simulation.
610 const value
<Width
> &operator [](size_t index
) const {
611 assert(index
< data
.size());
615 // An operator for direct memory writes. May only be used before the simulation is started. If used
616 // after the simulation is started, the design may malfunction.
617 value
<Width
> &operator [](size_t index
) {
618 assert(index
< data
.size());
622 // A simple way to make a writable memory would be to use an array of wires instead of an array of values.
623 // However, there are two significant downsides to this approach: first, it has large overhead (2× space
624 // overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
625 // priorities. Although in principle write ports could be ordered or conditionally enabled in generated
626 // code based on their priorities and selected addresses, the feedback arc set problem is computationally
627 // expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
628 // a particular write port evaluation order.
630 // The approach used here instead is to queue writes into a buffer during the eval phase, then perform
631 // the writes during the commit phase in the priority order. This approach has low overhead, with both space
632 // and time proportional to the amount of write ports. Because virtually every memory in a practical design
633 // has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
640 std::vector
<write
> write_queue
;
642 void update(size_t index
, const value
<Width
> &val
, const value
<Width
> &mask
, int priority
= 0) {
643 assert(index
< data
.size());
644 write_queue
.emplace_back(write
{ index
, val
, mask
, priority
});
648 bool changed
= false;
649 std::sort(write_queue
.begin(), write_queue
.end(),
650 [](const write
&a
, const write
&b
) { return a
.priority
< b
.priority
; });
651 for (const write
&entry
: write_queue
) {
652 value
<Width
> elem
= data
[entry
.index
];
653 elem
= elem
.update(entry
.val
, entry
.mask
);
654 changed
|= (data
[entry
.index
] != elem
);
655 data
[entry
.index
] = elem
;
// A tagged union of the attribute/parameter value kinds RTLIL supports.
// NOTE(review): the struct header and tag enum were elided in this copy and
// have been restored; also restored the `= 0` initializer on sint_value,
// without which the default constructor leaves a const member uninitialized.
struct metadata {
	const enum {
		MISSING = 0,
		UINT    = 1,
		SINT    = 2,
		STRING  = 3,
		DOUBLE  = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned uint_value = 0;
	const signed sint_value = 0;
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;
// Abstract base for generated designs: eval() computes next state, commit()
// publishes it; step() alternates the two until the design converges.
// NOTE(review): the struct header and step() frame were elided in this copy;
// restored from the obvious upstream shape.
struct module {
	module() {}
	virtual ~module() {}

	// Modules are identity objects: copying is forbidden.
	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	// Runs delta cycles until a converged eval() is followed by a quiet commit();
	// returns the number of delta cycles taken.
	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}
};
734 } // namespace cxxrtl
736 // Definitions of internal Yosys cells. Other than the functions in this namespace, cxxrtl is fully generic
// and independent of Yosys implementation details.
739 // The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
740 // functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
741 // whereas basic operations on arbitrary width values require operands to be of the same width. These functions
742 // bridge the gap by performing the necessary casts. They are named similar to `cell_A[B]`, where A and B are `u`
743 // if the corresponding operand is unsigned, and `s` if it is signed.
744 namespace cxxrtl_yosys
{
746 using namespace cxxrtl
;
// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
// NOTE(review): the `template<class T>` header was elided in this copy; restored.
template<class T>
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}
755 template<size_t BitsY
, size_t BitsA
>
756 value
<BitsY
> not_u(const value
<BitsA
> &a
) {
757 return a
.template zcast
<BitsY
>().bit_not();
760 template<size_t BitsY
, size_t BitsA
>
761 value
<BitsY
> not_s(const value
<BitsA
> &a
) {
762 return a
.template scast
<BitsY
>().bit_not();
765 template<size_t BitsY
, size_t BitsA
>
766 value
<BitsY
> logic_not_u(const value
<BitsA
> &a
) {
767 return value
<BitsY
> { a
? 0u : 1u };
770 template<size_t BitsY
, size_t BitsA
>
771 value
<BitsY
> logic_not_s(const value
<BitsA
> &a
) {
772 return value
<BitsY
> { a
? 0u : 1u };
775 template<size_t BitsY
, size_t BitsA
>
776 value
<BitsY
> reduce_and_u(const value
<BitsA
> &a
) {
777 return value
<BitsY
> { a
.bit_not().is_zero() ? 1u : 0u };
780 template<size_t BitsY
, size_t BitsA
>
781 value
<BitsY
> reduce_and_s(const value
<BitsA
> &a
) {
782 return value
<BitsY
> { a
.bit_not().is_zero() ? 1u : 0u };
785 template<size_t BitsY
, size_t BitsA
>
786 value
<BitsY
> reduce_or_u(const value
<BitsA
> &a
) {
787 return value
<BitsY
> { a
? 1u : 0u };
790 template<size_t BitsY
, size_t BitsA
>
791 value
<BitsY
> reduce_or_s(const value
<BitsA
> &a
) {
792 return value
<BitsY
> { a
? 1u : 0u };
795 template<size_t BitsY
, size_t BitsA
>
796 value
<BitsY
> reduce_xor_u(const value
<BitsA
> &a
) {
797 return value
<BitsY
> { (a
.ctpop() % 2) ? 1u : 0u };
800 template<size_t BitsY
, size_t BitsA
>
801 value
<BitsY
> reduce_xor_s(const value
<BitsA
> &a
) {
802 return value
<BitsY
> { (a
.ctpop() % 2) ? 1u : 0u };
805 template<size_t BitsY
, size_t BitsA
>
806 value
<BitsY
> reduce_xnor_u(const value
<BitsA
> &a
) {
807 return value
<BitsY
> { (a
.ctpop() % 2) ? 0u : 1u };
810 template<size_t BitsY
, size_t BitsA
>
811 value
<BitsY
> reduce_xnor_s(const value
<BitsA
> &a
) {
812 return value
<BitsY
> { (a
.ctpop() % 2) ? 0u : 1u };
815 template<size_t BitsY
, size_t BitsA
>
816 value
<BitsY
> reduce_bool_u(const value
<BitsA
> &a
) {
817 return value
<BitsY
> { a
? 1u : 0u };
820 template<size_t BitsY
, size_t BitsA
>
821 value
<BitsY
> reduce_bool_s(const value
<BitsA
> &a
) {
822 return value
<BitsY
> { a
? 1u : 0u };
825 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
826 value
<BitsY
> and_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
827 return a
.template zcast
<BitsY
>().bit_and(b
.template zcast
<BitsY
>());
830 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
831 value
<BitsY
> and_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
832 return a
.template scast
<BitsY
>().bit_and(b
.template scast
<BitsY
>());
835 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
836 value
<BitsY
> or_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
837 return a
.template zcast
<BitsY
>().bit_or(b
.template zcast
<BitsY
>());
840 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
841 value
<BitsY
> or_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
842 return a
.template scast
<BitsY
>().bit_or(b
.template scast
<BitsY
>());
845 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
846 value
<BitsY
> xor_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
847 return a
.template zcast
<BitsY
>().bit_xor(b
.template zcast
<BitsY
>());
850 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
851 value
<BitsY
> xor_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
852 return a
.template scast
<BitsY
>().bit_xor(b
.template scast
<BitsY
>());
855 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
856 value
<BitsY
> xnor_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
857 return a
.template zcast
<BitsY
>().bit_xor(b
.template zcast
<BitsY
>()).bit_not();
860 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
861 value
<BitsY
> xnor_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
862 return a
.template scast
<BitsY
>().bit_xor(b
.template scast
<BitsY
>()).bit_not();
865 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
866 value
<BitsY
> logic_and_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
867 return value
<BitsY
> { (bool(a
) & bool(b
)) ? 1u : 0u };
870 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
871 value
<BitsY
> logic_and_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
872 return value
<BitsY
> { (bool(a
) & bool(b
)) ? 1u : 0u };
875 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
876 value
<BitsY
> logic_or_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
877 return value
<BitsY
> { (bool(a
) | bool(b
)) ? 1u : 0u };
880 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
881 value
<BitsY
> logic_or_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
882 return value
<BitsY
> { (bool(a
) | bool(b
)) ? 1u : 0u };
885 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
886 value
<BitsY
> shl_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
887 return a
.template zcast
<BitsY
>().template shl(b
);
890 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
891 value
<BitsY
> shl_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
892 return a
.template scast
<BitsY
>().template shl(b
);
895 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
896 value
<BitsY
> sshl_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
897 return a
.template zcast
<BitsY
>().template shl(b
);
900 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
901 value
<BitsY
> sshl_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
902 return a
.template scast
<BitsY
>().template shl(b
);
905 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
906 value
<BitsY
> shr_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
907 return a
.template shr(b
).template zcast
<BitsY
>();
910 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
911 value
<BitsY
> shr_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
912 return a
.template shr(b
).template scast
<BitsY
>();
915 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
916 value
<BitsY
> sshr_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
917 return a
.template shr(b
).template zcast
<BitsY
>();
920 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
921 value
<BitsY
> sshr_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
922 return a
.template shr(b
).template scast
<BitsY
>();
925 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
926 value
<BitsY
> shift_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
927 return shr_uu
<BitsY
>(a
, b
);
930 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
931 value
<BitsY
> shift_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
932 return shr_su
<BitsY
>(a
, b
);
935 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
936 value
<BitsY
> shift_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
937 return b
.is_neg() ? shl_uu
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_uu
<BitsY
>(a
, b
);
940 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
941 value
<BitsY
> shift_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
942 return b
.is_neg() ? shl_su
<BitsY
>(a
, b
.template sext
<BitsB
+ 1>().neg()) : shr_su
<BitsY
>(a
, b
);
945 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
946 value
<BitsY
> shiftx_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
947 return shift_uu
<BitsY
>(a
, b
);
950 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
951 value
<BitsY
> shiftx_su(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
952 return shift_su
<BitsY
>(a
, b
);
955 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
956 value
<BitsY
> shiftx_us(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
957 return shift_us
<BitsY
>(a
, b
);
960 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
961 value
<BitsY
> shiftx_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
962 return shift_ss
<BitsY
>(a
, b
);
965 // Comparison operations
966 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
967 value
<BitsY
> eq_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
968 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
969 return value
<BitsY
>{ a
.template zext
<BitsExt
>() == b
.template zext
<BitsExt
>() ? 1u : 0u };
972 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
973 value
<BitsY
> eq_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
974 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
975 return value
<BitsY
>{ a
.template sext
<BitsExt
>() == b
.template sext
<BitsExt
>() ? 1u : 0u };
978 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
979 value
<BitsY
> ne_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
980 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
981 return value
<BitsY
>{ a
.template zext
<BitsExt
>() != b
.template zext
<BitsExt
>() ? 1u : 0u };
984 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
985 value
<BitsY
> ne_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
986 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
987 return value
<BitsY
>{ a
.template sext
<BitsExt
>() != b
.template sext
<BitsExt
>() ? 1u : 0u };
// `$eqx` (case equality), unsigned: with no x/z bits in two-state
// simulation, identical to `$eq`.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}
// `$eqx` (case equality), signed: with no x/z bits in two-state simulation,
// identical to `$eq`.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}
// `$nex` (case inequality), unsigned: with no x/z bits in two-state
// simulation, identical to `$ne`.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}
// `$nex` (case inequality), signed: with no x/z bits in two-state
// simulation, identical to `$ne`.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}
1010 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1011 value
<BitsY
> gt_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1012 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1013 return value
<BitsY
> { b
.template zext
<BitsExt
>().ucmp(a
.template zext
<BitsExt
>()) ? 1u : 0u };
1016 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1017 value
<BitsY
> gt_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1018 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1019 return value
<BitsY
> { b
.template sext
<BitsExt
>().scmp(a
.template sext
<BitsExt
>()) ? 1u : 0u };
1022 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1023 value
<BitsY
> ge_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1024 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1025 return value
<BitsY
> { !a
.template zext
<BitsExt
>().ucmp(b
.template zext
<BitsExt
>()) ? 1u : 0u };
1028 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1029 value
<BitsY
> ge_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1030 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1031 return value
<BitsY
> { !a
.template sext
<BitsExt
>().scmp(b
.template sext
<BitsExt
>()) ? 1u : 0u };
1034 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1035 value
<BitsY
> lt_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1036 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1037 return value
<BitsY
> { a
.template zext
<BitsExt
>().ucmp(b
.template zext
<BitsExt
>()) ? 1u : 0u };
1040 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1041 value
<BitsY
> lt_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1042 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1043 return value
<BitsY
> { a
.template sext
<BitsExt
>().scmp(b
.template sext
<BitsExt
>()) ? 1u : 0u };
1046 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1047 value
<BitsY
> le_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1048 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1049 return value
<BitsY
> { !b
.template zext
<BitsExt
>().ucmp(a
.template zext
<BitsExt
>()) ? 1u : 0u };
1052 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1053 value
<BitsY
> le_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1054 constexpr size_t BitsExt
= max(BitsA
, BitsB
);
1055 return value
<BitsY
> { !b
.template sext
<BitsExt
>().scmp(a
.template sext
<BitsExt
>()) ? 1u : 0u };
1058 // Arithmetic operations
// `$pos` (unary plus), unsigned: just resize (zero-extend or truncate) to
// the result width.
template<size_t BitsY, size_t BitsA>
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}
// `$pos` (unary plus), signed: just resize (sign-extend or truncate) to
// the result width.
template<size_t BitsY, size_t BitsA>
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}
// `$neg`, unsigned: resize to the result width, then take the two's
// complement.
template<size_t BitsY, size_t BitsA>
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}
// `$neg`, signed: sign-extend or truncate to the result width, then take
// the two's complement.
template<size_t BitsY, size_t BitsA>
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}
// `$add`, unsigned: resize both operands to the result width first, then
// add. This is sound because the low BitsY bits of a sum depend only on
// the low BitsY bits of the addends.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}
// `$add`, signed: sign-extend or truncate both operands to the result
// width, then add; low-bit correctness is width-independent as for add_uu.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}
// `$sub`, unsigned: resize both operands to the result width, then
// subtract (two's complement wraparound).
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}
// `$sub`, signed: sign-extend or truncate both operands to the result
// width, then subtract (two's complement wraparound).
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}
1099 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1100 value
<BitsY
> mul_uu(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1101 value
<BitsY
> product
;
1102 value
<BitsY
> multiplicand
= a
.template zcast
<BitsY
>();
1103 const value
<BitsB
> &multiplier
= b
;
1104 uint32_t multiplicand_shift
= 0;
1105 for (size_t step
= 0; step
< BitsB
; step
++) {
1106 if (multiplier
.bit(step
)) {
1107 multiplicand
= multiplicand
.shl(value
<32> { multiplicand_shift
});
1108 product
= product
.add(multiplicand
);
1109 multiplicand_shift
= 0;
1111 multiplicand_shift
++;
// `$mul`, signed: multiply A by |B|, then negate the product if B was
// negative. B is widened by one bit before negation so the most negative
// value's magnitude is representable; A can be resized directly since the
// product is truncated to BitsY anyway.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y = mul_uu<BitsY>(a.template scast<BitsY>(), ub);
	return b.is_neg() ? y.neg() : y;
}
// `$divmod`, unsigned: returns {quotient, remainder} via restoring long
// division on operands widened to a common working width.
template<size_t BitsY, size_t BitsA, size_t BitsB>
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	// Fast path: dividend < divisor means quotient 0, remainder dividend.
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	// Align the divisor's leading one bit with the dividend's, ...
	uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	// ... then do one compare/subtract step per alignment position,
	// walking the divisor back down one bit at a time.
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}
1145 template<size_t BitsY
, size_t BitsA
, size_t BitsB
>
1146 std::pair
<value
<BitsY
>, value
<BitsY
>> divmod_ss(const value
<BitsA
> &a
, const value
<BitsB
> &b
) {
1147 value
<BitsA
+ 1> ua
= a
.template sext
<BitsA
+ 1>();
1148 value
<BitsB
+ 1> ub
= b
.template sext
<BitsB
+ 1>();
1149 if (ua
.is_neg()) ua
= ua
.neg();
1150 if (ub
.is_neg()) ub
= ub
.neg();
1152 std::tie(y
, r
) = divmod_uu
<BitsY
>(ua
, ub
);
1153 if (a
.is_neg() != b
.is_neg()) y
= y
.neg();
1154 if (a
.is_neg()) r
= r
.neg();
// `$div`, unsigned: the quotient half of divmod_uu.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}
// `$div`, signed: the quotient half of divmod_ss.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}
// `$mod`, unsigned: the remainder half of divmod_uu.
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}
// `$mod`, signed: the remainder half of divmod_ss (sign follows the
// dividend).
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}
1179 struct memory_index
{
1183 template<size_t BitsAddr
>
1184 memory_index(const value
<BitsAddr
> &addr
, size_t offset
, size_t depth
) {
1185 static_assert(value
<BitsAddr
>::chunks
<= 1, "memory address is too wide");
1186 size_t offset_index
= addr
.data
[0];
1188 valid
= (offset_index
>= offset
&& offset_index
< offset
+ depth
);
1189 index
= offset_index
- offset
;
1193 } // namespace cxxrtl_yosys