cxxrtl: don't overwrite buffered inputs.
1 /*
2 * yosys -- Yosys Open SYnthesis Suite
3 *
4 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19 // This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
20 //
21 // The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
22 // composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
23 // to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
24 // to unwrap the abstraction and generate efficient code.
25
26 #ifndef CXXRTL_H
27 #define CXXRTL_H
28
29 #include <cstddef>
30 #include <cstdint>
31 #include <cassert>
32 #include <limits>
33 #include <type_traits>
34 #include <tuple>
35 #include <vector>
36 #include <map>
37 #include <algorithm>
38 #include <memory>
39 #include <sstream>
40
41 #include <backends/cxxrtl/cxxrtl_capi.h>
42
43 // CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
44 // It generates a lot of specialized template functions with relatively large bodies that, when inlined
45 // into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
46 // Because of this, most of the CXXRTL runtime must always be inlined for best performance.
47 #ifndef __has_attribute
48 # define __has_attribute(x) 0
49 #endif
50 #if __has_attribute(always_inline)
51 #define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
52 #else
53 #define CXXRTL_ALWAYS_INLINE inline
54 #endif
55
56 // CXXRTL uses assert() to check for C++ contract violations (which may result in e.g. undefined behavior
57 // of the simulation code itself), and CXXRTL_ASSERT to check for RTL contract violations (which may at
58 // most result in undefined simulation results).
59 //
60 // Though by default, CXXRTL_ASSERT() expands to assert(), it may be overridden e.g. when integrating
61 // the simulation into another process that should survive violating RTL contracts.
62 #ifndef CXXRTL_ASSERT
63 #ifndef CXXRTL_NDEBUG
64 #define CXXRTL_ASSERT(x) assert(x)
65 #else
66 #define CXXRTL_ASSERT(x)
67 #endif
68 #endif
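// As a minimal sketch (the `handle_rtl_violation` hook below is hypothetical, not part of CXXRTL),
// a host process that must survive RTL contract violations could redefine the macro before
// including this header:
//
//   #define CXXRTL_ASSERT(x) do { if (!(x)) handle_rtl_violation(#x); } while (0)
//   #include <backends/cxxrtl/cxxrtl.h>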
69
70 namespace cxxrtl {
71
72 // All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
73 // is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
74 // and introspecting the simulation in Python.
75 //
76 // It is practical to use chunk sizes between 32 bits and platform register size because when arithmetic on
77 // narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
78 // However, (a) most of our operations do not change those bits in the first place because of invariants that are
79 // invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
80 // Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
81 // clobbered results in simpler generated code.
82 typedef uint32_t chunk_t;
83 typedef uint64_t wide_chunk_t;
84
85 template<typename T>
86 struct chunk_traits {
87 static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
88 "chunk type must be an unsigned integral type");
89 using type = T;
90 static constexpr size_t bits = std::numeric_limits<T>::digits;
91 static constexpr T mask = std::numeric_limits<T>::max();
92 };
93
94 template<class T>
95 struct expr_base;
96
97 template<size_t Bits>
98 struct value : public expr_base<value<Bits>> {
99 static constexpr size_t bits = Bits;
100
101 using chunk = chunk_traits<chunk_t>;
102 static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
103 : chunk::mask >> (chunk::bits - (Bits % chunk::bits));
104
105 static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
106 chunk::type data[chunks] = {};
107
108 value() = default;
109 template<typename... Init>
110 explicit constexpr value(Init ...init) : data{init...} {}
111
112 value(const value<Bits> &) = default;
113 value<Bits> &operator=(const value<Bits> &) = default;
114
115 value(value<Bits> &&) = default;
116 value<Bits> &operator=(value<Bits> &&) = default;
117
118 // A (no-op) helper that forces the cast to value<>.
119 CXXRTL_ALWAYS_INLINE
120 const value<Bits> &val() const {
121 return *this;
122 }
123
124 std::string str() const {
125 std::stringstream ss;
126 ss << *this;
127 return ss.str();
128 }
129
130 // Conversion operations.
131 //
132 	// These functions ensure that a conversion is never out of range, and should always be used, if at all
133 // possible, instead of direct manipulation of the `data` member. For very large types, .slice() and
134 // .concat() can be used to split them into more manageable parts.
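	// For example, a value can be round-tripped through a plain integer like this (a usage sketch;
	// the names and constants are illustrative only):
	//
	//   value<16> v;
	//   v.set<uint16_t>(0xabcd);            // T must be at least as wide as the value
	//   uint16_t x = v.get<uint16_t>();     // x == 0xabcd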
135 template<class IntegerT>
136 CXXRTL_ALWAYS_INLINE
137 IntegerT get() const {
138 static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
139 "get<T>() requires T to be an unsigned integral type");
140 static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
141 "get<T>() requires T to be at least as wide as the value is");
142 IntegerT result = 0;
143 for (size_t n = 0; n < chunks; n++)
144 result |= IntegerT(data[n]) << (n * chunk::bits);
145 return result;
146 }
147
148 template<class IntegerT>
149 CXXRTL_ALWAYS_INLINE
150 void set(IntegerT other) {
151 static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
152 "set<T>() requires T to be an unsigned integral type");
153 static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
154 "set<T>() requires the value to be at least as wide as T is");
155 for (size_t n = 0; n < chunks; n++)
156 data[n] = (other >> (n * chunk::bits)) & chunk::mask;
157 }
158
159 // Operations with compile-time parameters.
160 //
161 // These operations are used to implement slicing, concatenation, and blitting.
162 // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
163 // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
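	// For example (a sketch; widths and constants are illustrative):
	//
	//   value<8> a { 0x90u };               // bit 7 (the sign bit) is set
	//   value<16> z = a.zext<16>();         // 0x0090: new high bits are zero
	//   value<16> s = a.sext<16>();         // 0xff90: new high bits copy the sign bit
	//   value<4>  t = a.trunc<4>();         // 0x0: the 4 most significant bits are dropped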
164 template<size_t NewBits>
165 CXXRTL_ALWAYS_INLINE
166 value<NewBits> trunc() const {
167 static_assert(NewBits <= Bits, "trunc() may not increase width");
168 value<NewBits> result;
169 for (size_t n = 0; n < result.chunks; n++)
170 result.data[n] = data[n];
171 result.data[result.chunks - 1] &= result.msb_mask;
172 return result;
173 }
174
175 template<size_t NewBits>
176 CXXRTL_ALWAYS_INLINE
177 value<NewBits> zext() const {
178 static_assert(NewBits >= Bits, "zext() may not decrease width");
179 value<NewBits> result;
180 for (size_t n = 0; n < chunks; n++)
181 result.data[n] = data[n];
182 return result;
183 }
184
185 template<size_t NewBits>
186 CXXRTL_ALWAYS_INLINE
187 value<NewBits> sext() const {
188 static_assert(NewBits >= Bits, "sext() may not decrease width");
189 value<NewBits> result;
190 for (size_t n = 0; n < chunks; n++)
191 result.data[n] = data[n];
192 if (is_neg()) {
193 result.data[chunks - 1] |= ~msb_mask;
194 for (size_t n = chunks; n < result.chunks; n++)
195 result.data[n] = chunk::mask;
196 result.data[result.chunks - 1] &= result.msb_mask;
197 }
198 return result;
199 }
200
201 template<size_t NewBits>
202 CXXRTL_ALWAYS_INLINE
203 value<NewBits> rtrunc() const {
204 static_assert(NewBits <= Bits, "rtrunc() may not increase width");
205 value<NewBits> result;
206 constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
207 constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
208 chunk::type carry = 0;
209 if (shift_chunks + result.chunks < chunks) {
210 carry = (shift_bits == 0) ? 0
211 : data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
212 }
213 for (size_t n = result.chunks; n > 0; n--) {
214 result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
215 carry = (shift_bits == 0) ? 0
216 : data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
217 }
218 return result;
219 }
220
221 template<size_t NewBits>
222 CXXRTL_ALWAYS_INLINE
223 value<NewBits> rzext() const {
224 static_assert(NewBits >= Bits, "rzext() may not decrease width");
225 value<NewBits> result;
226 constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
227 constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
228 chunk::type carry = 0;
229 for (size_t n = 0; n < chunks; n++) {
230 result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
231 carry = (shift_bits == 0) ? 0
232 : data[n] >> (chunk::bits - shift_bits);
233 }
234 if (shift_chunks + chunks < result.chunks)
235 result.data[shift_chunks + chunks] = carry;
236 return result;
237 }
238
239 // Bit blit operation, i.e. a partial read-modify-write.
240 template<size_t Stop, size_t Start>
241 CXXRTL_ALWAYS_INLINE
242 value<Bits> blit(const value<Stop - Start + 1> &source) const {
243 static_assert(Stop >= Start, "blit() may not reverse bit order");
244 constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
245 constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
246 : (chunk::mask << (Stop % chunk::bits + 1));
247 value<Bits> masked = *this;
248 if (Start / chunk::bits == Stop / chunk::bits) {
249 masked.data[Start / chunk::bits] &= stop_mask | start_mask;
250 } else {
251 masked.data[Start / chunk::bits] &= start_mask;
252 for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
253 masked.data[n] = 0;
254 masked.data[Stop / chunk::bits] &= stop_mask;
255 }
256 value<Bits> shifted = source
257 .template rzext<Stop + 1>()
258 .template zext<Bits>();
259 return masked.bit_or(shifted);
260 }
261
262 	// Helpers for selecting an extending or truncating operation depending on whether the result is wider or narrower
263 // than the operand. In C++17 these can be replaced with `if constexpr`.
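	// A sketch of the C++17 form alluded to above (not used here so the library stays C++11-compatible):
	//
	//   template<size_t NewBits>
	//   value<NewBits> zcast() const {
	//       if constexpr (NewBits < Bits)
	//           return trunc<NewBits>();
	//       else
	//           return zext<NewBits>();
	//   }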
264 template<size_t NewBits, typename = void>
265 struct zext_cast {
266 CXXRTL_ALWAYS_INLINE
267 value<NewBits> operator()(const value<Bits> &val) {
268 return val.template zext<NewBits>();
269 }
270 };
271
272 template<size_t NewBits>
273 struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
274 CXXRTL_ALWAYS_INLINE
275 value<NewBits> operator()(const value<Bits> &val) {
276 return val.template trunc<NewBits>();
277 }
278 };
279
280 template<size_t NewBits, typename = void>
281 struct sext_cast {
282 CXXRTL_ALWAYS_INLINE
283 value<NewBits> operator()(const value<Bits> &val) {
284 return val.template sext<NewBits>();
285 }
286 };
287
288 template<size_t NewBits>
289 struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
290 CXXRTL_ALWAYS_INLINE
291 value<NewBits> operator()(const value<Bits> &val) {
292 return val.template trunc<NewBits>();
293 }
294 };
295
296 template<size_t NewBits>
297 CXXRTL_ALWAYS_INLINE
298 value<NewBits> zcast() const {
299 return zext_cast<NewBits>()(*this);
300 }
301
302 template<size_t NewBits>
303 CXXRTL_ALWAYS_INLINE
304 value<NewBits> scast() const {
305 return sext_cast<NewBits>()(*this);
306 }
307
308 // Operations with run-time parameters (offsets, amounts, etc).
309 //
310 // These operations are used for computations.
311 bool bit(size_t offset) const {
312 return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
313 }
314
315 void set_bit(size_t offset, bool value = true) {
316 size_t offset_chunks = offset / chunk::bits;
317 size_t offset_bits = offset % chunk::bits;
318 data[offset_chunks] &= ~(1 << offset_bits);
319 data[offset_chunks] |= value ? 1 << offset_bits : 0;
320 }
321
322 explicit operator bool() const {
323 return !is_zero();
324 }
325
326 bool is_zero() const {
327 for (size_t n = 0; n < chunks; n++)
328 if (data[n] != 0)
329 return false;
330 return true;
331 }
332
333 bool is_neg() const {
334 return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
335 }
336
337 bool operator ==(const value<Bits> &other) const {
338 for (size_t n = 0; n < chunks; n++)
339 if (data[n] != other.data[n])
340 return false;
341 return true;
342 }
343
344 bool operator !=(const value<Bits> &other) const {
345 return !(*this == other);
346 }
347
348 value<Bits> bit_not() const {
349 value<Bits> result;
350 for (size_t n = 0; n < chunks; n++)
351 result.data[n] = ~data[n];
352 result.data[chunks - 1] &= msb_mask;
353 return result;
354 }
355
356 value<Bits> bit_and(const value<Bits> &other) const {
357 value<Bits> result;
358 for (size_t n = 0; n < chunks; n++)
359 result.data[n] = data[n] & other.data[n];
360 return result;
361 }
362
363 value<Bits> bit_or(const value<Bits> &other) const {
364 value<Bits> result;
365 for (size_t n = 0; n < chunks; n++)
366 result.data[n] = data[n] | other.data[n];
367 return result;
368 }
369
370 value<Bits> bit_xor(const value<Bits> &other) const {
371 value<Bits> result;
372 for (size_t n = 0; n < chunks; n++)
373 result.data[n] = data[n] ^ other.data[n];
374 return result;
375 }
376
377 value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
378 return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
379 }
380
381 template<size_t AmountBits>
382 value<Bits> shl(const value<AmountBits> &amount) const {
383 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
384 static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
385 		// Detect shifts definitely larger than Bits early.
386 for (size_t n = 1; n < amount.chunks; n++)
387 if (amount.data[n] != 0)
388 return {};
389 // Past this point we can use the least significant chunk as the shift size.
390 size_t shift_chunks = amount.data[0] / chunk::bits;
391 size_t shift_bits = amount.data[0] % chunk::bits;
392 if (shift_chunks >= chunks)
393 return {};
394 value<Bits> result;
395 chunk::type carry = 0;
396 for (size_t n = 0; n < chunks - shift_chunks; n++) {
397 result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
398 carry = (shift_bits == 0) ? 0
399 : data[n] >> (chunk::bits - shift_bits);
400 }
401 return result;
402 }
403
404 template<size_t AmountBits, bool Signed = false>
405 value<Bits> shr(const value<AmountBits> &amount) const {
406 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
407 static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
408 		// Detect shifts definitely larger than Bits early.
409 for (size_t n = 1; n < amount.chunks; n++)
410 if (amount.data[n] != 0)
411 return {};
412 // Past this point we can use the least significant chunk as the shift size.
413 size_t shift_chunks = amount.data[0] / chunk::bits;
414 size_t shift_bits = amount.data[0] % chunk::bits;
415 if (shift_chunks >= chunks)
416 return {};
417 value<Bits> result;
418 chunk::type carry = 0;
419 for (size_t n = 0; n < chunks - shift_chunks; n++) {
420 result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
421 carry = (shift_bits == 0) ? 0
422 : data[chunks - 1 - n] << (chunk::bits - shift_bits);
423 }
424 if (Signed && is_neg()) {
425 size_t top_chunk_idx = (Bits - shift_bits) / chunk::bits;
426 size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
427 for (size_t n = top_chunk_idx + 1; n < chunks; n++)
428 result.data[n] = chunk::mask;
429 if (shift_bits != 0)
430 result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
431 }
432 return result;
433 }
434
435 template<size_t AmountBits>
436 value<Bits> sshr(const value<AmountBits> &amount) const {
437 return shr<AmountBits, /*Signed=*/true>(amount);
438 }
439
440 size_t ctpop() const {
441 size_t count = 0;
442 for (size_t n = 0; n < chunks; n++) {
443 // This loop implements the population count idiom as recognized by LLVM and GCC.
444 for (chunk::type x = data[n]; x != 0; count++)
445 x = x & (x - 1);
446 }
447 return count;
448 }
449
450 size_t ctlz() const {
451 size_t count = 0;
452 for (size_t n = 0; n < chunks; n++) {
453 chunk::type x = data[chunks - 1 - n];
454 if (x == 0) {
455 count += (n == 0 ? Bits % chunk::bits : chunk::bits);
456 } else {
457 // This loop implements the find first set idiom as recognized by LLVM.
458 for (; x != 0; count++)
459 x >>= 1;
460 }
461 }
462 return count;
463 }
464
465 template<bool Invert, bool CarryIn>
466 std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
467 value<Bits> result;
468 bool carry = CarryIn;
469 for (size_t n = 0; n < result.chunks; n++) {
470 result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
471 if (result.chunks - 1 == n)
472 result.data[result.chunks - 1] &= result.msb_mask;
473 carry = (result.data[n] < data[n]) ||
474 (result.data[n] == data[n] && carry);
475 }
476 return {result, carry};
477 }
478
479 value<Bits> add(const value<Bits> &other) const {
480 return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
481 }
482
483 value<Bits> sub(const value<Bits> &other) const {
484 return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
485 }
486
487 value<Bits> neg() const {
488 return value<Bits> { 0u }.sub(*this);
489 }
490
491 bool ucmp(const value<Bits> &other) const {
492 bool carry;
493 std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
494 return !carry; // a.ucmp(b) ≡ a u< b
495 }
496
497 bool scmp(const value<Bits> &other) const {
498 value<Bits> result;
499 bool carry;
500 std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
501 bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
502 return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
503 }
504
505 template<size_t ResultBits>
506 value<ResultBits> mul(const value<Bits> &other) const {
507 value<ResultBits> result;
508 wide_chunk_t wide_result[result.chunks + 1] = {};
509 for (size_t n = 0; n < chunks; n++) {
510 for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
511 wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
512 wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
513 wide_result[n + m] &= chunk::mask;
514 }
515 }
516 for (size_t n = 0; n < result.chunks; n++) {
517 result.data[n] = wide_result[n];
518 }
519 result.data[result.chunks - 1] &= result.msb_mask;
520 return result;
521 }
522 };
523
524 // Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
525 template<class T, size_t Stop, size_t Start>
526 struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
527 static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
528 static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
529 static constexpr size_t bits = Stop - Start + 1;
530
531 T &expr;
532
533 slice_expr(T &expr) : expr(expr) {}
534 slice_expr(const slice_expr<T, Stop, Start> &) = delete;
535
536 CXXRTL_ALWAYS_INLINE
537 operator value<bits>() const {
538 return static_cast<const value<T::bits> &>(expr)
539 .template rtrunc<T::bits - Start>()
540 .template trunc<bits>();
541 }
542
543 CXXRTL_ALWAYS_INLINE
544 slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
545 // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
546 expr = static_cast<const value<T::bits> &>(expr)
547 .template blit<Stop, Start>(rhs);
548 return *this;
549 }
550
551 // A helper that forces the cast to value<>, which allows deduction to work.
552 CXXRTL_ALWAYS_INLINE
553 value<bits> val() const {
554 return static_cast<const value<bits> &>(*this);
555 }
556 };
557
558 // Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
559 template<class T, class U>
560 struct concat_expr : public expr_base<concat_expr<T, U>> {
561 static constexpr size_t bits = T::bits + U::bits;
562
563 T &ms_expr;
564 U &ls_expr;
565
566 concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
567 concat_expr(const concat_expr<T, U> &) = delete;
568
569 CXXRTL_ALWAYS_INLINE
570 operator value<bits>() const {
571 value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
572 .template rzext<bits>();
573 value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
574 .template zext<bits>();
575 return ms_shifted.bit_or(ls_extended);
576 }
577
578 CXXRTL_ALWAYS_INLINE
579 concat_expr<T, U> &operator=(const value<bits> &rhs) {
580 ms_expr = rhs.template rtrunc<T::bits>();
581 ls_expr = rhs.template trunc<U::bits>();
582 return *this;
583 }
584
585 // A helper that forces the cast to value<>, which allows deduction to work.
586 CXXRTL_ALWAYS_INLINE
587 value<bits> val() const {
588 return static_cast<const value<bits> &>(*this);
589 }
590 };
591
592 // Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
593 //
594 // Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
595 // they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
596 // these snippets result in a use-after-free:
597 //
598 // const auto &a = val.slice<7,0>().slice<1>();
599 // value<1> b = a;
600 //
601 // auto &&c = val.slice<7,0>().slice<1>();
602 // c = value<1>{1u};
603 //
604 // An easy way to write code using slices and concatenations safely is to follow two simple rules:
605 // * Never explicitly name any type except `value<W>` or `const value<W> &`.
606 // * Never use a `const auto &` or `auto &&` in any such expression.
607 // Then, any code that compiles will be well-defined.
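// For example, the following uses are safe because every slice and concatenation is consumed
// within the statement that creates it (a usage sketch; the names are illustrative):
//
//   value<8> lo, hi;
//   value<16> word = hi.concat(lo).val();     // rvalue use: materialize into a value<16>
//   hi.slice<7, 4>() = value<4> { 0xau };     // lvalue use: read-modify-write through a slice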
608 template<class T>
609 struct expr_base {
610 template<size_t Stop, size_t Start = Stop>
611 CXXRTL_ALWAYS_INLINE
612 slice_expr<const T, Stop, Start> slice() const {
613 return {*static_cast<const T *>(this)};
614 }
615
616 template<size_t Stop, size_t Start = Stop>
617 CXXRTL_ALWAYS_INLINE
618 slice_expr<T, Stop, Start> slice() {
619 return {*static_cast<T *>(this)};
620 }
621
622 template<class U>
623 CXXRTL_ALWAYS_INLINE
624 concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
625 return {*static_cast<const T *>(this), other};
626 }
627
628 template<class U>
629 CXXRTL_ALWAYS_INLINE
630 concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
631 return {*static_cast<T *>(this), other};
632 }
633 };
634
635 template<size_t Bits>
636 std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
637 auto old_flags = os.flags(std::ios::right);
638 auto old_width = os.width(0);
639 auto old_fill = os.fill('0');
640 os << val.bits << '\'' << std::hex;
641 for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
642 if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
643 os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
644 else
645 os.width((value<Bits>::chunk::bits + 3) / 4);
646 os << val.data[n];
647 }
648 os.fill(old_fill);
649 os.width(old_width);
650 os.flags(old_flags);
651 return os;
652 }
653
654 template<size_t Bits>
655 struct wire {
656 static constexpr size_t bits = Bits;
657
658 value<Bits> curr;
659 value<Bits> next;
660
661 wire() = default;
662 explicit constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
663 template<typename... Init>
664 explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}
665
666 // Copying and copy-assigning values is natural. If, however, a value is replaced with a wire,
667 // e.g. because a module is built with a different optimization level, then existing code could
668 // unintentionally copy a wire instead, which would create a subtle but serious bug. To make sure
669 // this doesn't happen, prohibit copying and copy-assigning wires.
670 wire(const wire<Bits> &) = delete;
671 wire<Bits> &operator=(const wire<Bits> &) = delete;
672
673 wire(wire<Bits> &&) = default;
674 wire<Bits> &operator=(wire<Bits> &&) = default;
675
676 template<class IntegerT>
677 CXXRTL_ALWAYS_INLINE
678 IntegerT get() const {
679 return curr.template get<IntegerT>();
680 }
681
682 template<class IntegerT>
683 CXXRTL_ALWAYS_INLINE
684 void set(IntegerT other) {
685 next.template set<IntegerT>(other);
686 }
687
688 bool commit() {
689 if (curr != next) {
690 curr = next;
691 return true;
692 }
693 return false;
694 }
695 };
696
697 template<size_t Bits>
698 std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
699 os << val.curr;
700 return os;
701 }
702
703 template<size_t Width>
704 struct memory {
705 std::vector<value<Width>> data;
706
707 size_t depth() const {
708 return data.size();
709 }
710
711 memory() = delete;
712 explicit memory(size_t depth) : data(depth) {}
713
714 memory(const memory<Width> &) = delete;
715 memory<Width> &operator=(const memory<Width> &) = delete;
716
717 memory(memory<Width> &&) = default;
718 memory<Width> &operator=(memory<Width> &&) = default;
719
720 	// The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is to stuff it
721 	// into a plain array. You'd think a std::initializer_list would work here, but it doesn't, because you can't
722 	// construct an initializer_list in a constexpr context, and so if you try to do that the whole thing is
723 	// first copied onto the stack (probably overflowing it) and then again into `data`.
724 template<size_t Size>
725 struct init {
726 size_t offset;
727 value<Width> data[Size];
728 };
729
730 template<size_t... InitSize>
731 explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
732 data.resize(depth);
733 // This utterly reprehensible construct is the most reasonable way to apply a function to every element
734 // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
735 auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
736 (void)_;
737 }
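	// For example, a 256-deep, 8-bit wide memory with two initialized regions could be constructed
	// like this (a usage sketch; depths, offsets, and contents are illustrative):
	//
	//   memory<8> rom(256,
	//       memory<8>::init<2> { /*offset=*/0x00, { value<8> { 0x12u }, value<8> { 0x34u } } },
	//       memory<8>::init<1> { /*offset=*/0x10, { value<8> { 0x56u } } });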
738
739 // An operator for direct memory reads. May be used at any time during the simulation.
740 const value<Width> &operator [](size_t index) const {
741 assert(index < data.size());
742 return data[index];
743 }
744
745 // An operator for direct memory writes. May only be used before the simulation is started. If used
746 // after the simulation is started, the design may malfunction.
747 value<Width> &operator [](size_t index) {
748 assert(index < data.size());
749 return data[index];
750 }
751
752 // A simple way to make a writable memory would be to use an array of wires instead of an array of values.
753 // However, there are two significant downsides to this approach: first, it has large overhead (2× space
754 // overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
755 // priorities. Although in principle write ports could be ordered or conditionally enabled in generated
756 // code based on their priorities and selected addresses, the feedback arc set problem is computationally
757 	// expensive, and heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
758 // a particular write port evaluation order.
759 //
760 // The approach used here instead is to queue writes into a buffer during the eval phase, then perform
761 	// the writes during the commit phase in priority order. This approach has low overhead, with both space
762 	// and time proportional to the number of write ports. Because virtually every memory in a practical design
763 	// has at most two write ports, linear search is used on every write, as it is the fastest and simplest approach.
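	// For example, two write ports targeting the same address are resolved in priority order when
	// commit() runs (a usage sketch; the mask selects which bits of `val` are written):
	//
	//   memory<8> mem(16);
	//   mem.update(/*index=*/3, value<8> { 0x0fu }, /*mask=*/value<8> { 0xffu }, /*priority=*/0);
	//   mem.update(/*index=*/3, value<8> { 0xf0u }, /*mask=*/value<8> { 0xf0u }, /*priority=*/1);
	//   mem.commit();                             // mem[3] reads back as 0xff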
764 struct write {
765 size_t index;
766 value<Width> val;
767 value<Width> mask;
768 int priority;
769 };
770 std::vector<write> write_queue;
771
772 void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
773 assert(index < data.size());
774 // Queue up the write while keeping the queue sorted by priority.
775 write_queue.insert(
776 std::upper_bound(write_queue.begin(), write_queue.end(), priority,
777 [](const int a, const write& b) { return a < b.priority; }),
778 write { index, val, mask, priority });
779 }
780
781 bool commit() {
782 bool changed = false;
783 for (const write &entry : write_queue) {
784 value<Width> elem = data[entry.index];
785 elem = elem.update(entry.val, entry.mask);
786 changed |= (data[entry.index] != elem);
787 data[entry.index] = elem;
788 }
789 write_queue.clear();
790 return changed;
791 }
792 };
793
794 struct metadata {
795 const enum {
796 MISSING = 0,
797 UINT = 1,
798 SINT = 2,
799 STRING = 3,
800 DOUBLE = 4,
801 } value_type;
802
803 // In debug mode, using the wrong .as_*() function will assert.
804 // In release mode, using the wrong .as_*() function will safely return a default value.
805 const unsigned uint_value = 0;
806 const signed sint_value = 0;
807 const std::string string_value = "";
808 const double double_value = 0.0;
809
810 metadata() : value_type(MISSING) {}
811 metadata(unsigned value) : value_type(UINT), uint_value(value) {}
812 metadata(signed value) : value_type(SINT), sint_value(value) {}
813 metadata(const std::string &value) : value_type(STRING), string_value(value) {}
814 metadata(const char *value) : value_type(STRING), string_value(value) {}
815 metadata(double value) : value_type(DOUBLE), double_value(value) {}
816
817 metadata(const metadata &) = default;
818 metadata &operator=(const metadata &) = delete;
819
820 unsigned as_uint() const {
821 assert(value_type == UINT);
822 return uint_value;
823 }
824
825 signed as_sint() const {
826 assert(value_type == SINT);
827 return sint_value;
828 }
829
830 const std::string &as_string() const {
831 assert(value_type == STRING);
832 return string_value;
833 }
834
835 double as_double() const {
836 assert(value_type == DOUBLE);
837 return double_value;
838 }
839 };
840
841 typedef std::map<std::string, metadata> metadata_map;
842
843 // Tag class to disambiguate values/wires and their aliases.
844 struct debug_alias {};
845
846 // This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
847 // Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
848 //
849 // To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
850 // in the C API, or it would not be possible to cast between the pointers to these.
851 struct debug_item : ::cxxrtl_object {
852 // Object types.
853 enum : uint32_t {
854 VALUE = CXXRTL_VALUE,
855 WIRE = CXXRTL_WIRE,
856 MEMORY = CXXRTL_MEMORY,
857 ALIAS = CXXRTL_ALIAS,
858 };
859
860 // Object flags.
861 enum : uint32_t {
862 INPUT = CXXRTL_INPUT,
863 OUTPUT = CXXRTL_OUTPUT,
864 INOUT = CXXRTL_INOUT,
865 DRIVEN_SYNC = CXXRTL_DRIVEN_SYNC,
866 DRIVEN_COMB = CXXRTL_DRIVEN_COMB,
867 UNDRIVEN = CXXRTL_UNDRIVEN,
868 };
869
870 debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}
871
872 template<size_t Bits>
873 debug_item(value<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
874 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
875 "value<Bits> is not compatible with C layout");
876 type = VALUE;
877 flags = flags_;
878 width = Bits;
879 lsb_at = lsb_offset;
880 depth = 1;
881 zero_at = 0;
882 curr = item.data;
883 next = item.data;
884 }
885
886 template<size_t Bits>
887 debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
888 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
889 "value<Bits> is not compatible with C layout");
890 type = VALUE;
891 flags = DRIVEN_COMB;
892 width = Bits;
893 lsb_at = lsb_offset;
894 depth = 1;
895 zero_at = 0;
896 curr = const_cast<chunk_t*>(item.data);
897 next = nullptr;
898 }
899
900 template<size_t Bits>
901 debug_item(wire<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
902 static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
903 sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
904 "wire<Bits> is not compatible with C layout");
905 type = WIRE;
906 flags = flags_;
907 width = Bits;
908 lsb_at = lsb_offset;
909 depth = 1;
910 zero_at = 0;
911 curr = item.curr.data;
912 next = item.next.data;
913 }
914
915 template<size_t Width>
916 debug_item(memory<Width> &item, size_t zero_offset = 0) {
917 static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
918 "memory<Width> is not compatible with C layout");
919 type = MEMORY;
920 flags = 0;
921 width = Width;
922 lsb_at = 0;
923 depth = item.data.size();
924 zero_at = zero_offset;
925 curr = item.data.empty() ? nullptr : item.data[0].data;
926 next = nullptr;
927 }
928
929 template<size_t Bits>
930 debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
931 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
932 "value<Bits> is not compatible with C layout");
933 type = ALIAS;
934 flags = DRIVEN_COMB;
935 width = Bits;
936 lsb_at = lsb_offset;
937 depth = 1;
938 zero_at = 0;
939 curr = const_cast<chunk_t*>(item.data);
940 next = nullptr;
941 }
942
943 template<size_t Bits>
944 debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
945 static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
946 sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
947 "wire<Bits> is not compatible with C layout");
948 type = ALIAS;
949 flags = DRIVEN_COMB;
950 width = Bits;
951 lsb_at = lsb_offset;
952 depth = 1;
953 zero_at = 0;
954 curr = const_cast<chunk_t*>(item.curr.data);
955 next = nullptr;
956 }
957 };
958 static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");
959
960 struct debug_items {
961 std::map<std::string, std::vector<debug_item>> table;
962
963 void add(const std::string &name, debug_item &&item) {
964 std::vector<debug_item> &parts = table[name];
965 parts.emplace_back(item);
966 std::sort(parts.begin(), parts.end(),
967 [](const debug_item &a, const debug_item &b) {
968 return a.lsb_at < b.lsb_at;
969 });
970 }
971
972 size_t count(const std::string &name) const {
973 if (table.count(name) == 0)
974 return 0;
975 return table.at(name).size();
976 }
977
978 const std::vector<debug_item> &parts_at(const std::string &name) const {
979 return table.at(name);
980 }
981
982 const debug_item &at(const std::string &name) const {
983 const std::vector<debug_item> &parts = table.at(name);
984 assert(parts.size() == 1);
985 return parts.at(0);
986 }
987
988 const debug_item &operator [](const std::string &name) const {
989 return at(name);
990 }
991 };
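// A hypothetical sketch of how a generated module could expose its state through debug_items
// (the member names below are illustrative, not part of this header):
//
//   void debug_info(debug_items &items, std::string path = "") override {
//       items.add(path + "clk", debug_item(p_clk, 0, debug_item::INPUT));
//       items.add(path + "counter", debug_item(state_counter));
//       items.add(path + "mem", debug_item(memory_mem));
//   }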
992
993 // Tag class to disambiguate the module move constructor from the module constructor that takes black boxes
994 // out of another instance of the module.
995 struct adopt {};
996
997 struct module {
998 module() {}
999 virtual ~module() {}
1000
1001 // Modules with black boxes cannot be copied. Although not all designs include black boxes,
1002 // delete the copy constructor and copy assignment operator to make sure that any downstream
1003 // code that manipulates modules doesn't accidentally depend on their availability.
1004 module(const module &) = delete;
1005 module &operator=(const module &) = delete;
1006
1007 module(module &&) = default;
1008 module &operator=(module &&) = default;
1009
1010 virtual void reset() = 0;
1011
1012 virtual bool eval() = 0;
1013 virtual bool commit() = 0;
1014
1015 size_t step() {
1016 size_t deltas = 0;
1017 bool converged = false;
1018 do {
1019 converged = eval();
1020 deltas++;
1021 } while (commit() && !converged);
1022 return deltas;
1023 }
1024
1025 virtual void debug_info(debug_items &items, std::string path = "") {
1026 (void)items, (void)path;
1027 }
1028 };
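// A typical top-level driver toggles the clock and calls step(), which repeats eval() and commit()
// until the design converges (a hypothetical sketch; `cxxrtl_design::p_top` and `p_clk` stand in
// for names produced by `write_cxxrtl` for a specific design):
//
//   cxxrtl_design::p_top top;
//   top.step();                               // settle the initial state
//   for (int cycle = 0; cycle < 100; cycle++) {
//       top.p_clk.set<bool>(false); top.step();
//       top.p_clk.set<bool>(true);  top.step();
//   }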
1029
1030 } // namespace cxxrtl
1031
1032 // Internal structure used to communicate with the implementation of the C interface.
1033 typedef struct _cxxrtl_toplevel {
1034 std::unique_ptr<cxxrtl::module> module;
1035 } *cxxrtl_toplevel;
1036
1037 // Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
1038 // and independent of Yosys implementation details.
1039 //
1040 // The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
1041 // functions. All of the Yosys arithmetic and logical cells perform sign or zero extension on their operands,
1042 // whereas basic operations on arbitrary-width values require operands to be of the same width. These functions
1043 // bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
1044 // if the corresponding operand is unsigned, and `s` if it is signed.
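// For example, a `$add` cell with two unsigned operands and an 8-bit result would be translated
// to a call following this pattern (a sketch of the naming convention, not literal generated code):
//
//   value<8> y = add_uu<8>(a, b);             // zero-extends both operands to 8 bits, then adds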
1045 namespace cxxrtl_yosys {
1046
1047 using namespace cxxrtl;
1048
1049 // std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
1050 template<class T>
1051 CXXRTL_ALWAYS_INLINE
1052 constexpr T max(const T &a, const T &b) {
1053 return a > b ? a : b;
1054 }
1055
1056 // Logic operations
1057 template<size_t BitsY, size_t BitsA>
1058 CXXRTL_ALWAYS_INLINE
1059 value<BitsY> logic_not(const value<BitsA> &a) {
1060 return value<BitsY> { a ? 0u : 1u };
1061 }
1062
1063 template<size_t BitsY, size_t BitsA, size_t BitsB>
1064 CXXRTL_ALWAYS_INLINE
1065 value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
1066 return value<BitsY> { (bool(a) && bool(b)) ? 1u : 0u };
1067 }
1068
1069 template<size_t BitsY, size_t BitsA, size_t BitsB>
1070 CXXRTL_ALWAYS_INLINE
1071 value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
1072 return value<BitsY> { (bool(a) || bool(b)) ? 1u : 0u };
1073 }
1074
1075 // Reduction operations
1076 template<size_t BitsY, size_t BitsA>
1077 CXXRTL_ALWAYS_INLINE
1078 value<BitsY> reduce_and(const value<BitsA> &a) {
1079 return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
1080 }
1081
1082 template<size_t BitsY, size_t BitsA>
1083 CXXRTL_ALWAYS_INLINE
1084 value<BitsY> reduce_or(const value<BitsA> &a) {
1085 return value<BitsY> { a ? 1u : 0u };
1086 }
1087
1088 template<size_t BitsY, size_t BitsA>
1089 CXXRTL_ALWAYS_INLINE
1090 value<BitsY> reduce_xor(const value<BitsA> &a) {
1091 return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
1092 }
1093
1094 template<size_t BitsY, size_t BitsA>
1095 CXXRTL_ALWAYS_INLINE
1096 value<BitsY> reduce_xnor(const value<BitsA> &a) {
1097 return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
1098 }
1099
1100 template<size_t BitsY, size_t BitsA>
1101 CXXRTL_ALWAYS_INLINE
1102 value<BitsY> reduce_bool(const value<BitsA> &a) {
1103 return value<BitsY> { a ? 1u : 0u };
1104 }
1105
1106 // Bitwise operations
1107 template<size_t BitsY, size_t BitsA>
1108 CXXRTL_ALWAYS_INLINE
1109 value<BitsY> not_u(const value<BitsA> &a) {
1110 return a.template zcast<BitsY>().bit_not();
1111 }
1112
1113 template<size_t BitsY, size_t BitsA>
1114 CXXRTL_ALWAYS_INLINE
1115 value<BitsY> not_s(const value<BitsA> &a) {
1116 return a.template scast<BitsY>().bit_not();
1117 }
1118
1119 template<size_t BitsY, size_t BitsA, size_t BitsB>
1120 CXXRTL_ALWAYS_INLINE
1121 value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
1122 return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
1123 }
1124
1125 template<size_t BitsY, size_t BitsA, size_t BitsB>
1126 CXXRTL_ALWAYS_INLINE
1127 value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
1128 return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
1129 }
1130
1131 template<size_t BitsY, size_t BitsA, size_t BitsB>
1132 CXXRTL_ALWAYS_INLINE
1133 value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
1134 return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
1135 }
1136
1137 template<size_t BitsY, size_t BitsA, size_t BitsB>
1138 CXXRTL_ALWAYS_INLINE
1139 value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
1140 return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
1141 }
1142
1143 template<size_t BitsY, size_t BitsA, size_t BitsB>
1144 CXXRTL_ALWAYS_INLINE
1145 value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
1146 return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
1147 }
1148
1149 template<size_t BitsY, size_t BitsA, size_t BitsB>
1150 CXXRTL_ALWAYS_INLINE
1151 value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
1152 return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
1153 }
1154
1155 template<size_t BitsY, size_t BitsA, size_t BitsB>
1156 CXXRTL_ALWAYS_INLINE
1157 value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
1158 return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
1159 }
1160
1161 template<size_t BitsY, size_t BitsA, size_t BitsB>
1162 CXXRTL_ALWAYS_INLINE
1163 value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
1164 return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
1165 }
1166
1167 template<size_t BitsY, size_t BitsA, size_t BitsB>
1168 CXXRTL_ALWAYS_INLINE
1169 value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
1170 return a.template zcast<BitsY>().template shl(b);
1171 }
1172
1173 template<size_t BitsY, size_t BitsA, size_t BitsB>
1174 CXXRTL_ALWAYS_INLINE
1175 value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
1176 return a.template scast<BitsY>().template shl(b);
1177 }
1178
1179 template<size_t BitsY, size_t BitsA, size_t BitsB>
1180 CXXRTL_ALWAYS_INLINE
1181 value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
1182 return a.template zcast<BitsY>().template shl(b);
1183 }
1184
1185 template<size_t BitsY, size_t BitsA, size_t BitsB>
1186 CXXRTL_ALWAYS_INLINE
1187 value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
1188 return a.template scast<BitsY>().template shl(b);
1189 }
1190
1191 template<size_t BitsY, size_t BitsA, size_t BitsB>
1192 CXXRTL_ALWAYS_INLINE
1193 value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
1194 return a.template shr(b).template zcast<BitsY>();
1195 }
1196
1197 template<size_t BitsY, size_t BitsA, size_t BitsB>
1198 CXXRTL_ALWAYS_INLINE
1199 value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
1200 return a.template shr(b).template scast<BitsY>();
1201 }
1202
1203 template<size_t BitsY, size_t BitsA, size_t BitsB>
1204 CXXRTL_ALWAYS_INLINE
1205 value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
1206 return a.template shr(b).template zcast<BitsY>();
1207 }
1208
1209 template<size_t BitsY, size_t BitsA, size_t BitsB>
1210 CXXRTL_ALWAYS_INLINE
1211 value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
1212 return a.template sshr(b).template scast<BitsY>();
1213 }
1214
1215 template<size_t BitsY, size_t BitsA, size_t BitsB>
1216 CXXRTL_ALWAYS_INLINE
1217 value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
1218 return shr_uu<BitsY>(a, b);
1219 }
1220
1221 template<size_t BitsY, size_t BitsA, size_t BitsB>
1222 CXXRTL_ALWAYS_INLINE
1223 value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
1224 return shr_su<BitsY>(a, b);
1225 }
1226
1227 template<size_t BitsY, size_t BitsA, size_t BitsB>
1228 CXXRTL_ALWAYS_INLINE
1229 value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
1230 return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
1231 }
1232
1233 template<size_t BitsY, size_t BitsA, size_t BitsB>
1234 CXXRTL_ALWAYS_INLINE
1235 value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
1236 return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
1237 }
1238
1239 template<size_t BitsY, size_t BitsA, size_t BitsB>
1240 CXXRTL_ALWAYS_INLINE
1241 value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
1242 return shift_uu<BitsY>(a, b);
1243 }
1244
1245 template<size_t BitsY, size_t BitsA, size_t BitsB>
1246 CXXRTL_ALWAYS_INLINE
1247 value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
1248 return shift_su<BitsY>(a, b);
1249 }
1250
1251 template<size_t BitsY, size_t BitsA, size_t BitsB>
1252 CXXRTL_ALWAYS_INLINE
1253 value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
1254 return shift_us<BitsY>(a, b);
1255 }
1256
1257 template<size_t BitsY, size_t BitsA, size_t BitsB>
1258 CXXRTL_ALWAYS_INLINE
1259 value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
1260 return shift_ss<BitsY>(a, b);
1261 }
1262
1263 // Comparison operations
1264 template<size_t BitsY, size_t BitsA, size_t BitsB>
1265 CXXRTL_ALWAYS_INLINE
1266 value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
1267 constexpr size_t BitsExt = max(BitsA, BitsB);
1268 return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
1269 }
1270
1271 template<size_t BitsY, size_t BitsA, size_t BitsB>
1272 CXXRTL_ALWAYS_INLINE
1273 value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
1274 constexpr size_t BitsExt = max(BitsA, BitsB);
1275 return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
1276 }
1277
1278 template<size_t BitsY, size_t BitsA, size_t BitsB>
1279 CXXRTL_ALWAYS_INLINE
1280 value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
1281 constexpr size_t BitsExt = max(BitsA, BitsB);
1282 return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
1283 }
1284
1285 template<size_t BitsY, size_t BitsA, size_t BitsB>
1286 CXXRTL_ALWAYS_INLINE
1287 value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
1288 constexpr size_t BitsExt = max(BitsA, BitsB);
1289 return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
1290 }
1291
1292 template<size_t BitsY, size_t BitsA, size_t BitsB>
1293 CXXRTL_ALWAYS_INLINE
1294 value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
1295 return eq_uu<BitsY>(a, b);
1296 }
1297
1298 template<size_t BitsY, size_t BitsA, size_t BitsB>
1299 CXXRTL_ALWAYS_INLINE
1300 value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
1301 return eq_ss<BitsY>(a, b);
1302 }
1303
1304 template<size_t BitsY, size_t BitsA, size_t BitsB>
1305 CXXRTL_ALWAYS_INLINE
1306 value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
1307 return ne_uu<BitsY>(a, b);
1308 }
1309
1310 template<size_t BitsY, size_t BitsA, size_t BitsB>
1311 CXXRTL_ALWAYS_INLINE
1312 value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
1313 return ne_ss<BitsY>(a, b);
1314 }
1315
1316 template<size_t BitsY, size_t BitsA, size_t BitsB>
1317 CXXRTL_ALWAYS_INLINE
1318 value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
1319 constexpr size_t BitsExt = max(BitsA, BitsB);
1320 return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
1321 }
1322
1323 template<size_t BitsY, size_t BitsA, size_t BitsB>
1324 CXXRTL_ALWAYS_INLINE
1325 value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
1326 constexpr size_t BitsExt = max(BitsA, BitsB);
1327 return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
1328 }
1329
1330 template<size_t BitsY, size_t BitsA, size_t BitsB>
1331 CXXRTL_ALWAYS_INLINE
1332 value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
1333 constexpr size_t BitsExt = max(BitsA, BitsB);
1334 return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
1335 }
1336
1337 template<size_t BitsY, size_t BitsA, size_t BitsB>
1338 CXXRTL_ALWAYS_INLINE
1339 value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
1340 constexpr size_t BitsExt = max(BitsA, BitsB);
1341 return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
1342 }
1343
1344 template<size_t BitsY, size_t BitsA, size_t BitsB>
1345 CXXRTL_ALWAYS_INLINE
1346 value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
1347 constexpr size_t BitsExt = max(BitsA, BitsB);
1348 return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
1349 }
1350
1351 template<size_t BitsY, size_t BitsA, size_t BitsB>
1352 CXXRTL_ALWAYS_INLINE
1353 value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
1354 constexpr size_t BitsExt = max(BitsA, BitsB);
1355 return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
1356 }
1357
1358 template<size_t BitsY, size_t BitsA, size_t BitsB>
1359 CXXRTL_ALWAYS_INLINE
1360 value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
1361 constexpr size_t BitsExt = max(BitsA, BitsB);
1362 return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
1363 }
1364
1365 template<size_t BitsY, size_t BitsA, size_t BitsB>
1366 CXXRTL_ALWAYS_INLINE
1367 value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
1368 constexpr size_t BitsExt = max(BitsA, BitsB);
1369 return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
1370 }
1371
1372 // Arithmetic operations
1373 template<size_t BitsY, size_t BitsA>
1374 CXXRTL_ALWAYS_INLINE
1375 value<BitsY> pos_u(const value<BitsA> &a) {
1376 return a.template zcast<BitsY>();
1377 }
1378
1379 template<size_t BitsY, size_t BitsA>
1380 CXXRTL_ALWAYS_INLINE
1381 value<BitsY> pos_s(const value<BitsA> &a) {
1382 return a.template scast<BitsY>();
1383 }
1384
1385 template<size_t BitsY, size_t BitsA>
1386 CXXRTL_ALWAYS_INLINE
1387 value<BitsY> neg_u(const value<BitsA> &a) {
1388 return a.template zcast<BitsY>().neg();
1389 }
1390
1391 template<size_t BitsY, size_t BitsA>
1392 CXXRTL_ALWAYS_INLINE
1393 value<BitsY> neg_s(const value<BitsA> &a) {
1394 return a.template scast<BitsY>().neg();
1395 }
1396
1397 template<size_t BitsY, size_t BitsA, size_t BitsB>
1398 CXXRTL_ALWAYS_INLINE
1399 value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
1400 return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
1401 }
1402
1403 template<size_t BitsY, size_t BitsA, size_t BitsB>
1404 CXXRTL_ALWAYS_INLINE
1405 value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
1406 return a.template scast<BitsY>().add(b.template scast<BitsY>());
1407 }
1408
1409 template<size_t BitsY, size_t BitsA, size_t BitsB>
1410 CXXRTL_ALWAYS_INLINE
1411 value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
1412 return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
1413 }
1414
1415 template<size_t BitsY, size_t BitsA, size_t BitsB>
1416 CXXRTL_ALWAYS_INLINE
1417 value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
1418 return a.template scast<BitsY>().sub(b.template scast<BitsY>());
1419 }
1420
1421 template<size_t BitsY, size_t BitsA, size_t BitsB>
1422 CXXRTL_ALWAYS_INLINE
1423 value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
1424 constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
1425 return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
1426 }
1427
1428 template<size_t BitsY, size_t BitsA, size_t BitsB>
1429 CXXRTL_ALWAYS_INLINE
1430 value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
1431 return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
1432 }
1433
1434 template<size_t BitsY, size_t BitsA, size_t BitsB>
1435 CXXRTL_ALWAYS_INLINE
1436 std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
1437 constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
1438 value<Bits> quotient;
1439 value<Bits> dividend = a.template zext<Bits>();
1440 value<Bits> divisor = b.template zext<Bits>();
1441 if (dividend.ucmp(divisor))
1442 return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
1443 uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
1444 divisor = divisor.shl(value<32> { divisor_shift });
1445 for (size_t step = 0; step <= divisor_shift; step++) {
1446 quotient = quotient.shl(value<1> { 1u });
1447 if (!dividend.ucmp(divisor)) {
1448 dividend = dividend.sub(divisor);
1449 quotient.set_bit(0, true);
1450 }
1451 divisor = divisor.shr(value<1> { 1u });
1452 }
1453 return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
1454 }
1455
1456 template<size_t BitsY, size_t BitsA, size_t BitsB>
1457 CXXRTL_ALWAYS_INLINE
1458 std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
1459 value<BitsA + 1> ua = a.template sext<BitsA + 1>();
1460 value<BitsB + 1> ub = b.template sext<BitsB + 1>();
1461 if (ua.is_neg()) ua = ua.neg();
1462 if (ub.is_neg()) ub = ub.neg();
1463 value<BitsY> y, r;
1464 std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
1465 if (a.is_neg() != b.is_neg()) y = y.neg();
1466 if (a.is_neg()) r = r.neg();
1467 return {y, r};
1468 }
1469
1470 template<size_t BitsY, size_t BitsA, size_t BitsB>
1471 CXXRTL_ALWAYS_INLINE
1472 value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
1473 return divmod_uu<BitsY>(a, b).first;
1474 }
1475
1476 template<size_t BitsY, size_t BitsA, size_t BitsB>
1477 CXXRTL_ALWAYS_INLINE
1478 value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
1479 return divmod_ss<BitsY>(a, b).first;
1480 }
1481
1482 template<size_t BitsY, size_t BitsA, size_t BitsB>
1483 CXXRTL_ALWAYS_INLINE
1484 value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
1485 return divmod_uu<BitsY>(a, b).second;
1486 }
1487
1488 template<size_t BitsY, size_t BitsA, size_t BitsB>
1489 CXXRTL_ALWAYS_INLINE
1490 value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
1491 return divmod_ss<BitsY>(a, b).second;
1492 }
1493
1494 // Memory helper
1495 struct memory_index {
1496 bool valid;
1497 size_t index;
1498
1499 template<size_t BitsAddr>
1500 memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
1501 static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
1502 size_t offset_index = addr.data[0];
1503
1504 valid = (offset_index >= offset && offset_index < offset + depth);
1505 index = offset_index - offset;
1506 }
1507 };
1508
1509 } // namespace cxxrtl_yosys
1510
1511 #endif