cxxrtl: use CXXRTL_ASSERT for RTL contract violations instead of assert.
backends/cxxrtl/cxxrtl.h
/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must always be inlined for best performance.
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

// CXXRTL uses assert() to check for C++ contract violations (which may result in e.g. undefined behavior
// of the simulation code itself), and CXXRTL_ASSERT to check for RTL contract violations (which may at
// most result in undefined simulation results).
//
// By default, CXXRTL_ASSERT() expands to assert(), but it may be overridden e.g. when integrating
// the simulation into another process that should survive violating RTL contracts.
#ifndef CXXRTL_ASSERT
#ifndef CXXRTL_NDEBUG
#define CXXRTL_ASSERT(x) assert(x)
#else
#define CXXRTL_ASSERT(x)
#endif
#endif
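
// For example, an embedding process that must survive RTL contract violations might install a
// non-fatal handler before including this header. A minimal sketch; `log_rtl_violation` is a
// hypothetical function provided by the host process, not part of CXXRTL:
//
//   #define CXXRTL_ASSERT(x) \
//     do { if (!(x)) log_rtl_violation(#x, __FILE__, __LINE__); } while (0)
//   #include <backends/cxxrtl/cxxrtl.h>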
69
70 namespace cxxrtl {
71
72 // All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
73 // is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
74 // and introspecting the simulation in Python.
75 //
76 // It is practical to use chunk sizes between 32 bits and platform register size because when arithmetics on
77 // narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
78 // However, (a) most of our operations do not change those bits in the first place because of invariants that are
79 // invisible to the compiler, (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
80 // Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
81 // clobbered results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;

template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};
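
// As an illustration of the layout above (a sketch derived from the definitions below): with 32-bit
// chunks, a value<40> stores its bits in two chunks, with data[0] holding bits 31..0 and data[1]
// holding bits 39..32 in its low 8 bits; the remaining 24 bits of data[1] are kept zero, as
// described by msb_mask == 0xff.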

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Conversion operations.
	//
	// These functions ensure that a conversion is never out of range, and should always be used, if at all
	// possible, instead of direct manipulation of the `data` member. For very large types, .slice() and
	// .concat() can be used to split them into more manageable parts.
	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "get<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "get<T>() requires T to be at least as wide as the value is");
		IntegerT result = 0;
		for (size_t n = 0; n < chunks; n++)
			result |= IntegerT(data[n]) << (n * chunk::bits);
		return result;
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "set<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "set<T>() requires the value to be at least as wide as T is");
		for (size_t n = 0; n < chunks; n++)
			data[n] = (other >> (n * chunk::bits)) & chunk::mask;
	}

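	// A sketch of round-tripping a value through a C++ integer using the conversion operations above:
	//
	//   value<16> v;
	//   v.set<uint16_t>(0xabcd);          // uint16_t is exactly as wide as the value
	//   uint16_t x = v.get<uint16_t>();   // x == 0xabcd
	//   uint32_t y = v.get<uint32_t>();   // a wider unsigned type is also accepted
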
	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}

	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}

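	// For instance (a sketch of the semantics above): replacing bits 7..4 of an 8-bit zero
	// with a 4-bit source yields 0xa0:
	//
	//   value<8> dst { 0x00u };
	//   value<8> res = dst.blit<7, 4>(value<4> { 0xau });  // res.data[0] == 0xa0
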
	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}

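	// A small illustration of the difference: for a 4-bit value with the sign bit set,
	//
	//   value<4> v { 0x9u };
	//   v.zcast<8>();  // 8'09: high bits filled with zeros
	//   v.scast<8>();  // 8'f9: high bits filled with copies of bit 3
	//
	// while casting to a narrower width truncates in both cases.
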
	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (chunk::type(1) << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(chunk::type(1) << offset_bits);
		data[offset_chunks] |= value ? chunk::type(1) << offset_bits : 0;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	bool is_neg() const {
		return data[chunks - 1] & (chunk::type(1) << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			size_t top_chunk_idx = (Bits - shift_bits) / chunk::bits;
			size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
			for (size_t n = top_chunk_idx + 1; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_bits != 0)
				result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}

	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			if (x == 0) {
				count += (n == 0 ? Bits % chunk::bits : chunk::bits);
			} else {
				// This loop implements the find first set idiom as recognized by LLVM.
				for (; x != 0; count++)
					x >>= 1;
			}
		}
		return count;
	}

	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			if (result.chunks - 1 == n)
				result.data[result.chunks - 1] &= result.msb_mask;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		return {result, carry};
	}

	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}

	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};

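// A short sketch of the fixed-width arithmetic semantics defined above: all results are truncated
// to Bits, so addition wraps around silently, and the comparisons take the declared signedness
// into account rather than the bit pattern alone:
//
//   value<4> a { 0xfu }, b { 0x1u };
//   a.add(b);   // 4'0: the carry out of bit 3 is discarded
//   a.ucmp(b);  // false: 15 u< 1 does not hold
//   a.scmp(b);  // true: as a signed 4-bit value, a is -1, and -1 s< 1
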
// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets result in a use-after-free:
//
//   const auto &a = val.slice<7,0>().slice<1>();
//   value<1> b = a;
//
//   auto &&c = val.slice<7,0>().slice<1>();
//   c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined. (A usage sketch follows the expr_base definition below.)
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};

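// A sketch of the two rules above in practice; only `value<W>` is ever named, and every expression
// object lives and dies within a single statement:
//
//   value<8> pair { 0x12u };
//   value<4> lo = pair.slice<3, 0>().val();                  // rvalue slice, copied out
//   pair.slice<7, 4>() = value<4> { 0x3u };                  // lvalue slice, read-modify-write
//   value<12> wide = pair.concat(value<4> { 0x7u }).val();   // concatenation, ms bits from `pair`
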
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}

template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		return curr.template get<IntegerT>();
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		next.template set<IntegerT>(other);
	}

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};

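// A sketch of the two-phase update discipline for wires: computations write into `next`, and the
// change only becomes observable in `curr` once commit() is called:
//
//   wire<8> w;
//   w.next = value<8> { 0x42u };
//   // w.curr is still 8'00 here
//   bool changed = w.commit();  // true; now w.curr == 8'42
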
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	// The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is to stuff it
	// into a plain array. You'd think a std::initializer_list would work here, but it doesn't, because you can't
	// construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
	// first copied on the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		data.resize(depth);
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write &b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};

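// A sketch of the queue-and-commit discipline described above: two write ports hitting the same
// address during one delta cycle are resolved by priority at commit time, not by call order:
//
//   memory<8> mem(/*depth=*/16);
//   mem.update(3, value<8> { 0x11u }, /*mask=*/value<8> { 0xffu }, /*priority=*/1);
//   mem.update(3, value<8> { 0x22u }, /*mask=*/value<8> { 0xffu }, /*priority=*/0);
//   mem.commit();  // mem[3] == 8'11: the priority 1 write is applied last
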
struct metadata {
	const enum {
		MISSING = 0,
		UINT = 1,
		SINT = 2,
		STRING = 3,
		DOUBLE = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned uint_value = 0;
	const signed sint_value = 0;
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	// Object types.
	enum : uint32_t {
		VALUE = CXXRTL_VALUE,
		WIRE = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS = CXXRTL_ALIAS,
	};

	// Object flags.
	enum : uint32_t {
		INPUT = CXXRTL_INPUT,
		OUTPUT = CXXRTL_OUTPUT,
		INOUT = CXXRTL_INOUT,
		DRIVEN_SYNC = CXXRTL_DRIVEN_SYNC,
		DRIVEN_COMB = CXXRTL_DRIVEN_COMB,
		UNDRIVEN = CXXRTL_UNDRIVEN,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.data;
		next = item.data;
	}

	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = WIRE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.curr.data;
		next = item.next.data;
	}

	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type = MEMORY;
		flags = 0;
		width = Width;
		lsb_at = 0;
		depth = item.data.size();
		zero_at = zero_offset;
		curr = item.data.empty() ? nullptr : item.data[0].data;
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.curr.data);
		next = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}

	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};

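// A sketch of populating a debug_items table by hand (generated designs do this in debug_info()):
//
//   value<8> counter;
//   debug_items items;
//   items.add("top counter", debug_item(counter));
//   items.at("top counter").width;  // 8
//
// Note that the hierarchical name used here is illustrative; the exact naming of debug items is
// determined by `write_cxxrtl`.
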
struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};

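// A sketch of driving a generated design with step(); `cxxrtl_design::p_top` and its member
// `p_clk` are hypothetical names standing in for whatever `write_cxxrtl` emits for a design
// with a `clk` input:
//
//   cxxrtl_design::p_top top;
//   for (int cycle = 0; cycle < 100; cycle++) {
//     top.p_clk.set<bool>(false); top.step();
//     top.p_clk.set<bool>(true);  top.step();
//   }
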
} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys's arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary-width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
namespace cxxrtl_yosys {

using namespace cxxrtl;

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) && bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) || bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

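// Note that divmod_ss implements truncating signed division, matching Verilog semantics: the
// quotient is rounded towards zero and the remainder takes the sign of the dividend. For example
// (a sketch), divmod_ss<4> of 4'b1001 (-7) by 4'b0010 (2) yields quotient -3 and remainder -1.
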
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};

} // namespace cxxrtl_yosys

#endif