/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must always be inlined for best performance.
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

namespace cxxrtl {

// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and the platform register size because, when arithmetic on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;

template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};
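
// For orientation (an illustrative note, not part of the library): with the 32-bit chunk_t above,
// a 40-bit value<40> (defined below) is stored in two chunks, since (40 + 31) / 32 == 2, and its
// msb_mask keeps only the 40 % 32 == 8 valid bits of the topmost chunk, i.e. msb_mask == 0x000000ff.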

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}
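
	// A sketch (not part of the library) of how these width-changing helpers behave;
	// the operand is hypothetical:
	//
	//   value<8> v{0xabu};   // 8'b10101011
	//   v.trunc<4>();        // 4'b1011: keeps the 4 least significant bits
	//   v.zext<12>();        // 12'h0ab: adds zero bits on the left
	//   v.sext<12>();        // 12'hfab: replicates the sign bit on the left
	//   v.rtrunc<4>();       // 4'b1010: keeps the 4 most significant bits
	//   v.rzext<12>();       // 12'hab0: adds zero bits on the right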

	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}
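
	// For example (illustrative only): given v == 8'h00, v.blit<5, 2>(value<4>{0xfu})
	// returns 8'b00111100, i.e. bits 5 through 2 are replaced by the source while the
	// other bits keep their original values.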

	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}
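
	// A usage sketch (not part of the library): zcast()/scast() select extension or
	// truncation at compile time, so generated code can resize a value without knowing
	// whether the target width is larger or smaller:
	//
	//   value<4> v{0x9u};    // 4'b1001
	//   v.zcast<8>();        // 8'h09: zero-extended
	//   v.scast<8>();        // 8'hf9: sign-extended
	//   v.scast<2>();        // 2'b01: truncated, same as zcast<2>()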

	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(1 << offset_bits);
		data[offset_chunks] |= value ? 1 << offset_bits : 0;
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_neg() const {
		return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			size_t top_chunk_idx = (Bits - shift_bits) / chunk::bits;
			size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
			for (size_t n = top_chunk_idx + 1; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_bits != 0)
				result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}
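
	// For example (illustrative only), shift amounts are themselves values, matching
	// the generated code:
	//
	//   value<8> v{0x90u};       // 8'b10010000
	//   v.shl(value<3>{1u});     // 8'h20: bits shifted out on the left are lost
	//   v.shr(value<3>{4u});     // 8'h09: logical shift right fills with zeros
	//   v.sshr(value<3>{4u});    // 8'hf9: arithmetic shift right replicates the MSB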

	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			if (x == 0) {
				count += (n == 0 ? Bits % chunk::bits : chunk::bits);
			} else {
				// This loop implements the find first set idiom as recognized by LLVM.
				for (; x != 0; count++)
					x >>= 1;
			}
		}
		return count;
	}

	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return {result, carry};
	}

	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}
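
	// A small sketch (not part of the library) of the ALU-derived helpers; the
	// operands are hypothetical:
	//
	//   value<8> a{0xfbu}, b{0x05u};  // -5 and 5 if interpreted as signed
	//   a.add(b);                     // 8'h00, carry out discarded
	//   a.sub(b);                     // 8'hf6, i.e. -10 modulo 2^8
	//   a.ucmp(b);                    // false: 251 u< 5 does not hold
	//   a.scmp(b);                    // true:  -5 s< 5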

	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};

// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets perform use-after-free:
//
//   const auto &a = val.slice<7,0>().slice<1>();
//   value<1> b = a;
//
//   auto &&c = val.slice<7,0>().slice<1>();
//   c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};
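
// Following the rules above, a safe usage sketch (illustrative only, not part of the
// library): slices and concatenations are consumed within a single statement, and only
// `value<W>` is ever named explicitly:
//
//   value<8> v{0xa5u};
//   value<4> hi = v.slice<7, 4>().val();   // rvalue slice, forced to value<4>
//   v.slice<3, 0>() = value<4>{0xcu};      // lvalue slice, assigned in place
//   value<8> w = v.slice<3, 0>().concat(v.slice<7, 4>()).val();  // nibble swap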

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}
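
// For example (illustrative only), this operator prints the width, a tick, and the
// chunks in zero-padded hexadecimal, most significant first:
//
//   std::cout << value<12>{0xabcu};  // prints "12'abc"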

template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};
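
// A usage sketch (not part of the library): generated code computes into `next`, and
// commit() models the state element updating at the end of a delta cycle:
//
//   wire<8> reg{0x00u};
//   reg.next = reg.curr.add(value<8>{1u});
//   bool changed = reg.commit();     // true; reg.curr is now 8'h01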

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	// The only way to get the compiler to put the initializer in .rodata and not copy it on the stack is to stuff it
	// into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
	// construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
	// first copied on the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		data.resize(depth);
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in the priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write &b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};
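
// For example (an illustrative sketch; the names are hypothetical): two writes to the
// same address queued during eval are resolved by priority at commit time:
//
//   memory<8> ram(/*depth=*/16);
//   ram.update(3, value<8>{0x11u}, /*mask=*/value<8>{0xffu}, /*priority=*/0);
//   ram.update(3, value<8>{0x22u}, /*mask=*/value<8>{0xffu}, /*priority=*/1);
//   ram.commit();                    // ram[3] == 8'h22: the higher priority wins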

struct metadata {
	const enum {
		MISSING = 0,
		UINT    = 1,
		SINT    = 2,
		STRING  = 3,
		DOUBLE  = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned    uint_value   = 0;
	const signed      sint_value   = 0;
	const std::string string_value = "";
	const double      double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	enum : uint32_t {
		VALUE  = CXXRTL_VALUE,
		WIRE   = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS  = CXXRTL_ALIAS,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.data;
		next    = item.data;
	}

	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = VALUE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t*>(item.data);
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = WIRE;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = item.curr.data;
		next    = item.next.data;
	}

	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type    = MEMORY;
		width   = Width;
		lsb_at  = 0;
		depth   = item.data.size();
		zero_at = zero_offset;
		curr    = item.data.empty() ? nullptr : item.data[0].data;
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t*>(item.data);
		next    = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type    = ALIAS;
		width   = Bits;
		lsb_at  = lsb_offset;
		depth   = 1;
		zero_at = 0;
		curr    = const_cast<chunk_t*>(item.curr.data);
		next    = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}

	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};
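
// A registration sketch (not part of the library; `top` and its members are
// hypothetical). Generated modules populate a table like this in debug_info():
//
//   debug_items items;
//   items.add("top counter", debug_item(top.counter));          // e.g. a wire<8>
//   items.add("top sum", debug_item(debug_alias(), top.sum));   // alias of a value<8>
//   const debug_item &it = items.at("top counter");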

struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};
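
// A minimal driver sketch (illustrative only; `design` stands for a hypothetical class
// generated by `write_cxxrtl`, with a `clk` wire). step() runs eval/commit delta cycles
// until the design converges:
//
//   design top;
//   for (int cycle = 0; cycle < 100; cycle++) {
//     top.clk.next = value<1>{0u}; top.step();
//     top.clk.next = value<1>{1u}; top.step();
//   }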

} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys's arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary-width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
namespace cxxrtl_yosys {

using namespace cxxrtl;
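
// For example (illustrative only): an RTLIL `$add` cell with an unsigned 4-bit A port,
// an unsigned 6-bit B port, and an 8-bit Y port is translated to a call shaped like
// `add_uu<8>(a, b)`, which zero-extends both operands to 8 bits before adding them.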

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}
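
// For example (illustrative only): with a == value<4>{0xau} (4'b1010), reduce_and<1>(a)
// is 1'b0, reduce_or<1>(a) is 1'b1, and reduce_xor<1>(a) is 1'b0 (two bits set).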

// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}
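
// For example (illustrative only): with a signed shift amount, a negative b reverses
// the shift direction, so shift_us<8>(value<8>{0x01u}, value<4>{0xeu}) shifts left by
// 2 (since 4'he is -2) and yields 8'h04.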

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}
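
// A worked example of the long division above (illustrative only): for 8'd100 divided
// by 8'd7, the divisor is first shifted left by 4 so that its most significant set bit
// lines up with the dividend's; five shift-and-subtract steps then produce quotient
// 8'd14 and remainder 8'd2.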

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};
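
// For example (illustrative only): for a memory declared with offset 16 and depth 8,
// memory_index(value<6>{18u}, 16, 8) gives valid == true and index == 2, while an
// address of 30 decodes as invalid.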

} // namespace cxxrtl_yosys

#endif