cxxrtl: fix sshr sign-extension.
[yosys.git] / backends / cxxrtl / cxxrtl.h
1 /*
2 * yosys -- Yosys Open SYnthesis Suite
3 *
4 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19 // This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
20 //
21 // The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
22 // composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
23 // to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
24 // to unwrap the abstraction and generate efficient code.
25
26 #ifndef CXXRTL_H
27 #define CXXRTL_H
28
29 #include <cstddef>
30 #include <cstdint>
31 #include <cassert>
32 #include <limits>
33 #include <type_traits>
34 #include <tuple>
35 #include <vector>
36 #include <map>
37 #include <algorithm>
38 #include <memory>
39 #include <sstream>
40
41 #include <backends/cxxrtl/cxxrtl_capi.h>
42
43 // CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
44 // It generates a lot of specialized template functions with relatively large bodies that, when inlined
45 // into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
46 // Because of this, most of the CXXRTL runtime must always be inlined for best performance.
47 #ifndef __has_attribute
48 # define __has_attribute(x) 0
49 #endif
50 #if __has_attribute(always_inline)
51 #define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
52 #else
53 #define CXXRTL_ALWAYS_INLINE inline
54 #endif
55
56 namespace cxxrtl {
57
58 // All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
59 // is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
60 // and introspecting the simulation in Python.
61 //
62 // It is practical to use chunk sizes between 32 bits and the platform register size because when arithmetic on
63 // narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
64 // However, (a) most of our operations do not change those bits in the first place because of invariants that are
65 // invisible to the compiler, and (b) we often operate on non-power-of-2 widths and have to clear the high bits anyway.
66 // Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
67 // clobbered results in simpler generated code.
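// For example, with 32-bit chunks a value<40> occupies two chunks: bits [31:0] live in data[0] and
// bits [39:32] in the low bits of data[1] (see msb_mask below for how the unused high bits are handled).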
68 typedef uint32_t chunk_t;
69
70 template<typename T>
71 struct chunk_traits {
72 static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
73 "chunk type must be an unsigned integral type");
74 using type = T;
75 static constexpr size_t bits = std::numeric_limits<T>::digits;
76 static constexpr T mask = std::numeric_limits<T>::max();
77 };
78
79 template<class T>
80 struct expr_base;
81
82 template<size_t Bits>
83 struct value : public expr_base<value<Bits>> {
84 static constexpr size_t bits = Bits;
85
86 using chunk = chunk_traits<chunk_t>;
87 static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
88 : chunk::mask >> (chunk::bits - (Bits % chunk::bits));
89
90 static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
91 chunk::type data[chunks] = {};
92
93 value() = default;
94 template<typename... Init>
95 explicit constexpr value(Init ...init) : data{init...} {}
96
97 value(const value<Bits> &) = default;
98 value(value<Bits> &&) = default;
99 value<Bits> &operator=(const value<Bits> &) = default;
100
101 // A (no-op) helper that forces the cast to value<>.
102 CXXRTL_ALWAYS_INLINE
103 const value<Bits> &val() const {
104 return *this;
105 }
106
107 std::string str() const {
108 std::stringstream ss;
109 ss << *this;
110 return ss.str();
111 }
112
113 // Operations with compile-time parameters.
114 //
115 // These operations are used to implement slicing, concatenation, and blitting.
116 // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
117 // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
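// For example, for an 8-bit value holding 0xab, zext<12>() yields 0x0ab (bits added on the left),
// while rzext<12>() yields 0xab0 (bits added on the right).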
118 template<size_t NewBits>
119 CXXRTL_ALWAYS_INLINE
120 value<NewBits> trunc() const {
121 static_assert(NewBits <= Bits, "trunc() may not increase width");
122 value<NewBits> result;
123 for (size_t n = 0; n < result.chunks; n++)
124 result.data[n] = data[n];
125 result.data[result.chunks - 1] &= result.msb_mask;
126 return result;
127 }
128
129 template<size_t NewBits>
130 CXXRTL_ALWAYS_INLINE
131 value<NewBits> zext() const {
132 static_assert(NewBits >= Bits, "zext() may not decrease width");
133 value<NewBits> result;
134 for (size_t n = 0; n < chunks; n++)
135 result.data[n] = data[n];
136 return result;
137 }
138
139 template<size_t NewBits>
140 CXXRTL_ALWAYS_INLINE
141 value<NewBits> sext() const {
142 static_assert(NewBits >= Bits, "sext() may not decrease width");
143 value<NewBits> result;
144 for (size_t n = 0; n < chunks; n++)
145 result.data[n] = data[n];
146 if (is_neg()) {
147 result.data[chunks - 1] |= ~msb_mask;
148 for (size_t n = chunks; n < result.chunks; n++)
149 result.data[n] = chunk::mask;
150 result.data[result.chunks - 1] &= result.msb_mask;
151 }
152 return result;
153 }
154
155 template<size_t NewBits>
156 CXXRTL_ALWAYS_INLINE
157 value<NewBits> rtrunc() const {
158 static_assert(NewBits <= Bits, "rtrunc() may not increase width");
159 value<NewBits> result;
160 constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
161 constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
162 chunk::type carry = 0;
163 if (shift_chunks + result.chunks < chunks) {
164 carry = (shift_bits == 0) ? 0
165 : data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
166 }
167 for (size_t n = result.chunks; n > 0; n--) {
168 result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
169 carry = (shift_bits == 0) ? 0
170 : data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
171 }
172 return result;
173 }
174
175 template<size_t NewBits>
176 CXXRTL_ALWAYS_INLINE
177 value<NewBits> rzext() const {
178 static_assert(NewBits >= Bits, "rzext() may not decrease width");
179 value<NewBits> result;
180 constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
181 constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
182 chunk::type carry = 0;
183 for (size_t n = 0; n < chunks; n++) {
184 result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
185 carry = (shift_bits == 0) ? 0
186 : data[n] >> (chunk::bits - shift_bits);
187 }
188 if (carry != 0)
189 result.data[result.chunks - 1] = carry;
190 return result;
191 }
192
193 // Bit blit operation, i.e. a partial read-modify-write.
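// For example, value<8>{0x0fu}.blit<7, 4>(value<4>{0xau}) replaces bits [7:4] and yields 0xaf.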
194 template<size_t Stop, size_t Start>
195 CXXRTL_ALWAYS_INLINE
196 value<Bits> blit(const value<Stop - Start + 1> &source) const {
197 static_assert(Stop >= Start, "blit() may not reverse bit order");
198 constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
199 constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
200 : (chunk::mask << (Stop % chunk::bits + 1));
201 value<Bits> masked = *this;
202 if (Start / chunk::bits == Stop / chunk::bits) {
203 masked.data[Start / chunk::bits] &= stop_mask | start_mask;
204 } else {
205 masked.data[Start / chunk::bits] &= start_mask;
206 for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
207 masked.data[n] = 0;
208 masked.data[Stop / chunk::bits] &= stop_mask;
209 }
210 value<Bits> shifted = source
211 .template rzext<Stop + 1>()
212 .template zext<Bits>();
213 return masked.bit_or(shifted);
214 }
215
216 // Helpers for selecting an extending or truncating operation depending on whether the result is wider or narrower
217 // than the operand. In C++17 these can be replaced with `if constexpr`.
218 template<size_t NewBits, typename = void>
219 struct zext_cast {
220 CXXRTL_ALWAYS_INLINE
221 value<NewBits> operator()(const value<Bits> &val) {
222 return val.template zext<NewBits>();
223 }
224 };
225
226 template<size_t NewBits>
227 struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
228 CXXRTL_ALWAYS_INLINE
229 value<NewBits> operator()(const value<Bits> &val) {
230 return val.template trunc<NewBits>();
231 }
232 };
233
234 template<size_t NewBits, typename = void>
235 struct sext_cast {
236 CXXRTL_ALWAYS_INLINE
237 value<NewBits> operator()(const value<Bits> &val) {
238 return val.template sext<NewBits>();
239 }
240 };
241
242 template<size_t NewBits>
243 struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
244 CXXRTL_ALWAYS_INLINE
245 value<NewBits> operator()(const value<Bits> &val) {
246 return val.template trunc<NewBits>();
247 }
248 };
249
250 template<size_t NewBits>
251 CXXRTL_ALWAYS_INLINE
252 value<NewBits> zcast() const {
253 return zext_cast<NewBits>()(*this);
254 }
255
256 template<size_t NewBits>
257 CXXRTL_ALWAYS_INLINE
258 value<NewBits> scast() const {
259 return sext_cast<NewBits>()(*this);
260 }
261
262 // Operations with run-time parameters (offsets, amounts, etc).
263 //
264 // These operations are used for computations.
265 bool bit(size_t offset) const {
266 return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
267 }
268
269 void set_bit(size_t offset, bool value = true) {
270 size_t offset_chunks = offset / chunk::bits;
271 size_t offset_bits = offset % chunk::bits;
272 data[offset_chunks] &= ~(1 << offset_bits);
273 data[offset_chunks] |= value ? 1 << offset_bits : 0;
274 }
275
276 bool is_zero() const {
277 for (size_t n = 0; n < chunks; n++)
278 if (data[n] != 0)
279 return false;
280 return true;
281 }
282
283 explicit operator bool() const {
284 return !is_zero();
285 }
286
287 bool is_neg() const {
288 return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
289 }
290
291 bool operator ==(const value<Bits> &other) const {
292 for (size_t n = 0; n < chunks; n++)
293 if (data[n] != other.data[n])
294 return false;
295 return true;
296 }
297
298 bool operator !=(const value<Bits> &other) const {
299 return !(*this == other);
300 }
301
302 value<Bits> bit_not() const {
303 value<Bits> result;
304 for (size_t n = 0; n < chunks; n++)
305 result.data[n] = ~data[n];
306 result.data[chunks - 1] &= msb_mask;
307 return result;
308 }
309
310 value<Bits> bit_and(const value<Bits> &other) const {
311 value<Bits> result;
312 for (size_t n = 0; n < chunks; n++)
313 result.data[n] = data[n] & other.data[n];
314 return result;
315 }
316
317 value<Bits> bit_or(const value<Bits> &other) const {
318 value<Bits> result;
319 for (size_t n = 0; n < chunks; n++)
320 result.data[n] = data[n] | other.data[n];
321 return result;
322 }
323
324 value<Bits> bit_xor(const value<Bits> &other) const {
325 value<Bits> result;
326 for (size_t n = 0; n < chunks; n++)
327 result.data[n] = data[n] ^ other.data[n];
328 return result;
329 }
330
331 value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
332 return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
333 }
334
335 template<size_t AmountBits>
336 value<Bits> shl(const value<AmountBits> &amount) const {
337 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
338 static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
339 // Detect shifts definitely larger than Bits early.
340 for (size_t n = 1; n < amount.chunks; n++)
341 if (amount.data[n] != 0)
342 return {};
343 // Past this point we can use the least significant chunk as the shift size.
344 size_t shift_chunks = amount.data[0] / chunk::bits;
345 size_t shift_bits = amount.data[0] % chunk::bits;
346 if (shift_chunks >= chunks)
347 return {};
348 value<Bits> result;
349 chunk::type carry = 0;
350 for (size_t n = 0; n < chunks - shift_chunks; n++) {
351 result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
352 carry = (shift_bits == 0) ? 0
353 : data[n] >> (chunk::bits - shift_bits);
354 }
355 return result;
356 }
357
358 template<size_t AmountBits, bool Signed = false>
359 value<Bits> shr(const value<AmountBits> &amount) const {
360 // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
361 static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
362 // Detect shifts definitely larger than Bits early.
363 for (size_t n = 1; n < amount.chunks; n++)
364 if (amount.data[n] != 0)
365 return {};
366 // Past this point we can use the least significant chunk as the shift size.
367 size_t shift_chunks = amount.data[0] / chunk::bits;
368 size_t shift_bits = amount.data[0] % chunk::bits;
369 if (shift_chunks >= chunks)
370 return {};
371 value<Bits> result;
372 chunk::type carry = 0;
373 for (size_t n = 0; n < chunks - shift_chunks; n++) {
374 result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
375 carry = (shift_bits == 0) ? 0
376 : data[chunks - 1 - n] << (chunk::bits - shift_bits);
377 }
378 if (Signed && is_neg()) {
379 size_t top_chunk_idx = (Bits - shift_bits) / chunk::bits;
380 size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
381 for (size_t n = top_chunk_idx + 1; n < chunks; n++)
382 result.data[n] = chunk::mask;
383 if (shift_bits != 0)
384 result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
385 }
386 return result;
387 }
388
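// Arithmetic shift right: like shr(), but the vacated most significant bits are filled with copies
// of the sign bit rather than with zeroes.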
389 template<size_t AmountBits>
390 value<Bits> sshr(const value<AmountBits> &amount) const {
391 return shr<AmountBits, /*Signed=*/true>(amount);
392 }
393
394 size_t ctpop() const {
395 size_t count = 0;
396 for (size_t n = 0; n < chunks; n++) {
397 // This loop implements the population count idiom as recognized by LLVM and GCC.
398 for (chunk::type x = data[n]; x != 0; count++)
399 x = x & (x - 1);
400 }
401 return count;
402 }
403
404 size_t ctlz() const {
405 size_t count = 0;
406 for (size_t n = 0; n < chunks; n++) {
407 chunk::type x = data[chunks - 1 - n];
408 if (x == 0) {
409 count += (n == 0 ? Bits % chunk::bits : chunk::bits);
410 } else {
411 // This loop implements the find first set idiom as recognized by LLVM.
412 for (; x != 0; count++)
413 x >>= 1;
414 }
415 }
416 return count;
417 }
418
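// Carry-propagating add/subtract core: computes a + (Invert ? ~b : b) + CarryIn chunk by chunk.
// With Invert and CarryIn both set this is two's complement subtraction, and the returned carry
// serves as the "no borrow" flag that ucmp() below relies on.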
419 template<bool Invert, bool CarryIn>
420 std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
421 value<Bits> result;
422 bool carry = CarryIn;
423 for (size_t n = 0; n < result.chunks; n++) {
424 result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
425 carry = (result.data[n] < data[n]) ||
426 (result.data[n] == data[n] && carry);
427 }
428 result.data[result.chunks - 1] &= result.msb_mask;
429 return {result, carry};
430 }
431
432 value<Bits> add(const value<Bits> &other) const {
433 return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
434 }
435
436 value<Bits> sub(const value<Bits> &other) const {
437 return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
438 }
439
440 value<Bits> neg() const {
441 return value<Bits> { 0u }.sub(*this);
442 }
443
444 bool ucmp(const value<Bits> &other) const {
445 bool carry;
446 std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
447 return !carry; // a.ucmp(b) ≡ a u< b
448 }
449
450 bool scmp(const value<Bits> &other) const {
451 value<Bits> result;
452 bool carry;
453 std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
454 bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
455 return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
456 }
457 };
458
459 // Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
460 template<class T, size_t Stop, size_t Start>
461 struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
462 static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
463 static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
464 static constexpr size_t bits = Stop - Start + 1;
465
466 T &expr;
467
468 slice_expr(T &expr) : expr(expr) {}
469 slice_expr(const slice_expr<T, Stop, Start> &) = delete;
470
471 CXXRTL_ALWAYS_INLINE
472 operator value<bits>() const {
473 return static_cast<const value<T::bits> &>(expr)
474 .template rtrunc<T::bits - Start>()
475 .template trunc<bits>();
476 }
477
478 CXXRTL_ALWAYS_INLINE
479 slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
480 // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
481 expr = static_cast<const value<T::bits> &>(expr)
482 .template blit<Stop, Start>(rhs);
483 return *this;
484 }
485
486 // A helper that forces the cast to value<>, which allows deduction to work.
487 CXXRTL_ALWAYS_INLINE
488 value<bits> val() const {
489 return static_cast<const value<bits> &>(*this);
490 }
491 };
492
493 // Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
494 template<class T, class U>
495 struct concat_expr : public expr_base<concat_expr<T, U>> {
496 static constexpr size_t bits = T::bits + U::bits;
497
498 T &ms_expr;
499 U &ls_expr;
500
501 concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
502 concat_expr(const concat_expr<T, U> &) = delete;
503
504 CXXRTL_ALWAYS_INLINE
505 operator value<bits>() const {
506 value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
507 .template rzext<bits>();
508 value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
509 .template zext<bits>();
510 return ms_shifted.bit_or(ls_extended);
511 }
512
513 CXXRTL_ALWAYS_INLINE
514 concat_expr<T, U> &operator=(const value<bits> &rhs) {
515 ms_expr = rhs.template rtrunc<T::bits>();
516 ls_expr = rhs.template trunc<U::bits>();
517 return *this;
518 }
519
520 // A helper that forces the cast to value<>, which allows deduction to work.
521 CXXRTL_ALWAYS_INLINE
522 value<bits> val() const {
523 return static_cast<const value<bits> &>(*this);
524 }
525 };
526
527 // Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
528 //
529 // Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
530 // they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
531 // these snippets perform use-after-free:
532 //
533 // const auto &a = val.slice<7,0>().slice<1>();
534 // value<1> b = a;
535 //
536 // auto &&c = val.slice<7,0>().slice<1>();
537 // c = value<1>{1u};
538 //
539 // An easy way to write code using slices and concatenations safely is to follow two simple rules:
540 // * Never explicitly name any type except `value<W>` or `const value<W> &`.
541 // * Never use a `const auto &` or `auto &&` in any such expression.
542 // Then, any code that compiles will be well-defined.
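// For example, rewriting the snippets above so that no expression object outlives its statement is well-defined:
//
//     value<1> b = val.slice<7,0>().slice<1>();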
543 template<class T>
544 struct expr_base {
545 template<size_t Stop, size_t Start = Stop>
546 CXXRTL_ALWAYS_INLINE
547 slice_expr<const T, Stop, Start> slice() const {
548 return {*static_cast<const T *>(this)};
549 }
550
551 template<size_t Stop, size_t Start = Stop>
552 CXXRTL_ALWAYS_INLINE
553 slice_expr<T, Stop, Start> slice() {
554 return {*static_cast<T *>(this)};
555 }
556
557 template<class U>
558 CXXRTL_ALWAYS_INLINE
559 concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
560 return {*static_cast<const T *>(this), other};
561 }
562
563 template<class U>
564 CXXRTL_ALWAYS_INLINE
565 concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
566 return {*static_cast<T *>(this), other};
567 }
568 };
569
570 template<size_t Bits>
571 std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
572 auto old_flags = os.flags(std::ios::right);
573 auto old_width = os.width(0);
574 auto old_fill = os.fill('0');
575 os << val.bits << '\'' << std::hex;
576 for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
577 if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
578 os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
579 else
580 os.width((value<Bits>::chunk::bits + 3) / 4);
581 os << val.data[n];
582 }
583 os.fill(old_fill);
584 os.width(old_width);
585 os.flags(old_flags);
586 return os;
587 }
588
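// A wire stores both the current value (`curr`) and the value it will take on the next delta cycle (`next`);
// commit() copies `next` into `curr` and reports whether the wire changed.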
589 template<size_t Bits>
590 struct wire {
591 static constexpr size_t bits = Bits;
592
593 value<Bits> curr;
594 value<Bits> next;
595
596 wire() = default;
597 constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
598 template<typename... Init>
599 explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}
600
601 wire(const wire<Bits> &) = delete;
602 wire(wire<Bits> &&) = default;
603 wire<Bits> &operator=(const wire<Bits> &) = delete;
604
605 bool commit() {
606 if (curr != next) {
607 curr = next;
608 return true;
609 }
610 return false;
611 }
612 };
613
614 template<size_t Bits>
615 std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
616 os << val.curr;
617 return os;
618 }
619
620 template<size_t Width>
621 struct memory {
622 std::vector<value<Width>> data;
623
624 size_t depth() const {
625 return data.size();
626 }
627
628 memory() = delete;
629 explicit memory(size_t depth) : data(depth) {}
630
631 memory(const memory<Width> &) = delete;
632 memory<Width> &operator=(const memory<Width> &) = delete;
633
634 // The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is to stuff it
635 // into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
636 // construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
637 // first copied on the stack (probably overflowing it) and then again into `data`.
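// For example, a 256-word, 8-bit wide memory with four words initialized starting at address 0x10 could be
// declared along these lines:
//     memory<8> rom(256, memory<8>::init<4> { 0x10, { value<8>{0xdeu}, value<8>{0xadu}, value<8>{0xbeu}, value<8>{0xefu} } });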
638 template<size_t Size>
639 struct init {
640 size_t offset;
641 value<Width> data[Size];
642 };
643
644 template<size_t... InitSize>
645 explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
646 data.resize(depth);
647 // This utterly reprehensible construct is the most reasonable way to apply a function to every element
648 // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
649 auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
650 (void)_;
651 }
652
653 // An operator for direct memory reads. May be used at any time during the simulation.
654 const value<Width> &operator [](size_t index) const {
655 assert(index < data.size());
656 return data[index];
657 }
658
659 // An operator for direct memory writes. May only be used before the simulation is started. If used
660 // after the simulation is started, the design may malfunction.
661 value<Width> &operator [](size_t index) {
662 assert(index < data.size());
663 return data[index];
664 }
665
666 // A simple way to make a writable memory would be to use an array of wires instead of an array of values.
667 // However, there are two significant downsides to this approach: first, it has large overhead (2× space
668 // overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
669 // priorities. Although in principle write ports could be ordered or conditionally enabled in generated
670 // code based on their priorities and selected addresses, the feedback arc set problem is computationally
671 // expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
672 // a particular write port evaluation order.
673 //
674 // The approach used here instead is to queue writes into a buffer during the eval phase, then perform
675 // the writes during the commit phase in priority order. This approach has low overhead, with both space
676 // and time proportional to the number of write ports. Because virtually every memory in a practical design
677 // has at most two write ports, a linear search on every write is the fastest and simplest approach.
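// For example, if two ports write different values to the same address during eval(), the write queued
// with the higher priority argument to update() is applied last during commit() and therefore wins.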
678 struct write {
679 size_t index;
680 value<Width> val;
681 value<Width> mask;
682 int priority;
683 };
684 std::vector<write> write_queue;
685
686 void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
687 assert(index < data.size());
688 // Queue up the write while keeping the queue sorted by priority.
689 write_queue.insert(
690 std::upper_bound(write_queue.begin(), write_queue.end(), priority,
691 [](const int a, const write& b) { return a < b.priority; }),
692 write { index, val, mask, priority });
693 }
694
695 bool commit() {
696 bool changed = false;
697 for (const write &entry : write_queue) {
698 value<Width> elem = data[entry.index];
699 elem = elem.update(entry.val, entry.mask);
700 changed |= (data[entry.index] != elem);
701 data[entry.index] = elem;
702 }
703 write_queue.clear();
704 return changed;
705 }
706 };
707
708 struct metadata {
709 const enum {
710 MISSING = 0,
711 UINT = 1,
712 SINT = 2,
713 STRING = 3,
714 DOUBLE = 4,
715 } value_type;
716
717 // In debug mode, using the wrong .as_*() function will assert.
718 // In release mode, using the wrong .as_*() function will safely return a default value.
719 const unsigned uint_value = 0;
720 const signed sint_value = 0;
721 const std::string string_value = "";
722 const double double_value = 0.0;
723
724 metadata() : value_type(MISSING) {}
725 metadata(unsigned value) : value_type(UINT), uint_value(value) {}
726 metadata(signed value) : value_type(SINT), sint_value(value) {}
727 metadata(const std::string &value) : value_type(STRING), string_value(value) {}
728 metadata(const char *value) : value_type(STRING), string_value(value) {}
729 metadata(double value) : value_type(DOUBLE), double_value(value) {}
730
731 metadata(const metadata &) = default;
732 metadata &operator=(const metadata &) = delete;
733
734 unsigned as_uint() const {
735 assert(value_type == UINT);
736 return uint_value;
737 }
738
739 signed as_sint() const {
740 assert(value_type == SINT);
741 return sint_value;
742 }
743
744 const std::string &as_string() const {
745 assert(value_type == STRING);
746 return string_value;
747 }
748
749 double as_double() const {
750 assert(value_type == DOUBLE);
751 return double_value;
752 }
753 };
754
755 typedef std::map<std::string, metadata> metadata_map;
756
757 // Helper class to disambiguate values/wires and their aliases.
758 struct debug_alias {};
759
760 // This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
761 // Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
762 //
763 // To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
764 // in the C API, or it would not be possible to cast between the pointers to these.
765 struct debug_item : ::cxxrtl_object {
766 enum : uint32_t {
767 VALUE = CXXRTL_VALUE,
768 WIRE = CXXRTL_WIRE,
769 MEMORY = CXXRTL_MEMORY,
770 ALIAS = CXXRTL_ALIAS,
771 };
772
773 debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}
774
775 template<size_t Bits>
776 debug_item(value<Bits> &item, size_t lsb_offset = 0) {
777 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
778 "value<Bits> is not compatible with C layout");
779 type = VALUE;
780 width = Bits;
781 lsb_at = lsb_offset;
782 depth = 1;
783 zero_at = 0;
784 curr = item.data;
785 next = item.data;
786 }
787
788 template<size_t Bits>
789 debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
790 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
791 "value<Bits> is not compatible with C layout");
792 type = VALUE;
793 width = Bits;
794 lsb_at = lsb_offset;
795 depth = 1;
796 zero_at = 0;
797 curr = const_cast<chunk_t*>(item.data);
798 next = nullptr;
799 }
800
801 template<size_t Bits>
802 debug_item(wire<Bits> &item, size_t lsb_offset = 0) {
803 static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
804 sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
805 "wire<Bits> is not compatible with C layout");
806 type = WIRE;
807 width = Bits;
808 lsb_at = lsb_offset;
809 depth = 1;
810 zero_at = 0;
811 curr = item.curr.data;
812 next = item.next.data;
813 }
814
815 template<size_t Width>
816 debug_item(memory<Width> &item, size_t zero_offset = 0) {
817 static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
818 "memory<Width> is not compatible with C layout");
819 type = MEMORY;
820 width = Width;
821 lsb_at = 0;
822 depth = item.data.size();
823 zero_at = zero_offset;
824 curr = item.data.empty() ? nullptr : item.data[0].data;
825 next = nullptr;
826 }
827
828 template<size_t Bits>
829 debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
830 static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
831 "value<Bits> is not compatible with C layout");
832 type = ALIAS;
833 width = Bits;
834 lsb_at = lsb_offset;
835 depth = 1;
836 zero_at = 0;
837 curr = const_cast<chunk_t*>(item.data);
838 next = nullptr;
839 }
840
841 template<size_t Bits>
842 debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
843 static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
844 sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
845 "wire<Bits> is not compatible with C layout");
846 type = ALIAS;
847 width = Bits;
848 lsb_at = lsb_offset;
849 depth = 1;
850 zero_at = 0;
851 curr = const_cast<chunk_t*>(item.curr.data);
852 next = nullptr;
853 }
854 };
855 static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");
856
857 struct debug_items {
858 std::map<std::string, std::vector<debug_item>> table;
859
860 void add(const std::string &name, debug_item &&item) {
861 std::vector<debug_item> &parts = table[name];
862 parts.emplace_back(item);
863 std::sort(parts.begin(), parts.end(),
864 [](const debug_item &a, const debug_item &b) {
865 return a.lsb_at < b.lsb_at;
866 });
867 }
868
869 size_t count(const std::string &name) const {
870 if (table.count(name) == 0)
871 return 0;
872 return table.at(name).size();
873 }
874
875 const std::vector<debug_item> &parts_at(const std::string &name) const {
876 return table.at(name);
877 }
878
879 const debug_item &at(const std::string &name) const {
880 const std::vector<debug_item> &parts = table.at(name);
881 assert(parts.size() == 1);
882 return parts.at(0);
883 }
884
885 const debug_item &operator [](const std::string &name) const {
886 return at(name);
887 }
888 };
889
890 struct module {
891 module() {}
892 virtual ~module() {}
893
894 module(const module &) = delete;
895 module &operator=(const module &) = delete;
896
897 virtual bool eval() = 0;
898 virtual bool commit() = 0;
899
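// Repeatedly evaluates and commits the design until it settles, i.e. until eval() reports convergence
// or commit() stops producing changes; returns the number of delta cycles this took.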
900 size_t step() {
901 size_t deltas = 0;
902 bool converged = false;
903 do {
904 converged = eval();
905 deltas++;
906 } while (commit() && !converged);
907 return deltas;
908 }
909
910 virtual void debug_info(debug_items &items, std::string path = "") {
911 (void)items, (void)path;
912 }
913 };
914
915 } // namespace cxxrtl
916
917 // Internal structure used to communicate with the implementation of the C interface.
918 typedef struct _cxxrtl_toplevel {
919 std::unique_ptr<cxxrtl::module> module;
920 } *cxxrtl_toplevel;
921
922 // Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
923 // and independent of Yosys implementation details.
924 //
925 // The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
926 // functions. All of Yosys's arithmetic and logical cells perform sign or zero extension on their operands,
927 // whereas basic operations on arbitrary-width values require operands to be of the same width. These functions
928 // bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
929 // if the corresponding operand is unsigned, and `s` if it is signed.
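// For example, an $add cell with two unsigned 8-bit operands and a 16-bit result is translated into a call
// along the lines of add_uu<16>(a, b), which zero-extends both operands to 16 bits before adding them.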
930 namespace cxxrtl_yosys {
931
932 using namespace cxxrtl;
933
934 // std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
935 template<class T>
936 CXXRTL_ALWAYS_INLINE
937 constexpr T max(const T &a, const T &b) {
938 return a > b ? a : b;
939 }
940
941 // Logic operations
942 template<size_t BitsY, size_t BitsA>
943 CXXRTL_ALWAYS_INLINE
944 value<BitsY> logic_not(const value<BitsA> &a) {
945 return value<BitsY> { a ? 0u : 1u };
946 }
947
948 template<size_t BitsY, size_t BitsA, size_t BitsB>
949 CXXRTL_ALWAYS_INLINE
950 value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
951 return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
952 }
953
954 template<size_t BitsY, size_t BitsA, size_t BitsB>
955 CXXRTL_ALWAYS_INLINE
956 value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
957 return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
958 }
959
960 // Reduction operations
961 template<size_t BitsY, size_t BitsA>
962 CXXRTL_ALWAYS_INLINE
963 value<BitsY> reduce_and(const value<BitsA> &a) {
964 return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
965 }
966
967 template<size_t BitsY, size_t BitsA>
968 CXXRTL_ALWAYS_INLINE
969 value<BitsY> reduce_or(const value<BitsA> &a) {
970 return value<BitsY> { a ? 1u : 0u };
971 }
972
973 template<size_t BitsY, size_t BitsA>
974 CXXRTL_ALWAYS_INLINE
975 value<BitsY> reduce_xor(const value<BitsA> &a) {
976 return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
977 }
978
979 template<size_t BitsY, size_t BitsA>
980 CXXRTL_ALWAYS_INLINE
981 value<BitsY> reduce_xnor(const value<BitsA> &a) {
982 return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
983 }
984
985 template<size_t BitsY, size_t BitsA>
986 CXXRTL_ALWAYS_INLINE
987 value<BitsY> reduce_bool(const value<BitsA> &a) {
988 return value<BitsY> { a ? 1u : 0u };
989 }
990
991 // Bitwise operations
992 template<size_t BitsY, size_t BitsA>
993 CXXRTL_ALWAYS_INLINE
994 value<BitsY> not_u(const value<BitsA> &a) {
995 return a.template zcast<BitsY>().bit_not();
996 }
997
998 template<size_t BitsY, size_t BitsA>
999 CXXRTL_ALWAYS_INLINE
1000 value<BitsY> not_s(const value<BitsA> &a) {
1001 return a.template scast<BitsY>().bit_not();
1002 }
1003
1004 template<size_t BitsY, size_t BitsA, size_t BitsB>
1005 CXXRTL_ALWAYS_INLINE
1006 value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
1007 return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
1008 }
1009
1010 template<size_t BitsY, size_t BitsA, size_t BitsB>
1011 CXXRTL_ALWAYS_INLINE
1012 value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
1013 return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
1014 }
1015
1016 template<size_t BitsY, size_t BitsA, size_t BitsB>
1017 CXXRTL_ALWAYS_INLINE
1018 value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
1019 return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
1020 }
1021
1022 template<size_t BitsY, size_t BitsA, size_t BitsB>
1023 CXXRTL_ALWAYS_INLINE
1024 value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
1025 return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
1026 }
1027
1028 template<size_t BitsY, size_t BitsA, size_t BitsB>
1029 CXXRTL_ALWAYS_INLINE
1030 value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
1031 return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
1032 }
1033
1034 template<size_t BitsY, size_t BitsA, size_t BitsB>
1035 CXXRTL_ALWAYS_INLINE
1036 value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
1037 return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
1038 }
1039
1040 template<size_t BitsY, size_t BitsA, size_t BitsB>
1041 CXXRTL_ALWAYS_INLINE
1042 value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
1043 return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
1044 }
1045
1046 template<size_t BitsY, size_t BitsA, size_t BitsB>
1047 CXXRTL_ALWAYS_INLINE
1048 value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
1049 return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
1050 }
1051
1052 template<size_t BitsY, size_t BitsA, size_t BitsB>
1053 CXXRTL_ALWAYS_INLINE
1054 value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
1055 return a.template zcast<BitsY>().template shl(b);
1056 }
1057
1058 template<size_t BitsY, size_t BitsA, size_t BitsB>
1059 CXXRTL_ALWAYS_INLINE
1060 value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
1061 return a.template scast<BitsY>().template shl(b);
1062 }
1063
1064 template<size_t BitsY, size_t BitsA, size_t BitsB>
1065 CXXRTL_ALWAYS_INLINE
1066 value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
1067 return a.template zcast<BitsY>().template shl(b);
1068 }
1069
1070 template<size_t BitsY, size_t BitsA, size_t BitsB>
1071 CXXRTL_ALWAYS_INLINE
1072 value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
1073 return a.template scast<BitsY>().template shl(b);
1074 }
1075
1076 template<size_t BitsY, size_t BitsA, size_t BitsB>
1077 CXXRTL_ALWAYS_INLINE
1078 value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
1079 return a.template shr(b).template zcast<BitsY>();
1080 }
1081
1082 template<size_t BitsY, size_t BitsA, size_t BitsB>
1083 CXXRTL_ALWAYS_INLINE
1084 value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
1085 return a.template shr(b).template scast<BitsY>();
1086 }
1087
1088 template<size_t BitsY, size_t BitsA, size_t BitsB>
1089 CXXRTL_ALWAYS_INLINE
1090 value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
1091 return a.template shr(b).template zcast<BitsY>();
1092 }
1093
1094 template<size_t BitsY, size_t BitsA, size_t BitsB>
1095 CXXRTL_ALWAYS_INLINE
1096 value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
1097 return a.template sshr(b).template scast<BitsY>();
1098 }
1099
1100 template<size_t BitsY, size_t BitsA, size_t BitsB>
1101 CXXRTL_ALWAYS_INLINE
1102 value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
1103 return shr_uu<BitsY>(a, b);
1104 }
1105
1106 template<size_t BitsY, size_t BitsA, size_t BitsB>
1107 CXXRTL_ALWAYS_INLINE
1108 value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
1109 return shr_su<BitsY>(a, b);
1110 }
1111
1112 template<size_t BitsY, size_t BitsA, size_t BitsB>
1113 CXXRTL_ALWAYS_INLINE
1114 value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
1115 return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
1116 }
1117
1118 template<size_t BitsY, size_t BitsA, size_t BitsB>
1119 CXXRTL_ALWAYS_INLINE
1120 value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
1121 return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
1122 }
1123
1124 template<size_t BitsY, size_t BitsA, size_t BitsB>
1125 CXXRTL_ALWAYS_INLINE
1126 value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
1127 return shift_uu<BitsY>(a, b);
1128 }
1129
1130 template<size_t BitsY, size_t BitsA, size_t BitsB>
1131 CXXRTL_ALWAYS_INLINE
1132 value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
1133 return shift_su<BitsY>(a, b);
1134 }
1135
1136 template<size_t BitsY, size_t BitsA, size_t BitsB>
1137 CXXRTL_ALWAYS_INLINE
1138 value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
1139 return shift_us<BitsY>(a, b);
1140 }
1141
1142 template<size_t BitsY, size_t BitsA, size_t BitsB>
1143 CXXRTL_ALWAYS_INLINE
1144 value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
1145 return shift_ss<BitsY>(a, b);
1146 }
1147
1148 // Comparison operations
1149 template<size_t BitsY, size_t BitsA, size_t BitsB>
1150 CXXRTL_ALWAYS_INLINE
1151 value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
1152 constexpr size_t BitsExt = max(BitsA, BitsB);
1153 return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
1154 }
1155
1156 template<size_t BitsY, size_t BitsA, size_t BitsB>
1157 CXXRTL_ALWAYS_INLINE
1158 value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
1159 constexpr size_t BitsExt = max(BitsA, BitsB);
1160 return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
1161 }
1162
1163 template<size_t BitsY, size_t BitsA, size_t BitsB>
1164 CXXRTL_ALWAYS_INLINE
1165 value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
1166 constexpr size_t BitsExt = max(BitsA, BitsB);
1167 return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
1168 }
1169
1170 template<size_t BitsY, size_t BitsA, size_t BitsB>
1171 CXXRTL_ALWAYS_INLINE
1172 value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
1173 constexpr size_t BitsExt = max(BitsA, BitsB);
1174 return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
1175 }
1176
1177 template<size_t BitsY, size_t BitsA, size_t BitsB>
1178 CXXRTL_ALWAYS_INLINE
1179 value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
1180 return eq_uu<BitsY>(a, b);
1181 }
1182
1183 template<size_t BitsY, size_t BitsA, size_t BitsB>
1184 CXXRTL_ALWAYS_INLINE
1185 value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
1186 return eq_ss<BitsY>(a, b);
1187 }
1188
1189 template<size_t BitsY, size_t BitsA, size_t BitsB>
1190 CXXRTL_ALWAYS_INLINE
1191 value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
1192 return ne_uu<BitsY>(a, b);
1193 }
1194
1195 template<size_t BitsY, size_t BitsA, size_t BitsB>
1196 CXXRTL_ALWAYS_INLINE
1197 value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
1198 return ne_ss<BitsY>(a, b);
1199 }
1200
1201 template<size_t BitsY, size_t BitsA, size_t BitsB>
1202 CXXRTL_ALWAYS_INLINE
1203 value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
1204 constexpr size_t BitsExt = max(BitsA, BitsB);
1205 return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
1206 }
1207
1208 template<size_t BitsY, size_t BitsA, size_t BitsB>
1209 CXXRTL_ALWAYS_INLINE
1210 value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
1211 constexpr size_t BitsExt = max(BitsA, BitsB);
1212 return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
1213 }
1214
1215 template<size_t BitsY, size_t BitsA, size_t BitsB>
1216 CXXRTL_ALWAYS_INLINE
1217 value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
1218 constexpr size_t BitsExt = max(BitsA, BitsB);
1219 return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
1220 }
1221
1222 template<size_t BitsY, size_t BitsA, size_t BitsB>
1223 CXXRTL_ALWAYS_INLINE
1224 value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
1225 constexpr size_t BitsExt = max(BitsA, BitsB);
1226 return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
1227 }
1228
1229 template<size_t BitsY, size_t BitsA, size_t BitsB>
1230 CXXRTL_ALWAYS_INLINE
1231 value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
1232 constexpr size_t BitsExt = max(BitsA, BitsB);
1233 return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
1234 }
1235
1236 template<size_t BitsY, size_t BitsA, size_t BitsB>
1237 CXXRTL_ALWAYS_INLINE
1238 value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
1239 constexpr size_t BitsExt = max(BitsA, BitsB);
1240 return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
1241 }
1242
1243 template<size_t BitsY, size_t BitsA, size_t BitsB>
1244 CXXRTL_ALWAYS_INLINE
1245 value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
1246 constexpr size_t BitsExt = max(BitsA, BitsB);
1247 return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
1248 }
1249
1250 template<size_t BitsY, size_t BitsA, size_t BitsB>
1251 CXXRTL_ALWAYS_INLINE
1252 value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
1253 constexpr size_t BitsExt = max(BitsA, BitsB);
1254 return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
1255 }
1256
1257 // Arithmetic operations
1258 template<size_t BitsY, size_t BitsA>
1259 CXXRTL_ALWAYS_INLINE
1260 value<BitsY> pos_u(const value<BitsA> &a) {
1261 return a.template zcast<BitsY>();
1262 }
1263
1264 template<size_t BitsY, size_t BitsA>
1265 CXXRTL_ALWAYS_INLINE
1266 value<BitsY> pos_s(const value<BitsA> &a) {
1267 return a.template scast<BitsY>();
1268 }
1269
1270 template<size_t BitsY, size_t BitsA>
1271 CXXRTL_ALWAYS_INLINE
1272 value<BitsY> neg_u(const value<BitsA> &a) {
1273 return a.template zcast<BitsY>().neg();
1274 }
1275
1276 template<size_t BitsY, size_t BitsA>
1277 CXXRTL_ALWAYS_INLINE
1278 value<BitsY> neg_s(const value<BitsA> &a) {
1279 return a.template scast<BitsY>().neg();
1280 }
1281
1282 template<size_t BitsY, size_t BitsA, size_t BitsB>
1283 CXXRTL_ALWAYS_INLINE
1284 value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
1285 return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
1286 }
1287
1288 template<size_t BitsY, size_t BitsA, size_t BitsB>
1289 CXXRTL_ALWAYS_INLINE
1290 value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
1291 return a.template scast<BitsY>().add(b.template scast<BitsY>());
1292 }
1293
1294 template<size_t BitsY, size_t BitsA, size_t BitsB>
1295 CXXRTL_ALWAYS_INLINE
1296 value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
1297 return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
1298 }
1299
1300 template<size_t BitsY, size_t BitsA, size_t BitsB>
1301 CXXRTL_ALWAYS_INLINE
1302 value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
1303 return a.template scast<BitsY>().sub(b.template scast<BitsY>());
1304 }
1305
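// Multiplication via shift-and-add: for every set bit of the multiplier, the multiplicand (shifted left
// by the distance from the previously used bit) is added to the running product.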
1306 template<size_t BitsY, size_t BitsA, size_t BitsB>
1307 CXXRTL_ALWAYS_INLINE
1308 value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
1309 value<BitsY> product;
1310 value<BitsY> multiplicand = a.template zcast<BitsY>();
1311 const value<BitsB> &multiplier = b;
1312 uint32_t multiplicand_shift = 0;
1313 for (size_t step = 0; step < BitsB; step++) {
1314 if (multiplier.bit(step)) {
1315 multiplicand = multiplicand.shl(value<32> { multiplicand_shift });
1316 product = product.add(multiplicand);
1317 multiplicand_shift = 0;
1318 }
1319 multiplicand_shift++;
1320 }
1321 return product;
1322 }
1323
1324 template<size_t BitsY, size_t BitsA, size_t BitsB>
1325 CXXRTL_ALWAYS_INLINE
1326 value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
1327 value<BitsB + 1> ub = b.template sext<BitsB + 1>();
1328 if (ub.is_neg()) ub = ub.neg();
1329 value<BitsY> y = mul_uu<BitsY>(a.template scast<BitsY>(), ub);
1330 return b.is_neg() ? y.neg() : y;
1331 }
1332
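// Division via the classic restoring shift-and-subtract algorithm: the divisor is first aligned with
// the most significant set bit of the dividend, then repeatedly compared, conditionally subtracted,
// and shifted right while quotient bits are shifted in from the bottom.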
1333 template<size_t BitsY, size_t BitsA, size_t BitsB>
1334 CXXRTL_ALWAYS_INLINE
1335 std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
1336 constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
1337 value<Bits> quotient;
1338 value<Bits> dividend = a.template zext<Bits>();
1339 value<Bits> divisor = b.template zext<Bits>();
1340 if (dividend.ucmp(divisor))
1341 return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
1342 uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
1343 divisor = divisor.shl(value<32> { divisor_shift });
1344 for (size_t step = 0; step <= divisor_shift; step++) {
1345 quotient = quotient.shl(value<1> { 1u });
1346 if (!dividend.ucmp(divisor)) {
1347 dividend = dividend.sub(divisor);
1348 quotient.set_bit(0, true);
1349 }
1350 divisor = divisor.shr(value<1> { 1u });
1351 }
1352 return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
1353 }
1354
1355 template<size_t BitsY, size_t BitsA, size_t BitsB>
1356 CXXRTL_ALWAYS_INLINE
1357 std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
1358 value<BitsA + 1> ua = a.template sext<BitsA + 1>();
1359 value<BitsB + 1> ub = b.template sext<BitsB + 1>();
1360 if (ua.is_neg()) ua = ua.neg();
1361 if (ub.is_neg()) ub = ub.neg();
1362 value<BitsY> y, r;
1363 std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
1364 if (a.is_neg() != b.is_neg()) y = y.neg();
1365 if (a.is_neg()) r = r.neg();
1366 return {y, r};
1367 }
1368
1369 template<size_t BitsY, size_t BitsA, size_t BitsB>
1370 CXXRTL_ALWAYS_INLINE
1371 value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
1372 return divmod_uu<BitsY>(a, b).first;
1373 }
1374
1375 template<size_t BitsY, size_t BitsA, size_t BitsB>
1376 CXXRTL_ALWAYS_INLINE
1377 value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
1378 return divmod_ss<BitsY>(a, b).first;
1379 }
1380
1381 template<size_t BitsY, size_t BitsA, size_t BitsB>
1382 CXXRTL_ALWAYS_INLINE
1383 value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
1384 return divmod_uu<BitsY>(a, b).second;
1385 }
1386
1387 template<size_t BitsY, size_t BitsA, size_t BitsB>
1388 CXXRTL_ALWAYS_INLINE
1389 value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
1390 return divmod_ss<BitsY>(a, b).second;
1391 }
1392
1393 // Memory helper
1394 struct memory_index {
1395 bool valid;
1396 size_t index;
1397
1398 template<size_t BitsAddr>
1399 memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
1400 static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
1401 size_t offset_index = addr.data[0];
1402
1403 valid = (offset_index >= offset && offset_index < offset + depth);
1404 index = offset_index - offset;
1405 }
1406 };
1407
1408 } // namespace cxxrtl_yosys
1409
1410 #endif