/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must always be inlined for best performance.
#ifndef __has_attribute
#	define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

namespace cxxrtl {

// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and the platform register size because when arithmetic on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;

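// Illustrative example (not part of the library): with 32-bit chunks, a value<40> occupies two chunks.
// Bits [31:0] live in data[0], bits [39:32] in the low 8 bits of data[1], and the 24 unused high bits of
// data[1] are kept at zero as an invariant; msb_mask for this width is 0x000000ff.
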
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	value(value<Bits> &&) = default;
	value<Bits> &operator=(value<Bits> &&) = default;

	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Conversion operations.
	//
	// These functions ensure that a conversion is never out of range, and should always be used, if at all
	// possible, instead of direct manipulation of the `data` member. For very large types, .slice() and
	// .concat() can be used to split them into more manageable parts.
	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "get<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "get<T>() requires T to be at least as wide as the value is");
		IntegerT result = 0;
		for (size_t n = 0; n < chunks; n++)
			result |= IntegerT(data[n]) << (n * chunk::bits);
		return result;
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "set<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "set<T>() requires T to be at least as wide as the value is");
		for (size_t n = 0; n < chunks; n++)
			data[n] = (other >> (n * chunk::bits)) & chunk::mask;
	}

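	// A minimal usage sketch (illustrative only; the variable names are invented for this example):
	//
	//     value<48> v;
	//     v.set<uint64_t>(0x123456789aull); // uint64_t is wide enough for 48 bits
	//     uint64_t x = v.get<uint64_t>();   // round-trips the same 48-bit quantity
	//
	// Narrower integer types are rejected at compile time by the static_asserts above.
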
	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

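	// For example (illustrative only): value<4>{0x9u} has its MSB set, so .sext<8>() yields 0xf9,
	// .zext<8>() yields 0x09, and .trunc<2>() keeps the two least significant bits, yielding 0x1.
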
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}

	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}

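	// Illustrative example (operands invented for this sketch): given value<8> a{0xffu},
	// a.blit<5, 2>(value<4>{0x0u}) clears bits [5:2] and yields 0xc3; all other bits are preserved.
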
	// Helpers for selecting an extending or truncating operation depending on whether the result is wider
	// or narrower than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}

	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(1 << offset_bits);
		data[offset_chunks] |= value ? 1 << offset_bits : 0;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	bool is_neg() const {
		return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

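	// A small sketch of update() semantics (operands invented): for value<8> old{0xabu},
	// old.update(value<8>{0xffu}, value<8>{0x0fu}) takes bits [3:0] from the new value and
	// bits [7:4] from the old one, yielding 0xaf. The memory write queue below applies
	// queued writes through exactly this operation, which is how per-bit write enables work.
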
	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		// The topmost chunk may have been clobbered by bits shifted past Bits; restore the invariant.
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			// Fill everything from bit (Bits - shift amount) upwards with copies of the sign bit,
			// then restore the invariant on the topmost chunk.
			size_t shift_amount = shift_chunks * chunk::bits + shift_bits;
			size_t top_chunk_idx = (Bits - shift_amount) / chunk::bits;
			size_t top_chunk_bits = (Bits - shift_amount) % chunk::bits;
			for (size_t n = top_chunk_idx + 1; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_amount != 0)
				result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
			result.data[chunks - 1] &= msb_mask;
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}

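	// Shift semantics at a glance (illustrative operands): for value<8> a{0x90u},
	// a.shl(value<3>{1u}) gives 0x20 (bits shifted past the width are dropped), a.shr(value<3>{4u})
	// gives 0x09, and a.sshr(value<3>{4u}) gives 0xf9 because the sign bit of `a` is replicated.
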
	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			// First count the entire chunk as zeroes; the topmost chunk may be partial.
			count += (n == 0 && Bits % chunk::bits != 0 ? Bits % chunk::bits : chunk::bits);
			if (x != 0) {
				// This loop implements the find first set idiom as recognized by LLVM; subtracting
				// the length of the nonzero part leaves only the leading zero bits.
				for (; x != 0; count--)
					x >>= 1;
				break;
			}
		}
		return count;
	}

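	// For example (illustrative only): value<8> a{0x09u} has a.ctpop() == 2 set bits and
	// a.ctlz() == 4 leading zero bits within its 8-bit width.
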
	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			if (result.chunks - 1 == n)
				result.data[result.chunks - 1] &= result.msb_mask;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		return {result, carry};
	}

	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}

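	// A short sketch of the comparison helpers (operands invented): for value<4> a{0x8u} /* -8 */
	// and value<4> b{0x1u}, a.ucmp(b) is false (8 u< 1 does not hold) while a.scmp(b) is true
	// (-8 s< 1 holds); the generated code picks one or the other based on cell signedness.
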
	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};

// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets result in a use-after-free:
//
//    const auto &a = val.slice<7,0>().slice<1>();
//    value<1> b = a;
//
//    auto &&c = val.slice<7,0>().slice<1>();
//    c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//  * Never explicitly name any type except `value<W>` or `const value<W> &`.
//  * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};

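// Safe usage of the expression templates, following the rules above (a sketch with invented names):
//
//    value<8> v{0xa5u};
//    value<4> hi = v.slice<7,4>().val();           // rvalue slice: hi == 0xa
//    v.slice<3,0>() = value<4>{0x0u};              // lvalue slice: v becomes 0xa0
//    value<8> w = hi.concat(value<4>{0xfu}).val(); // concatenation: w == 0xaf
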
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}

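// The printed format is the width in bits, a tick, and zero-padded hexadecimal chunks;
// e.g. (illustrative) `std::cout << value<12>{0xabu};` writes `12'0ab`.
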
template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	// Copying and copy-assigning values is natural. If, however, a value is replaced with a wire,
	// e.g. because a module is built with a different optimization level, then existing code could
	// unintentionally copy a wire instead, which would create a subtle but serious bug. To make sure
	// this doesn't happen, prohibit copying and copy-assigning wires.
	wire(const wire<Bits> &) = delete;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(wire<Bits> &&) = default;

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		return curr.template get<IntegerT>();
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		next.template set<IntegerT>(other);
	}

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

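// A wire models a storage element with distinct current and next values: eval() computes into `next`,
// and commit() moves it into `curr`. A minimal sketch (invented names, outside generated code):
//
//    wire<8> reg { value<8>{0u} };
//    reg.next = reg.curr.add(value<8>{1u}); // computed during eval()
//    bool changed = reg.commit();           // curr becomes 1; returns true
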
template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	memory(memory<Width> &&) = default;
	memory<Width> &operator=(memory<Width> &&) = default;

	// The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is
	// to stuff it into a plain array. You'd think an std::initializer_list would work here, but it doesn't,
	// because an initializer_list cannot be constructed in a constexpr context, and so the entire initializer
	// is first copied onto the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		data.resize(depth);
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write& b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};

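// A minimal sketch of memory use (invented names): construct storage, optionally preload it, then queue
// masked writes during eval and apply them in commit.
//
//    memory<8> ram(256);
//    ram[0] = value<8>{0x42u};                                        // direct write before simulation
//    ram.update(0, value<8>{0xffu}, value<8>{0x0fu}, /*priority=*/0); // queued masked write
//    bool changed = ram.commit();                                     // ram[0] becomes 0x4f
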
struct metadata {
	const enum {
		MISSING = 0,
		UINT = 1,
		SINT = 2,
		STRING = 3,
		DOUBLE = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned uint_value = 0;
	const signed sint_value = 0;
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

// Tag class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	// Object types.
	enum : uint32_t {
		VALUE = CXXRTL_VALUE,
		WIRE = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS = CXXRTL_ALIAS,
	};

	// Object flags.
	enum : uint32_t {
		INPUT = CXXRTL_INPUT,
		OUTPUT = CXXRTL_OUTPUT,
		INOUT = CXXRTL_INOUT,
		DRIVEN_SYNC = CXXRTL_DRIVEN_SYNC,
		DRIVEN_COMB = CXXRTL_DRIVEN_COMB,
		UNDRIVEN = CXXRTL_UNDRIVEN,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.data;
		next = item.data;
	}

	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = WIRE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.curr.data;
		next = item.next.data;
	}

	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type = MEMORY;
		flags = 0;
		width = Width;
		lsb_at = 0;
		depth = item.data.size();
		zero_at = zero_offset;
		curr = item.data.empty() ? nullptr : item.data[0].data;
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.curr.data);
		next = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}

	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};

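// A sketch of how a module would typically populate these tables (the member names `p_clk` and `p_data`
// are invented for this example; real generated names depend on the design):
//
//    void debug_info(debug_items &items, std::string path = "") /* override */ {
//        items.add(path + "clk", debug_item(p_clk, /*lsb_offset=*/0, debug_item::INPUT));
//        items.add(path + "data", debug_item(p_data));
//    }
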
// Tag class to disambiguate module move constructor and module constructor that takes black boxes
// out of another instance of the module.
struct adopt {};

struct module {
	module() {}
	virtual ~module() {}

	// Modules with black boxes cannot be copied. Although not all designs include black boxes,
	// delete the copy constructor and copy assignment operator to make sure that any downstream
	// code that manipulates modules doesn't accidentally depend on their availability.
	module(const module &) = delete;
	module &operator=(const module &) = delete;

	module(module &&) = default;
	module &operator=(module &&) = default;

	virtual void reset() = 0;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};

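// step() runs delta cycles until the design settles: each eval() computes next values from current ones,
// and each commit() moves them over. A typical driver loop might look like this (illustrative; `top`
// stands in for a class generated by `write_cxxrtl`, and `p_clk` is an invented port name):
//
//    cxxrtl_design::p_top top;
//    for (int cycle = 0; cycle < 1000; cycle++) {
//        top.p_clk.set<bool>(false); top.step();
//        top.p_clk.set<bool>(true);  top.step();
//    }
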
} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All Yosys arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
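//
// For instance (illustrative only): a 16-bit $add cell with a signed 8-bit A and a signed 12-bit B
// would be emitted roughly as `add_ss<16>(a, b)`, which sign-extends both operands to 16 bits first.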
namespace cxxrtl_yosys {

using namespace cxxrtl;

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) && bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) || bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	// Align the most significant set bits of the divisor and the dividend, then perform restoring division.
	uint32_t divisor_shift = divisor.ctlz() - dividend.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

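// For example (illustrative): divmod_uu<8>(value<8>{7u}, value<8>{2u}) returns {3, 1}, and
// divmod_ss<8>(value<8>{0xf9u} /* -7 */, value<8>{2u}) returns {-3, -1} (i.e. {0xfd, 0xff}),
// matching the truncating division semantics of the Yosys $div and $mod cells.
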
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};

} // namespace cxxrtl_yosys

#endif