/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2019-2020  whitequark <whitequark@whitequark.org>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
//
// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic, and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// CXXRTL essentially uses the C++ compiler as a hygienic macro engine that feeds an instruction selector.
// It generates a lot of specialized template functions with relatively large bodies that, when inlined
// into the caller and (for those with loops) unrolled, often expose many new optimization opportunities.
// Because of this, most of the CXXRTL runtime must always be inlined for best performance.
#ifndef __has_attribute
#	define __has_attribute(x) 0
#endif
#if __has_attribute(always_inline)
#define CXXRTL_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define CXXRTL_ALWAYS_INLINE inline
#endif

namespace cxxrtl {

// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and the platform register size, because when arithmetic
// on narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the
// register. However, (a) most of our operations do not change those bits in the first place because of
// invariants that are invisible to the compiler, and (b) we often operate on non-power-of-2 widths and have to
// clear the high bits anyway. Therefore, using relatively wide chunks, and clearing the high bits explicitly
// and only when we know they may be clobbered, results in simpler generated code.
typedef uint32_t chunk_t;
typedef uint64_t wide_chunk_t;

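// As an illustration (not part of the library): a `value<40>`, defined below, is stored in two
// 32-bit chunks. The low chunk holds bits 0..31, the high chunk holds bits 32..39, and the
// unused 24 bits of the high chunk are kept zeroed via `msb_mask`:
//
//   value<40> v; // v.chunks == 2; v.data[0] and v.data[1] are zero-initialized
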
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<chunk_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	// A (no-op) helper that forces the cast to value<>.
	CXXRTL_ALWAYS_INLINE
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Conversion operations.
	//
	// These functions ensure that a conversion is never out of range, and should always be used, if at all
	// possible, instead of direct manipulation of the `data` member. For very large types, .slice() and
	// .concat() can be used to split them into more manageable parts.
	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "get<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "get<T>() requires T to be at least as wide as the value is");
		IntegerT result = 0;
		for (size_t n = 0; n < chunks; n++)
			result |= IntegerT(data[n]) << (n * chunk::bits);
		return result;
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		static_assert(std::numeric_limits<IntegerT>::is_integer && !std::numeric_limits<IntegerT>::is_signed,
		              "set<T>() requires T to be an unsigned integral type");
		static_assert(std::numeric_limits<IntegerT>::digits >= Bits,
		              "set<T>() requires the value to be at least as wide as T is");
		for (size_t n = 0; n < chunks; n++)
			data[n] = (other >> (n * chunk::bits)) & chunk::mask;
	}

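	// For example (illustrative only), round-tripping a C++ integer through a value<16>:
	//
	//   value<16> v;
	//   v.set<uint16_t>(0xabcd);        // T must be at least as wide as the value
	//   uint16_t x = v.get<uint16_t>(); // x == 0xabcd
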
	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (shift_chunks + chunks < result.chunks)
			result.data[shift_chunks + chunks] = carry;
		return result;
	}

	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	CXXRTL_ALWAYS_INLINE
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}

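	// To illustrate the width-changing operations above (values shown in Verilog-like
	// notation; illustrative, not actual output):
	//
	//   value<8> v { 0xabu };
	//   v.trunc<4>();                    // 4'b1011: keeps the least significant bits
	//   v.zext<12>();                    // 12'h0ab: new bits on the left are zero
	//   v.sext<12>();                    // 12'hfab: new bits replicate the sign bit
	//   v.blit<7, 4>(value<4> { 0x5u }); // 8'h5b: replaces bits 7..4, keeps bits 3..0
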
	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		CXXRTL_ALWAYS_INLINE
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	CXXRTL_ALWAYS_INLINE
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}

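	// A short sketch of the casts in use (illustrative): the same call site works whether
	// the target width is larger or smaller than the source width.
	//
	//   value<8> v { 0xffu };
	//   v.zcast<4>();  // equivalent to v.trunc<4>()
	//   v.scast<16>(); // equivalent to v.sext<16>()
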
	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(1 << offset_bits);
		data[offset_chunks] |= value ? 1 << offset_bits : 0;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	bool is_neg() const {
		return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			size_t top_chunk_idx = (Bits - shift_bits) / chunk::bits;
			size_t top_chunk_bits = (Bits - shift_bits) % chunk::bits;
			for (size_t n = top_chunk_idx + 1; n < chunks; n++)
				result.data[n] = chunk::mask;
			if (shift_bits != 0)
				result.data[top_chunk_idx] |= chunk::mask << top_chunk_bits;
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}

	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			if (x == 0) {
				count += (n == 0 ? Bits % chunk::bits : chunk::bits);
			} else {
				// This loop implements the find first set idiom as recognized by LLVM.
				for (; x != 0; count++)
					x >>= 1;
			}
		}
		return count;
	}

	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			if (result.chunks - 1 == n)
				result.data[result.chunks - 1] &= result.msb_mask;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		return {result, carry};
	}

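	// For instance (illustrative): subtraction is `a + ~b + 1`, expressed through alu<>,
	// and the carry out is the inverted borrow, which is why ucmp() below returns `!carry`.
	//
	//   value<4> a { 0x3u }, b { 0x5u };
	//   auto res = a.alu</*Invert=*/true, /*CarryIn=*/true>(b);
	//   // res.first == 4'he (3 - 5, wrapped); res.second == false (a borrow occurred)
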
	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}

	template<size_t ResultBits>
	value<ResultBits> mul(const value<Bits> &other) const {
		value<ResultBits> result;
		wide_chunk_t wide_result[result.chunks + 1] = {};
		for (size_t n = 0; n < chunks; n++) {
			for (size_t m = 0; m < chunks && n + m < result.chunks; m++) {
				wide_result[n + m] += wide_chunk_t(data[n]) * wide_chunk_t(other.data[m]);
				wide_result[n + m + 1] += wide_result[n + m] >> chunk::bits;
				wide_result[n + m] &= chunk::mask;
			}
		}
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = wide_result[n];
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}
};

// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	CXXRTL_ALWAYS_INLINE
	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	CXXRTL_ALWAYS_INLINE
	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	CXXRTL_ALWAYS_INLINE
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets perform a use-after-free:
//
//   const auto &a = val.slice<7,0>().slice<1>();
//   value<1> b = a;
//
//   auto &&c = val.slice<7,0>().slice<1>();
//   c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
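//
// Conversely, a safe usage sketch following those rules (illustrative):
//
//   value<8> v { 0xabu };
//   value<4> hi = v.slice<7, 4>().val(); // copy the slice out through val()
//   v.slice<3, 0>() = value<4> { 0x5u }; // assign through a temporary slice; v == 8'ha5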
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	CXXRTL_ALWAYS_INLINE
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	CXXRTL_ALWAYS_INLINE
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}

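// As an example of the resulting format (illustrative): a `value<16>` holding 0x1234 is
// printed as "16'1234", i.e. the width, an apostrophe, and zero-padded hexadecimal digits.
//
//   std::cout << value<16> { 0x1234u }; // prints 16'1234
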
template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	IntegerT get() const {
		return curr.template get<IntegerT>();
	}

	template<class IntegerT>
	CXXRTL_ALWAYS_INLINE
	void set(IntegerT other) {
		next.template set<IntegerT>(other);
	}

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	// The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is
	// to stuff it into a plain array. You'd think an std::initializer_list would work here, but it doesn't,
	// because you can't construct an initializer_list in a constexpr context, so if you try, the whole thing
	// is first copied onto the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		data.resize(depth);
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
		(void)_;
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write& b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

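	// For example (illustrative): two write ports writing the same address in one delta cycle.
	// The queue is kept sorted by ascending priority and commit() applies writes in that order,
	// so the higher-priority write lands last and wins:
	//
	//   memory<8> mem(16);
	//   mem.update(3, value<8> { 0x11u }, value<8> { 0xffu }, /*priority=*/1);
	//   mem.update(3, value<8> { 0x22u }, value<8> { 0xffu }, /*priority=*/2);
	//   mem.commit(); // mem[3] == 8'h22
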
	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};

struct metadata {
	const enum {
		MISSING = 0,
		UINT = 1,
		SINT = 2,
		STRING = 3,
		DOUBLE = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	const unsigned uint_value = 0;
	const signed sint_value = 0;
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

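// A minimal sketch of attaching attributes or parameters via metadata_map, as generated
// code does (the names "src" and "WIDTH" here are illustrative):
//
//   metadata_map attributes = {
//     { "src", "top.v:42" },
//     { "WIDTH", 8u },
//   };
//   unsigned width = attributes.at("WIDTH").as_uint();
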
// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
	// Object types.
	enum : uint32_t {
		VALUE = CXXRTL_VALUE,
		WIRE = CXXRTL_WIRE,
		MEMORY = CXXRTL_MEMORY,
		ALIAS = CXXRTL_ALIAS,
	};

	// Object flags.
	enum : uint32_t {
		INPUT = CXXRTL_INPUT,
		OUTPUT = CXXRTL_OUTPUT,
		INOUT = CXXRTL_INOUT,
		DRIVEN_SYNC = CXXRTL_DRIVEN_SYNC,
		DRIVEN_COMB = CXXRTL_DRIVEN_COMB,
		UNDRIVEN = CXXRTL_UNDRIVEN,
	};

	debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

	template<size_t Bits>
	debug_item(value<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.data;
		next = item.data;
	}

	template<size_t Bits>
	debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = VALUE;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(wire<Bits> &item, size_t lsb_offset = 0, uint32_t flags_ = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = WIRE;
		flags = flags_;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = item.curr.data;
		next = item.next.data;
	}

	template<size_t Width>
	debug_item(memory<Width> &item, size_t zero_offset = 0) {
		static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
		              "memory<Width> is not compatible with C layout");
		type = MEMORY;
		flags = 0;
		width = Width;
		lsb_at = 0;
		depth = item.data.size();
		zero_at = zero_offset;
		curr = item.data.empty() ? nullptr : item.data[0].data;
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
		              "value<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.data);
		next = nullptr;
	}

	template<size_t Bits>
	debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
		static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
		              sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
		              "wire<Bits> is not compatible with C layout");
		type = ALIAS;
		flags = DRIVEN_COMB;
		width = Bits;
		lsb_at = lsb_offset;
		depth = 1;
		zero_at = 0;
		curr = const_cast<chunk_t*>(item.curr.data);
		next = nullptr;
	}
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
	std::map<std::string, std::vector<debug_item>> table;

	void add(const std::string &name, debug_item &&item) {
		std::vector<debug_item> &parts = table[name];
		parts.emplace_back(item);
		std::sort(parts.begin(), parts.end(),
			[](const debug_item &a, const debug_item &b) {
				return a.lsb_at < b.lsb_at;
			});
	}

	size_t count(const std::string &name) const {
		if (table.count(name) == 0)
			return 0;
		return table.at(name).size();
	}

	const std::vector<debug_item> &parts_at(const std::string &name) const {
		return table.at(name);
	}

	const debug_item &at(const std::string &name) const {
		const std::vector<debug_item> &parts = table.at(name);
		assert(parts.size() == 1);
		return parts.at(0);
	}

	const debug_item &operator [](const std::string &name) const {
		return at(name);
	}
};

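// A hedged sketch of how a generated module might register its state for introspection
// (the member names p_clk and state_counter are illustrative, not part of this header):
//
//   void debug_info(debug_items &items, std::string path = "") override {
//     items.add(path + "clk", debug_item(p_clk, 0, debug_item::INPUT));
//     items.add(path + "counter", debug_item(state_counter));
//   }
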
struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}

	virtual void debug_info(debug_items &items, std::string path = "") {
		(void)items, (void)path;
	}
};
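
// A minimal driving loop for a design generated with `write_cxxrtl` (illustrative; assumes a
// generated class `cxxrtl_design::p_top` with a clock input `p_clk`):
//
//   cxxrtl_design::p_top top;
//   for (int cycle = 0; cycle < 100; cycle++) {
//     top.p_clk.set<bool>(false); top.step();
//     top.p_clk.set<bool>(true);  top.step();
//   }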

} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
	std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of the Yosys arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary-width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named following the pattern `cell_A[B]`, where A and B
// are `u` if the corresponding operand is unsigned, and `s` if it is signed.
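//
// For example (illustrative): a signed `$add` cell with a 10-bit result maps to a call like
// `add_ss<10>(a, b)`, and a unary `$neg` cell with an unsigned operand maps to `neg_u<BitsY>(a)`.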
namespace cxxrtl_yosys {

using namespace cxxrtl;

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
CXXRTL_ALWAYS_INLINE
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_not(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) && bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) || bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_and(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_or(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_xnor(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> reduce_bool(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

// Bitwise operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}

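// As an illustration: the `shift_us`/`shift_ss` variants treat a negative (signed) shift
// amount as a shift in the opposite direction, so shifting 8'h01 right by 4'b1110 (i.e. -2)
// actually shifts left by 2:
//
//   value<8> y = shift_us<8>(value<8> { 0x01u }, value<4> { 0xeu }); // y == 8'h04
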
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
CXXRTL_ALWAYS_INLINE
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsM = BitsA >= BitsB ? BitsA : BitsB;
	return a.template zcast<BitsM>().template mul<BitsY>(b.template zcast<BitsM>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template mul<BitsY>(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}

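// A worked example (illustrative): dividing 4'd13 by 4'd2 with divmod_uu. The divisor is
// first left-aligned with the dividend, then one quotient bit is produced per step by a
// conditional subtract, yielding quotient 6 and remainder 1:
//
//   value<4> q, r;
//   std::tie(q, r) = divmod_uu<4>(value<4> { 13u }, value<4> { 2u });
//   // q == 4'd6, r == 4'd1
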
template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
CXXRTL_ALWAYS_INLINE
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};

} // namespace cxxrtl_yosys

#endif