/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

#include <backends/cxxrtl/cxxrtl_capi.h>

// The CXXRTL support library implements compile-time specialized arbitrary-width arithmetic and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.
namespace cxxrtl {

// All arbitrary-width values in CXXRTL are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use a chunk size between 32 bits and the platform register size, because when arithmetic
// on narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the
// register. However, (a) most of our operations do not change those bits in the first place because of
// invariants that are invisible to the compiler, and (b) we often operate on values of non-power-of-2 width
// and have to clear the high bits anyway. Therefore, using relatively wide chunks and clearing the high bits
// explicitly and only when we know they may be clobbered results in simpler generated code.
typedef uint32_t chunk_t;

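// For example (an illustrative note derived from the definitions below, not additional API): a 48-bit
// value is stored in two 32-bit chunks, and only the low 16 bits of the most significant chunk are valid:
//
//   static_assert(value<48>::chunks == 2, "48 bits fit in two 32-bit chunks");
//   static_assert(value<48>::msb_mask == 0xffff, "16 valid bits in the most significant chunk");
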
template<typename T>
struct chunk_traits {
    static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
                  "chunk type must be an unsigned integral type");
    using type = T;
    static constexpr size_t bits = std::numeric_limits<T>::digits;
    static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
    static constexpr size_t bits = Bits;

    using chunk = chunk_traits<chunk_t>;
    static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
        : chunk::mask >> (chunk::bits - (Bits % chunk::bits));

    static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
    chunk::type data[chunks] = {};

    value() = default;
    template<typename... Init>
    explicit constexpr value(Init ...init) : data{init...} {}

    value(const value<Bits> &) = default;
    value(value<Bits> &&) = default;
    value<Bits> &operator=(const value<Bits> &) = default;

    // A (no-op) helper that forces the cast to value<>.
    const value<Bits> &val() const {
        return *this;
    }

    std::string str() const {
        std::stringstream ss;
        ss << *this;
        return ss.str();
    }

    // Operations with compile-time parameters.
    //
    // These operations are used to implement slicing, concatenation, and blitting.
    // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
    // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
    template<size_t NewBits>
    value<NewBits> trunc() const {
        static_assert(NewBits <= Bits, "trunc() may not increase width");
        value<NewBits> result;
        for (size_t n = 0; n < result.chunks; n++)
            result.data[n] = data[n];
        result.data[result.chunks - 1] &= result.msb_mask;
        return result;
    }

    template<size_t NewBits>
    value<NewBits> zext() const {
        static_assert(NewBits >= Bits, "zext() may not decrease width");
        value<NewBits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = data[n];
        return result;
    }

    template<size_t NewBits>
    value<NewBits> sext() const {
        static_assert(NewBits >= Bits, "sext() may not decrease width");
        value<NewBits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = data[n];
        if (is_neg()) {
            result.data[chunks - 1] |= ~msb_mask;
            for (size_t n = chunks; n < result.chunks; n++)
                result.data[n] = chunk::mask;
            result.data[result.chunks - 1] &= result.msb_mask;
        }
        return result;
    }

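    // Example (illustrative only): width changes on the most significant side.
    //
    //   value<8> v { 0x90u };          // 0b10010000, MSB set
    //   value<4> t = v.trunc<4>();     // 0x0: keeps the 4 least significant bits
    //   value<12> z = v.zext<12>();    // 0x090: new high bits are zero
    //   value<12> s = v.sext<12>();    // 0xf90: new high bits copy the sign bit
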
    template<size_t NewBits>
    value<NewBits> rtrunc() const {
        static_assert(NewBits <= Bits, "rtrunc() may not increase width");
        value<NewBits> result;
        constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
        constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
        chunk::type carry = 0;
        if (shift_chunks + result.chunks < chunks) {
            carry = (shift_bits == 0) ? 0
                : data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
        }
        for (size_t n = result.chunks; n > 0; n--) {
            result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
            carry = (shift_bits == 0) ? 0
                : data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
        }
        return result;
    }

    template<size_t NewBits>
    value<NewBits> rzext() const {
        static_assert(NewBits >= Bits, "rzext() may not decrease width");
        value<NewBits> result;
        constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
        constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
        chunk::type carry = 0;
        for (size_t n = 0; n < chunks; n++) {
            result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
            carry = (shift_bits == 0) ? 0
                : data[n] >> (chunk::bits - shift_bits);
        }
        if (carry != 0)
            result.data[result.chunks - 1] = carry;
        return result;
    }

    // Bit blit operation, i.e. a partial read-modify-write.
    template<size_t Stop, size_t Start>
    value<Bits> blit(const value<Stop - Start + 1> &source) const {
        static_assert(Stop >= Start, "blit() may not reverse bit order");
        constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
        constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
            : (chunk::mask << (Stop % chunk::bits + 1));
        value<Bits> masked = *this;
        if (Start / chunk::bits == Stop / chunk::bits) {
            masked.data[Start / chunk::bits] &= stop_mask | start_mask;
        } else {
            masked.data[Start / chunk::bits] &= start_mask;
            for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
                masked.data[n] = 0;
            masked.data[Stop / chunk::bits] &= stop_mask;
        }
        value<Bits> shifted = source
            .template rzext<Stop + 1>()
            .template zext<Bits>();
        return masked.bit_or(shifted);
    }

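    // Example (illustrative only): blit() replaces a bit field while leaving the other bits intact.
    //
    //   value<8> v { 0xffu };
    //   value<3> f { 0x2u };
    //   value<8> r = v.blit<6, 4>(f);  // bits [6:4] become 0b010; r == 0xaf
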
    // Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
    // than the operand. In C++17 these can be replaced with `if constexpr`.
    template<size_t NewBits, typename = void>
    struct zext_cast {
        value<NewBits> operator()(const value<Bits> &val) {
            return val.template zext<NewBits>();
        }
    };

    template<size_t NewBits>
    struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
        value<NewBits> operator()(const value<Bits> &val) {
            return val.template trunc<NewBits>();
        }
    };

    template<size_t NewBits, typename = void>
    struct sext_cast {
        value<NewBits> operator()(const value<Bits> &val) {
            return val.template sext<NewBits>();
        }
    };

    template<size_t NewBits>
    struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
        value<NewBits> operator()(const value<Bits> &val) {
            return val.template trunc<NewBits>();
        }
    };

    template<size_t NewBits>
    value<NewBits> zcast() const {
        return zext_cast<NewBits>()(*this);
    }

    template<size_t NewBits>
    value<NewBits> scast() const {
        return sext_cast<NewBits>()(*this);
    }

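    // Example (illustrative only): zcast()/scast() choose between extension and truncation automatically,
    // which is convenient in generated code where the relative widths are not known in advance.
    //
    //   value<8> v { 0x90u };
    //   value<4> a = v.zcast<4>();     // narrower, so it truncates: same as v.trunc<4>()
    //   value<12> b = v.scast<12>();   // wider, so it sign-extends: same as v.sext<12>()
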
    // Operations with run-time parameters (offsets, amounts, etc).
    //
    // These operations are used for computations.
    bool bit(size_t offset) const {
        return data[offset / chunk::bits] & (chunk::type(1) << (offset % chunk::bits));
    }

    void set_bit(size_t offset, bool value = true) {
        size_t offset_chunks = offset / chunk::bits;
        size_t offset_bits = offset % chunk::bits;
        data[offset_chunks] &= ~(chunk::type(1) << offset_bits);
        data[offset_chunks] |= value ? chunk::type(1) << offset_bits : chunk::type(0);
    }

    bool is_zero() const {
        for (size_t n = 0; n < chunks; n++)
            if (data[n] != 0)
                return false;
        return true;
    }

    explicit operator bool() const {
        return !is_zero();
    }

    bool is_neg() const {
        return data[chunks - 1] & (chunk::type(1) << ((Bits - 1) % chunk::bits));
    }

    bool operator ==(const value<Bits> &other) const {
        for (size_t n = 0; n < chunks; n++)
            if (data[n] != other.data[n])
                return false;
        return true;
    }

    bool operator !=(const value<Bits> &other) const {
        return !(*this == other);
    }

    value<Bits> bit_not() const {
        value<Bits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = ~data[n];
        result.data[chunks - 1] &= msb_mask;
        return result;
    }

    value<Bits> bit_and(const value<Bits> &other) const {
        value<Bits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = data[n] & other.data[n];
        return result;
    }

    value<Bits> bit_or(const value<Bits> &other) const {
        value<Bits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = data[n] | other.data[n];
        return result;
    }

    value<Bits> bit_xor(const value<Bits> &other) const {
        value<Bits> result;
        for (size_t n = 0; n < chunks; n++)
            result.data[n] = data[n] ^ other.data[n];
        return result;
    }

    value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
        return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
    }

    template<size_t AmountBits>
    value<Bits> shl(const value<AmountBits> &amount) const {
        // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
        static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
        // Detect shifts that are definitely larger than Bits early.
        for (size_t n = 1; n < amount.chunks; n++)
            if (amount.data[n] != 0)
                return {};
        // Past this point we can use the least significant chunk as the shift size.
        size_t shift_chunks = amount.data[0] / chunk::bits;
        size_t shift_bits = amount.data[0] % chunk::bits;
        if (shift_chunks >= chunks)
            return {};
        value<Bits> result;
        chunk::type carry = 0;
        for (size_t n = 0; n < chunks - shift_chunks; n++) {
            result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
            carry = (shift_bits == 0) ? 0
                : data[n] >> (chunk::bits - shift_bits);
        }
        return result;
    }

    template<size_t AmountBits, bool Signed = false>
    value<Bits> shr(const value<AmountBits> &amount) const {
        // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
        static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
        // Detect shifts that are definitely larger than Bits early.
        for (size_t n = 1; n < amount.chunks; n++)
            if (amount.data[n] != 0)
                return {};
        // Past this point we can use the least significant chunk as the shift size.
        size_t shift_chunks = amount.data[0] / chunk::bits;
        size_t shift_bits = amount.data[0] % chunk::bits;
        if (shift_chunks >= chunks)
            return {};
        value<Bits> result;
        chunk::type carry = 0;
        for (size_t n = 0; n < chunks - shift_chunks; n++) {
            result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
            carry = (shift_bits == 0) ? 0
                : data[chunks - 1 - n] << (chunk::bits - shift_bits);
        }
        if (Signed && is_neg()) {
            // Sign-extend: fill every bit position that was shifted in at the top with a one,
            // staying within the declared width so the high-bit invariant is preserved.
            size_t shift_amount = shift_chunks * chunk::bits + shift_bits;
            size_t sign_start = (shift_amount < Bits) ? Bits - shift_amount : 0;
            for (size_t n = sign_start; n < Bits; n++)
                result.set_bit(n, true);
        }
        return result;
    }

    template<size_t AmountBits>
    value<Bits> sshr(const value<AmountBits> &amount) const {
        return shr<AmountBits, /*Signed=*/true>(amount);
    }

    size_t ctpop() const {
        size_t count = 0;
        for (size_t n = 0; n < chunks; n++) {
            // This loop implements the population count idiom as recognized by LLVM and GCC.
            for (chunk::type x = data[n]; x != 0; count++)
                x = x & (x - 1);
        }
        return count;
    }

    size_t ctlz() const {
        size_t count = 0;
        for (size_t n = 0; n < chunks; n++) {
            chunk::type x = data[chunks - 1 - n];
            // The most significant chunk may hold fewer than chunk::bits valid bits.
            size_t chunk_bits = (n == 0 && Bits % chunk::bits != 0) ? Bits % chunk::bits : chunk::bits;
            if (x == 0) {
                count += chunk_bits;
            } else {
                // This loop computes the bit width of the chunk, i.e. the position of its most
                // significant set bit plus one; the leading zeros of the chunk are the remainder.
                size_t width = 0;
                for (; x != 0; width++)
                    x >>= 1;
                count += chunk_bits - width;
                break;
            }
        }
        return count;
    }

    template<bool Invert, bool CarryIn>
    std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
        value<Bits> result;
        bool carry = CarryIn;
        for (size_t n = 0; n < result.chunks; n++) {
            result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
            carry = (result.data[n] < data[n]) ||
                    (result.data[n] == data[n] && carry);
        }
        result.data[result.chunks - 1] &= result.msb_mask;
        return {result, carry};
    }

    value<Bits> add(const value<Bits> &other) const {
        return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
    }

    value<Bits> sub(const value<Bits> &other) const {
        return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
    }

    value<Bits> neg() const {
        return value<Bits> { 0u }.sub(*this);
    }

    bool ucmp(const value<Bits> &other) const {
        bool carry;
        std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
        return !carry; // a.ucmp(b) ≡ a u< b
    }

    bool scmp(const value<Bits> &other) const {
        value<Bits> result;
        bool carry;
        std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
        bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
        return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
    }
};

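// Example (illustrative only): alu() computes A + (Invert ? ~B : B) + CarryIn, which subsumes both
// addition and subtraction; ucmp() and scmp() derive comparisons from the carry and overflow of A - B.
//
//   value<4> a { 0x3u }, b { 0xdu };   // 3 and, interpreted as signed, -3
//   bool ult = a.ucmp(b);              // true: 3 u< 13
//   bool slt = b.scmp(a);              // true: -3 s< 3
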
// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
    static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
    static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
    static constexpr size_t bits = Stop - Start + 1;

    T &expr;

    slice_expr(T &expr) : expr(expr) {}
    slice_expr(const slice_expr<T, Stop, Start> &) = delete;

    operator value<bits>() const {
        return static_cast<const value<T::bits> &>(expr)
            .template rtrunc<T::bits - Start>()
            .template trunc<bits>();
    }

    slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
        // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
        expr = static_cast<const value<T::bits> &>(expr)
            .template blit<Stop, Start>(rhs);
        return *this;
    }

    // A helper that forces the cast to value<>, which allows deduction to work.
    value<bits> val() const {
        return static_cast<const value<bits> &>(*this);
    }
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
    static constexpr size_t bits = T::bits + U::bits;

    T &ms_expr;
    U &ls_expr;

    concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
    concat_expr(const concat_expr<T, U> &) = delete;

    operator value<bits>() const {
        value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
            .template rzext<bits>();
        value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
            .template zext<bits>();
        return ms_shifted.bit_or(ls_extended);
    }

    concat_expr<T, U> &operator=(const value<bits> &rhs) {
        ms_expr = rhs.template rtrunc<T::bits>();
        ls_expr = rhs.template trunc<U::bits>();
        return *this;
    }

    // A helper that forces the cast to value<>, which allows deduction to work.
    value<bits> val() const {
        return static_cast<const value<bits> &>(*this);
    }
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets result in a use-after-free:
//
//   const auto &a = val.slice<7,0>().slice<1>();
//   value<1> b = a;
//
//   auto &&c = val.slice<7,0>().slice<1>();
//   c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
    template<size_t Stop, size_t Start = Stop>
    slice_expr<const T, Stop, Start> slice() const {
        return {*static_cast<const T *>(this)};
    }

    template<size_t Stop, size_t Start = Stop>
    slice_expr<T, Stop, Start> slice() {
        return {*static_cast<T *>(this)};
    }

    template<class U>
    concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
        return {*static_cast<const T *>(this), other};
    }

    template<class U>
    concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
        return {*static_cast<T *>(this), other};
    }
};

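// Example (illustrative only): slices and concatenations used safely, following the rules above;
// no expression object is ever given a name, so each lives only within its own statement.
//
//   value<8> hi { 0x12u }, lo { 0x34u };
//   value<16> w = hi.concat(lo).val();     // 0x1234
//   value<4> n = w.slice<11, 8>().val();   // 0x2
//   w.slice<3, 0>() = value<4> { 0xfu };   // w becomes 0x123f
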
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
    auto old_flags = os.flags(std::ios::right);
    auto old_width = os.width(0);
    auto old_fill = os.fill('0');
    os << val.bits << '\'' << std::hex;
    for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
        if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
            os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
        else
            os.width((value<Bits>::chunk::bits + 3) / 4);
        os << val.data[n];
    }
    os.fill(old_fill);
    os.width(old_width);
    os.flags(old_flags);
    return os;
}

template<size_t Bits>
struct wire {
    static constexpr size_t bits = Bits;

    value<Bits> curr;
    value<Bits> next;

    wire() = default;
    constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
    template<typename... Init>
    explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

    wire(const wire<Bits> &) = delete;
    wire(wire<Bits> &&) = default;
    wire<Bits> &operator=(const wire<Bits> &) = delete;

    bool commit() {
        if (curr != next) {
            curr = next;
            return true;
        }
        return false;
    }
};

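// Example (illustrative only): a wire holds both the current and the next value of a storage element.
// Generated eval() code writes `next`; commit() then makes the update visible and reports a change:
//
//   wire<8> w { 0u };
//   w.next = w.curr.add(value<8> { 1u });
//   bool changed = w.commit();             // curr becomes 0x01; returns true
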
template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
    os << val.curr;
    return os;
}

template<size_t Width>
struct memory {
    std::vector<value<Width>> data;

    size_t depth() const {
        return data.size();
    }

    memory() = delete;
    explicit memory(size_t depth) : data(depth) {}

    memory(const memory<Width> &) = delete;
    memory<Width> &operator=(const memory<Width> &) = delete;

    // The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is to
    // stuff it into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because
    // you can't construct an initializer_list in a constexpr (or something), and so if you try to do that the
    // whole thing is first copied onto the stack (probably overflowing it) and then again into `data`.
    template<size_t Size>
    struct init {
        size_t offset;
        value<Width> data[Size];
    };

    template<size_t... InitSize>
    explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
        // This utterly reprehensible construct is the most reasonable way to apply a function to every element
        // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
        auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
        (void)_;
    }

    // An operator for direct memory reads. May be used at any time during the simulation.
    const value<Width> &operator [](size_t index) const {
        assert(index < data.size());
        return data[index];
    }

    // An operator for direct memory writes. May only be used before the simulation is started. If used
    // after the simulation is started, the design may malfunction.
    value<Width> &operator [](size_t index) {
        assert(index < data.size());
        return data[index];
    }

    // A simple way to make a writable memory would be to use an array of wires instead of an array of values.
    // However, there are two significant downsides to this approach: first, it has large overhead (2× space
    // overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
    // priorities. Although in principle write ports could be ordered or conditionally enabled in generated
    // code based on their priorities and selected addresses, the feedback arc set problem is computationally
    // expensive, and heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
    // a particular write port evaluation order.
    //
    // The approach used here instead is to queue writes into a buffer during the eval phase, then perform
    // the writes during the commit phase in priority order. This approach has low overhead, with both space
    // and time proportional to the number of write ports. Because virtually every memory in a practical design
    // has at most two write ports, linear search is used on every write, as it is the fastest and simplest approach.
    struct write {
        size_t index;
        value<Width> val;
        value<Width> mask;
        int priority;
    };
    std::vector<write> write_queue;

    void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
        assert(index < data.size());
        // Queue up the write while keeping the queue sorted by priority.
        write_queue.insert(
            std::upper_bound(write_queue.begin(), write_queue.end(), priority,
                [](const int a, const write &b) { return a < b.priority; }),
            write { index, val, mask, priority });
    }

    bool commit() {
        bool changed = false;
        for (const write &entry : write_queue) {
            value<Width> elem = data[entry.index];
            elem = elem.update(entry.val, entry.mask);
            changed |= (data[entry.index] != elem);
            data[entry.index] = elem;
        }
        write_queue.clear();
        return changed;
    }
};

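// Example (illustrative only): two write ports hitting the same address in the same delta cycle.
// The write with the higher priority value wins, regardless of the order update() was called in:
//
//   memory<8> mem(16);
//   mem.update(3, value<8> { 0x11u }, value<8> { 0xffu }, /*priority=*/0);
//   mem.update(3, value<8> { 0x22u }, value<8> { 0xffu }, /*priority=*/1);
//   mem.commit();                          // mem[3] == 0x22
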
struct metadata {
    const enum {
        MISSING = 0,
        UINT = 1,
        SINT = 2,
        STRING = 3,
        DOUBLE = 4,
    } value_type;

    // In debug mode, using the wrong .as_*() function will assert.
    // In release mode, using the wrong .as_*() function will safely return a default value.
    const unsigned uint_value = 0;
    const signed sint_value = 0;
    const std::string string_value = "";
    const double double_value = 0.0;

    metadata() : value_type(MISSING) {}
    metadata(unsigned value) : value_type(UINT), uint_value(value) {}
    metadata(signed value) : value_type(SINT), sint_value(value) {}
    metadata(const std::string &value) : value_type(STRING), string_value(value) {}
    metadata(const char *value) : value_type(STRING), string_value(value) {}
    metadata(double value) : value_type(DOUBLE), double_value(value) {}

    metadata(const metadata &) = default;
    metadata &operator=(const metadata &) = delete;

    unsigned as_uint() const {
        assert(value_type == UINT);
        return uint_value;
    }

    signed as_sint() const {
        assert(value_type == SINT);
        return sint_value;
    }

    const std::string &as_string() const {
        assert(value_type == STRING);
        return string_value;
    }

    double as_double() const {
        assert(value_type == DOUBLE);
        return double_value;
    }
};

typedef std::map<std::string, metadata> metadata_map;

// Helper class to disambiguate values/wires and their aliases.
struct debug_alias {};

// This structure is intended for consumption via foreign function interfaces, like Python's ctypes.
// Because of this it uses a C-style layout that is easy to parse rather than more idiomatic C++.
//
// To avoid violating strict aliasing rules, this structure has to be a subclass of the one used
// in the C API, or it would not be possible to cast between the pointers to these.
struct debug_item : ::cxxrtl_object {
    enum : uint32_t {
        VALUE = CXXRTL_VALUE,
        WIRE = CXXRTL_WIRE,
        MEMORY = CXXRTL_MEMORY,
        ALIAS = CXXRTL_ALIAS,
    };

    debug_item(const ::cxxrtl_object &object) : cxxrtl_object(object) {}

    template<size_t Bits>
    debug_item(value<Bits> &item, size_t lsb_offset = 0) {
        static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
                      "value<Bits> is not compatible with C layout");
        type = VALUE;
        width = Bits;
        lsb_at = lsb_offset;
        depth = 1;
        zero_at = 0;
        curr = item.data;
        next = item.data;
    }

    template<size_t Bits>
    debug_item(const value<Bits> &item, size_t lsb_offset = 0) {
        static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
                      "value<Bits> is not compatible with C layout");
        type = VALUE;
        width = Bits;
        lsb_at = lsb_offset;
        depth = 1;
        zero_at = 0;
        curr = const_cast<chunk_t*>(item.data);
        next = nullptr;
    }

    template<size_t Bits>
    debug_item(wire<Bits> &item, size_t lsb_offset = 0) {
        static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
                      sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
                      "wire<Bits> is not compatible with C layout");
        type = WIRE;
        width = Bits;
        lsb_at = lsb_offset;
        depth = 1;
        zero_at = 0;
        curr = item.curr.data;
        next = item.next.data;
    }

    template<size_t Width>
    debug_item(memory<Width> &item, size_t zero_offset = 0) {
        static_assert(sizeof(item.data[0]) == value<Width>::chunks * sizeof(chunk_t),
                      "memory<Width> is not compatible with C layout");
        type = MEMORY;
        width = Width;
        lsb_at = 0;
        depth = item.data.size();
        zero_at = zero_offset;
        curr = item.data.empty() ? nullptr : item.data[0].data;
        next = nullptr;
    }

    template<size_t Bits>
    debug_item(debug_alias, const value<Bits> &item, size_t lsb_offset = 0) {
        static_assert(sizeof(item) == value<Bits>::chunks * sizeof(chunk_t),
                      "value<Bits> is not compatible with C layout");
        type = ALIAS;
        width = Bits;
        lsb_at = lsb_offset;
        depth = 1;
        zero_at = 0;
        curr = const_cast<chunk_t*>(item.data);
        next = nullptr;
    }

    template<size_t Bits>
    debug_item(debug_alias, const wire<Bits> &item, size_t lsb_offset = 0) {
        static_assert(sizeof(item.curr) == value<Bits>::chunks * sizeof(chunk_t) &&
                      sizeof(item.next) == value<Bits>::chunks * sizeof(chunk_t),
                      "wire<Bits> is not compatible with C layout");
        type = ALIAS;
        width = Bits;
        lsb_at = lsb_offset;
        depth = 1;
        zero_at = 0;
        curr = const_cast<chunk_t*>(item.curr.data);
        next = nullptr;
    }
};
static_assert(std::is_standard_layout<debug_item>::value, "debug_item is not compatible with C layout");

struct debug_items {
    std::map<std::string, std::vector<debug_item>> table;

    void add(const std::string &name, debug_item &&item) {
        std::vector<debug_item> &parts = table[name];
        parts.emplace_back(item);
        std::sort(parts.begin(), parts.end(),
            [](const debug_item &a, const debug_item &b) {
                return a.lsb_at < b.lsb_at;
            });
    }

    size_t count(const std::string &name) const {
        if (table.count(name) == 0)
            return 0;
        return table.at(name).size();
    }

    const std::vector<debug_item> &parts_at(const std::string &name) const {
        return table.at(name);
    }

    const debug_item &at(const std::string &name) const {
        const std::vector<debug_item> &parts = table.at(name);
        assert(parts.size() == 1);
        return parts.at(0);
    }

    const debug_item &operator [](const std::string &name) const {
        return at(name);
    }
};

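// Example (illustrative only): a signal that the netlist splits into several parts, e.g. bits [3:0]
// and [7:4] of one RTL name, is exposed as multiple debug items under a single name, sorted by lsb_at.
// Here `sig_lo` and `sig_hi` stand for hypothetical value<4> members of the design:
//
//   debug_items items;
//   items.add("top sig", debug_item(sig_hi, /*lsb_offset=*/4));
//   items.add("top sig", debug_item(sig_lo, /*lsb_offset=*/0));
//   assert(items.count("top sig") == 2);
//   const debug_item &lo_part = items.parts_at("top sig").at(0);   // the part with lsb_at == 0
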
struct module {
    module() {}
    virtual ~module() {}

    module(const module &) = delete;
    module &operator=(const module &) = delete;

    virtual bool eval() = 0;
    virtual bool commit() = 0;

    size_t step() {
        size_t deltas = 0;
        bool converged = false;
        do {
            converged = eval();
            deltas++;
        } while (commit() && !converged);
        return deltas;
    }

    virtual void debug_info(debug_items &items, std::string path = "") {
        (void)items, (void)path;
    }
};

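// Example (illustrative only): driving a design from a testbench. `p_top` stands for a hypothetical
// module subclass emitted by `write_cxxrtl`, and `p_clk` for one of its input ports:
//
//   p_top top;
//   for (int cycle = 0; cycle < 100; cycle++) {
//       top.p_clk = value<1> { 0u }; top.step();
//       top.p_clk = value<1> { 1u }; top.step();
//   }
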
} // namespace cxxrtl

// Internal structure used to communicate with the implementation of the C interface.
typedef struct _cxxrtl_toplevel {
    std::unique_ptr<cxxrtl::module> module;
} *cxxrtl_toplevel;

// Definitions of internal Yosys cells. Other than the functions in this namespace, CXXRTL is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
namespace cxxrtl_yosys {

using namespace cxxrtl;

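// For example (an illustrative sketch, not actual emitted code): an RTLIL `$add` cell with two 4-bit
// unsigned inputs and an 8-bit output would be translated to a call like the following, where `a` and
// `b` stand for the already-computed operand values:
//
//   value<8> y = add_uu<8>(a, b);   // both operands are zero-extended to 8 bits, then added
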
// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
constexpr T max(const T &a, const T &b) {
    return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
value<BitsY> logic_not(const value<BitsA> &a) {
    return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_and(const value<BitsA> &a, const value<BitsB> &b) {
    return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_or(const value<BitsA> &a, const value<BitsB> &b) {
    return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
}

// Reduction operations
template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_and(const value<BitsA> &a) {
    return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_or(const value<BitsA> &a) {
    return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xor(const value<BitsA> &a) {
    return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xnor(const value<BitsA> &a) {
    return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_bool(const value<BitsA> &a) {
    return value<BitsY> { a ? 1u : 0u };
}

// Bitwise operations
template<size_t BitsY, size_t BitsA>
value<BitsY> not_u(const value<BitsA> &a) {
    return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> not_s(const value<BitsA> &a) {
    return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
    return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
    return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
    return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
    return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t BitsExt = max(BitsA, BitsB);
    return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
value<BitsY> pos_u(const value<BitsA> &a) {
    return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> pos_s(const value<BitsA> &a) {
    return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> neg_u(const value<BitsA> &a) {
    return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> neg_s(const value<BitsA> &a) {
    return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
    value<BitsY> product;
    value<BitsY> multiplicand = a.template zcast<BitsY>();
    const value<BitsB> &multiplier = b;
    uint32_t multiplicand_shift = 0;
    for (size_t step = 0; step < BitsB; step++) {
        if (multiplier.bit(step)) {
            multiplicand = multiplicand.shl(value<32> { multiplicand_shift });
            product = product.add(multiplicand);
            multiplicand_shift = 0;
        }
        multiplicand_shift++;
    }
    return product;
}

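// Example (illustrative only): mul_uu computes 6 × 5 by shift-and-add. The multiplier 5 (0b101) has
// bits 0 and 2 set, so the product accumulates (6 << 0) + (6 << 2) = 6 + 24 = 30:
//
//   value<8> y = mul_uu<8>(value<4> { 6u }, value<4> { 5u });   // y == 30
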
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
    value<BitsB + 1> ub = b.template sext<BitsB + 1>();
    if (ub.is_neg()) ub = ub.neg();
    value<BitsY> y = mul_uu<BitsY>(a.template scast<BitsY>(), ub);
    return b.is_neg() ? y.neg() : y;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
    constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
    value<Bits> quotient;
    value<Bits> dividend = a.template zext<Bits>();
    value<Bits> divisor = b.template zext<Bits>();
    if (dividend.ucmp(divisor))
        return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
    // Align the divisor with the dividend's most significant set bit; since dividend u>= divisor here,
    // the difference of the leading zero counts is non-negative.
    uint32_t divisor_shift = divisor.ctlz() - dividend.ctlz();
    divisor = divisor.shl(value<32> { divisor_shift });
    for (size_t step = 0; step <= divisor_shift; step++) {
        quotient = quotient.shl(value<1> { 1u });
        if (!dividend.ucmp(divisor)) {
            dividend = dividend.sub(divisor);
            quotient.set_bit(0, true);
        }
        divisor = divisor.shr(value<1> { 1u });
    }
    return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}

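// Example (illustrative only): divmod_uu performs restoring long division. For 100 / 3, the divisor
// is first aligned with the dividend's most significant set bit (3 << 5 == 96), then shifted back down
// one bit per step:
//
//   value<8> q, r;
//   std::tie(q, r) = divmod_uu<8>(value<8> { 100u }, value<8> { 3u });   // q == 33, r == 1
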
template<size_t BitsY, size_t BitsA, size_t BitsB>
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
    value<BitsA + 1> ua = a.template sext<BitsA + 1>();
    value<BitsB + 1> ub = b.template sext<BitsB + 1>();
    if (ua.is_neg()) ua = ua.neg();
    if (ub.is_neg()) ub = ub.neg();
    value<BitsY> y, r;
    std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
    if (a.is_neg() != b.is_neg()) y = y.neg();
    if (a.is_neg()) r = r.neg();
    return {y, r};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
    return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
    return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
    bool valid;
    size_t index;

    template<size_t BitsAddr>
    memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
        static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
        size_t offset_index = addr.data[0];

        valid = (offset_index >= offset && offset_index < offset + depth);
        index = offset_index - offset;
    }
};

} // namespace cxxrtl_yosys

#endif