/*
 * yosys -- Yosys Open SYnthesis Suite
 *
 * Copyright (C) 2019-2020 whitequark <whitequark@whitequark.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.

#ifndef CXXRTL_H
#define CXXRTL_H

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <limits>
#include <type_traits>
#include <tuple>
#include <vector>
#include <map>
#include <algorithm>
#include <memory>
#include <sstream>

// The cxxrtl support library implements compile-time specialized arbitrary-width arithmetic and provides
// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
// to unwrap the abstraction and generate efficient code.
namespace cxxrtl {

// All arbitrary-width values in cxxrtl are backed by arrays of unsigned integers called chunks. The chunk size
// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
// and introspecting the simulation in Python.
//
// It is practical to use chunk sizes between 32 bits and the platform register size, because when arithmetic on
// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
// However, (a) most of our operations do not change those bits in the first place because of invariants that are
// invisible to the compiler, and (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
// clobbered results in simpler generated code.
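//
// For illustration: with 32-bit chunks, a 40-bit value is stored as two chunks, where chunk 0 holds bits
// 0..31 and chunk 1 holds bits 32..39 in its 8 least significant bits, with the remaining 24 bits kept
// clear (see msb_mask in value<Bits> below).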
template<typename T>
struct chunk_traits {
	static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
	              "chunk type must be an unsigned integral type");
	using type = T;
	static constexpr size_t bits = std::numeric_limits<T>::digits;
	static constexpr T mask = std::numeric_limits<T>::max();
};

template<class T>
struct expr_base;

template<size_t Bits>
struct value : public expr_base<value<Bits>> {
	static constexpr size_t bits = Bits;

	using chunk = chunk_traits<uint32_t>;
	static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
		: chunk::mask >> (chunk::bits - (Bits % chunk::bits));

	static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
	chunk::type data[chunks] = {};

	value() = default;
	template<typename... Init>
	explicit constexpr value(Init ...init) : data{init...} {}

	value(const value<Bits> &) = default;
	value(value<Bits> &&) = default;
	value<Bits> &operator=(const value<Bits> &) = default;

	// A (no-op) helper that forces the cast to value<>.
	const value<Bits> &val() const {
		return *this;
	}

	std::string str() const {
		std::stringstream ss;
		ss << *this;
		return ss.str();
	}

	// Operations with compile-time parameters.
	//
	// These operations are used to implement slicing, concatenation, and blitting.
	// The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
	// the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
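	//
	// A sketch of how these compose (widths chosen for illustration):
	//
	//   value<8> v { 0xabu };
	//   v.trunc<4>();  // 0xb:   drops the 4 most significant bits
	//   v.zext<12>();  // 0x0ab: adds 4 zero bits on the left
	//   v.sext<12>();  // 0xfab: replicates the sign bit on the left
	//   v.rtrunc<4>(); // 0xa:   drops the 4 least significant bits
	//   v.rzext<12>(); // 0xab0: adds 4 zero bits on the right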
	template<size_t NewBits>
	value<NewBits> trunc() const {
		static_assert(NewBits <= Bits, "trunc() may not increase width");
		value<NewBits> result;
		for (size_t n = 0; n < result.chunks; n++)
			result.data[n] = data[n];
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t NewBits>
	value<NewBits> zext() const {
		static_assert(NewBits >= Bits, "zext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		return result;
	}

	template<size_t NewBits>
	value<NewBits> sext() const {
		static_assert(NewBits >= Bits, "sext() may not decrease width");
		value<NewBits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n];
		if (is_neg()) {
			result.data[chunks - 1] |= ~msb_mask;
			for (size_t n = chunks; n < result.chunks; n++)
				result.data[n] = chunk::mask;
			result.data[result.chunks - 1] &= result.msb_mask;
		}
		return result;
	}

	template<size_t NewBits>
	value<NewBits> rtrunc() const {
		static_assert(NewBits <= Bits, "rtrunc() may not increase width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
		constexpr size_t shift_bits = (Bits - NewBits) % chunk::bits;
		chunk::type carry = 0;
		if (shift_chunks + result.chunks < chunks) {
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
		}
		for (size_t n = result.chunks; n > 0; n--) {
			result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
		}
		return result;
	}

	template<size_t NewBits>
	value<NewBits> rzext() const {
		static_assert(NewBits >= Bits, "rzext() may not decrease width");
		value<NewBits> result;
		constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
		constexpr size_t shift_bits = (NewBits - Bits) % chunk::bits;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		if (carry != 0)
			result.data[result.chunks - 1] = carry;
		return result;
	}

	// Bit blit operation, i.e. a partial read-modify-write.
	template<size_t Stop, size_t Start>
	value<Bits> blit(const value<Stop - Start + 1> &source) const {
		static_assert(Stop >= Start, "blit() may not reverse bit order");
		constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
		constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
			: (chunk::mask << (Stop % chunk::bits + 1));
		value<Bits> masked = *this;
		if (Start / chunk::bits == Stop / chunk::bits) {
			masked.data[Start / chunk::bits] &= stop_mask | start_mask;
		} else {
			masked.data[Start / chunk::bits] &= start_mask;
			for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
				masked.data[n] = 0;
			masked.data[Stop / chunk::bits] &= stop_mask;
		}
		value<Bits> shifted = source
			.template rzext<Stop + 1>()
			.template zext<Bits>();
		return masked.bit_or(shifted);
	}
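
	// For illustration, blitting zeroes into bits 5..2 of an 8-bit value:
	//
	//   value<8> v { 0xffu };
	//   v.blit<5, 2>(value<4> { 0x0u }); // 0xc3, i.e. 0b11000011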

	// Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
	// than the operand. In C++17 these can be replaced with `if constexpr`.
	template<size_t NewBits, typename = void>
	struct zext_cast {
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template zext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits, typename = void>
	struct sext_cast {
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template sext<NewBits>();
		}
	};

	template<size_t NewBits>
	struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
		value<NewBits> operator()(const value<Bits> &val) {
			return val.template trunc<NewBits>();
		}
	};

	template<size_t NewBits>
	value<NewBits> zcast() const {
		return zext_cast<NewBits>()(*this);
	}

	template<size_t NewBits>
	value<NewBits> scast() const {
		return sext_cast<NewBits>()(*this);
	}
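
	// For example, zcast() and scast() pick the right operation for either direction of width change:
	//
	//   value<8> v { 0x90u };
	//   v.zcast<4>();  // 0x0:   truncated
	//   v.zcast<12>(); // 0x090: zero-extended
	//   v.scast<12>(); // 0xf90: sign-extended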

	// Operations with run-time parameters (offsets, amounts, etc).
	//
	// These operations are used for computations.
	bool bit(size_t offset) const {
		return data[offset / chunk::bits] & (chunk::type(1) << (offset % chunk::bits));
	}

	void set_bit(size_t offset, bool value = true) {
		size_t offset_chunks = offset / chunk::bits;
		size_t offset_bits = offset % chunk::bits;
		data[offset_chunks] &= ~(chunk::type(1) << offset_bits);
		data[offset_chunks] |= value ? chunk::type(1) << offset_bits : chunk::type(0);
	}

	bool is_zero() const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != 0)
				return false;
		return true;
	}

	explicit operator bool() const {
		return !is_zero();
	}

	bool is_neg() const {
		return data[chunks - 1] & (chunk::type(1) << ((Bits - 1) % chunk::bits));
	}

	bool operator ==(const value<Bits> &other) const {
		for (size_t n = 0; n < chunks; n++)
			if (data[n] != other.data[n])
				return false;
		return true;
	}

	bool operator !=(const value<Bits> &other) const {
		return !(*this == other);
	}

	value<Bits> bit_not() const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = ~data[n];
		result.data[chunks - 1] &= msb_mask;
		return result;
	}

	value<Bits> bit_and(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] & other.data[n];
		return result;
	}

	value<Bits> bit_or(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] | other.data[n];
		return result;
	}

	value<Bits> bit_xor(const value<Bits> &other) const {
		value<Bits> result;
		for (size_t n = 0; n < chunks; n++)
			result.data[n] = data[n] ^ other.data[n];
		return result;
	}

	value<Bits> update(const value<Bits> &val, const value<Bits> &mask) const {
		return bit_and(mask.bit_not()).bit_or(val.bit_and(mask));
	}

	template<size_t AmountBits>
	value<Bits> shl(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return {};
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return {};
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
			carry = (shift_bits == 0) ? 0
				: data[n] >> (chunk::bits - shift_bits);
		}
		// The shift may clobber the bits above `Bits` in the topmost chunk; clear them.
		result.data[result.chunks - 1] &= result.msb_mask;
		return result;
	}

	template<size_t AmountBits, bool Signed = false>
	value<Bits> shr(const value<AmountBits> &amount) const {
		// Ensure our early return is correct by prohibiting values larger than 4 Gbit.
		static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
		// Detect shifts definitely larger than Bits early. An arithmetic shift of a negative value
		// by at least Bits yields all ones, not zero.
		for (size_t n = 1; n < amount.chunks; n++)
			if (amount.data[n] != 0)
				return (Signed && is_neg()) ? value<Bits>().bit_not() : value<Bits>();
		// Past this point we can use the least significant chunk as the shift size.
		size_t shift_chunks = amount.data[0] / chunk::bits;
		size_t shift_bits = amount.data[0] % chunk::bits;
		if (shift_chunks >= chunks)
			return (Signed && is_neg()) ? value<Bits>().bit_not() : value<Bits>();
		value<Bits> result;
		chunk::type carry = 0;
		for (size_t n = 0; n < chunks - shift_chunks; n++) {
			result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
			carry = (shift_bits == 0) ? 0
				: data[chunks - 1 - n] << (chunk::bits - shift_bits);
		}
		if (Signed && is_neg()) {
			// Fill the bits vacated by the shift with copies of the sign bit, preserving the invariant
			// that the bits above `Bits` in the topmost chunk stay cleared.
			size_t shift_total = shift_chunks * chunk::bits + shift_bits;
			size_t sign_offset = (shift_total < Bits) ? Bits - shift_total : 0;
			size_t sign_chunk = sign_offset / chunk::bits;
			if (sign_chunk < chunks) {
				result.data[sign_chunk] |= chunk::mask << (sign_offset % chunk::bits);
				for (size_t n = sign_chunk + 1; n < chunks; n++)
					result.data[n] = chunk::mask;
				result.data[chunks - 1] &= msb_mask;
			}
		}
		return result;
	}

	template<size_t AmountBits>
	value<Bits> sshr(const value<AmountBits> &amount) const {
		return shr<AmountBits, /*Signed=*/true>(amount);
	}
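
	// For example (shift amounts are themselves values):
	//
	//   value<8> v { 0x90u };
	//   v.shl(value<3> { 1u });  // 0x20: bit 7 is shifted out
	//   v.shr(value<3> { 4u });  // 0x09: zero-filled on the left
	//   v.sshr(value<3> { 4u }); // 0xf9: sign-filled on the left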

	size_t ctpop() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			// This loop implements the population count idiom as recognized by LLVM and GCC.
			for (chunk::type x = data[n]; x != 0; count++)
				x = x & (x - 1);
		}
		return count;
	}

	size_t ctlz() const {
		size_t count = 0;
		for (size_t n = 0; n < chunks; n++) {
			chunk::type x = data[chunks - 1 - n];
			// The topmost chunk may be only partially filled; all other chunks hold chunk::bits bits.
			size_t chunk_bits = (n == 0 && Bits % chunk::bits != 0) ? Bits % chunk::bits : chunk::bits;
			if (x == 0) {
				count += chunk_bits;
			} else {
				// This loop computes the index of the most significant set bit, using the find last set
				// idiom as recognized by LLVM.
				size_t fls = 0;
				for (; x != 0; fls++)
					x >>= 1;
				count += chunk_bits - fls;
				break;
			}
		}
		return count;
	}

	template<bool Invert, bool CarryIn>
	std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
		value<Bits> result;
		bool carry = CarryIn;
		for (size_t n = 0; n < result.chunks; n++) {
			result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
			carry = (result.data[n] < data[n]) ||
			        (result.data[n] == data[n] && carry);
		}
		result.data[result.chunks - 1] &= result.msb_mask;
		return {result, carry};
	}

	value<Bits> add(const value<Bits> &other) const {
		return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
	}

	value<Bits> sub(const value<Bits> &other) const {
		return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
	}

	value<Bits> neg() const {
		return value<Bits> { 0u }.sub(*this);
	}

	bool ucmp(const value<Bits> &other) const {
		bool carry;
		std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		return !carry; // a.ucmp(b) ≡ a u< b
	}

	bool scmp(const value<Bits> &other) const {
		value<Bits> result;
		bool carry;
		std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
		bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
		return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
	}
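
	// For example, both ucmp() and scmp() compute a strict "less than", differing only in signedness:
	//
	//   value<4> a { 0x3u }, b { 0xcu };
	//   a.ucmp(b); // true:  3 u< 12
	//   a.scmp(b); // false: 3 s< -4 does not hold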
};

// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, size_t Stop, size_t Start>
struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
	static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
	static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
	static constexpr size_t bits = Stop - Start + 1;

	T &expr;

	slice_expr(T &expr) : expr(expr) {}
	slice_expr(const slice_expr<T, Stop, Start> &) = delete;

	operator value<bits>() const {
		return static_cast<const value<T::bits> &>(expr)
			.template rtrunc<T::bits - Start>()
			.template trunc<bits>();
	}

	slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
		// Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
		expr = static_cast<const value<T::bits> &>(expr)
			.template blit<Stop, Start>(rhs);
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
template<class T, class U>
struct concat_expr : public expr_base<concat_expr<T, U>> {
	static constexpr size_t bits = T::bits + U::bits;

	T &ms_expr;
	U &ls_expr;

	concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
	concat_expr(const concat_expr<T, U> &) = delete;

	operator value<bits>() const {
		value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
			.template rzext<bits>();
		value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
			.template zext<bits>();
		return ms_shifted.bit_or(ls_extended);
	}

	concat_expr<T, U> &operator=(const value<bits> &rhs) {
		ms_expr = rhs.template rtrunc<T::bits>();
		ls_expr = rhs.template trunc<U::bits>();
		return *this;
	}

	// A helper that forces the cast to value<>, which allows deduction to work.
	value<bits> val() const {
		return static_cast<const value<bits> &>(*this);
	}
};

// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
//
// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
// these snippets result in a use-after-free:
//
//   const auto &a = val.slice<7,0>().slice<1>();
//   value<1> b = a;
//
//   auto &&c = val.slice<7,0>().slice<1>();
//   c = value<1>{1u};
//
// An easy way to write code using slices and concatenations safely is to follow two simple rules:
//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
//   * Never use a `const auto &` or `auto &&` in any such expression.
// Then, any code that compiles will be well-defined.
template<class T>
struct expr_base {
	template<size_t Stop, size_t Start = Stop>
	slice_expr<const T, Stop, Start> slice() const {
		return {*static_cast<const T *>(this)};
	}

	template<size_t Stop, size_t Start = Stop>
	slice_expr<T, Stop, Start> slice() {
		return {*static_cast<T *>(this)};
	}

	template<class U>
	concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
		return {*static_cast<const T *>(this), other};
	}

	template<class U>
	concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
		return {*static_cast<T *>(this), other};
	}
};
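
// For example, following the rules above, slices and concatenations are used as parts of a single statement
// and materialized into value<>s via val() or assignment:
//
//   value<8> v { 0xa5u };
//   value<4> hi = v.slice<7, 4>().val();           // reads bits 7..4
//   v.slice<3, 0>() = value<4> { 0xfu };           // writes bits 3..0
//   value<8> w = hi.concat(v.slice<3, 0>()).val(); // rvalue concatenation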

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
	auto old_flags = os.flags(std::ios::right);
	auto old_width = os.width(0);
	auto old_fill = os.fill('0');
	os << val.bits << '\'' << std::hex;
	for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
		if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
			os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
		else
			os.width((value<Bits>::chunk::bits + 3) / 4);
		os << val.data[n];
	}
	os.fill(old_fill);
	os.width(old_width);
	os.flags(old_flags);
	return os;
}

template<size_t Bits>
struct wire {
	static constexpr size_t bits = Bits;

	value<Bits> curr;
	value<Bits> next;

	wire() = default;
	constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
	template<typename... Init>
	explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}

	wire(const wire<Bits> &) = delete;
	wire(wire<Bits> &&) = default;
	wire<Bits> &operator=(const wire<Bits> &) = delete;

	bool commit() {
		if (curr != next) {
			curr = next;
			return true;
		}
		return false;
	}
};
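
// For example, a register with an input `d` (some value<8> computed during the eval phase) is modeled
// by assigning to `next` and committing afterwards:
//
//   wire<8> q { 0u };
//   q.next = d;                // eval phase: compute the next state
//   bool changed = q.commit(); // commit phase: update q.curr and report whether it changed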

template<size_t Bits>
std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
	os << val.curr;
	return os;
}

template<size_t Width>
struct memory {
	std::vector<value<Width>> data;

	size_t depth() const {
		return data.size();
	}

	memory() = delete;
	explicit memory(size_t depth) : data(depth) {}

	memory(const memory<Width> &) = delete;
	memory<Width> &operator=(const memory<Width> &) = delete;

	// The only way to get the compiler to put the initializer in .rodata and not copy it onto the stack is to stuff it
	// into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
	// construct an initializer_list in a constexpr context (or something), and so if you try to do that the whole thing
	// is first copied on the stack (probably overflowing it) and then again into `data`.
	template<size_t Size>
	struct init {
		size_t offset;
		value<Width> data[Size];
	};

	template<size_t... InitSize>
	explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
		// This utterly reprehensible construct is the most reasonable way to apply a function to every element
		// of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
		auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
	}

	// An operator for direct memory reads. May be used at any time during the simulation.
	const value<Width> &operator [](size_t index) const {
		assert(index < data.size());
		return data[index];
	}

	// An operator for direct memory writes. May only be used before the simulation is started. If used
	// after the simulation is started, the design may malfunction.
	value<Width> &operator [](size_t index) {
		assert(index < data.size());
		return data[index];
	}

	// A simple way to make a writable memory would be to use an array of wires instead of an array of values.
	// However, there are two significant downsides to this approach: first, it has large overhead (2× space
	// overhead, and O(depth) time overhead during commit); second, it does not simplify handling write port
	// priorities. Although in principle write ports could be ordered or conditionally enabled in generated
	// code based on their priorities and selected addresses, the feedback arc set problem is computationally
	// expensive, and the heuristic-based algorithms are not easily modified to guarantee (rather than prefer)
	// a particular write port evaluation order.
	//
	// The approach used here instead is to queue writes into a buffer during the eval phase, then perform
	// the writes during the commit phase in the priority order. This approach has low overhead, with both space
	// and time proportional to the number of write ports. Because virtually every memory in a practical design
	// has at most two write ports, linear search is used on every write, being the fastest and simplest approach.
	struct write {
		size_t index;
		value<Width> val;
		value<Width> mask;
		int priority;
	};
	std::vector<write> write_queue;

	void update(size_t index, const value<Width> &val, const value<Width> &mask, int priority = 0) {
		assert(index < data.size());
		// Queue up the write while keeping the queue sorted by priority.
		write_queue.insert(
			std::upper_bound(write_queue.begin(), write_queue.end(), priority,
				[](const int a, const write& b) { return a < b.priority; }),
			write { index, val, mask, priority });
	}

	bool commit() {
		bool changed = false;
		for (const write &entry : write_queue) {
			value<Width> elem = data[entry.index];
			elem = elem.update(entry.val, entry.mask);
			changed |= (data[entry.index] != elem);
			data[entry.index] = elem;
		}
		write_queue.clear();
		return changed;
	}
};
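
// For example, two write ports with different priorities queue their writes during the eval phase, and
// commit() applies them in priority order (the mask selects which bits are written):
//
//   memory<4> mem(/*depth=*/16);
//   mem.update(/*index=*/3, value<4> { 0x2u }, /*mask=*/value<4> { 0xfu }, /*priority=*/0);
//   mem.update(/*index=*/3, value<4> { 0x7u }, /*mask=*/value<4> { 0xfu }, /*priority=*/1);
//   mem.commit(); // mem[3] is now 0x7: the higher priority write is applied last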

struct metadata {
	const enum {
		MISSING = 0,
		UINT = 1,
		SINT = 2,
		STRING = 3,
		DOUBLE = 4,
	} value_type;

	// In debug mode, using the wrong .as_*() function will assert.
	// In release mode, using the wrong .as_*() function will safely return a default value.
	union {
		const unsigned uint_value = 0;
		const signed sint_value;
	};
	const std::string string_value = "";
	const double double_value = 0.0;

	metadata() : value_type(MISSING) {}
	metadata(unsigned value) : value_type(UINT), uint_value(value) {}
	metadata(signed value) : value_type(SINT), sint_value(value) {}
	metadata(const std::string &value) : value_type(STRING), string_value(value) {}
	metadata(const char *value) : value_type(STRING), string_value(value) {}
	metadata(double value) : value_type(DOUBLE), double_value(value) {}

	metadata(const metadata &) = default;
	metadata &operator=(const metadata &) = delete;

	unsigned as_uint() const {
		assert(value_type == UINT);
		return uint_value;
	}

	signed as_sint() const {
		assert(value_type == SINT);
		return sint_value;
	}

	const std::string &as_string() const {
		assert(value_type == STRING);
		return string_value;
	}

	double as_double() const {
		assert(value_type == DOUBLE);
		return double_value;
	}
};

typedef std::map<std::string, metadata> metadata_map;

struct module {
	module() {}
	virtual ~module() {}

	module(const module &) = delete;
	module &operator=(const module &) = delete;

	virtual bool eval() = 0;
	virtual bool commit() = 0;

	size_t step() {
		size_t deltas = 0;
		bool converged = false;
		do {
			converged = eval();
			deltas++;
		} while (commit() && !converged);
		return deltas;
	}
};
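
// For example, a testbench can toggle a clock input and call step() after each edge, which repeats the
// eval/commit cycle until the design converges. The design class and port names below (`cxxrtl_design::p_top`,
// `p_clk`) are hypothetical; the actual names are emitted by `write_cxxrtl` based on the design:
//
//   cxxrtl_design::p_top top;
//   for (int cycle = 0; cycle < 1000; cycle++) {
//       top.p_clk = value<1> { 0u }; top.step();
//       top.p_clk = value<1> { 1u }; top.step();
//   }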

} // namespace cxxrtl

// Definitions of internal Yosys cells. Other than the functions in this namespace, cxxrtl is fully generic
// and independent of Yosys implementation details.
//
// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
// functions. All of Yosys's arithmetic and logical cells perform sign or zero extension on their operands,
// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
// bridge the gap by performing the necessary casts. They are named similarly to `cell_A[B]`, where A and B are `u`
// if the corresponding operand is unsigned, and `s` if it is signed.
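//
// For example, an addition of a 4-bit unsigned operand and a 6-bit unsigned operand into an 8-bit result
// would be translated to a call like `add_uu<8>(a, b)` (illustrative; the exact calls are emitted by
// `write_cxxrtl`).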
namespace cxxrtl_yosys {

using namespace cxxrtl;

// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
template<class T>
constexpr T max(const T &a, const T &b) {
	return a > b ? a : b;
}

// Logic operations
template<size_t BitsY, size_t BitsA>
value<BitsY> not_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> not_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().bit_not();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> logic_not_u(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> logic_not_s(const value<BitsA> &a) {
	return value<BitsY> { a ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_and_u(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_and_s(const value<BitsA> &a) {
	return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_or_u(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_or_s(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xor_u(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xor_s(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xnor_u(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_xnor_s(const value<BitsA> &a) {
	return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_bool_u(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA>
value<BitsY> reduce_bool_s(const value<BitsA> &a) {
	return value<BitsY> { a ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_and_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_and_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_or_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> logic_or_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().template shl(b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template shr(b).template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template sshr(b).template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_su<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_us<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return shift_ss<BitsY>(a, b);
}

// Comparison operations
template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return eq_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_uu<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return ne_ss<BitsY>(a, b);
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t BitsExt = max(BitsA, BitsB);
	return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
}

// Arithmetic operations
template<size_t BitsY, size_t BitsA>
value<BitsY> pos_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> pos_s(const value<BitsA> &a) {
	return a.template scast<BitsY>();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> neg_u(const value<BitsA> &a) {
	return a.template zcast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA>
value<BitsY> neg_s(const value<BitsA> &a) {
	return a.template scast<BitsY>().neg();
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().add(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return a.template scast<BitsY>().sub(b.template scast<BitsY>());
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsY> product;
	value<BitsY> multiplicand = a.template zcast<BitsY>();
	const value<BitsB> &multiplier = b;
	uint32_t multiplicand_shift = 0;
	for (size_t step = 0; step < BitsB; step++) {
		if (multiplier.bit(step)) {
			multiplicand = multiplicand.shl(value<32> { multiplicand_shift });
			product = product.add(multiplicand);
			multiplicand_shift = 0;
		}
		multiplicand_shift++;
	}
	return product;
}
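
// For illustration, multiplying 5 (0b101) by 6 (0b110) with mul_uu performs two adds: at multiplier bit 1
// the multiplicand is shifted to 10 and added, and at bit 2 it is shifted once more to 20 and added,
// giving 30. The multiplicand shift is accumulated across clear multiplier bits instead of being applied
// on every step.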

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y = mul_uu<BitsY>(a.template scast<BitsY>(), ub);
	return b.is_neg() ? y.neg() : y;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
	value<Bits> quotient;
	value<Bits> dividend = a.template zext<Bits>();
	value<Bits> divisor = b.template zext<Bits>();
	if (dividend.ucmp(divisor))
		return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
	// Left-align the divisor with the dividend. The early return above guarantees dividend u>= divisor,
	// so the difference of the leading zero counts is non-negative.
	uint32_t divisor_shift = divisor.ctlz() - dividend.ctlz();
	divisor = divisor.shl(value<32> { divisor_shift });
	for (size_t step = 0; step <= divisor_shift; step++) {
		quotient = quotient.shl(value<1> { 1u });
		if (!dividend.ucmp(divisor)) {
			dividend = dividend.sub(divisor);
			quotient.set_bit(0, true);
		}
		divisor = divisor.shr(value<1> { 1u });
	}
	return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
}
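
// For illustration, divmod_uu(100, 9) left-aligns the divisor (9 shifted left by 3, to 72, since the
// leading zero counts differ by 3) and performs 4 restoring steps: 100-72=28 (quotient bit 3 set),
// 28 u< 36 (bit 2 clear), 28-18=10 (bit 1 set), 10-9=1 (bit 0 set), giving quotient 11 and remainder 1.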

template<size_t BitsY, size_t BitsA, size_t BitsB>
std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	value<BitsA + 1> ua = a.template sext<BitsA + 1>();
	value<BitsB + 1> ub = b.template sext<BitsB + 1>();
	if (ua.is_neg()) ua = ua.neg();
	if (ub.is_neg()) ub = ub.neg();
	value<BitsY> y, r;
	std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
	if (a.is_neg() != b.is_neg()) y = y.neg();
	if (a.is_neg()) r = r.neg();
	return {y, r};
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).first;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_uu<BitsY>(a, b).second;
}

template<size_t BitsY, size_t BitsA, size_t BitsB>
value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
	return divmod_ss<BitsY>(a, b).second;
}

// Memory helper
struct memory_index {
	bool valid;
	size_t index;

	template<size_t BitsAddr>
	memory_index(const value<BitsAddr> &addr, size_t offset, size_t depth) {
		static_assert(value<BitsAddr>::chunks <= 1, "memory address is too wide");
		size_t offset_index = addr.data[0];

		valid = (offset_index >= offset && offset_index < offset + depth);
		index = offset_index - offset;
	}
};
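
// For example, a read port with a 4-bit address and a memory of depth 10 could use memory_index to
// range-check the address before accessing the array (illustrative; `addr` and `mem` are placeholders):
//
//   memory_index idx(addr, /*offset=*/0, /*depth=*/10);
//   if (idx.valid)
//       data = mem[idx.index];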

} // namespace cxxrtl_yosys

#endif