write_cxxrtl: new backend.
author whitequark <whitequark@whitequark.org>
Sun, 1 Dec 2019 01:51:16 +0000 (01:51 +0000)
committer whitequark <whitequark@whitequark.org>
Thu, 9 Apr 2020 04:08:36 +0000 (04:08 +0000)
This commit adds a basic implementation that isn't very performant
but implements most of the planned features.

Makefile
backends/cxxrtl/Makefile.inc [new file with mode: 0644]
backends/cxxrtl/cxxrtl.cc [new file with mode: 0644]
backends/cxxrtl/cxxrtl.h [new file with mode: 0644]
kernel/yosys.cc
kernel/yosys.h

index 218863b32dad67d0e0e02b5b25e6b1a6b6379320..d1c4a2030ff3306ea3ab44709cbc938d8e303b32 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -546,6 +546,7 @@ $(eval $(call add_include_file,libs/json11/json11.hpp))
 $(eval $(call add_include_file,passes/fsm/fsmdata.h))
 $(eval $(call add_include_file,frontends/ast/ast.h))
 $(eval $(call add_include_file,backends/ilang/ilang_backend.h))
+$(eval $(call add_include_file,backends/cxxrtl/cxxrtl.h))
 
 OBJS += kernel/driver.o kernel/register.o kernel/rtlil.o kernel/log.o kernel/calc.o kernel/yosys.o
 OBJS += kernel/cellaigs.o kernel/celledges.o
diff --git a/backends/cxxrtl/Makefile.inc b/backends/cxxrtl/Makefile.inc
new file mode 100644 (file)
index 0000000..f93e65f
--- /dev/null
@@ -0,0 +1,2 @@
+
+OBJS += backends/cxxrtl/cxxrtl.o
diff --git a/backends/cxxrtl/cxxrtl.cc b/backends/cxxrtl/cxxrtl.cc
new file mode 100644 (file)
index 0000000..2dc7b3d
--- /dev/null
@@ -0,0 +1,904 @@
+/*
+ *  yosys -- Yosys Open SYnthesis Suite
+ *
+ *  Copyright (C) 2019  whitequark <whitequark@whitequark.org>
+ *
+ *  Permission to use, copy, modify, and/or distribute this software for any
+ *  purpose with or without fee is hereby granted, provided that the above
+ *  copyright notice and this permission notice appear in all copies.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "kernel/rtlil.h"
+#include "kernel/register.h"
+#include "kernel/sigtools.h"
+#include "kernel/celltypes.h"
+#include "kernel/log.h"
+
+USING_YOSYS_NAMESPACE
+PRIVATE_NAMESPACE_BEGIN
+
+// Generates a C++ simulation model ("cxxrtl") from RTLIL.
+struct CxxrtlWorker {
+       // Output stream receiving the generated C++ source.
+       std::ostream &f;
+       // Current indentation prefix for emitted lines (one tab per level).
+       std::string indent;
+       // Counter backing fresh_temporary(); yields unique generated local names.
+       int temporary = 0;
+
+       // Per-module signal canonicalization maps; presumably populated during analysis
+       // before dumping starts — TODO confirm against the code that fills them.
+       dict<const RTLIL::Module*, SigMap> sigmaps;
+       // Wires that carry edge-sensitive (sync) signals and need edge-detection flags.
+       pool<const RTLIL::Wire*> sync_wires;
+       // Edge sensitivity recorded for each sync signal bit.
+       dict<RTLIL::SigBit, RTLIL::SyncType> sync_types;
+       // Memories written by at least one $memwr cell; emitted as memory_rw, others as memory_ro.
+       pool<const RTLIL::Memory*> writable_memories;
+
+       CxxrtlWorker(std::ostream &f) : f(f) {}
+
+       // Increase emitted-code indentation by one tab.
+       void inc_indent() {
+               indent += "\t";
+       }
+       // Decrease emitted-code indentation by one tab.
+       void dec_indent() {
+               indent.resize(indent.size() - 1);
+       }
+
+       // RTLIL allows any characters in names other than whitespace. This presents an issue for generating C++ code
+       // because C++ identifiers may be only alphanumeric, cannot clash with C++ keywords, and cannot clash with cxxrtl
+       // identifiers. This issue can be solved with a name mangling scheme. We choose a name mangling scheme that results
+       // in readable identifiers, does not depend on an up-to-date list of C++ keywords, and is easy to apply. Its rules:
+       //  1. All generated identifiers start with `_`.
+       //  1a. Generated identifiers for public names (beginning with `\`) start with `p_`.
+       //  1b. Generated identifiers for internal names (beginning with `$`) start with `i_`.
+       //  2. An underscore is escaped with another underscore, i.e. `__`.
+       //  3. Any other non-alnum character is escaped with underscores around its lowercase hex code, e.g. `@` as `_40_`.
+       std::string mangle_name(const RTLIL::IdString &name)
+       {
+               std::string mangled;
+               bool first = true;
+               for (char c : name.str()) {
+                       if (first) {
+                               first = false;
+                               if (c == '\\')
+                                       mangled += "p_";
+                               else if (c == '$')
+                                       mangled += "i_";
+                               else
+                                       log_assert(false);
+                       } else {
+                               if (isalnum(c)) {
+                                       mangled += c;
+                               } else if (c == '_') {
+                                       mangled += "__";
+                               } else {
+                                       char l = c & 0xf, h = (c >> 4) & 0xf;
+                                       mangled += '_';
+                                       mangled += (h < 10 ? '0' + h : 'a' + h - 10);
+                                       mangled += (l < 10 ? '0' + l : 'a' + l - 10);
+                                       mangled += '_';
+                               }
+                       }
+               }
+               return mangled;
+       }
+
+       std::string mangle_module_name(const RTLIL::IdString &name)
+       {
+               // Class namespace. Modules map directly to C++ classes, so no extra prefix is needed.
+               return mangle_name(name);
+       }
+
+       std::string mangle_memory_name(const RTLIL::IdString &name)
+       {
+               // Class member namespace. The `memory_` prefix keeps memories from colliding with
+               // wire members mangled from the same RTLIL name.
+               return "memory_" + mangle_name(name);
+       }
+
+       std::string mangle_wire_name(const RTLIL::IdString &name)
+       {
+               // Class member namespace. Wires use the bare mangled name.
+               return mangle_name(name);
+       }
+
+       // Convenience overload: mangled C++ class name for a module.
+       std::string mangle(const RTLIL::Module *module)
+       {
+               return mangle_module_name(module->name);
+       }
+
+       // Convenience overload: mangled C++ member name for a memory.
+       std::string mangle(const RTLIL::Memory *memory)
+       {
+               return mangle_memory_name(memory->name);
+       }
+
+       // Convenience overload: mangled C++ member name for a wire.
+       std::string mangle(const RTLIL::Wire *wire)
+       {
+               return mangle_wire_name(wire->name);
+       }
+
+       std::string mangle(RTLIL::SigBit sigbit)
+       {
+               log_assert(sigbit.wire != NULL);
+               if (sigbit.wire->width == 1)
+                       return mangle(sigbit.wire);
+               return mangle(sigbit.wire) + "_" + std::to_string(sigbit.offset);
+       }
+
+       // Return a unique name (`tmp_0`, `tmp_1`, ...) for a compiler-generated local.
+       std::string fresh_temporary()
+       {
+               return "tmp_" + std::to_string(temporary++);
+       }
+
+       // Emit the RTLIL attributes of `object` as C++ `//` comments, one per attribute,
+       // decoding string attributes and printing others as (possibly signed) integers.
+       void dump_attrs(const RTLIL::AttrObject *object)
+       {
+               for (auto attr : object->attributes) {
+                       f << indent << "// " << attr.first.str() << ": ";
+                       if (attr.second.flags & RTLIL::CONST_FLAG_STRING) {
+                               f << attr.second.decode_string();
+                       } else {
+                               f << attr.second.as_int(/*is_signed=*/attr.second.flags & RTLIL::CONST_FLAG_SIGNED);
+                       }
+                       f << "\n";
+               }
+       }
+
+       // Emit a braced initializer list for (part of) a constant, split LSB-first into
+       // 32-bit chunks (presumably matching the chunk size of the runtime `value<>`
+       // template — TODO confirm against cxxrtl.h). With `fixed_width`, every chunk is
+       // printed as a zero-padded 8-digit hex literal.
+       void dump_const_init(const RTLIL::Const &data, int width, int offset = 0, bool fixed_width = false)
+       {
+               f << "{";
+               while (width > 0) {
+                       const int CHUNK_SIZE = 32;
+                       uint32_t chunk = data.extract(offset, width > CHUNK_SIZE ? CHUNK_SIZE : width).as_int();
+                       if (fixed_width)
+                               f << stringf("0x%08xu", chunk);
+                       else
+                               f << stringf("%#xu", chunk);
+                       // Comma-separate chunks; the final chunk carries no trailing comma.
+                       if (width > CHUNK_SIZE)
+                               f << ',';
+                       offset += CHUNK_SIZE;
+                       width  -= CHUNK_SIZE;
+               }
+               f << "}";
+       }
+
+       // Emit an initializer covering the whole constant at its natural width.
+       void dump_const_init(const RTLIL::Const &data)
+       {
+               dump_const_init(data, data.size());
+       }
+
+       // Emit a `value<width>{...}` expression for (part of) a constant.
+       void dump_const(const RTLIL::Const &data, int width, int offset = 0, bool fixed_width = false)
+       {
+               f << "value<" << width << ">";
+               dump_const_init(data, width, offset, fixed_width);
+       }
+
+       // Emit a `value<N>{...}` expression for the whole constant at its natural width.
+       void dump_const(const RTLIL::Const &data)
+       {
+               dump_const(data, data.size());
+       }
+
+       // Emit a C++ expression for one signal chunk. `is_lhs` selects the wire's `.next`
+       // (write) or `.curr` (read) buffer. Returns true when the emitted expression is
+       // "complex" (a slice rather than a plain constant or whole-wire reference); the
+       // caller uses this to decide whether `.val()` is needed on the RHS.
+       bool dump_sigchunk(const RTLIL::SigChunk &chunk, bool is_lhs)
+       {
+               if (chunk.wire == NULL) {
+                       // Constant chunk.
+                       dump_const(chunk.data, chunk.width, chunk.offset);
+                       return false;
+               } else {
+                       f << mangle(chunk.wire) << (is_lhs ? ".next" : ".curr");
+                       // A chunk spanning the whole wire needs no slicing.
+                       if (chunk.width == chunk.wire->width && chunk.offset == 0)
+                               return false;
+                       else if (chunk.width == 1)
+                               f << ".slice<" << chunk.offset << ">()";
+                       else
+                               f << ".slice<" << chunk.offset+chunk.width-1 << "," << chunk.offset << ">()";
+                       return true;
+               }
+       }
+
+       // Emit a C++ expression for a signal. An empty spec becomes `value<0>()`; a single
+       // chunk is emitted directly; multiple chunks are emitted starting from the last
+       // stored chunk and chained with `.concat(...)`. Returns true if the result is a
+       // "complex" expression (slice/concat) rather than a plain reference.
+       bool dump_sigspec(const RTLIL::SigSpec &sig, bool is_lhs)
+       {
+               if (sig.empty()) {
+                       f << "value<0>()";
+                       return false;
+               } else if (sig.is_chunk()) {
+                       return dump_sigchunk(sig.as_chunk(), is_lhs);
+               } else {
+                       dump_sigchunk(*sig.chunks().rbegin(), is_lhs);
+                       for (auto it = sig.chunks().rbegin() + 1; it != sig.chunks().rend(); ++it) {
+                               f << ".concat(";
+                               dump_sigchunk(*it, is_lhs);
+                               f << ")";
+                       }
+                       return true;
+               }
+       }
+
+       // Emit a sigspec as an assignment target (writes go through the `.next` buffer).
+       void dump_sigspec_lhs(const RTLIL::SigSpec &sig)
+       {
+               dump_sigspec(sig, /*is_lhs=*/true);
+       }
+
+       // Emit a sigspec as a right-hand-side expression, materializing it into a plain
+       // `value<N>` (via `.val()`) when the emitted expression is a slice/concatenation.
+       void dump_sigspec_rhs(const RTLIL::SigSpec &sig)
+       {
+               // In the contexts where we want template argument deduction to occur for `template<size_t Bits> ... value<Bits>`,
+               // it is necessary to have the argument to already be a `value<N>`, since template argument deduction and implicit
+               // type conversion are mutually exclusive. In these contexts, we use dump_sigspec_rhs() to emit an explicit
+               // type conversion, but only if the expression needs it.
+               bool is_complex = dump_sigspec(sig, /*is_lhs=*/false);
+               if (is_complex)
+                       f << ".val()";
+       }
+
+       // Emit `lhs = rhs;` for a single RTLIL connection/action.
+       void dump_assign(const RTLIL::SigSig &sigsig)
+       {
+               f << indent;
+               dump_sigspec_lhs(sigsig.first);
+               f << " = ";
+               dump_sigspec_rhs(sigsig.second);
+               f << ";\n";
+       }
+
+       void dump_cell(const RTLIL::Cell *cell)
+       {
+               dump_attrs(cell);
+               f << indent << "// cell " << cell->name.str() << "\n";
+               // Unary cells
+               if (cell->type.in(
+                   ID($not), ID($logic_not), ID($reduce_and), ID($reduce_or), ID($reduce_xor), ID($reduce_xnor), ID($reduce_bool),
+                   ID($pos), ID($neg))) {
+                       f << indent;
+                       dump_sigspec_lhs(cell->getPort(ID(Y)));
+                       f << " = " << cell->type.substr(1) << '_' <<
+                            (cell->getParam(ID(A_SIGNED)).as_bool() ? 's' : 'u') <<
+                            "<" << cell->getParam(ID(Y_WIDTH)).as_int() << ">(";
+                       dump_sigspec_rhs(cell->getPort(ID(A)));
+                       f << ");\n";
+               // Binary cells
+               } else if (cell->type.in(
+                   ID($and), ID($or), ID($xor), ID($xnor), ID($logic_and), ID($logic_or),
+                   ID($shl), ID($sshl), ID($shr), ID($sshr), ID($shift), ID($shiftx),
+                   ID($eq), ID($ne), ID($eqx), ID($nex), ID($gt), ID($ge), ID($lt), ID($le),
+                   ID($add), ID($sub), ID($mul), ID($div), ID($mod))) {
+                       f << indent;
+                       dump_sigspec_lhs(cell->getPort(ID(Y)));
+                       f << " = " << cell->type.substr(1) << '_' <<
+                            (cell->getParam(ID(A_SIGNED)).as_bool() ? 's' : 'u') <<
+                            (cell->getParam(ID(B_SIGNED)).as_bool() ? 's' : 'u') <<
+                            "<" << cell->getParam(ID(Y_WIDTH)).as_int() << ">(";
+                       dump_sigspec_rhs(cell->getPort(ID(A)));
+                       f << ", ";
+                       dump_sigspec_rhs(cell->getPort(ID(B)));
+                       f << ");\n";
+               // Muxes
+               } else if (cell->type == ID($mux)) {
+                       f << indent;
+                       dump_sigspec_lhs(cell->getPort(ID(Y)));
+                       f << " = ";
+                       dump_sigspec_rhs(cell->getPort(ID(S)));
+                       f << " ? ";
+                       dump_sigspec_rhs(cell->getPort(ID(B)));
+                       f << " : ";
+                       dump_sigspec_rhs(cell->getPort(ID(A)));
+                       f << ";\n";
+               // Parallel (one-hot) muxes
+               } else if (cell->type == ID($pmux)) {
+                       int width = cell->getParam(ID(WIDTH)).as_int();
+                       int s_width = cell->getParam(ID(S_WIDTH)).as_int();
+                       bool first = true;
+                       for (int part = 0; part < s_width; part++) {
+                               f << (first ? indent : " else ");
+                               first = false;
+                               f << "if (";
+                               dump_sigspec_rhs(cell->getPort(ID(S)).extract(part));
+                               f << ") {\n";
+                               inc_indent();
+                                       f << indent;
+                                       dump_sigspec_lhs(cell->getPort(ID(Y)));
+                                       f << " = ";
+                                       dump_sigspec_rhs(cell->getPort(ID(B)).extract(part * width, width));
+                                       f << ";\n";
+                               dec_indent();
+                               f << indent << "}";
+                       }
+                       f << " else {\n";
+                       inc_indent();
+                               f << indent;
+                               dump_sigspec_lhs(cell->getPort(ID(Y)));
+                               f << " = ";
+                               dump_sigspec_rhs(cell->getPort(ID(A)));
+                               f << ";\n";
+                       dec_indent();
+                       f << indent << "}\n";
+               // Flip-flops
+               } else if (cell->type.in(ID($dff), ID($dffe), ID($adff), ID($dffsr))) {
+                       if (cell->getPort(ID(CLK)).is_wire()) {
+                               // Edge-sensitive logic
+                               RTLIL::SigBit clk_bit = cell->getPort(ID(CLK))[0];
+                               clk_bit = sigmaps[clk_bit.wire->module](clk_bit);
+                               f << indent << "if (" << (cell->getParam(ID(CLK_POLARITY)).as_bool() ? "posedge_" : "negedge_")
+                                           << mangle(clk_bit) << ") {\n";
+                               inc_indent();
+                                       if (cell->type == ID($dffe)) {
+                                               f << indent << "if (";
+                                               dump_sigspec_rhs(cell->getPort(ID(EN)));
+                                               f << " == value<1> {" << cell->getParam(ID(EN_POLARITY)).as_bool() << "}) {\n";
+                                               inc_indent();
+                                       }
+                                       f << indent;
+                                       dump_sigspec_lhs(cell->getPort(ID(Q)));
+                                       f << " = ";
+                                       dump_sigspec_rhs(cell->getPort(ID(D)));
+                                       f << ";\n";
+                                       if (cell->type == ID($dffe)) {
+                                               dec_indent();
+                                               f << indent << "}\n";
+                                       }
+                               dec_indent();
+                               f << indent << "}\n";
+                       }
+                       // Level-sensitive logic
+                       if (cell->type == ID($adff)) {
+                               f << indent << "if (";
+                               dump_sigspec_rhs(cell->getPort(ID(ARST)));
+                               f << " == value<1> {" << cell->getParam(ID(ARST_POLARITY)).as_bool() << "}) {\n";
+                               inc_indent();
+                                       f << indent;
+                                       dump_sigspec_lhs(cell->getPort(ID(Q)));
+                                       f << " = ";
+                                       dump_const(cell->getParam(ID(ARST_VALUE)));
+                                       f << ";\n";
+                               dec_indent();
+                               f << indent << "}\n";
+                       } else if (cell->type == ID($dffsr)) {
+                               f << indent << "if (";
+                               dump_sigspec_rhs(cell->getPort(ID(CLR)));
+                               f << " == value<1> {" << cell->getParam(ID(CLR_POLARITY)).as_bool() << "}) {\n";
+                               inc_indent();
+                                       f << indent;
+                                       dump_sigspec_lhs(cell->getPort(ID(Q)));
+                                       f << " = ";
+                                       dump_const(RTLIL::Const(RTLIL::S0, cell->getParam(ID(WIDTH)).as_int()));
+                                       f << ";\n";
+                               dec_indent();
+                               f << indent << "} else if (";
+                               dump_sigspec_rhs(cell->getPort(ID(SET)));
+                               f << " == value<1> {" << cell->getParam(ID(SET_POLARITY)).as_bool() << "}) {\n";
+                               inc_indent();
+                                       f << indent;
+                                       dump_sigspec_lhs(cell->getPort(ID(Q)));
+                                       f << " = ";
+                                       dump_const(RTLIL::Const(RTLIL::S1, cell->getParam(ID(WIDTH)).as_int()));
+                                       f << ";\n";
+                               dec_indent();
+                               f << indent << "}\n";
+                       }
+               // Memory ports
+               } else if (cell->type.in(ID($memrd), ID($memwr))) {
+                       if (cell->getParam(ID(CLK_ENABLE)).as_bool()) {
+                               RTLIL::SigBit clk_bit = cell->getPort(ID(CLK))[0];
+                               clk_bit = sigmaps[clk_bit.wire->module](clk_bit);
+                               f << indent << "if (" << (cell->getParam(ID(CLK_POLARITY)).as_bool() ? "posedge_" : "negedge_")
+                                           << mangle(clk_bit) << ") {\n";
+                               inc_indent();
+                       }
+                       RTLIL::Memory *memory = cell->module->memories[cell->getParam(ID(MEMID)).decode_string()];
+                       if (cell->type == ID($memrd)) {
+                               if (!cell->getPort(ID(EN)).is_fully_ones()) {
+                                       f << indent << "if (";
+                                       dump_sigspec_rhs(cell->getPort(ID(EN)));
+                                       f << ") {\n";
+                                       inc_indent();
+                               }
+                               f << indent;
+                               dump_sigspec_lhs(cell->getPort(ID(DATA)));
+                               f << " = " << mangle(memory) << "[";
+                               dump_sigspec_rhs(cell->getPort(ID(ADDR)));
+                               if (writable_memories[memory]) {
+                                       // FIXME: the handling of transparent read ports is a bit naughty: normally, nothing on RHS should ever
+                                       // read from `next`, since this can result in evaluation order nondeterminism, as well as issues with
+                                       // latches. However, for now this is the right tradeoff to make, since it allows to simplify $memrd/$memwr
+                                       // codegen dramatically.
+                                       f << "]." << (cell->getParam(ID(TRANSPARENT)).as_bool() ? "next" : "curr") << ";\n";
+                               } else {
+                                       f << "];\n";
+                               }
+                               if (!cell->getPort(ID(EN)).is_fully_ones()) {
+                                       dec_indent();
+                                       f << indent << "}\n";
+                               }
+                       } else /*if (cell->type == ID($memwr))*/ {
+                               log_assert(writable_memories[memory]);
+                               // FIXME: handle write port priority.
+                               int width = cell->getParam(ID(WIDTH)).as_int();
+                               std::string lhs_temp = fresh_temporary();
+                               f << indent << "wire<" << width << "> &" << lhs_temp << " = " << mangle(memory) << "[";
+                               dump_sigspec_rhs(cell->getPort(ID(ADDR)));
+                               f << "];\n";
+                               int start = 0;
+                               RTLIL::SigBit prev_en_bit = RTLIL::Sm;
+                               for (int stop = 0; stop < width + 1; stop++) {
+                                       if (stop == width || (prev_en_bit != RTLIL::Sm && prev_en_bit != cell->getPort(ID(EN))[stop])) {
+                                               f << indent << "if (";
+                                               dump_sigspec_rhs(prev_en_bit);
+                                               f << ") {\n";
+                                               inc_indent();
+                                                       f << indent << lhs_temp << ".next.slice<" << (stop - 1) << "," << start << ">() = ";
+                                                       dump_sigspec_rhs(cell->getPort(ID(DATA)).extract(start, stop - start));
+                                                       f << ";\n";
+                                               dec_indent();
+                                               f << indent << "}\n";
+                                               start = stop + 1;
+                                       }
+                                       if (stop != width)
+                                               prev_en_bit = cell->getPort(ID(EN))[stop];
+                               }
+                       }
+                       if (cell->getParam(ID(CLK_ENABLE)).as_bool()) {
+                               dec_indent();
+                               f << indent << "}\n";
+                       }
+               // Memory initializers
+               } else if (cell->type == ID($meminit)) {
+                       // Handled elsewhere.
+               } else if (cell->type[0] == '$') {
+                       log_cmd_error("Unsupported internal cell `%s'.\n", cell->type.c_str());
+               } else {
+                       log_assert(false);
+               }
+       }
+
+       void dump_case_rule(const RTLIL::CaseRule *rule)
+       {
+               for (auto action : rule->actions)
+                       dump_assign(action);
+               for (auto switch_ : rule->switches)
+                       dump_switch_rule(switch_);
+       }
+
+       void dump_switch_rule(const RTLIL::SwitchRule *rule)
+       {
+               // The switch attributes are printed before the switch condition is captured.
+               dump_attrs(rule);
+               std::string signal_temp = fresh_temporary();
+               f << indent << "const value<" << rule->signal.size() << "> &" << signal_temp << " = ";
+               dump_sigspec(rule->signal, /*is_lhs=*/false);
+               f << ";\n";
+
+               bool first = true;
+               for (auto case_ : rule->cases) {
+                       // The case attributes (for nested cases) are printed before the if/else if/else statement.
+                       dump_attrs(rule);
+                       f << indent;
+                       if (!first)
+                               f << "} else ";
+                       first = false;
+                       if (!case_->compare.empty()) {
+                               f << "if (";
+                               bool first = true;
+                               for (auto &compare : case_->compare) {
+                                       if (!first)
+                                               f << " || ";
+                                       first = false;
+                                       if (compare.is_fully_def()) {
+                                               f << signal_temp << " == ";
+                                               dump_sigspec(compare, /*is_lhs=*/false);
+                                       } else if (compare.is_fully_const()) {
+                                               RTLIL::Const compare_mask, compare_value;
+                                               for (auto bit : compare.as_const()) {
+                                                       switch (bit) {
+                                                               case RTLIL::S0:
+                                                               case RTLIL::S1:
+                                                                       compare_mask.bits.push_back(RTLIL::S1);
+                                                                       compare_value.bits.push_back(bit);
+                                                                       break;
+
+                                                               case RTLIL::Sx:
+                                                               case RTLIL::Sz:
+                                                               case RTLIL::Sa:
+                                                                       compare_mask.bits.push_back(RTLIL::S0);
+                                                                       compare_value.bits.push_back(RTLIL::S0);
+                                                                       break;
+
+                                                               default:
+                                                                       log_assert(false);
+                                                       }
+                                               }
+                                               f << "and_uu<" << compare.size() << ">(" << signal_temp << ", ";
+                                               dump_const(compare_mask);
+                                               f << ") == ";
+                                               dump_const(compare_value);
+                                       } else {
+                                               log_assert(false);
+                                       }
+                               }
+                               f << ") ";
+                       }
+                       f << "{\n";
+                       inc_indent();
+                               dump_case_rule(case_);
+                       dec_indent();
+               }
+               f << indent << "}\n";
+       }
+
+       void dump_process(const RTLIL::Process *proc)
+       {
+               dump_attrs(proc);
+               f << indent << "// process " << proc->name.str() << "\n";
+               // The case attributes (for root case) are always empty.
+               log_assert(proc->root_case.attributes.empty());
+               dump_case_rule(&proc->root_case);
+               for (auto sync : proc->syncs) {
+                       RTLIL::SigBit sync_bit = sync->signal[0];
+                       sync_bit = sigmaps[sync_bit.wire->module](sync_bit);
+
+                       pool<std::string> events;
+                       switch (sync->type) {
+                               case RTLIL::STp:
+                                       events.insert("posedge_" + mangle(sync_bit));
+                                       break;
+                               case RTLIL::STn:
+                                       events.insert("negedge_" + mangle(sync_bit));
+                               case RTLIL::STe:
+                                       events.insert("posedge_" + mangle(sync_bit));
+                                       events.insert("negedge_" + mangle(sync_bit));
+                                       break;
+
+                               case RTLIL::ST0:
+                               case RTLIL::ST1:
+                               case RTLIL::STa:
+                               case RTLIL::STg:
+                               case RTLIL::STi:
+                                       log_assert(false);
+                       }
+                       if (!events.empty()) {
+                               f << indent << "if (";
+                               bool first = true;
+                               for (auto &event : events) {
+                                       if (!first)
+                                               f << " || ";
+                                       first = false;
+                                       f << event;
+                               }
+                               f << ") {\n";
+                               inc_indent();
+                                       for (auto action : sync->actions)
+                                               dump_assign(action);
+                               dec_indent();
+                               f << indent << "}\n";
+                       }
+               }
+       }
+
+       // Emit the class member declaration(s) for a wire: a `wire<N>` member (initialized
+       // from the `init` attribute when present), plus boolean posedge_/negedge_
+       // edge-detection flags for every bit of this wire used as a sync signal.
+       void dump_wire(const RTLIL::Wire *wire)
+       {
+               dump_attrs(wire);
+               f << indent << "wire<" << wire->width << "> " << mangle(wire);
+               if (wire->attributes.count(ID(init))) {
+                       f << " ";
+                       dump_const_init(wire->attributes.at(ID(init)));
+               }
+               f << ";\n";
+               if (sync_wires[wire]) {
+                       for (auto sync_type : sync_types) {
+                               if (sync_type.first.wire == wire) {
+                                       // Emit a posedge flag unless the bit is negedge-only, and vice versa;
+                                       // an any-edge (STe) bit gets both flags.
+                                       if (sync_type.second != RTLIL::STn)
+                                               f << indent << "bool posedge_" << mangle(sync_type.first) << " = false;\n";
+                                       if (sync_type.second != RTLIL::STp)
+                                               f << indent << "bool negedge_" << mangle(sync_type.first) << " = false;\n";
+                               }
+                       }
+               }
+       }
+
+       // Emit one RTLIL memory as a memory_rw<> (written by some $memwr cell) or
+       // memory_ro<> member, including its initial contents collected from the
+       // module's $meminit cells.
+       // NOTE(review): init cells are sorted by descending PRIORITY, ties broken by
+       // ascending ADDR -- confirm this matches the overwrite order expected by the
+       // generated memory_rw/memory_ro initializer (later entries usually win).
+       void dump_memory(RTLIL::Module *module, const RTLIL::Memory *memory)
+       {
+               vector<const RTLIL::Cell*> init_cells;
+               for (auto cell : module->cells())
+                       if (cell->type == ID($meminit) && cell->getParam(ID(MEMID)).decode_string() == memory->name.str())
+                               init_cells.push_back(cell);
+
+               std::sort(init_cells.begin(), init_cells.end(), [](const RTLIL::Cell *a, const RTLIL::Cell *b) {
+                       int a_addr = a->getPort(ID(ADDR)).as_int(), b_addr = b->getPort(ID(ADDR)).as_int();
+                       int a_prio = a->getParam(ID(PRIORITY)).as_int(), b_prio = b->getParam(ID(PRIORITY)).as_int();
+                       return a_prio > b_prio || (a_prio == b_prio && a_addr < b_addr);
+               });
+
+               dump_attrs(memory);
+               f << indent << "memory_" << (writable_memories[memory] ? "rw" : "ro")
+                           << "<" << memory->width << "> " << mangle(memory)
+                           << " { " << memory->size << "u";
+               if (init_cells.empty()) {
+                       f << " };\n";
+               } else {
+                       f << ",\n";
+                       inc_indent();
+                               for (auto cell : init_cells) {
+                                       dump_attrs(cell);
+                                       RTLIL::Const data = cell->getPort(ID(DATA)).as_const();
+                                       size_t width = cell->getParam(ID(WIDTH)).as_int();
+                                       size_t words = cell->getParam(ID(WORDS)).as_int();
+                                       f << indent << "memory_" << (writable_memories[memory] ? "rw" : "ro")
+                                                   << "<" << memory->width << ">::init<" << words << "> { "
+                                                   << stringf("%#x", cell->getPort(ID(ADDR)).as_int()) << ", {";
+                                       inc_indent();
+                                               // Wrap the generated initializer at four words per line.
+                                               for (size_t n = 0; n < words; n++) {
+                                                       if (n % 4 == 0)
+                                                               f << "\n" << indent;
+                                                       else
+                                                               f << " ";
+                                                       dump_const(data, width, n * width, /*fixed_width=*/true);
+                                                       f << ",";
+                                               }
+                                       dec_indent();
+                                       f << "\n" << indent << "}},\n";
+                               }
+                       dec_indent();
+                       f << indent << "};\n";
+               }
+       }
+
+       // Emit the C++ struct for one RTLIL module together with its out-of-line
+       // eval() (cells, connections, processes) and commit() (double-buffer flip
+       // plus edge detection) definitions.
+       void dump_module(RTLIL::Module *module)
+       {
+               dump_attrs(module);
+               f << "struct " << mangle(module) << " : public module {\n";
+               inc_indent();
+                       for (auto wire : module->wires())
+                               dump_wire(wire);
+                       f << "\n";
+                       for (auto memory : module->memories)
+                               dump_memory(module, memory.second);
+                       if (!module->memories.empty())
+                               f << "\n";
+                       f << indent << "void eval() override;\n";
+                       f << indent << "bool commit() override;\n";
+               dec_indent();
+               f << "}; // struct " << mangle(module) << "\n";
+               f << "\n";
+
+               f << "void " << mangle(module) << "::eval() {\n";
+               inc_indent();
+                       for (auto cell : module->cells())
+                               dump_cell(cell);
+                       f << indent << "// connections\n";
+                       for (auto conn : module->connections())
+                               dump_assign(conn);
+                       for (auto proc : module->processes)
+                               dump_process(proc.second);
+                       // Edge detector flags are consumed by this eval(); clear them at the
+                       // end so that the next commit() can raise them afresh.
+                       for (auto sync_type : sync_types) {
+                               if (sync_type.first.wire->module == module) {
+                                       if (sync_type.second != RTLIL::STn)
+                                               f << indent << "posedge_" << mangle(sync_type.first) << " = false;\n";
+                                       if (sync_type.second != RTLIL::STp)
+                                               f << indent << "negedge_" << mangle(sync_type.first) << " = false;\n";
+                               }
+                       }
+               dec_indent();
+               f << "}\n";
+
+               f << "\n";
+               f << "bool " << mangle(module) << "::commit() {\n";
+               inc_indent();
+                       f << indent << "bool changed = false;\n";
+                       for (auto wire : module->wires()) {
+                               if (sync_wires[wire]) {
+                                       // Wires carrying edge-sensitive bits: snapshot the previous value,
+                                       // commit, and XOR old/new to find the bits that toggled.
+                                       std::string wire_prev = mangle(wire) + "_prev";
+                                       std::string wire_curr = mangle(wire) + ".curr";
+                                       std::string wire_edge = mangle(wire) + "_edge";
+                                       f << indent << "value<" << wire->width << "> " << wire_prev << " = " << wire_curr << ";\n";
+                                       f << indent << "if (" << mangle(wire) << ".commit()) {\n";
+                                       inc_indent();
+                                               f << indent << "value<" << wire->width << "> " << wire_edge << " = "
+                                                           << wire_prev << ".bit_xor(" << wire_curr << ");\n";
+                                               for (auto sync_type : sync_types) {
+                                                       if (sync_type.first.wire != wire)
+                                                               continue;
+                                                       // Bit toggled and is now 1: rising edge.
+                                                       if (sync_type.second != RTLIL::STn) {
+                                                               f << indent << "if (" << wire_edge << ".slice<" << sync_type.first.offset << ">().val() && "
+                                                                           << wire_curr << ".slice<" << sync_type.first.offset << ">().val())\n";
+                                                               inc_indent();
+                                                                       f << indent << "posedge_" << mangle(sync_type.first) << " = true;\n";
+                                                               dec_indent();
+                                                       }
+                                                       // Bit toggled and is now 0: falling edge.
+                                                       if (sync_type.second != RTLIL::STp) {
+                                                               f << indent << "if (" << wire_edge << ".slice<" << sync_type.first.offset << ">().val() && "
+                                                                           << "!" << wire_curr << ".slice<" << sync_type.first.offset << ">().val())\n";
+                                                               inc_indent();
+                                                                       f << indent << "negedge_" << mangle(sync_type.first) << " = true;\n";
+                                                               dec_indent();
+                                                       }
+                                                       f << indent << "changed = true;\n";
+                                               }
+                                       dec_indent();
+                                       f << indent << "}\n";
+                               } else {
+                                       f << indent << "changed |= " << mangle(wire) << ".commit();\n";
+                               }
+                       }
+                       // Only writable memories are double-buffered and need committing.
+                       for (auto memory : module->memories) {
+                               if (!writable_memories[memory.second])
+                                       continue;
+                               f << indent << "for (size_t i = 0; i < " << memory.second->size << "u; i++)\n";
+                               inc_indent();
+                                       f << indent << "changed |= " << mangle(memory.second) << "[i].commit();\n";
+                               dec_indent();
+                       }
+                       f << indent << "return changed;\n";
+               dec_indent();
+               f << "}\n";
+       }
+
+       // Emit the complete generated translation unit: the cxxrtl support header
+       // include followed by one struct per selected, non-blackbox module.
+       void dump_design(RTLIL::Design *design)
+       {
+               f << "#include <cxxrtl.h>\n";
+               f << "\n";
+               f << "using namespace cxxrtl_yosys;\n";
+               f << "\n";
+               f << "namespace cxxrtl_design {\n";
+               for (auto module : design->modules()) {
+                       if (module->get_blackbox_attribute())
+                               continue;
+
+                       if (!design->selected_module(module))
+                               continue;
+
+                       f << "\n";
+                       dump_module(module);
+               }
+               f << "\n";
+               f << "} // namespace cxxrtl_design\n";
+       }
+
+       // Edge-type sync rules require us to emit edge detectors, which require coordination between
+       // eval and commit phases. To do this we need to collect them upfront.
+       //
+       // Note that the simulator commit phase operates at wire granularity but edge-type sync rules
+       // operate at wire bit granularity; it is possible to have code similar to:
+       //     wire [3:0] clocks;
+       //     always @(posedge clocks[0]) ...
+       // To handle this we track edge sensitivity both for wires and wire bits.
+       //
+       // Record one single-bit edge-sensitive signal of kind `type`; a bit
+       // registered with both polarities degrades to STe (either edge).
+       // NOTE(review): SigSpec::is_wire() only holds for a spec covering an entire
+       // wire, so a one-bit slice of a wider wire (the clocks[0] example above)
+       // appears to trip this assertion -- verify against the intended input set.
+       void register_edge_signal(SigMap &sigmap, RTLIL::SigSpec signal, RTLIL::SyncType type)
+       {
+               signal = sigmap(signal);
+               log_assert(signal.is_wire() && signal.is_bit());
+               log_assert(type == RTLIL::STp || type == RTLIL::STn || type == RTLIL::STe);
+
+               RTLIL::SigBit sigbit = signal[0];
+               if (!sync_types.count(sigbit))
+                       sync_types[sigbit] = type;
+               else if (sync_types[sigbit] != type)
+                       sync_types[sigbit] = RTLIL::STe;
+               sync_wires.insert(signal.as_wire());
+       }
+
+       // Walk every module and pre-register all edge-sensitive signals -- whether
+       // they come from DFF/memory-port cell clocks or from process sync rules --
+       // and record which memories are ever written (the rest become memory_ro<>).
+       void analyze_design(RTLIL::Design *design)
+       {
+               for (auto module : design->modules()) {
+                       SigMap &sigmap = sigmaps[module];
+                       sigmap.set(module);
+
+                       for (auto cell : module->cells()) {
+                               // Various DFF cells are treated like posedge/negedge processes, see above for details.
+                               if (cell->type.in(ID($dff), ID($dffe), ID($adff), ID($dffsr))) {
+                                       if (cell->getPort(ID(CLK)).is_wire())
+                                               register_edge_signal(sigmap, cell->getPort(ID(CLK)),
+                                                       cell->parameters[ID(CLK_POLARITY)].as_bool() ? RTLIL::STp : RTLIL::STn);
+                                       // The $adff and $dffsr cells are level-sensitive, not edge-sensitive (in spite of the fact that they
+                                       // are inferred from an edge-sensitive Verilog process) and do not correspond to an edge-type sync rule.
+                               }
+                               // Similar for memory port cells.
+                               if (cell->type.in(ID($memrd), ID($memwr))) {
+                                       if (cell->getParam(ID(CLK_ENABLE)).as_bool()) {
+                                               if (cell->getPort(ID(CLK)).is_wire())
+                                                       register_edge_signal(sigmap, cell->getPort(ID(CLK)),
+                                                               cell->parameters[ID(CLK_POLARITY)].as_bool() ? RTLIL::STp : RTLIL::STn);
+                                       }
+                               }
+                               // Optimize access to read-only memories.
+                               if (cell->type == ID($memwr))
+                                       writable_memories.insert(module->memories[cell->getParam(ID(MEMID)).decode_string()]);
+                               // Handling of packed memories is delegated to the `memory_unpack` pass, so we can rely on the presence
+                               // of RTLIL memory objects and $memrd/$memwr/$meminit cells.
+                               if (cell->type.in(ID($mem)))
+                                       log_assert(false);
+                       }
+
+                       for (auto proc : module->processes)
+                               for (auto sync : proc.second->syncs)
+                                       switch (sync->type) {
+                                               // Edge-type sync rules require pre-registration.
+                                               case RTLIL::STp:
+                                               case RTLIL::STn:
+                                               case RTLIL::STe:
+                                                       register_edge_signal(sigmap, sync->signal, sync->type);
+                                                       break;
+
+                                               // Level-type sync rules require no special handling.
+                                               case RTLIL::ST0:
+                                               case RTLIL::ST1:
+                                               case RTLIL::STa:
+                                                       break;
+
+                                               // Handling of init-type sync rules is delegated to the `proc_init` pass, so we can use the wire
+                                               // attribute regardless of input.
+                                               case RTLIL::STi:
+                                                       log_assert(false);
+                                                       // (log_assert(false) aborts, so control never falls through to STg.)
+
+                                               case RTLIL::STg:
+                                                       log_cmd_error("Global clock is not supported.\n");
+                                       }
+               }
+       }
+
+       // Scan the design for constructs that must be lowered before emission:
+       // init-type sync rules (lowered by `proc_init`) and packed $mem cells
+       // (lowered by `memory_unpack`). Rejects partially selected modules, since
+       // the backend can only translate whole modules.
+       void check_design(RTLIL::Design *design, bool &has_sync_init, bool &has_packed_mem)
+       {
+               has_sync_init = has_packed_mem = false;
+
+               for (auto module : design->modules()) {
+                       if (module->get_blackbox_attribute())
+                               continue;
+
+                       if (!design->selected_whole_module(module))
+                               if (design->selected_module(module))
+                                       log_cmd_error("Can't handle partially selected module %s!\n", id2cstr(module->name));
+
+                       for (auto proc : module->processes)
+                               for (auto sync : proc.second->syncs)
+                                       if (sync->type == RTLIL::STi)
+                                               has_sync_init = true;
+
+                       for (auto cell : module->cells())
+                               if (cell->type == ID($mem))
+                                       has_packed_mem = true;
+               }
+       }
+
+       // Run the lowering passes the backend depends on (proc_init, memory_unpack)
+       // when needed, assert that the lowering succeeded, then collect the
+       // edge-signal and writable-memory analysis used by the dump_* methods.
+       void prepare_design(RTLIL::Design *design)
+       {
+               bool has_sync_init, has_packed_mem;
+               check_design(design, has_sync_init, has_packed_mem);
+               if (has_sync_init)
+                       Pass::call(design, "proc_init");
+               if (has_packed_mem)
+                       Pass::call(design, "memory_unpack");
+               // Recheck the design if it was modified.
+               if (has_sync_init || has_packed_mem)
+                       check_design(design, has_sync_init, has_packed_mem);
+
+               log_assert(!(has_sync_init || has_packed_mem));
+               analyze_design(design);
+       }
+};
+
+// The `write_cxxrtl` backend command: prepares (lowers + analyzes) the design
+// and emits the C++ simulation model to the output stream.
+struct CxxrtlBackend : public Backend {
+       CxxrtlBackend() : Backend("cxxrtl", "convert design to C++ RTL simulation") { }
+       void help() YS_OVERRIDE
+       {
+               //   |---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|
+               log("\n");
+               log("    write_cxxrtl [options] [filename]\n");
+               log("\n");
+               log("Write C++ code for simulating the design.\n");
+               log("\n");
+               log("\n");
+       }
+       void execute(std::ostream *&f, std::string filename, std::vector<std::string> args, RTLIL::Design *design) YS_OVERRIDE
+       {
+               log_header(design, "Executing CXXRTL backend.\n");
+
+               size_t argidx;
+               for (argidx = 1; argidx < args.size(); argidx++)
+               {
+                       // No options are implemented yet; the loop is kept as a template for
+                       // future ones (e.g. the -top option sketched below).
+                       // if (args[argidx] == "-top" && argidx+1 < args.size()) {
+                       //      top_module_name = args[++argidx];
+                       //      continue;
+                       // }
+                       break;
+               }
+               extra_args(f, filename, args, argidx);
+
+               CxxrtlWorker worker(*f);
+               worker.prepare_design(design);
+               worker.dump_design(design);
+       }
+} CxxrtlBackend;
+
+PRIVATE_NAMESPACE_END
diff --git a/backends/cxxrtl/cxxrtl.h b/backends/cxxrtl/cxxrtl.h
new file mode 100644 (file)
index 0000000..d066530
--- /dev/null
@@ -0,0 +1,1104 @@
+/*
+ *  yosys -- Yosys Open SYnthesis Suite
+ *
+ *  Copyright (C) 2019  whitequark <whitequark@whitequark.org>
+ *
+ *  Permission to use, copy, modify, and/or distribute this software for any
+ *  purpose with or without fee is hereby granted, provided that the above
+ *  copyright notice and this permission notice appear in all copies.
+ *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+// This file is included by the designs generated with `write_cxxrtl`. It is not used in Yosys itself.
+
+#ifndef CXXRTL_H
+#define CXXRTL_H
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+#include <tuple>
+#include <vector>
+#include <sstream>
+
+// The cxxrtl support library implements compile time specialized arbitrary width arithmetics, as well as provides
+// composite lvalues made out of bit slices and concatenations of lvalues. This allows the `write_cxxrtl` pass
+// to perform a straightforward translation of RTLIL structures to readable C++, relying on the C++ compiler
+// to unwrap the abstraction and generate efficient code.
+namespace cxxrtl {
+
+// All arbitrary-width values in cxxrtl are backed by arrays of unsigned integers called chunks. The chunk size
+// is the same regardless of the value width to simplify manipulating values via FFI interfaces, e.g. driving
+// and introspecting the simulation in Python.
+//
+// It is practical to use chunk sizes between 32 bits and platform register size because when arithmetics on
+// narrower integer types is legalized by the C++ compiler, it inserts code to clear the high bits of the register.
+// However, (a) most of our operations do not change those bits in the first place because of invariants that are
+// invisible to the compiler, (b) we often operate on non-power-of-2 values and have to clear the high bits anyway.
+// Therefore, using relatively wide chunks and clearing the high bits explicitly and only when we know they may be
+// clobbered results in simpler generated code.
+// Traits of a chunk type: the exact number of value bits it holds and an
+// all-ones mask of that width.
+template<typename T>
+struct chunk_traits {
+       static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+                     "chunk type must be an unsigned integral type");
+       using type = T;
+       static constexpr size_t bits = std::numeric_limits<T>::digits;
+       static constexpr T mask = std::numeric_limits<T>::max();
+};
+
+template<class T>
+struct expr_base;
+
+template<size_t Bits>
+struct value : public expr_base<value<Bits>> {
+       static constexpr size_t bits = Bits;
+
+       using chunk = chunk_traits<uint32_t>;
+       static constexpr chunk::type msb_mask = (Bits % chunk::bits == 0) ? chunk::mask
+               : chunk::mask >> (chunk::bits - (Bits % chunk::bits));
+
+       static constexpr size_t chunks = (Bits + chunk::bits - 1) / chunk::bits;
+       chunk::type data[chunks] = {};
+
+       value() = default;
+       // Construct from explicit chunk values, least significant chunk first.
+       template<typename... Init>
+       explicit constexpr value(Init ...init) : data{init...} {}
+
+       // This allows using value<> as well as wire<> in memory initializers.
+       using init = value<Bits>;
+
+       // NOTE(review): user-declaring the move constructor suppresses the implicit
+       // move assignment, so assignment always copies; that is equivalent in cost
+       // for this trivially copyable chunk array, but confirm it is intentional.
+       value(const value<Bits> &) = default;
+       value(value<Bits> &&) = default;
+       value<Bits> &operator=(const value<Bits> &) = default;
+
+       // A (no-op) helper that forces the cast to value<>.
+       const value<Bits> &val() const {
+               return *this;
+       }
+
+       // Render through the stream inserter for value<> (declared elsewhere in
+       // this header).
+       std::string str() const {
+               std::stringstream ss;
+               ss << *this;
+               return ss.str();
+       }
+
+       // Operations with compile-time parameters.
+       //
+       // These operations are used to implement slicing, concatenation, and blitting.
+       // The trunc, zext and sext operations add or remove most significant bits (i.e. on the left);
+       // the rtrunc and rzext operations add or remove least significant bits (i.e. on the right).
+
+       // Keep the NewBits least significant bits; the top chunk is masked so that
+       // bits above NewBits-1 stay zero.
+       template<size_t NewBits>
+       value<NewBits> trunc() const {
+               static_assert(NewBits <= Bits, "trunc() may not increase width");
+               value<NewBits> result;
+               for (size_t n = 0; n < result.chunks; n++)
+                       result.data[n] = data[n];
+               result.data[result.chunks - 1] &= result.msb_mask;
+               return result;
+       }
+
+       // Zero-extend to NewBits; value<> zero-initializes its storage, so only the
+       // existing chunks need to be copied.
+       template<size_t NewBits>
+       value<NewBits> zext() const {
+               static_assert(NewBits >= Bits, "zext() may not decrease width");
+               value<NewBits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = data[n];
+               return result;
+       }
+
+       // Sign-extend to NewBits: for negative values, fill every bit above Bits-1
+       // with ones, then mask the new top chunk to preserve the invariant.
+       template<size_t NewBits>
+       value<NewBits> sext() const {
+               static_assert(NewBits >= Bits, "sext() may not decrease width");
+               value<NewBits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = data[n];
+               if (is_neg()) {
+                       result.data[chunks - 1] |= ~msb_mask;
+                       for (size_t n = chunks; n < result.chunks; n++)
+                               result.data[n] = chunk::mask;
+                       result.data[result.chunks - 1] &= result.msb_mask;
+               }
+               return result;
+       }
+
+       // Discard the (Bits - NewBits) least significant bits, i.e. shift right by
+       // a compile-time constant amount.
+       template<size_t NewBits>
+       value<NewBits> rtrunc() const {
+               static_assert(NewBits <= Bits, "rtrunc() may not increase width");
+               value<NewBits> result;
+               constexpr size_t shift_chunks = (Bits - NewBits) / chunk::bits;
+               constexpr size_t shift_bits   = (Bits - NewBits) % chunk::bits;
+               chunk::type carry = 0;
+               if (shift_chunks + result.chunks < chunks) {
+                       carry = (shift_bits == 0) ? 0
+                               : data[shift_chunks + result.chunks] << (chunk::bits - shift_bits);
+               }
+               // Walk the result from the most significant chunk down, carrying the
+               // bits that straddle a chunk boundary.
+               for (size_t n = result.chunks; n > 0; n--) {
+                       result.data[n - 1] = carry | (data[shift_chunks + n - 1] >> shift_bits);
+                       carry = (shift_bits == 0) ? 0
+                               : data[shift_chunks + n - 1] << (chunk::bits - shift_bits);
+               }
+               return result;
+       }
+
+       // Insert (NewBits - Bits) zero bits on the right, i.e. shift left by a
+       // compile-time constant amount.
+       template<size_t NewBits>
+       value<NewBits> rzext() const {
+               static_assert(NewBits >= Bits, "rzext() may not decrease width");
+               value<NewBits> result;
+               constexpr size_t shift_chunks = (NewBits - Bits) / chunk::bits;
+               constexpr size_t shift_bits   = (NewBits - Bits) % chunk::bits;
+               chunk::type carry = 0;
+               for (size_t n = 0; n < chunks; n++) {
+                       result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
+                       carry = (shift_bits == 0) ? 0
+                               : data[n] >> (chunk::bits - shift_bits);
+               }
+               if (carry != 0)
+                       result.data[result.chunks - 1] = carry;
+               return result;
+       }
+
+       // Bit blit operation, i.e. a partial read-modify-write: returns a copy of
+       // *this with bits [Start, Stop] (inclusive) replaced by `source`.
+       template<size_t Stop, size_t Start>
+       value<Bits> blit(const value<Stop - Start + 1> &source) const {
+               static_assert(Stop >= Start, "blit() may not reverse bit order");
+               // start_mask keeps the bits below Start; stop_mask keeps the bits above Stop.
+               constexpr chunk::type start_mask = ~(chunk::mask << (Start % chunk::bits));
+               constexpr chunk::type stop_mask = (Stop % chunk::bits + 1 == chunk::bits) ? 0
+                       : (chunk::mask << (Stop % chunk::bits + 1));
+               // Clear the destination bit range...
+               value<Bits> masked = *this;
+               if (Start / chunk::bits == Stop / chunk::bits) {
+                       masked.data[Start / chunk::bits] &= stop_mask | start_mask;
+               } else {
+                       masked.data[Start / chunk::bits] &= start_mask;
+                       for (size_t n = Start / chunk::bits + 1; n < Stop / chunk::bits; n++)
+                               masked.data[n] = 0;
+                       masked.data[Stop / chunk::bits] &= stop_mask;
+               }
+               // ...then position the source at Start and merge it in.
+               value<Bits> shifted = source
+                       .template rzext<Stop + 1>()
+                       .template zext<Bits>();
+               return masked.bit_or(shifted);
+       }
+
+       // Helpers for selecting extending or truncating operation depending on whether the result is wider or narrower
+       // than the operand. In C++17 these can be replaced with `if constexpr`.
+       template<size_t NewBits, typename = void>
+       struct zext_cast {
+               value<NewBits> operator()(const value<Bits> &val) {
+                       return val.template zext<NewBits>();
+               }
+       };
+
+       // Partial specialization chosen when the result is narrower: truncate instead.
+       template<size_t NewBits>
+       struct zext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
+               value<NewBits> operator()(const value<Bits> &val) {
+                       return val.template trunc<NewBits>();
+               }
+       };
+
+       template<size_t NewBits, typename = void>
+       struct sext_cast {
+               value<NewBits> operator()(const value<Bits> &val) {
+                       return val.template sext<NewBits>();
+               }
+       };
+
+       // Partial specialization chosen when the result is narrower: truncate instead.
+       template<size_t NewBits>
+       struct sext_cast<NewBits, typename std::enable_if<(NewBits < Bits)>::type> {
+               value<NewBits> operator()(const value<Bits> &val) {
+                       return val.template trunc<NewBits>();
+               }
+       };
+
+       // Zero-extend or truncate to NewBits, whichever applies.
+       template<size_t NewBits>
+       value<NewBits> zcast() const {
+               return zext_cast<NewBits>()(*this);
+       }
+
+       // Sign-extend or truncate to NewBits, whichever applies.
+       template<size_t NewBits>
+       value<NewBits> scast() const {
+               return sext_cast<NewBits>()(*this);
+       }
+
+       // Operations with run-time parameters (offsets, amounts, etc).
+       //
+       // These operations are used for computations.
+
+       // Read the bit at `offset` (0 is the least significant bit).
+       bool bit(size_t offset) const {
+               return data[offset / chunk::bits] & (1 << (offset % chunk::bits));
+       }
+
+       // Write the bit at `offset` (clear-then-set, so either polarity works).
+       void set_bit(size_t offset, bool value = true) {
+               size_t offset_chunks = offset / chunk::bits;
+               size_t offset_bits = offset % chunk::bits;
+               data[offset_chunks] &= ~(1 << offset_bits);
+               data[offset_chunks] |= value ? 1 << offset_bits : 0;
+       }
+
+       bool is_zero() const {
+               for (size_t n = 0; n < chunks; n++)
+                       if (data[n] != 0)
+                               return false;
+               return true;
+       }
+
+       explicit operator bool() const {
+               return !is_zero();
+       }
+
+       // Two's complement sign, i.e. bit Bits-1.
+       bool is_neg() const {
+               return data[chunks - 1] & (1 << ((Bits - 1) % chunk::bits));
+       }
+
+       // Chunk-wise comparison; relies on the invariant that bits above Bits-1
+       // are kept zero in the top chunk of both operands.
+       bool operator ==(const value<Bits> &other) const {
+               for (size_t n = 0; n < chunks; n++)
+                       if (data[n] != other.data[n])
+                               return false;
+               return true;
+       }
+
+       bool operator !=(const value<Bits> &other) const {
+               return !(*this == other);
+       }
+
+       // Bitwise complement; the top chunk is masked to keep bits above Bits-1 zero.
+       value<Bits> bit_not() const {
+               value<Bits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = ~data[n];
+               result.data[chunks - 1] &= msb_mask;
+               return result;
+       }
+
+       // bit_and/bit_or/bit_xor need no masking: with both operands' high bits
+       // zero, the result's high bits are zero as well.
+       value<Bits> bit_and(const value<Bits> &other) const {
+               value<Bits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = data[n] & other.data[n];
+               return result;
+       }
+
+       value<Bits> bit_or(const value<Bits> &other) const {
+               value<Bits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = data[n] | other.data[n];
+               return result;
+       }
+
+       value<Bits> bit_xor(const value<Bits> &other) const {
+               value<Bits> result;
+               for (size_t n = 0; n < chunks; n++)
+                       result.data[n] = data[n] ^ other.data[n];
+               return result;
+       }
+
+       // Logical left shift by a run-time amount; shifts of Bits or more yield zero.
+       template<size_t AmountBits>
+       value<Bits> shl(const value<AmountBits> &amount) const {
+               // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
+               static_assert(Bits <= chunk::mask, "shl() of unreasonably large values is not supported");
+               // Detect shifts definitely larger than Bits early.
+               for (size_t n = 1; n < amount.chunks; n++)
+                       if (amount.data[n] != 0)
+                               return {};
+               // Past this point we can use the least significant chunk as the shift size.
+               size_t shift_chunks = amount.data[0] / chunk::bits;
+               size_t shift_bits   = amount.data[0] % chunk::bits;
+               if (shift_chunks >= chunks)
+                       return {};
+               value<Bits> result;
+               chunk::type carry = 0;
+               for (size_t n = 0; n < chunks - shift_chunks; n++) {
+                       result.data[shift_chunks + n] = (data[n] << shift_bits) | carry;
+                       carry = (shift_bits == 0) ? 0
+                               : data[n] >> (chunk::bits - shift_bits);
+               }
+               // NOTE(review): bits shifted past position Bits-1 within the top chunk
+               // are not masked with msb_mask here (compare trunc()/bit_not()); verify
+               // that the "high bits are zero" invariant is restored by the callers.
+               return result;
+       }
+
+       template<size_t AmountBits, bool Signed = false>
+       value<Bits> shr(const value<AmountBits> &amount) const {
+               // Ensure our early return is correct by prohibiting values larger than 4 Gbit.
+               static_assert(Bits <= chunk::mask, "shr() of unreasonably large values is not supported");
+               // Detect shifts definitely large than Bits early.
+               for (size_t n = 1; n < amount.chunks; n++)
+                       if (amount.data[n] != 0)
+                               return {};
+               // Past this point we can use the least significant chunk as the shift size.
+               size_t shift_chunks = amount.data[0] / chunk::bits;
+               size_t shift_bits   = amount.data[0] % chunk::bits;
+               if (shift_chunks >= chunks)
+                       return {};
+               value<Bits> result;
+               chunk::type carry = 0;
+               for (size_t n = 0; n < chunks - shift_chunks; n++) {
+                       result.data[chunks - shift_chunks - 1 - n] = carry | (data[chunks - 1 - n] >> shift_bits);
+                       carry = (shift_bits == 0) ? 0
+                               : data[chunks - 1 - n] << (chunk::bits - shift_bits);
+               }
+               if (Signed && is_neg()) {
+                       for (size_t n = chunks - shift_chunks; n < chunks; n++)
+                               result.data[n] = chunk::mask;
+                       if (shift_bits != 0)
+                               result.data[chunks - shift_chunks] |= chunk::mask << (chunk::bits - shift_bits);
+               }
+               return result;
+       }
+
+       // Arithmetic (sign-preserving) shift right; convenience wrapper over shr() with Signed=true.
+       template<size_t AmountBits>
+       value<Bits> sshr(const value<AmountBits> &amount) const {
+               return shr<AmountBits, /*Signed=*/true>(amount);
+       }
+
+       // Population count: the number of set bits across all chunks.
+       size_t ctpop() const {
+               size_t count = 0;
+               for (size_t n = 0; n < chunks; n++) {
+                       // This loop implements the population count idiom as recognized by LLVM and GCC.
+                       for (chunk::type x = data[n]; x != 0; count++)
+                               x = x & (x - 1);
+               }
+               return count;
+       }
+
+       // Nominally "count leading zeros", but note two quirks before changing it — the
+       // division routine (divmod_uu) depends on this exact behavior:
+       //  * a nonzero chunk contributes the index of its highest set bit plus one, not its
+       //    local leading-zero count, and the scan does NOT stop at the first nonzero chunk;
+       //  * NOTE(review): when Bits is an exact multiple of chunk::bits, an all-zero topmost
+       //    chunk contributes Bits % chunk::bits == 0 instead of chunk::bits — confirm this
+       //    is intended.
+       size_t ctlz() const {
+               size_t count = 0;
+               for (size_t n = 0; n < chunks; n++) {
+                       chunk::type x = data[chunks - 1 - n];
+                       if (x == 0) {
+                               count += (n == 0 ? Bits % chunk::bits : chunk::bits);
+                       } else {
+                               // This loop implements the find first set idiom as recognized by LLVM.
+                               for (; x != 0; count++)
+                                       x >>= 1;
+                       }
+               }
+               return count;
+       }
+
+       // Ripple-carry add/subtract building block: computes *this + (Invert ? ~other : other)
+       // + CarryIn one chunk at a time, returning the masked sum and the carry out.
+       // With Invert=true and CarryIn=true this is two's complement subtraction, and the
+       // returned carry is the "no borrow" flag consumed by ucmp()/scmp().
+       template<bool Invert, bool CarryIn>
+       std::pair<value<Bits>, bool /*CarryOut*/> alu(const value<Bits> &other) const {
+               value<Bits> result;
+               bool carry = CarryIn;
+               for (size_t n = 0; n < result.chunks; n++) {
+                       result.data[n] = data[n] + (Invert ? ~other.data[n] : other.data[n]) + carry;
+                       // The chunk sum wrapped iff it came out smaller than an addend, or equal
+                       // to it while a nonzero carry was folded in.
+                       carry = (result.data[n] <  data[n]) ||
+                               (result.data[n] == data[n] && carry);
+               }
+               // Clear the padding bits above Bits in the topmost chunk.
+               result.data[result.chunks - 1] &= result.msb_mask;
+               return {result, carry};
+       }
+
+       // Modular addition: a + b (mod 2^Bits).
+       value<Bits> add(const value<Bits> &other) const {
+               return alu</*Invert=*/false, /*CarryIn=*/false>(other).first;
+       }
+
+       // Modular subtraction, computed as a + ~b + 1.
+       value<Bits> sub(const value<Bits> &other) const {
+               return alu</*Invert=*/true, /*CarryIn=*/true>(other).first;
+       }
+
+       // Two's complement negation: 0 - a.
+       value<Bits> neg() const {
+               return value<Bits> { 0u }.sub(*this);
+       }
+
+       // Unsigned less-than. The subtraction produces no carry out exactly when a borrow
+       // occurred, i.e. when *this < other.
+       bool ucmp(const value<Bits> &other) const {
+               bool carry;
+               std::tie(std::ignore, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
+               return !carry; // a.ucmp(b) ≡ a u< b
+       }
+
+       // Signed less-than: the sign of (a - b), corrected for signed overflow, which can
+       // only happen when the operand signs differ and flips the result's apparent sign.
+       bool scmp(const value<Bits> &other) const {
+               value<Bits> result;
+               bool carry;
+               std::tie(result, carry) = alu</*Invert=*/true, /*CarryIn=*/true>(other);
+               bool overflow = (is_neg() == !other.is_neg()) && (is_neg() != result.is_neg());
+               return result.is_neg() ^ overflow; // a.scmp(b) ≡ a s< b
+       }
+};
+
+// Expression template for a slice, usable as lvalue or rvalue, and composable with other expression templates here.
+template<class T, size_t Stop, size_t Start>
+struct slice_expr : public expr_base<slice_expr<T, Stop, Start>> {
+       static_assert(Stop >= Start, "slice_expr() may not reverse bit order");
+       static_assert(Start < T::bits && Stop < T::bits, "slice_expr() must be within bounds");
+       static constexpr size_t bits = Stop - Start + 1;
+
+       // Reference to the sliced expression; see the lifetime warning on expr_base.
+       T &expr;
+
+       slice_expr(T &expr) : expr(expr) {}
+       slice_expr(const slice_expr<T, Stop, Start> &) = delete;
+
+       // Rvalue use: materialize bits [Stop:Start] by dropping the Start low bits (rtrunc)
+       // and then keeping the low Stop - Start + 1 bits (trunc).
+       operator value<bits>() const {
+               return static_cast<const value<T::bits> &>(expr)
+                       .template rtrunc<T::bits - Start>()
+                       .template trunc<bits>();
+       }
+
+       // Lvalue use: writes only bits [Stop:Start] of the underlying expression.
+       slice_expr<T, Stop, Start> &operator=(const value<bits> &rhs) {
+               // Generic partial assignment implemented using a read-modify-write operation on the sliced expression.
+               expr = static_cast<const value<T::bits> &>(expr)
+                       .template blit<Stop, Start>(rhs);
+               return *this;
+       }
+
+       // A helper that forces the cast to value<>, which allows deduction to work.
+       value<bits> val() const {
+               return static_cast<const value<bits> &>(*this);
+       }
+};
+
+// Expression template for a concatenation, usable as lvalue or rvalue, and composable with other expression templates here.
+template<class T, class U>
+struct concat_expr : public expr_base<concat_expr<T, U>> {
+       static constexpr size_t bits = T::bits + U::bits;
+
+       // References to the most/least significant parts; see the lifetime warning on expr_base.
+       T &ms_expr;
+       U &ls_expr;
+
+       concat_expr(T &ms_expr, U &ls_expr) : ms_expr(ms_expr), ls_expr(ls_expr) {}
+       concat_expr(const concat_expr<T, U> &) = delete;
+
+       // Rvalue use: place ms_expr above ls_expr (rzext shifts it into the high bits) and
+       // OR the zero-extended low part in.
+       operator value<bits>() const {
+               value<bits> ms_shifted = static_cast<const value<T::bits> &>(ms_expr)
+                       .template rzext<bits>();
+               value<bits> ls_extended = static_cast<const value<U::bits> &>(ls_expr)
+                       .template zext<bits>();
+               return ms_shifted.bit_or(ls_extended);
+       }
+
+       // Lvalue use: split rhs and assign the high part to ms_expr, the low part to ls_expr.
+       concat_expr<T, U> &operator=(const value<bits> &rhs) {
+               ms_expr = rhs.template rtrunc<T::bits>();
+               ls_expr = rhs.template trunc<U::bits>();
+               return *this;
+       }
+
+       // A helper that forces the cast to value<>, which allows deduction to work.
+       value<bits> val() const {
+               return static_cast<const value<bits> &>(*this);
+       }
+};
+
+// Base class for expression templates, providing helper methods for operations that are valid on both rvalues and lvalues.
+//
+// Note that expression objects (slices and concatenations) constructed in this way should NEVER be captured because
+// they refer to temporaries that will, in general, only live until the end of the statement. For example, both of
+// these snippets perform use-after-free:
+//
+//    const auto &a = val.slice<7,0>().slice<1>();
+//    value<1> b = a;
+//
+//    auto &&c = val.slice<7,0>().slice<1>();
+//    c = value<1>{1u};
+//
+// An easy way to write code using slices and concatenations safely is to follow two simple rules:
+//   * Never explicitly name any type except `value<W>` or `const value<W> &`.
+//   * Never use a `const auto &` or `auto &&` in any such expression.
+// Then, any code that compiles will be well-defined.
+template<class T>
+struct expr_base {
+       // Slice of a const expression: usable as rvalue only.
+       template<size_t Stop, size_t Start = Stop>
+       slice_expr<const T, Stop, Start> slice() const {
+               return {*static_cast<const T *>(this)};
+       }
+
+       // Slice of a mutable expression: usable as lvalue or rvalue.
+       template<size_t Stop, size_t Start = Stop>
+       slice_expr<T, Stop, Start> slice() {
+               return {*static_cast<T *>(this)};
+       }
+
+       // Concatenation with `*this` as the most significant part. The result holds a
+       // reference to `other`, so the caveats above about capturing expressions apply.
+       template<class U>
+       concat_expr<const T, typename std::remove_reference<const U>::type> concat(const U &other) const {
+               return {*static_cast<const T *>(this), other};
+       }
+
+       template<class U>
+       concat_expr<T, typename std::remove_reference<U>::type> concat(U &&other) {
+               return {*static_cast<T *>(this), other};
+       }
+};
+
+// Prints a value as `<width>'<hex chunks>`, zero-padding each chunk to a fixed number of
+// digits; saves and restores the stream's flags, width and fill around the output.
+template<size_t Bits>
+std::ostream &operator<<(std::ostream &os, const value<Bits> &val) {
+       auto old_flags = os.flags(std::ios::right);
+       auto old_width = os.width(0);
+       auto old_fill  = os.fill('0');
+       os << val.bits << '\'' << std::hex;
+       for (size_t n = val.chunks - 1; n != (size_t)-1; n--) {
+               // The topmost chunk may hold fewer than chunk::bits significant bits and is
+               // printed with a correspondingly narrower field.
+               if (n == val.chunks - 1 && Bits % value<Bits>::chunk::bits != 0)
+                       os.width((Bits % value<Bits>::chunk::bits + 3) / 4);
+               else
+                       os.width((value<Bits>::chunk::bits + 3) / 4);
+               os << val.data[n];
+       }
+       os.fill(old_fill);
+       os.width(old_width);
+       os.flags(old_flags);
+       return os;
+}
+
+// A double-buffered state element: evaluation writes `next`, and commit() publishes it
+// into `curr`, which is what the rest of the design observes.
+template<size_t Bits>
+struct wire {
+       static constexpr size_t bits = Bits;
+
+       value<Bits> curr;
+       value<Bits> next;
+
+       wire() = default;
+       constexpr wire(const value<Bits> &init) : curr(init), next(init) {}
+       template<typename... Init>
+       explicit constexpr wire(Init ...init) : curr{init...}, next{init...} {}
+
+       // Wires are movable but not copyable: copying simulation state is almost always a bug.
+       wire(const wire<Bits> &) = delete;
+       wire(wire<Bits> &&) = default;
+       wire<Bits> &operator=(const wire<Bits> &) = delete;
+
+       // We want to avoid having operator=(wire<>) or operator=(value<>) that overwrites both curr and next,
+       // since this operation is almost always wrong. But we also need an operation like that for memory
+       // initialization. This is solved by adding a wrapper and making the use of operator= valid only when
+       // this wrapper is used.
+       struct init {
+               value<Bits> data;
+       };
+
+       wire<Bits> &operator=(const init &init) {
+               curr = next = init.data;
+               return *this;
+       }
+
+       // Publish `next` into `curr`; returns true iff the wire actually changed, which is
+       // what drives the convergence loop in module::step().
+       bool commit() {
+               if (curr != next) {
+                       curr = next;
+                       return true;
+               }
+               return false;
+       }
+};
+
+// Printing a wire shows its committed (current) state.
+template<size_t Bits>
+std::ostream &operator<<(std::ostream &os, const wire<Bits> &val) {
+       return os << val.curr;
+}
+
+template<class Elem>
+struct memory {
+       using StoredElem = typename std::remove_const<Elem>::type;
+       std::vector<StoredElem> data;
+
+       static constexpr size_t width = StoredElem::bits;
+       size_t depth() const {
+               return data.size();
+       }
+
+       memory() = delete;
+       explicit memory(size_t depth) : data(depth) {}
+
+       memory(const memory<Elem> &) = delete;
+       memory<Elem> &operator=(const memory<Elem> &) = delete;
+
+       // The only way to get the compiler to put the initializer in .rodata and do not copy it on stack is to stuff it
+       // into a plain array. You'd think an std::initializer_list would work here, but it doesn't, because you can't
+       // construct an initializer_list in a constexpr (or something) and so if you try to do that the whole thing is
+       // first copied on the stack (probably overflowing it) and then again into `data`.
+       template<size_t Size>
+       struct init {
+               size_t offset;
+               typename Elem::init data[Size];
+       };
+
+       template<size_t... InitSize>
+       explicit memory(size_t depth, const init<InitSize> &...init) : data(depth) {
+               // FIXME: assert(init.size() <= depth);
+               data.resize(depth);
+               // This utterly reprehensible construct is the most reasonable way to apply a function to every element
+               // of a parameter pack, if the elements all have different types and so cannot be cast to an initializer list.
+               auto _ = {std::move(std::begin(init.data), std::end(init.data), data.begin() + init.offset)...};
+       }
+
+       Elem &operator [](size_t index) {
+               // FIXME: assert(index < data.size());
+               return data[index];
+       }
+
+       template<size_t AddrBits>
+       Elem &operator [](const value<AddrBits> &addr) {
+               static_assert(value<AddrBits>::chunks <= 1, "memory indexing with unreasonably large address is not supported");
+               return (*this)[addr.data[0]];
+       }
+};
+
+// A memory whose contents can change is stored as double-buffered wires; a constant
+// (read-only) memory is stored as plain immutable values.
+template<size_t Width>
+using memory_rw = memory<wire<Width>>;
+
+template<size_t Width>
+using memory_ro = memory<const value<Width>>;
+
+// Base class for generated simulation modules.
+struct module {
+       module() {}
+       virtual ~module() {}
+
+       module(const module &) = delete;
+       module &operator=(const module &) = delete;
+
+       // Evaluate the design once, computing next state from current state.
+       virtual void eval() = 0;
+       // Publish next state; returns true iff any state element changed.
+       virtual bool commit() = 0;
+
+       // Run delta cycles (eval/commit pairs) until the design settles; returns how many
+       // were needed. Note there is no iteration limit, so a design that never converges
+       // will loop here forever.
+       size_t step() {
+               size_t deltas = 0;
+               do {
+                       eval();
+                       deltas++;
+               } while (commit());
+               return deltas;
+       }
+};
+
+} // namespace cxxrtl
+
+// Definitions of internal Yosys cells. Other than the functions in this namespace, cxxrtl is fully generic
+// and independent of Yosys implementation details.
+//
+// The `write_cxxrtl` pass translates internal cells (cells with names that start with `$`) to calls of these
+// functions. All of Yosys arithmetic and logical cells perform sign or zero extension on their operands,
+// whereas basic operations on arbitrary width values require operands to be of the same width. These functions
+// bridge the gap by performing the necessary casts. They are named similar to `cell_A[B]`, where A and B are `u`
+// if the corresponding operand is unsigned, and `s` if it is signed.
+namespace cxxrtl_yosys {
+
+using namespace cxxrtl;
+
+// std::max isn't constexpr until C++14 for no particular reason (it's an oversight), so we define our own.
+// Return the larger of the two arguments.
+template<class T>
+constexpr T max(const T &a, const T &b) {
+       return b < a ? a : b;
+}
+
+// Logic operations
+// Bitwise NOT of A brought to the result width: zero-extended/truncated for _u,
+// sign-extended for _s.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> not_u(const value<BitsA> &a) {
+       return a.template zcast<BitsY>().bit_not();
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> not_s(const value<BitsA> &a) {
+       return a.template scast<BitsY>().bit_not();
+}
+
+// Logical NOT: 1 when A converts to false, else 0; only A's truthiness matters, so the
+// _u and _s variants are identical.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> logic_not_u(const value<BitsA> &a) {
+       return value<BitsY> { a ? 0u : 1u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> logic_not_s(const value<BitsA> &a) {
+       return value<BitsY> { a ? 0u : 1u };
+}
+
+// Reduction operations fold all bits of A into a single-bit result (zero-extended to
+// BitsY). Reductions only inspect A's bits, so each _u and _s pair is identical.
+// AND-reduce: 1 iff every bit of A is set, i.e. ~A is zero.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_and_u(const value<BitsA> &a) {
+       return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_and_s(const value<BitsA> &a) {
+       return value<BitsY> { a.bit_not().is_zero() ? 1u : 0u };
+}
+
+// OR-reduce: 1 iff any bit of A is set.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_or_u(const value<BitsA> &a) {
+       return value<BitsY> { a ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_or_s(const value<BitsA> &a) {
+       return value<BitsY> { a ? 1u : 0u };
+}
+
+// XOR-reduce: parity of the number of set bits.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_xor_u(const value<BitsA> &a) {
+       return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_xor_s(const value<BitsA> &a) {
+       return value<BitsY> { (a.ctpop() % 2) ? 1u : 0u };
+}
+
+// XNOR-reduce: complement of the parity.
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_xnor_u(const value<BitsA> &a) {
+       return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_xnor_s(const value<BitsA> &a) {
+       return value<BitsY> { (a.ctpop() % 2) ? 0u : 1u };
+}
+
+// Boolean reduce: 1 iff A is nonzero (same condition as OR-reduce).
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_bool_u(const value<BitsA> &a) {
+       return value<BitsY> { a ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> reduce_bool_s(const value<BitsA> &a) {
+       return value<BitsY> { a ? 1u : 0u };
+}
+
+// Binary bitwise operations: both operands are first brought to the result width,
+// zero-extended (zcast) for _uu and sign-extended (scast) for _ss.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> and_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().bit_and(b.template zcast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> and_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().bit_and(b.template scast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> or_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().bit_or(b.template zcast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> or_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().bit_or(b.template scast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> xor_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> xor_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> xnor_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().bit_xor(b.template zcast<BitsY>()).bit_not();
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> xnor_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().bit_xor(b.template scast<BitsY>()).bit_not();
+}
+
+// Logical && and || reduce each operand to a bool first, so operand signedness is
+// irrelevant and the _uu and _ss variants are identical.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> logic_and_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> logic_and_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return value<BitsY> { (bool(a) & bool(b)) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> logic_or_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> logic_or_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return value<BitsY> { (bool(a) | bool(b)) ? 1u : 0u };
+}
+
+// Left shifts. Only _uu/_su variants exist because the shift amount B is unsigned for
+// these cells; $shl and $sshl differ only in how A is extended to the result width,
+// so the pairs share an implementation.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shl_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().template shl(b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shl_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().template shl(b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sshl_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().template shl(b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sshl_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().template shl(b);
+}
+
+// Logical right shifts: shift at A's width, then bring the result to BitsY.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shr_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template shr(b).template zcast<BitsY>();
+}
+
+// NOTE(review): this shifts the BitsA-wide value first and widens afterwards; if the
+// $shr cell expects A to be extended to BitsY before shifting, results differ when
+// BitsY > BitsA and b > 0 — confirm against the calls emitted by write_cxxrtl.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shr_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template shr(b).template scast<BitsY>();
+}
+
+// $sshr with an unsigned A has no sign bit to replicate, so a logical shift is correct.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sshr_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template shr(b).template zcast<BitsY>();
+}
+
+// $sshr with a signed A operand must shift in copies of the sign bit, so use the
+// arithmetic shift sshr() rather than the logical shr() (which would zero-fill and
+// give wrong results for negative A).
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sshr_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template sshr(b).template scast<BitsY>();
+}
+
+// $shift/$shiftx shift right for non-negative B and left for negative B. The _us/_ss
+// variants test B's sign and negate it (widened by one bit so the most negative value
+// does not overflow) before shifting left. $shiftx is implemented identically to $shift
+// here, as there are no x-bits in this two-valued model.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shift_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return shr_uu<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shift_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return shr_su<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shift_us(const value<BitsA> &a, const value<BitsB> &b) {
+       return b.is_neg() ? shl_uu<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_uu<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shift_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return b.is_neg() ? shl_su<BitsY>(a, b.template sext<BitsB + 1>().neg()) : shr_su<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shiftx_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return shift_uu<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shiftx_su(const value<BitsA> &a, const value<BitsB> &b) {
+       return shift_su<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shiftx_us(const value<BitsA> &a, const value<BitsB> &b) {
+       return shift_us<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> shiftx_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return shift_ss<BitsY>(a, b);
+}
+
+// Comparison operations
+// Equality is checked at the width of the wider operand, with zero extension for _uu
+// and sign extension for _ss. $eqx/$nex are implemented identically to $eq/$ne here,
+// as there are no x-bits in this two-valued model.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> eq_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY>{ a.template zext<BitsExt>() == b.template zext<BitsExt>() ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> eq_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY>{ a.template sext<BitsExt>() == b.template sext<BitsExt>() ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> ne_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY>{ a.template zext<BitsExt>() != b.template zext<BitsExt>() ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> ne_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY>{ a.template sext<BitsExt>() != b.template sext<BitsExt>() ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> eqx_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return eq_uu<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> eqx_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return eq_ss<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> nex_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return ne_uu<BitsY>(a, b);
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> nex_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return ne_ss<BitsY>(a, b);
+}
+
+// Ordered comparisons are all expressed through ucmp()/scmp() (unsigned/signed
+// less-than) after extending both operands to a common width:
+// a > b is b < a, a >= b is !(a < b), and a <= b is !(b < a).
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> gt_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> gt_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> ge_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { !a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> ge_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { !a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> lt_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { a.template zext<BitsExt>().ucmp(b.template zext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> lt_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { a.template sext<BitsExt>().scmp(b.template sext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> le_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { !b.template zext<BitsExt>().ucmp(a.template zext<BitsExt>()) ? 1u : 0u };
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> le_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t BitsExt = max(BitsA, BitsB);
+       return value<BitsY> { !b.template sext<BitsExt>().scmp(a.template sext<BitsExt>()) ? 1u : 0u };
+}
+
+// Arithmetic operations
+// $pos is just a width cast; $neg casts to the result width and then negates
+// (two's complement).
+template<size_t BitsY, size_t BitsA>
+value<BitsY> pos_u(const value<BitsA> &a) {
+       return a.template zcast<BitsY>();
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> pos_s(const value<BitsA> &a) {
+       return a.template scast<BitsY>();
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> neg_u(const value<BitsA> &a) {
+       return a.template zcast<BitsY>().neg();
+}
+
+template<size_t BitsY, size_t BitsA>
+value<BitsY> neg_s(const value<BitsA> &a) {
+       return a.template scast<BitsY>().neg();
+}
+
+// Addition and subtraction are performed at the result width after extending both
+// operands (zero-extended for _uu, sign-extended for _ss).
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> add_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().add(b.template zcast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> add_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().add(b.template scast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sub_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template zcast<BitsY>().sub(b.template zcast<BitsY>());
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> sub_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return a.template scast<BitsY>().sub(b.template scast<BitsY>());
+}
+
+// Shift-and-add multiplication, modulo 2^BitsY. The multiplicand shift is accumulated
+// lazily and only applied when a set multiplier bit is encountered, so runs of zero
+// bits cost no shifts.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> mul_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       value<BitsY> product;
+       value<BitsY> multiplicand = a.template zcast<BitsY>();
+       const value<BitsB> &multiplier = b;
+       uint32_t multiplicand_shift = 0;
+       for (size_t step = 0; step < BitsB; step++) {
+               if (multiplier.bit(step)) {
+                       multiplicand = multiplicand.shl(value<32> { multiplicand_shift });
+                       product = product.add(multiplicand);
+                       multiplicand_shift = 0;
+               }
+               multiplicand_shift++;
+       }
+       return product;
+}
+
+// Signed multiply via the unsigned routine: take B's magnitude (widened by one bit so
+// the most negative value does not overflow), multiply, and negate the product back
+// when B was negative. A needs no magnitude handling because the product is taken
+// modulo 2^BitsY, where sign extension suffices.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> mul_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       value<BitsB + 1> ub = b.template sext<BitsB + 1>();
+       if (ub.is_neg()) ub = ub.neg();
+       value<BitsY> y = mul_uu<BitsY>(a.template scast<BitsY>(), ub);
+       return b.is_neg() ? y.neg() : y;
+}
+
+// Unsigned restoring division: align the divisor with the dividend, then produce one
+// quotient bit per iteration while shifting the divisor back down.
+// Returns {quotient, remainder}, both truncated to BitsY.
+// NOTE(review): divisor_shift is derived from ctlz(), which (see its comment) does not
+// return the distance to the most significant set bit when set bits span multiple
+// chunks; in that case the divisor is not shifted far enough and the quotient comes
+// out too small (e.g. (2^33 + 1) / 1 at 64 bits). Verify against a reference division.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+std::pair<value<BitsY>, value<BitsY>> divmod_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       constexpr size_t Bits = max(BitsY, max(BitsA, BitsB));
+       value<Bits> quotient;
+       value<Bits> dividend = a.template zext<Bits>();
+       value<Bits> divisor = b.template zext<Bits>();
+       // dividend < divisor: quotient is zero and the dividend is the remainder.
+       if (dividend.ucmp(divisor))
+               return {/*quotient=*/value<BitsY> { 0u }, /*remainder=*/dividend.template trunc<BitsY>()};
+       uint32_t divisor_shift = dividend.ctlz() - divisor.ctlz();
+       divisor = divisor.shl(value<32> { divisor_shift });
+       for (size_t step = 0; step <= divisor_shift; step++) {
+               quotient = quotient.shl(value<1> { 1u });
+               // If the shifted divisor fits into the remaining dividend, subtract it and
+               // record a one bit in the quotient.
+               if (!dividend.ucmp(divisor)) {
+                       dividend = dividend.sub(divisor);
+                       quotient.set_bit(0, true);
+               }
+               divisor = divisor.shr(value<1> { 1u });
+       }
+       return {quotient.template trunc<BitsY>(), /*remainder=*/dividend.template trunc<BitsY>()};
+}
+
+// Signed division via unsigned division of magnitudes (each widened by one bit so the
+// most negative value does not overflow when negated). The quotient is negated when
+// the operand signs differ; the remainder takes the sign of the dividend.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+std::pair<value<BitsY>, value<BitsY>> divmod_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       value<BitsA + 1> ua = a.template sext<BitsA + 1>();
+       value<BitsB + 1> ub = b.template sext<BitsB + 1>();
+       if (ua.is_neg()) ua = ua.neg();
+       if (ub.is_neg()) ub = ub.neg();
+       value<BitsY> y, r;
+       std::tie(y, r) = divmod_uu<BitsY>(ua, ub);
+       if (a.is_neg() != b.is_neg()) y = y.neg();
+       if (a.is_neg()) r = r.neg();
+       return {y, r};
+}
+
+// $div/$mod are the quotient and remainder halves of divmod, respectively.
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> div_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return divmod_uu<BitsY>(a, b).first;
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> div_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return divmod_ss<BitsY>(a, b).first;
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> mod_uu(const value<BitsA> &a, const value<BitsB> &b) {
+       return divmod_uu<BitsY>(a, b).second;
+}
+
+template<size_t BitsY, size_t BitsA, size_t BitsB>
+value<BitsY> mod_ss(const value<BitsA> &a, const value<BitsB> &b) {
+       return divmod_ss<BitsY>(a, b).second;
+}
+
+} // namespace cxxrtl_yosys
+
+#endif
index 380f7030bd54a88e48cea5f5fe4f88803d18fb12..cbf041f79a8562e73e89a1a15ec8f9751994fc97 100644 (file)
@@ -1034,6 +1034,8 @@ void run_backend(std::string filename, std::string command, RTLIL::Design *desig
                        command = "verilog";
                else if (filename.size() > 3 && filename.compare(filename.size()-3, std::string::npos, ".il") == 0)
                        command = "ilang";
+               else if (filename.size() > 3 && filename.compare(filename.size()-3, std::string::npos, ".cc") == 0)
+                       command = "cxxrtl";
                else if (filename.size() > 4 && filename.compare(filename.size()-4, std::string::npos, ".aig") == 0)
                        command = "aiger";
                else if (filename.size() > 5 && filename.compare(filename.size()-5, std::string::npos, ".blif") == 0)
index 16e0aaf1ca97b14f7f917a38ac9321e27866bb84..6aed7c96ab4fabf6f22e0ee48422ecf2ccc85f4b 100644 (file)
@@ -207,6 +207,7 @@ namespace RTLIL {
        struct SigSpec;
        struct Wire;
        struct Cell;
+       struct Memory;
        struct Module;
        struct Design;
        struct Monitor;
@@ -229,6 +230,7 @@ using RTLIL::Design;
 namespace hashlib {
        template<> struct hash_ops<RTLIL::Wire*> : hash_obj_ops {};
        template<> struct hash_ops<RTLIL::Cell*> : hash_obj_ops {};
+       template<> struct hash_ops<RTLIL::Memory*> : hash_obj_ops {};
        template<> struct hash_ops<RTLIL::Module*> : hash_obj_ops {};
        template<> struct hash_ops<RTLIL::Design*> : hash_obj_ops {};
        template<> struct hash_ops<RTLIL::Monitor*> : hash_obj_ops {};
@@ -236,6 +238,7 @@ namespace hashlib {
 
        template<> struct hash_ops<const RTLIL::Wire*> : hash_obj_ops {};
        template<> struct hash_ops<const RTLIL::Cell*> : hash_obj_ops {};
+       template<> struct hash_ops<const RTLIL::Memory*> : hash_obj_ops {};
        template<> struct hash_ops<const RTLIL::Module*> : hash_obj_ops {};
        template<> struct hash_ops<const RTLIL::Design*> : hash_obj_ops {};
        template<> struct hash_ops<const RTLIL::Monitor*> : hash_obj_ops {};