import argparse, os, sys, shutil, tempfile, re
##yosys-sys-path##
-from sby_core import SbyJob, SbyAbort, process_filename
+from sby_core import SbyTask, SbyAbort, process_filename
from time import localtime
class DictAction(argparse.Action):
print("ERROR: Exactly one task is required when workdir is specified. Specify the task or use --prefix instead of -d.", file=sys.stderr)
sys.exit(1)
-def run_job(taskname):
+def run_task(taskname):
my_opt_tmpdir = opt_tmpdir
my_workdir = None
junit_filename = "junit"
sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname)
- job = SbyJob(sbyconfig, my_workdir, early_logmsgs, reusedir)
+ task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir)
for k, v in exe_paths.items():
- job.exe_paths[k] = v
+ task.exe_paths[k] = v
if throw_err:
- job.run(setupmode)
+ task.run(setupmode)
else:
try:
- job.run(setupmode)
+ task.run(setupmode)
except SbyAbort:
pass
if my_opt_tmpdir:
- job.log(f"Removing directory '{my_workdir}'.")
+ task.log(f"Removing directory '{my_workdir}'.")
shutil.rmtree(my_workdir, ignore_errors=True)
if setupmode:
- job.log(f"SETUP COMPLETE (rc={job.retcode})")
+ task.log(f"SETUP COMPLETE (rc={task.retcode})")
else:
- job.log(f"DONE ({job.status}, rc={job.retcode})")
- job.logfile.close()
+ task.log(f"DONE ({task.status}, rc={task.retcode})")
+ task.logfile.close()
if not my_opt_tmpdir and not setupmode:
- with open("{}/{}.xml".format(job.workdir, junit_filename), "w") as f:
- junit_errors = 1 if job.retcode == 16 else 0
- junit_failures = 1 if job.retcode != 0 and junit_errors == 0 else 0
+ with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f:
+ junit_errors = 1 if task.retcode == 16 else 0
+ junit_failures = 1 if task.retcode != 0 and junit_errors == 0 else 0
print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
- print(f'<testsuites disabled="0" errors="{junit_errors}" failures="{junit_failures}" tests="1" time="{job.total_time}">', file=f)
- print(f'<testsuite disabled="0" errors="{junit_errors}" failures="{junit_failures}" name="{junit_ts_name}" skipped="0" tests="1" time="{job.total_time}">', file=f)
+ print(f'<testsuites disabled="0" errors="{junit_errors}" failures="{junit_failures}" tests="1" time="{task.total_time}">', file=f)
+ print(f'<testsuite disabled="0" errors="{junit_errors}" failures="{junit_failures}" name="{junit_ts_name}" skipped="0" tests="1" time="{task.total_time}">', file=f)
print('<properties>', file=f)
print(f'<property name="os" value="{os.name}"/>', file=f)
print('</properties>', file=f)
- print(f'<testcase classname="{junit_ts_name}" name="{junit_tc_name}" status="{job.status}" time="{job.total_time}">', file=f)
+ print(f'<testcase classname="{junit_ts_name}" name="{junit_tc_name}" status="{task.status}" time="{task.total_time}">', file=f)
if junit_errors:
- print(f'<error message="{job.status}" type="{job.status}"/>', file=f)
+ print(f'<error message="{task.status}" type="{task.status}"/>', file=f)
if junit_failures:
- print(f'<failure message="{job.status}" type="{job.status}"/>', file=f)
+ print(f'<failure message="{task.status}" type="{task.status}"/>', file=f)
print('<system-out>', end="", file=f)
- with open(f"{job.workdir}/logfile.txt", "r") as logf:
+ with open(f"{task.workdir}/logfile.txt", "r") as logf:
for line in logf:
print(line.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace("\"", "&quot;"), end="", file=f)
print('</system-out></testcase></testsuite></testsuites>', file=f)
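# The replace chain above escapes &, <, > and " so the copied logfile text is valid XML
# character data inside <system-out> (roughly equivalent to xml.sax.saxutils.escape(line, {'"': "&quot;"})).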
- with open(f"{job.workdir}/status", "w") as f:
- print(f"{job.status} {job.retcode} {job.total_time}", file=f)
+ with open(f"{task.workdir}/status", "w") as f:
+ print(f"{task.status} {task.retcode} {task.total_time}", file=f)
- return job.retcode
+ return task.retcode
failed = []
retcode = 0
for task in tasknames:
- task_retcode = run_job(task)
+ task_retcode = run_task(task)
retcode |= task_retcode
if task_retcode:
failed.append(task)
from select import select
from time import time, localtime, sleep
-all_tasks_running = []
+all_procs_running = []
def force_shutdown(signum, frame):
print("SBY ---- Keyboard interrupt or external termination signal ----", flush=True)
- for task in list(all_tasks_running):
- task.terminate()
+ for proc in list(all_procs_running):
+ proc.terminate()
sys.exit(1)
if os.name == "posix":
return filename
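# SbyProc (formerly SbyTask): wraps a single external subprocess, tracking its dependencies
# on other procs and forwarding output/exit events through optional callbacks.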
-class SbyTask:
- def __init__(self, job, info, deps, cmdline, logfile=None, logstderr=True, silent=False):
+class SbyProc:
+ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, silent=False):
self.running = False
self.finished = False
self.terminated = False
self.checkretcode = False
- self.job = job
+ self.task = task
self.info = info
self.deps = deps
if os.name == "posix":
self.logstderr = logstderr
self.silent = silent
- self.job.tasks_pending.append(self)
+ self.task.procs_pending.append(self)
for dep in self.deps:
dep.register_dep(self)
self.output_callback = None
self.exit_callback = None
- def register_dep(self, next_task):
+ def register_dep(self, next_proc):
if self.finished:
- next_task.poll()
+ next_proc.poll()
else:
- self.notify.append(next_task)
+ self.notify.append(next_proc)
def log(self, line):
if line is not None and (self.noprintregex is None or not self.noprintregex.match(line)):
if self.logfile is not None:
print(line, file=self.logfile)
- self.job.log(f"{self.info}: {line}")
+ self.task.log(f"{self.info}: {line}")
def handle_output(self, line):
if self.terminated or len(line) == 0:
self.exit_callback(retcode)
def terminate(self, timeout=False):
- if self.job.opt_wait and not timeout:
+ if self.task.opt_wait and not timeout:
return
if self.running:
if not self.silent:
- self.job.log(f"{self.info}: terminating process")
+ self.task.log(f"{self.info}: terminating process")
if os.name == "posix":
try:
os.killpg(self.p.pid, signal.SIGTERM)
except PermissionError:
pass
self.p.terminate()
- self.job.tasks_running.remove(self)
- all_tasks_running.remove(self)
+ self.task.procs_running.remove(self)
+ all_procs_running.remove(self)
self.terminated = True
def poll(self):
return
if not self.silent:
- self.job.log(f"{self.info}: starting process \"{self.cmdline}\"")
+ self.task.log(f"{self.info}: starting process \"{self.cmdline}\"")
if os.name == "posix":
def preexec_fn():
self.p = subprocess.Popen(self.cmdline, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
stderr=(subprocess.STDOUT if self.logstderr else None))
- self.job.tasks_pending.remove(self)
- self.job.tasks_running.append(self)
- all_tasks_running.append(self)
+ self.task.procs_pending.remove(self)
+ self.task.procs_running.append(self)
+ all_procs_running.append(self)
self.running = True
return
if self.p.poll() is not None:
if not self.silent:
- self.job.log(f"{self.info}: finished (returncode={self.p.returncode})")
- self.job.tasks_running.remove(self)
- all_tasks_running.remove(self)
+ self.task.log(f"{self.info}: finished (returncode={self.p.returncode})")
+ self.task.procs_running.remove(self)
+ all_procs_running.remove(self)
self.running = False
if self.p.returncode == 127:
- self.job.status = "ERROR"
+ self.task.status = "ERROR"
if not self.silent:
- self.job.log(f"{self.info}: COMMAND NOT FOUND. ERROR.")
+ self.task.log(f"{self.info}: COMMAND NOT FOUND. ERROR.")
self.terminated = True
- self.job.terminate()
+ self.task.terminate()
return
self.handle_exit(self.p.returncode)
if self.checkretcode and self.p.returncode != 0:
- self.job.status = "ERROR"
+ self.task.status = "ERROR"
if not self.silent:
- self.job.log(f"{self.info}: job failed. ERROR.")
+ self.task.log(f"{self.info}: task failed. ERROR.")
self.terminated = True
- self.job.terminate()
+ self.task.terminate()
return
self.finished = True
- for next_task in self.notify:
- next_task.poll()
+ for next_proc in self.notify:
+ next_proc.poll()
return
pass
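# SbyTask (formerly SbyJob): represents one .sby task; it owns the work directory, parsed
# options, generated models, and the pools of pending/running SbyProc instances.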
-class SbyJob:
+class SbyTask:
def __init__(self, sbyconfig, workdir, early_logs, reusedir):
self.options = dict()
self.used_options = set()
"pono": os.getenv("PONO", "pono"),
}
- self.tasks_running = []
- self.tasks_pending = []
+ self.procs_running = []
+ self.procs_pending = []
self.start_clock_time = time()
print(line, file=f)
def taskloop(self):
- for task in self.tasks_pending:
- task.poll()
+ for proc in self.procs_pending:
+ proc.poll()
- while len(self.tasks_running):
+ while len(self.procs_running):
fds = []
- for task in self.tasks_running:
- if task.running:
- fds.append(task.p.stdout)
+ for proc in self.procs_running:
+ if proc.running:
+ fds.append(proc.p.stdout)
if os.name == "posix":
try:
else:
sleep(0.1)
- for task in self.tasks_running:
- task.poll()
+ for proc in self.procs_running:
+ proc.poll()
- for task in self.tasks_pending:
- task.poll()
+ for proc in self.procs_pending:
+ proc.poll()
if self.opt_timeout is not None:
total_clock_time = int(time() - self.start_clock_time)
if total_clock_time > self.opt_timeout:
- self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). Terminating all tasks.")
+ self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). Terminating all subprocesses.")
self.status = "TIMEOUT"
self.terminate(timeout=True)
print("hierarchy -simcheck", file=f)
print(f"""write_ilang ../model/design{"" if model_name == "base" else "_nomem"}.il""", file=f)
- task = SbyTask(
+ proc = SbyProc(
self,
model_name,
[],
"cd {}/src; {} -ql ../model/design{s}.log ../model/design{s}.ys".format(self.workdir, self.exe_paths["yosys"],
s="" if model_name == "base" else "_nomem")
)
- task.checkretcode = True
+ proc.checkretcode = True
- return [task]
+ return [proc]
if re.match(r"^smt2(_syn)?(_nomem)?(_stbv|_stdt)?$", model_name):
with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f:
else:
print(f"write_smt2 -wires design_{model_name}.smt2", file=f)
- task = SbyTask(
+ proc = SbyProc(
self,
model_name,
self.model("nomem" if "_nomem" in model_name else "base"),
"cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name)
)
- task.checkretcode = True
+ proc.checkretcode = True
- return [task]
+ return [proc]
if re.match(r"^btor(_syn)?(_nomem)?$", model_name):
with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f:
print("stat", file=f)
print("write_btor {}-i design_{m}.info design_{m}.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f)
- task = SbyTask(
+ proc = SbyProc(
self,
- model_name,
+ model_name,
self.model("nomem" if "_nomem" in model_name else "base"),
"cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name)
)
- task.checkretcode = True
+ proc.checkretcode = True
- return [task]
+ return [proc]
if model_name == "aig":
with open(f"{self.workdir}/model/design_aiger.ys", "w") as f:
print("stat", file=f)
print("write_aiger -I -B -zinit -map design_aiger.aim design_aiger.aig", file=f)
- task = SbyTask(
+ proc = SbyProc(
self,
"aig",
self.model("nomem"),
f"""cd {self.workdir}/model; {self.exe_paths["yosys"]} -ql design_aiger.log design_aiger.ys"""
)
- task.checkretcode = True
+ proc.checkretcode = True
- return [task]
+ return [proc]
assert False
return self.models[model_name]
def terminate(self, timeout=False):
- for task in list(self.tasks_running):
- task.terminate(timeout=timeout)
+ for proc in list(self.procs_running):
+ proc.terminate(timeout=timeout)
def update_status(self, new_status):
assert new_status in ["PASS", "FAIL", "UNKNOWN", "ERROR"]
#
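# ABC engine glue (sby_engine_abc): runs ABC's bmc3/sim3/pdr commands on the AIGER model
# and re-checks a FAIL trace with smtbmc to produce VCD/testbench output.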
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(mode, job, engine_idx, engine):
+def run(mode, task, engine_idx, engine):
abc_opts, abc_command = getopt.getopt(engine[1:], "", [])
if len(abc_command) == 0:
- job.error("Missing ABC command.")
+ task.error("Missing ABC command.")
for o, a in abc_opts:
- job.error("Unexpected ABC engine options.")
+ task.error("Unexpected ABC engine options.")
if abc_command[0] == "bmc3":
if mode != "bmc":
- job.error("ABC command 'bmc3' is only valid in bmc mode.")
- abc_command[0] += f" -F {job.opt_depth} -v"
+ task.error("ABC command 'bmc3' is only valid in bmc mode.")
+ abc_command[0] += f" -F {task.opt_depth} -v"
elif abc_command[0] == "sim3":
if mode != "bmc":
- job.error("ABC command 'sim3' is only valid in bmc mode.")
- abc_command[0] += f" -F {job.opt_depth} -v"
+ task.error("ABC command 'sim3' is only valid in bmc mode.")
+ abc_command[0] += f" -F {task.opt_depth} -v"
elif abc_command[0] == "pdr":
if mode != "prove":
- job.error("ABC command 'pdr' is only valid in prove mode.")
+ task.error("ABC command 'pdr' is only valid in prove mode.")
else:
- job.error(f"Invalid ABC command {abc_command[0]}.")
+ task.error(f"Invalid ABC command {abc_command[0]}.")
- task = SbyTask(
- job,
+ proc = SbyProc(
+ task,
f"engine_{engine_idx}",
- job.model("aig"),
- f"""cd {job.workdir}; {job.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile.txt", "w")
+ task.model("aig"),
+ f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w")
)
- task.noprintregex = re.compile(r"^\.+$")
- task_status = None
+ proc.noprintregex = re.compile(r"^\.+$")
+ proc_status = None
def output_callback(line):
- nonlocal task_status
+ nonlocal proc_status
match = re.match(r"^Output [0-9]+ of miter .* was asserted in frame [0-9]+.", line)
- if match: task_status = "FAIL"
+ if match: proc_status = "FAIL"
match = re.match(r"^Simulation of [0-9]+ frames for [0-9]+ rounds with [0-9]+ restarts did not assert POs.", line)
- if match: task_status = "UNKNOWN"
+ if match: proc_status = "UNKNOWN"
match = re.match(r"^Stopping BMC because all 2\^[0-9]+ reachable states are visited.", line)
- if match: task_status = "PASS"
+ if match: proc_status = "PASS"
match = re.match(r"^No output asserted in [0-9]+ frames.", line)
- if match: task_status = "PASS"
+ if match: proc_status = "PASS"
match = re.match(r"^Property proved.", line)
- if match: task_status = "PASS"
+ if match: proc_status = "PASS"
return line
def exit_callback(retcode):
assert retcode == 0
- assert task_status is not None
+ assert proc_status is not None
- job.update_status(task_status)
- job.log(f"engine_{engine_idx}: Status returned by engine: {task_status}")
- job.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {task_status}""")
+ task.update_status(proc_status)
+ task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}")
+ task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""")
- job.terminate()
+ task.terminate()
- if task_status == "FAIL" and job.opt_aigsmt != "none":
- task2 = SbyTask(
- job,
+ if proc_status == "FAIL" and task.opt_aigsmt != "none":
+ proc2 = SbyProc(
+ task,
f"engine_{engine_idx}",
- job.model("smt2"),
+ task.model("smt2"),
("cd {}; {} -s {}{} --noprogress --append {} --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " +
"--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw --aig-noheader model/design_smt2.smt2").format
- (job.workdir, job.exe_paths["smtbmc"], job.opt_aigsmt,
- "" if job.opt_tbtop is None else f" --vlogtb-top {job.opt_tbtop}",
- job.opt_append, i=engine_idx),
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile2.txt", "w")
+ (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt,
+ "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}",
+ task.opt_append, i=engine_idx),
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w")
)
- task2_status = None
+ proc2_status = None
def output_callback2(line):
- nonlocal task2_status
+ nonlocal proc2_status
match = re.match(r"^## [0-9: ]+ Status: FAILED", line)
- if match: task2_status = "FAIL"
+ if match: proc2_status = "FAIL"
match = re.match(r"^## [0-9: ]+ Status: PASSED", line)
- if match: task2_status = "PASS"
+ if match: proc2_status = "PASS"
return line
def exit_callback2(line):
- assert task2_status is not None
- assert task2_status == "FAIL"
+ assert proc2_status is not None
+ assert proc2_status == "FAIL"
- if os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace.vcd"):
- job.summary.append(f"counterexample trace: {job.workdir}/engine_{engine_idx}/trace.vcd")
+ if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"):
+ task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd")
- task2.output_callback = output_callback2
- task2.exit_callback = exit_callback2
+ proc2.output_callback = output_callback2
+ proc2.exit_callback = exit_callback2
- task.output_callback = output_callback
- task.exit_callback = exit_callback
+ proc.output_callback = output_callback
+ proc.exit_callback = exit_callback
#
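# AIGER engine glue (sby_engine_aiger): drives the suprove, avy, and aigbmc solvers on the
# AIGER model and captures their witness output in trace.aiw.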
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(mode, job, engine_idx, engine):
+def run(mode, task, engine_idx, engine):
opts, solver_args = getopt.getopt(engine[1:], "", [])
if len(solver_args) == 0:
- job.error("Missing solver command.")
+ task.error("Missing solver command.")
for o, a in opts:
- job.error("Unexpected AIGER engine options.")
+ task.error("Unexpected AIGER engine options.")
if solver_args[0] == "suprove":
if mode == "live" and (len(solver_args) == 1 or solver_args[1][0] != "+"):
solver_args.insert(1, "+simple_liveness")
- solver_cmd = " ".join([job.exe_paths["suprove"]] + solver_args[1:])
+ solver_cmd = " ".join([task.exe_paths["suprove"]] + solver_args[1:])
elif solver_args[0] == "avy":
- solver_cmd = " ".join([job.exe_paths["avy"], "--cex", "-"] + solver_args[1:])
+ solver_cmd = " ".join([task.exe_paths["avy"], "--cex", "-"] + solver_args[1:])
elif solver_args[0] == "aigbmc":
- solver_cmd = " ".join([job.exe_paths["aigbmc"]] + solver_args[1:])
+ solver_cmd = " ".join([task.exe_paths["aigbmc"]] + solver_args[1:])
else:
- job.error(f"Invalid solver command {solver_args[0]}.")
+ task.error(f"Invalid solver command {solver_args[0]}.")
- task = SbyTask(
- job,
+ proc = SbyProc(
+ task,
f"engine_{engine_idx}",
- job.model("aig"),
- f"cd {job.workdir}; {solver_cmd} model/design_aiger.aig",
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile.txt", "w")
+ task.model("aig"),
+ f"cd {task.workdir}; {solver_cmd} model/design_aiger.aig",
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w")
)
- task_status = None
+ proc_status = None
produced_cex = False
end_of_cex = False
- aiw_file = open(f"{job.workdir}/engine_{engine_idx}/trace.aiw", "w")
+ aiw_file = open(f"{task.workdir}/engine_{engine_idx}/trace.aiw", "w")
def output_callback(line):
- nonlocal task_status
+ nonlocal proc_status
nonlocal produced_cex
nonlocal end_of_cex
- if task_status is not None:
+ if proc_status is not None:
if not end_of_cex and not produced_cex and line.isdigit():
produced_cex = True
if not end_of_cex:
if line in ["0", "1", "2"]:
print(line, file=aiw_file)
- if line == "0": task_status = "PASS"
- if line == "1": task_status = "FAIL"
- if line == "2": task_status = "UNKNOWN"
+ if line == "0": proc_status = "PASS"
+ if line == "1": proc_status = "FAIL"
+ if line == "2": proc_status = "UNKNOWN"
return None
def exit_callback(retcode):
if solver_args[0] not in ["avy"]:
assert retcode == 0
- assert task_status is not None
+ assert proc_status is not None
aiw_file.close()
- job.update_status(task_status)
- job.log(f"engine_{engine_idx}: Status returned by engine: {task_status}")
- job.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {task_status}""")
+ task.update_status(proc_status)
+ task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}")
+ task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""")
- job.terminate()
+ task.terminate()
- if task_status == "FAIL" and job.opt_aigsmt != "none":
+ if proc_status == "FAIL" and task.opt_aigsmt != "none":
if produced_cex:
if mode == "live":
- task2 = SbyTask(
- job,
+ proc2 = SbyProc(
+ task,
f"engine_{engine_idx}",
- job.model("smt2"),
+ task.model("smt2"),
("cd {}; {} -g -s {}{} --noprogress --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " +
"--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format
- (job.workdir, job.exe_paths["smtbmc"], job.opt_aigsmt,
- "" if job.opt_tbtop is None else f" --vlogtb-top {job.opt_tbtop}",
+ (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt,
+ "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}",
i=engine_idx),
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile2.txt", "w")
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w")
)
else:
- task2 = SbyTask(
- job,
+ proc2 = SbyProc(
+ task,
f"engine_{engine_idx}",
- job.model("smt2"),
+ task.model("smt2"),
("cd {}; {} -s {}{} --noprogress --append {} --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " +
"--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format
- (job.workdir, job.exe_paths["smtbmc"], job.opt_aigsmt,
- "" if job.opt_tbtop is None else f" --vlogtb-top {job.opt_tbtop}",
- job.opt_append, i=engine_idx),
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile2.txt", "w")
+ (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt,
+ "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}",
+ task.opt_append, i=engine_idx),
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w")
)
- task2_status = None
+ proc2_status = None
def output_callback2(line):
- nonlocal task2_status
+ nonlocal proc2_status
match = re.match(r"^## [0-9: ]+ Status: FAILED", line)
- if match: task2_status = "FAIL"
+ if match: proc2_status = "FAIL"
match = re.match(r"^## [0-9: ]+ Status: PASSED", line)
- if match: task2_status = "PASS"
+ if match: proc2_status = "PASS"
return line
def exit_callback2(line):
- assert task2_status is not None
+ assert proc2_status is not None
if mode == "live":
- assert task2_status == "PASS"
+ assert proc2_status == "PASS"
else:
- assert task2_status == "FAIL"
+ assert proc2_status == "FAIL"
- if os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace.vcd"):
- job.summary.append(f"counterexample trace: {job.workdir}/engine_{engine_idx}/trace.vcd")
+ if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"):
+ task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd")
- task2.output_callback = output_callback2
- task2.exit_callback = exit_callback2
+ proc2.output_callback = output_callback2
+ proc2.exit_callback = exit_callback2
else:
- job.log(f"engine_{engine_idx}: Engine did not produce a counter example.")
+ task.log(f"engine_{engine_idx}: Engine did not produce a counter example.")
- task.output_callback = output_callback
- task.exit_callback = exit_callback
+ proc.output_callback = output_callback
+ proc.exit_callback = exit_callback
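#
# BTOR engine glue (sby_engine_btor): drives btormc or pono on the BTOR2 model and uses
# btorsim to turn witness files into VCD traces.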
import re, os, getopt
from types import SimpleNamespace
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(mode, job, engine_idx, engine):
+def run(mode, task, engine_idx, engine):
random_seed = None
opts, solver_args = getopt.getopt(engine[1:], "", ["seed="])
if len(solver_args) == 0:
- job.error("Missing solver command.")
+ task.error("Missing solver command.")
for o, a in opts:
if o == "--seed":
random_seed = a
else:
- job.error("Unexpected BTOR engine options.")
+ task.error("Unexpected BTOR engine options.")
if solver_args[0] == "btormc":
solver_cmd = ""
if random_seed:
solver_cmd += f"BTORSEED={random_seed} "
- solver_cmd += job.exe_paths["btormc"] + f""" --stop-first {0 if mode == "cover" else 1} -v 1 -kmax {job.opt_depth - 1}"""
- if job.opt_skip is not None:
- solver_cmd += f" -kmin {job.opt_skip}"
+ solver_cmd += task.exe_paths["btormc"] + f""" --stop-first {0 if mode == "cover" else 1} -v 1 -kmax {task.opt_depth - 1}"""
+ if task.opt_skip is not None:
+ solver_cmd += f" -kmin {task.opt_skip}"
solver_cmd += " ".join([""] + solver_args[1:])
elif solver_args[0] == "pono":
if random_seed:
- job.error("Setting the random seed is not available for the pono solver.")
- solver_cmd = job.exe_paths["pono"] + f" -v 1 -e bmc -k {job.opt_depth - 1}"
+ task.error("Setting the random seed is not available for the pono solver.")
+ solver_cmd = task.exe_paths["pono"] + f" -v 1 -e bmc -k {task.opt_depth - 1}"
else:
- job.error(f"Invalid solver command {solver_args[0]}.")
+ task.error(f"Invalid solver command {solver_args[0]}.")
common_state = SimpleNamespace()
common_state.solver_status = None
common_state.assert_fail = False
common_state.produced_traces = []
common_state.print_traces_max = 5
- common_state.running_tasks = 0
+ common_state.running_procs = 0
def print_traces_and_terminate():
if mode == "cover":
if common_state.assert_fail:
- task_status = "FAIL"
+ proc_status = "FAIL"
elif common_state.expected_cex == 0:
- task_status = "pass"
+ proc_status = "pass"
elif common_state.solver_status == "sat":
- task_status = "pass"
+ proc_status = "pass"
elif common_state.solver_status == "unsat":
- task_status = "FAIL"
+ proc_status = "FAIL"
else:
- job.error(f"engine_{engine_idx}: Engine terminated without status.")
+ task.error(f"engine_{engine_idx}: Engine terminated without status.")
else:
if common_state.expected_cex == 0:
- task_status = "pass"
+ proc_status = "pass"
elif common_state.solver_status == "sat":
- task_status = "FAIL"
+ proc_status = "FAIL"
elif common_state.solver_status == "unsat":
- task_status = "pass"
+ proc_status = "pass"
else:
- job.error(f"engine_{engine_idx}: Engine terminated without status.")
+ task.error(f"engine_{engine_idx}: Engine terminated without status.")
- job.update_status(task_status.upper())
- job.log(f"engine_{engine_idx}: Status returned by engine: {task_status}")
- job.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {task_status}""")
+ task.update_status(proc_status.upper())
+ task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}")
+ task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""")
if len(common_state.produced_traces) == 0:
- job.log(f"""engine_{engine_idx}: Engine did not produce a{" counter" if mode != "cover" else "n "}example.""")
+ task.log(f"""engine_{engine_idx}: Engine did not produce a{" counter" if mode != "cover" else "n "}example.""")
elif len(common_state.produced_traces) <= common_state.print_traces_max:
- job.summary.extend(common_state.produced_traces)
+ task.summary.extend(common_state.produced_traces)
else:
- job.summary.extend(common_state.produced_traces[:common_state.print_traces_max])
+ task.summary.extend(common_state.produced_traces[:common_state.print_traces_max])
excess_traces = len(common_state.produced_traces) - common_state.print_traces_max
- job.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""")
+ task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""")
- job.terminate()
+ task.terminate()
if mode == "cover":
def output_callback2(line):
def exit_callback2(retcode):
assert retcode == 0
- vcdpath = f"{job.workdir}/engine_{engine_idx}/trace{suffix}.vcd"
+ vcdpath = f"{task.workdir}/engine_{engine_idx}/trace{suffix}.vcd"
if os.path.exists(vcdpath):
common_state.produced_traces.append(f"""{"" if mode == "cover" else "counterexample "}trace: {vcdpath}""")
- common_state.running_tasks -= 1
- if (common_state.running_tasks == 0):
+ common_state.running_procs -= 1
+ if (common_state.running_procs == 0):
print_traces_and_terminate()
return exit_callback2
assert common_state.produced_cex == 0
else:
- job.error(f"engine_{engine_idx}: BTOR solver '{solver_args[0]}' is currently not supported in cover mode.")
+ task.error(f"engine_{engine_idx}: BTOR solver '{solver_args[0]}' is currently not supported in cover mode.")
if (common_state.produced_cex < common_state.expected_cex) and line == "sat":
assert common_state.wit_file == None
if common_state.expected_cex == 1:
- common_state.wit_file = open(f"{job.workdir}/engine_{engine_idx}/trace.wit", "w")
+ common_state.wit_file = open(f"{task.workdir}/engine_{engine_idx}/trace.wit", "w")
else:
- common_state.wit_file = open(f"""{job.workdir}/engine_{engine_idx}/trace{common_state.produced_cex}.wit""", "w")
+ common_state.wit_file = open(f"""{task.workdir}/engine_{engine_idx}/trace{common_state.produced_cex}.wit""", "w")
if solver_args[0] != "btormc":
- task.log("Found satisfiability witness.")
+ proc.log("Found satisfiability witness.")
if common_state.wit_file:
print(line, file=common_state.wit_file)
suffix = ""
else:
suffix = common_state.produced_cex
- task2 = SbyTask(
- job,
+ proc2 = SbyProc(
+ task,
f"engine_{engine_idx}_{common_state.produced_cex}",
- job.model("btor"),
- "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor.info model/design_btor.btor engine_{idx}/trace{i}.wit".format(dir=job.workdir, idx=engine_idx, i=suffix),
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile2.txt", "w")
+ task.model("btor"),
+ "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor.info model/design_btor.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix),
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w")
)
- task2.output_callback = output_callback2
- task2.exit_callback = make_exit_callback(suffix)
- task2.checkretcode = True
- common_state.running_tasks += 1
+ proc2.output_callback = output_callback2
+ proc2.exit_callback = make_exit_callback(suffix)
+ proc2.checkretcode = True
+ common_state.running_procs += 1
common_state.produced_cex += 1
common_state.wit_file.close()
if line not in ["b0"]:
return line
- print(line, file=task.logfile)
+ print(line, file=proc.logfile)
return None
if common_state.solver_status == "unsat":
if common_state.expected_cex == 1:
- with open(f"""{job.workdir}/engine_{engine_idx}/trace.wit""", "w") as wit_file:
+ with open(f"""{task.workdir}/engine_{engine_idx}/trace.wit""", "w") as wit_file:
print("unsat", file=wit_file)
else:
for i in range(common_state.produced_cex, common_state.expected_cex):
- with open(f"{job.workdir}/engine_{engine_idx}/trace{i}.wit", "w") as wit_file:
+ with open(f"{task.workdir}/engine_{engine_idx}/trace{i}.wit", "w") as wit_file:
print("unsat", file=wit_file)
- common_state.running_tasks -= 1
- if (common_state.running_tasks == 0):
+ common_state.running_procs -= 1
+ if (common_state.running_procs == 0):
print_traces_and_terminate()
- task = SbyTask(
- job,
- f"engine_{engine_idx}", job.model("btor"),
- f"cd {job.workdir}; {solver_cmd} model/design_btor.btor",
- logfile=open(f"{job.workdir}/engine_{engine_idx}/logfile.txt", "w")
+ proc = SbyProc(
+ task,
+ f"engine_{engine_idx}", task.model("btor"),
+ f"cd {task.workdir}; {solver_cmd} model/design_btor.btor",
+ logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w")
)
- task.output_callback = output_callback
- task.exit_callback = exit_callback
- common_state.running_tasks += 1
+ proc.output_callback = output_callback
+ proc.exit_callback = exit_callback
+ common_state.running_procs += 1
#
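# smtbmc engine glue (sby_engine_smtbmc): drives the smtbmc solver (task.exe_paths["smtbmc"])
# on the SMT2 model; prove mode is split into separate basecase and induction runs.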
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(mode, job, engine_idx, engine):
+def run(mode, task, engine_idx, engine):
smtbmc_opts = []
nomem_opt = False
presat_opt = True
progress = True
elif o == "--basecase":
if induction_only:
- job.error("smtbmc options --basecase and --induction are exclusive.")
+ task.error("smtbmc options --basecase and --induction are exclusive.")
basecase_only = True
elif o == "--induction":
if basecase_only:
- job.error("smtbmc options --basecase and --induction are exclusive.")
+ task.error("smtbmc options --basecase and --induction are exclusive.")
induction_only = True
elif o == "--seed":
random_seed = a
else:
- job.error(f"Invalid smtbmc options {o}.")
+ task.error(f"Invalid smtbmc options {o}.")
xtra_opts = False
for i, a in enumerate(args):
if unroll_opt is None or unroll_opt:
smtbmc_opts += ["--unroll"]
- if job.opt_smtc is not None:
- smtbmc_opts += ["--smtc", f"src/{job.opt_smtc}"]
+ if task.opt_smtc is not None:
+ smtbmc_opts += ["--smtc", f"src/{task.opt_smtc}"]
- if job.opt_tbtop is not None:
- smtbmc_opts += ["--vlogtb-top", job.opt_tbtop]
+ if task.opt_tbtop is not None:
+ smtbmc_opts += ["--vlogtb-top", task.opt_tbtop]
model_name = "smt2"
if syn_opt: model_name += "_syn"
if mode == "prove":
if not induction_only:
- run("prove_basecase", job, engine_idx, engine)
+ run("prove_basecase", task, engine_idx, engine)
if not basecase_only:
- run("prove_induction", job, engine_idx, engine)
+ run("prove_induction", task, engine_idx, engine)
return
- taskname = f"engine_{engine_idx}"
+ procname = f"engine_{engine_idx}"
trace_prefix = f"engine_{engine_idx}/trace"
- logfile_prefix = f"{job.workdir}/engine_{engine_idx}/logfile"
+ logfile_prefix = f"{task.workdir}/engine_{engine_idx}/logfile"
if mode == "prove_basecase":
- taskname += ".basecase"
+ procname += ".basecase"
logfile_prefix += "_basecase"
if mode == "prove_induction":
- taskname += ".induction"
+ procname += ".induction"
trace_prefix += "_induct"
logfile_prefix += "_induction"
smtbmc_opts.append("-i")
smtbmc_opts.append("--noprogress")
- if job.opt_skip is not None:
- t_opt = "{}:{}".format(job.opt_skip, job.opt_depth)
+ if task.opt_skip is not None:
+ t_opt = "{}:{}".format(task.opt_skip, task.opt_depth)
else:
- t_opt = "{}".format(job.opt_depth)
+ t_opt = "{}".format(task.opt_depth)
random_seed = f"--info \"(set-option :random-seed {random_seed})\"" if random_seed else ""
- task = SbyTask(
- job,
- taskname,
- job.model(model_name),
- f"""cd {job.workdir}; {job.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {job.opt_append} --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""",
+ proc = SbyProc(
+ task,
+ procname,
+ task.model(model_name),
+ f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""",
logfile=open(logfile_prefix + ".txt", "w"),
logstderr=(not progress)
)
if mode == "prove_basecase":
- job.basecase_tasks.append(task)
+ task.basecase_procs.append(proc)
if mode == "prove_induction":
- job.induction_tasks.append(task)
+ task.induction_procs.append(proc)
- task_status = None
+ proc_status = None
def output_callback(line):
- nonlocal task_status
+ nonlocal proc_status
match = re.match(r"^## [0-9: ]+ Status: FAILED", line)
if match:
- task_status = "FAIL"
+ proc_status = "FAIL"
return line.replace("FAILED", "failed")
match = re.match(r"^## [0-9: ]+ Status: PASSED", line)
if match:
- task_status = "PASS"
+ proc_status = "PASS"
return line.replace("PASSED", "passed")
match = re.match(r"^## [0-9: ]+ Status: PREUNSAT", line)
if match:
- task_status = "ERROR"
+ proc_status = "ERROR"
return line
match = re.match(r"^## [0-9: ]+ Unexpected response from solver:", line)
if match:
- task_status = "ERROR"
+ proc_status = "ERROR"
return line
return line
def exit_callback(retcode):
- if task_status is None:
- job.error(f"engine_{engine_idx}: Engine terminated without status.")
+ if proc_status is None:
+ task.error(f"engine_{engine_idx}: Engine terminated without status.")
if mode == "bmc" or mode == "cover":
- job.update_status(task_status)
- task_status_lower = task_status.lower() if task_status == "PASS" else task_status
- job.log(f"engine_{engine_idx}: Status returned by engine: {task_status_lower}")
- job.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {task_status_lower}""")
-
- if task_status == "FAIL" and mode != "cover":
- if os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace.vcd"):
- job.summary.append(f"counterexample trace: {job.workdir}/engine_{engine_idx}/trace.vcd")
- elif task_status == "PASS" and mode == "cover":
+ task.update_status(proc_status)
+ proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status
+ task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status_lower}")
+ task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower}""")
+
+ if proc_status == "FAIL" and mode != "cover":
+ if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"):
+ task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd")
+ elif proc_status == "PASS" and mode == "cover":
print_traces_max = 5
for i in range(print_traces_max):
- if os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace{i}.vcd"):
- job.summary.append(f"trace: {job.workdir}/engine_{engine_idx}/trace{i}.vcd")
+ if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace{i}.vcd"):
+ task.summary.append(f"trace: {task.workdir}/engine_{engine_idx}/trace{i}.vcd")
else:
break
else:
excess_traces = 0
- while os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace{print_traces_max + excess_traces}.vcd"):
+ while os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace{print_traces_max + excess_traces}.vcd"):
excess_traces += 1
if excess_traces > 0:
- job.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""")
+ task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""")
- job.terminate()
+ task.terminate()
elif mode in ["prove_basecase", "prove_induction"]:
- task_status_lower = task_status.lower() if task_status == "PASS" else task_status
- job.log(f"""engine_{engine_idx}: Status returned by engine for {mode.split("_")[1]}: {task_status_lower}""")
- job.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {task_status_lower} for {mode.split("_")[1]}""")
+ proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status
+ task.log(f"""engine_{engine_idx}: Status returned by engine for {mode.split("_")[1]}: {proc_status_lower}""")
+ task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower} for {mode.split("_")[1]}""")
if mode == "prove_basecase":
- for task in job.basecase_tasks:
- task.terminate()
+ for proc in task.basecase_procs:
+ proc.terminate()
- if task_status == "PASS":
- job.basecase_pass = True
+ if proc_status == "PASS":
+ task.basecase_pass = True
else:
- job.update_status(task_status)
- if os.path.exists(f"{job.workdir}/engine_{engine_idx}/trace.vcd"):
- job.summary.append(f"counterexample trace: {job.workdir}/engine_{engine_idx}/trace.vcd")
- job.terminate()
+ task.update_status(proc_status)
+ if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"):
+ task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd")
+ task.terminate()
elif mode == "prove_induction":
- for task in job.induction_tasks:
- task.terminate()
+ for proc in task.induction_procs:
+ proc.terminate()
- if task_status == "PASS":
- job.induction_pass = True
+ if proc_status == "PASS":
+ task.induction_pass = True
else:
assert False
- if job.basecase_pass and job.induction_pass:
- job.update_status("PASS")
- job.summary.append("successful proof by k-induction.")
- job.terminate()
+ if task.basecase_pass and task.induction_pass:
+ task.update_status("PASS")
+ task.summary.append("successful proof by k-induction.")
+ task.terminate()
else:
assert False
- task.output_callback = output_callback
- task.exit_callback = exit_callback
+ proc.output_callback = output_callback
+ proc.exit_callback = exit_callback
#
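# bmc mode: sets the depth/append/aigsmt defaults and dispatches each configured engine
# (smtbmc, abc, or btor) in "bmc" mode.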
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(job):
- job.handle_int_option("depth", 20)
- job.handle_int_option("append", 0)
- job.handle_str_option("aigsmt", "yices")
+def run(task):
+ task.handle_int_option("depth", 20)
+ task.handle_int_option("append", 0)
+ task.handle_str_option("aigsmt", "yices")
- for engine_idx in range(len(job.engines)):
- engine = job.engines[engine_idx]
+ for engine_idx in range(len(task.engines)):
+ engine = task.engines[engine_idx]
assert len(engine) > 0
- job.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
- job.makedirs(f"{job.workdir}/engine_{engine_idx}")
+ task.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
+ task.makedirs(f"{task.workdir}/engine_{engine_idx}")
if engine[0] == "smtbmc":
import sby_engine_smtbmc
- sby_engine_smtbmc.run("bmc", job, engine_idx, engine)
+ sby_engine_smtbmc.run("bmc", task, engine_idx, engine)
elif engine[0] == "abc":
import sby_engine_abc
- sby_engine_abc.run("bmc", job, engine_idx, engine)
+ sby_engine_abc.run("bmc", task, engine_idx, engine)
elif engine[0] == "btor":
import sby_engine_btor
- sby_engine_btor.run("bmc", job, engine_idx, engine)
+ sby_engine_btor.run("bmc", task, engine_idx, engine)
else:
- job.error(f"Invalid engine '{engine[0]}' for bmc mode.")
+ task.error(f"Invalid engine '{engine[0]}' for bmc mode.")
#
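# cover mode: sets the depth/append defaults and dispatches the smtbmc or btor engines
# in "cover" mode.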
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(job):
- job.handle_int_option("depth", 20)
- job.handle_int_option("append", 0)
+def run(task):
+ task.handle_int_option("depth", 20)
+ task.handle_int_option("append", 0)
- for engine_idx in range(len(job.engines)):
- engine = job.engines[engine_idx]
+ for engine_idx in range(len(task.engines)):
+ engine = task.engines[engine_idx]
assert len(engine) > 0
- job.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
- job.makedirs(f"{job.workdir}/engine_{engine_idx}")
+ task.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
+ task.makedirs(f"{task.workdir}/engine_{engine_idx}")
if engine[0] == "smtbmc":
import sby_engine_smtbmc
- sby_engine_smtbmc.run("cover", job, engine_idx, engine)
+ sby_engine_smtbmc.run("cover", task, engine_idx, engine)
elif engine[0] == "btor":
import sby_engine_btor
- sby_engine_btor.run("cover", job, engine_idx, engine)
+ sby_engine_btor.run("cover", task, engine_idx, engine)
else:
- job.error(f"Invalid engine '{engine[0]}' for cover mode.")
+ task.error(f"Invalid engine '{engine[0]}' for cover mode.")
#
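# live mode: only the aiger engine is accepted; the task status starts out as UNKNOWN.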
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(job):
- job.handle_str_option("aigsmt", "yices")
+def run(task):
+ task.handle_str_option("aigsmt", "yices")
- job.status = "UNKNOWN"
+ task.status = "UNKNOWN"
- for engine_idx in range(len(job.engines)):
- engine = job.engines[engine_idx]
+ for engine_idx in range(len(task.engines)):
+ engine = task.engines[engine_idx]
assert len(engine) > 0
- job.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
- job.makedirs(f"{job.workdir}/engine_{engine_idx}")
+ task.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
+ task.makedirs(f"{task.workdir}/engine_{engine_idx}")
if engine[0] == "aiger":
import sby_engine_aiger
- sby_engine_aiger.run("live", job, engine_idx, engine)
+ sby_engine_aiger.run("live", task, engine_idx, engine)
else:
- job.error(f"Invalid engine '{engine[0]}' for live mode.")
+ task.error(f"Invalid engine '{engine[0]}' for live mode.")
#
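# prove mode: initializes the basecase/induction bookkeeping on the task and dispatches the
# smtbmc, aiger, or abc engines; PASS is reported only when both parts succeed.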
import re, os, getopt
-from sby_core import SbyTask
+from sby_core import SbyProc
-def run(job):
- job.handle_int_option("depth", 20)
- job.handle_int_option("append", 0)
- job.handle_str_option("aigsmt", "yices")
+def run(task):
+ task.handle_int_option("depth", 20)
+ task.handle_int_option("append", 0)
+ task.handle_str_option("aigsmt", "yices")
- job.status = "UNKNOWN"
+ task.status = "UNKNOWN"
- job.basecase_pass = False
- job.induction_pass = False
- job.basecase_tasks = list()
- job.induction_tasks = list()
+ task.basecase_pass = False
+ task.induction_pass = False
+ task.basecase_procs = list()
+ task.induction_procs = list()
- for engine_idx in range(len(job.engines)):
- engine = job.engines[engine_idx]
+ for engine_idx in range(len(task.engines)):
+ engine = task.engines[engine_idx]
assert len(engine) > 0
- job.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
- job.makedirs(f"{job.workdir}/engine_{engine_idx}")
+ task.log(f"""engine_{engine_idx}: {" ".join(engine)}""")
+ task.makedirs(f"{task.workdir}/engine_{engine_idx}")
if engine[0] == "smtbmc":
import sby_engine_smtbmc
- sby_engine_smtbmc.run("prove", job, engine_idx, engine)
+ sby_engine_smtbmc.run("prove", task, engine_idx, engine)
elif engine[0] == "aiger":
import sby_engine_aiger
- sby_engine_aiger.run("prove", job, engine_idx, engine)
+ sby_engine_aiger.run("prove", task, engine_idx, engine)
elif engine[0] == "abc":
import sby_engine_abc
- sby_engine_abc.run("prove", job, engine_idx, engine)
+ sby_engine_abc.run("prove", task, engine_idx, engine)
else:
- job.error(f"Invalid engine '{engine[0]}' for prove mode.")
+ task.error(f"Invalid engine '{engine[0]}' for prove mode.")