Browse Source

sync

master
Tom 5 years ago
parent
commit
9ee52233ea
17 changed files with 549 additions and 1164 deletions
  1. +0
    -1
      balanceDataset.py
  2. +0
    -121
      collectCmpStatus.py
  3. +0
    -40
      compareRuns.py
  4. +0
    -141
      comparisonStats.py
  5. +0
    -51
      createEmptyDataset.py
  6. +2
    -1
      create_wmis_result_table.sql
  7. +0
    -61
      generateRandomKsatDataSet.py
  8. +0
    -32
      runMinisatOnDataset.py
  9. +0
    -115
      runWMISquboOnSatInstance.py
  10. +33
    -4
      run_sampler_on_scope.py
  11. +0
    -290
      satUnsatConflictsPerVariableStats.py
  12. +0
    -256
      satUnsatDegreeStats.py
  13. +207
    -5
      testSAT2QUBO.py
  14. +212
    -43
      test_data_extraction.py
  15. +67
    -2
      util/SAT2QUBO.py
  16. +7
    -0
      util/randomSAT.py
  17. +21
    -1
      util/script.py

+ 0
- 1
balanceDataset.py View File

@ -1 +0,0 @@

+ 0
- 121
collectCmpStatus.py View File

@ -1,121 +0,0 @@
#!/usr/bin/env python3
"""Collect match/false-positive/false-negative counts for every run directory
of a dataset comparison and render them as a stacked bar chart.

NOTE: indentation was restored from an extraction that stripped it; fixed the
user-facing typos "direcotry"/"Verlgeich" and the non-context-managed file read.
"""
import argparse
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
import configparser

from util import script as scriptUtils


def main():
    args = __parseArguments()
    __stats(args["comparisonDir"], args["outputDir"])


def __parseArguments():
    """Parse CLI args; resolve comparison/output dirs relative to the dataset dir."""
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                  help="the directory with all comparison files", type=str)
    parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir",
                  help="Directory to store the stats", type=str)

    arguments = parser.parse()
    arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"])
    arguments["comparisonDir"] = os.path.join(arguments["datasetDir"],
                                              arguments["comparisonDir"])
    arguments["outputDir"] = os.path.join(arguments["datasetDir"],
                                          arguments["outputDir"])
    return arguments


def __stats(comparisonDir, outputDir):
    """Write one stats figure per run* sub-directory of comparisonDir."""
    runs = glob.glob(os.path.join(comparisonDir, "run*"))
    for run in runs:
        stats = __collectStats(run)
        runOutputDir = os.path.join(outputDir, os.path.basename(run))
        __writeStats(stats, runOutputDir)


def __collectStats(comparisonDir):
    """Count matches / false positives / false negatives over all *.cmp files."""
    files = glob.glob(os.path.join(comparisonDir, "*.cmp"))

    stats = {"match": 0, "false_positive": 0, "false_negative": 0, "unsat": 0}

    for path in files:
        comparison = __readComparison(path)

        minisat_satisfiable = comparison["minisat_satisfiable"]
        qubo_satisfiable = comparison["qubo_satisfiable"]

        if minisat_satisfiable == qubo_satisfiable:
            stats["match"] += 1
        elif not minisat_satisfiable and qubo_satisfiable:
            stats["false_positive"] += 1
        elif minisat_satisfiable and not qubo_satisfiable:
            stats["false_negative"] += 1

        if not minisat_satisfiable:
            stats["unsat"] += 1

    return stats


def __readComparison(path):
    """Load one comparison JSON document."""
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)


def __writeStats(stats, outputDir):
    """Render the counts as a stacked bar chart and save it as stats.png."""
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    fig = plt.figure()
    ax = fig.add_subplot(111)

    numInstances = stats["match"] + stats["false_negative"] + stats["false_positive"]

    matchBar = ax.bar(x=0, height=stats["match"])
    falsePositiveBar = ax.bar(x=1, height=stats["false_positive"])
    # false negatives are stacked on top of the false positives
    falseNegativeBar = ax.bar(x=1,
                              height=stats["false_negative"],
                              bottom=stats["false_positive"])

    ax.axhline(y=stats["match"], linestyle="--", color="gray")
    ax.axhline(y=stats["false_negative"], linestyle="--", color="gray")

    plt.ylabel("SAT Instanzen")
    plt.title("Vergleich Minisat / WMIS qubo mit qbsolv")
    plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis"))
    plt.yticks([0, stats["match"], stats["false_negative"], numInstances])
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Gleiches Ergebnis",
                "False Positive",
                "False Negative"))

    plt.savefig(os.path.join(outputDir, "stats.png"))


if __name__ == "__main__":
    main()

+ 0
- 40
compareRuns.py View File

@ -1,40 +0,0 @@
#!/usr/bin/env python3
"""Compare the WMIS result runs of every instance of a dataset."""
from util import script as scriptUtils
from util import compare
import glob
import os


def __main():
    args = __parseArguments()
    __compareRuns(args)


def __parseArguments():
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    return parser.parse()


def __compareRuns(args):
    """Collect all instance files and run directories, then compare per instance."""
    instancePaths = glob.glob(os.path.join(args["dataset_dir"],
                                           args["instance_dir"],
                                           "*.dimacs"))

    runDirs = glob.glob(os.path.join(args["dataset_dir"],
                                     args["wmis_result_dir"],
                                     "run*"))

    for path in instancePaths:
        __compareRunsOfInstance(path, runDirs)


def __compareRunsOfInstance(instancePath, runDirs):
    instanceName = os.path.basename(instancePath)

    with open(instancePath) as instanceFile:
        # NOTE(review): the body of this function is truncated in the source;
        # the actual per-run comparison (presumably via util.compare and
        # runDirs) is missing and needs to be restored from history.
        pass


if __name__ == "__main__":
    __main()

+ 0
- 141
comparisonStats.py View File

@ -1,141 +0,0 @@
#!/usr/bin/env python3
"""Collect per-category instance lists and counts for every run directory of a
dataset comparison, dump the raw stats as JSON and render a bar chart.

NOTE: indentation was restored from an extraction that stripped it; fixed the
user-facing typos "direcotry"/"Verlgeich" and the non-context-managed file read.
"""
import argparse
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
import configparser

import util.script as scriptUtils


def main():
    args = __parseArguments()
    __stats(args["comparisonDir"], args["outputDir"])


def __parseArguments():
    """Parse CLI args; resolve comparison/output dirs relative to the dataset dir."""
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                  help="the directory with all comparison files", type=str)
    parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir",
                  help="Directory to store the stats", type=str)

    arguments = parser.parse()
    arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"])
    arguments["comparisonDir"] = os.path.join(arguments["datasetDir"],
                                              arguments["comparisonDir"])
    arguments["outputDir"] = os.path.join(arguments["datasetDir"],
                                          arguments["outputDir"])
    return arguments


def __stats(comparisonDir, outputDir):
    """Write one stats dump + figure per run* sub-directory of comparisonDir."""
    runs = glob.glob(os.path.join(comparisonDir, "run*"))
    for run in runs:
        stats = __collectStats(run)
        print(stats)
        runOutputDir = os.path.join(outputDir, os.path.basename(run))
        __writeStats(stats, runOutputDir)


def __collectStats(comparisonDir):
    """Count and record the instance names per category over all *.cmp files."""
    files = glob.glob(os.path.join(comparisonDir, "*.cmp"))

    stats = {category: {"count": 0, "instances": []}
             for category in ("match", "false_positive", "false_negative", "unsat")}

    for path in files:
        comparison = __readComparison(path)

        minisat_satisfiable = comparison["minisat_satisfiable"]
        qubo_satisfiable = comparison["qubo_satisfiable"]
        instanceName = str(os.path.basename(path)).split(".")[0]

        if minisat_satisfiable == qubo_satisfiable:
            stats["match"]["count"] += 1
            stats["match"]["instances"].append(instanceName)
        elif not minisat_satisfiable and qubo_satisfiable:
            stats["false_positive"]["count"] += 1
            stats["false_positive"]["instances"].append(instanceName)
        elif minisat_satisfiable and not qubo_satisfiable:
            stats["false_negative"]["count"] += 1
            stats["false_negative"]["instances"].append(instanceName)

        if not minisat_satisfiable:
            stats["unsat"]["count"] += 1
            stats["unsat"]["instances"].append(instanceName)

    return stats


def __readComparison(path):
    """Load one comparison JSON document."""
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)


def __writeStats(stats, outputDir):
    """Dump raw stats as JSON and render the category counts as a bar chart."""
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    with open(os.path.join(outputDir, "statusCollection"), "w+") as statusFile:
        statusFile.write(json.dumps(stats))

    fig = plt.figure()
    ax = fig.add_subplot(111)

    matchCount = stats["match"]["count"]
    falseNegativeCount = stats["false_negative"]["count"]
    falsePositiveCount = stats["false_positive"]["count"]
    numInstances = matchCount + falseNegativeCount + falsePositiveCount

    matchBar = ax.bar(x=0, height=matchCount)
    falsePositiveBar = ax.bar(x=1, height=falsePositiveCount)
    # false negatives are stacked on top of the false positives
    falseNegativeBar = ax.bar(x=1,
                              height=falseNegativeCount,
                              bottom=falsePositiveCount)

    ax.axhline(y=matchCount, linestyle="--", color="gray")
    ax.axhline(y=falseNegativeCount, linestyle="--", color="gray")

    plt.ylabel("SAT Instanzen")
    plt.title("Vergleich Minisat / WMIS qubo mit qbsolv")
    plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis"))
    plt.yticks([0, matchCount, falseNegativeCount, numInstances])
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Gleiches Ergebnis",
                "False Positive",
                "False Negative"))

    plt.savefig(os.path.join(outputDir, "stats.png"))


if __name__ == "__main__":
    main()

+ 0
- 51
createEmptyDataset.py View File

@ -1,51 +0,0 @@
#!/usr/bin/env python3
"""Create the directory skeleton and dataset.config file of an empty dataset."""
import os
import configparser
import argparse


def main():
    args = __parseArguments()

    config = configparser.ConfigParser()

    # canonical sub-directory layout of a dataset
    dirs = {"INSTANCE_DIR": "instances",
            "MINISAT_RESULT_DIR": "minisatResults",
            "WMIS_RESULT_DIR": "wmisResults",
            "COMPARISON_DIR": "comparison"}
    dirs["COMPARISON_STATS_DIR"] = os.path.join(dirs["COMPARISON_DIR"], "stats")

    config["STRUCTURE"] = dirs

    # create the dataset root first, then the sub-directories; COMPARISON_DIR
    # must precede COMPARISON_STATS_DIR (its child)
    os.mkdir(args["dir"])
    for key in ("INSTANCE_DIR",
                "MINISAT_RESULT_DIR",
                "WMIS_RESULT_DIR",
                "COMPARISON_DIR",
                "COMPARISON_STATS_DIR"):
        os.mkdir(os.path.join(args["dir"], dirs[key]))

    # the with-statement closes the file; the old explicit close() was redundant
    with open(os.path.join(args["dir"], "dataset.config"), "w") as configfile:
        config.write(configfile)


def __parseArguments():
    """Read the target directory from the CLI or interactively."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory",
                        help="the directory for the new dataset", type=str)
    args = parser.parse_args()

    arguments = {}
    print(args)

    arguments["dir"] = args.directory
    if arguments["dir"] is None:
        arguments["dir"] = str(input("Directory: "))
    arguments["dir"] = os.path.abspath(arguments["dir"])

    return arguments


if __name__ == "__main__":
    main()

+ 2
- 1
create_wmis_result_table.sql View File

@ -1,4 +1,5 @@
create table c42_vLogistic_6_wmis_qpu_results (
create table c42_vLogistic_6_wmis_4_qpu_results (
result_id char(24), result_id char(24),
run int, run int,
instance_id char(24), instance_id char(24),


+ 0
- 61
generateRandomKsatDataSet.py View File

@ -1,61 +0,0 @@
#!/usr/bin/env python3
"""Generate a dataset of random kSAT instances in DIMACS format."""
from util import randomSAT
from util import kSAT
import argparse
import configparser
import os


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--base_directory", help="the base directory of the new dataset; should contain a dataset.config file", type=str)
    parser.add_argument("-i", "--instances", help="number of random kSAT instances", type=int)
    parser.add_argument("-v", "--variables", help="number of variables in ksat instances", type=int)
    parser.add_argument("-c", "--clauses", help="number of clauses in ksat instances", type=int)
    parser.add_argument("--variables_per_clause", help="variables per clause in ksat instances", type=int, default=3)
    parser.add_argument("-o", "--output", help="output directory", type=str)

    args = parser.parse_args()

    # BUG FIX: `config` used to be assigned only when --base_directory was
    # given, which made the lookup below crash with a NameError otherwise;
    # an empty parser is a safe default.
    config = configparser.ConfigParser()
    baseDir = args.base_directory
    if baseDir is not None:
        config = __readConfig(os.path.join(baseDir, "dataset.config"))

    numberOfVariables = args.variables
    if numberOfVariables is None:
        numberOfVariables = int(input("Number of variables per instance: "))

    numberOfClauses = args.clauses
    if numberOfClauses is None:
        numberOfClauses = int(input("Number of clauses per instance: "))

    numberOfInstances = args.instances
    if numberOfInstances is None:
        numberOfInstances = int(input("Number of instances: "))

    # output directory: dataset config wins, then -o, then interactive prompt.
    # has_option() also handles a missing [STRUCTURE] section, which the old
    # `"instance_dir" in config["STRUCTURE"]` lookup did not.
    if config.has_option("STRUCTURE", "instance_dir"):
        instanceDir = os.path.join(baseDir, config["STRUCTURE"]["instance_dir"])
    elif args.output is not None:
        instanceDir = args.output
    else:
        instanceDir = str(input("output directory: "))

    for i in range(numberOfInstances):
        ksatInstance = randomSAT.generateRandomKSAT(numberOfClauses,
                                                    numberOfVariables,
                                                    args.variables_per_clause)

        instanceFilePath = os.path.join(instanceDir, "instance_%d.dimacs" % (i))
        ksatInstance.writeDIMACS(instanceFilePath)


def __readConfig(configFilePath):
    """Read dataset.config if it exists; otherwise return an empty parser."""
    config = configparser.ConfigParser()
    if os.path.isfile(configFilePath):
        config.read(configFilePath)
    return config


if __name__ == "__main__":
    main()

+ 0
- 32
runMinisatOnDataset.py View File

@ -1,32 +0,0 @@
#!/usr/bin/env python3
"""Run minisat on every instance of a dataset (argument-handling scaffold)."""
from util import script as scrUt
import configparser
import os


def main():
    # BUG FIX: used to call the undefined name __parseArgs()
    args = __parseArguments()
    print(args)


def __parseArguments():
    """Declare the CLI arguments and return the parsed values."""
    parser = scrUt.ArgParser()

    parser.addInstanceDirArg()
    parser.addArg(alias="instanceDir", shortFlag="i", longFlag="instance_dir",
                  help="the directory with all instance files", type=str)
    parser.addArg(alias="outputDir", shortFlag="o", longFlag="output_dir",
                  help="the directory to store the minisat results for each instance",
                  type=str)
    parser.addArg(alias="configFile", shortFlag="c", longFlag="config",
                  help="config file (default: ./satlab.config)",
                  type=str, default=os.path.join(".", "satlab.config"))

    arguments = parser.parse()
    # BUG FIX: the parsed arguments were never returned (main got None)
    return arguments


if __name__ == "__main__":
    main()

+ 0
- 115
runWMISquboOnSatInstance.py View File

@ -1,115 +0,0 @@
#!/usr/bin/env python3
"""Solve one SAT instance by translating it to a WMIS QUBO and sampling it
with qbsolv; results are written as one JSON document per line block."""
from util import kSAT
from util import SAT2QUBO
from util.SATquboResult import SATquboResult
import argparse
from dwave_qbsolv import QBSolv
import os
import collections
import json
from tqdm import tqdm
from util import script as scriptUtils


def main():
    arguments = __parseArguments()

    satInstance = kSAT.kSAT()
    print("reading ksat...")
    satInstance.readDIMACS(arguments["instancePath"])
    print()

    result = __runWMISquboOnSatInstance(satInstance)

    resultPath = os.path.join(arguments["resultDir"],
                              "run%d" % arguments["run"],
                              "%s.out" % arguments["instanceFileName"])

    print()
    print("writing results to file...")
    __writeResult(result, resultPath)


def __parseArguments():
    parser = scriptUtils.ArgParser()

    parser.addInstanceDirArg()
    parser.addArg(alias="instancePath", shortFlag="i", longFlag="instance",
                  help="instance file, has to be in DIMACS format", type=str)
    parser.addArg(alias="resultDir", shortFlag="o", longFlag="wmis_result_dir",
                  help="the wmis result directory", type=str,
                  ignoreDatabaseConfig=False)
    parser.addArg(alias="run", shortFlag="r", longFlag="run",
                  help="results will get saved unter [instance]_[run].out", type=int)

    arguments = parser.parse()
    arguments["instanceFileName"] = os.path.basename(arguments["instancePath"])

    return arguments


def __runWMISquboOnSatInstance(satInstance):
    """Translate to a WMIS QUBO, run qbsolv on it and pack the samples."""
    print("generating wmis qubo...")
    qubo = SAT2QUBO.WMISdictQUBO(satInstance)
    print()

    # typo fix: message used to read "gbsolv"
    print("running qbsolv...")
    qbresult = QBSolv().sample_qubo(Q=qubo, find_max=True)
    print()

    print("packing results...")
    results = __packResults(satInstance, qbresult)

    return results


def __packResults(satInstance, qbresult):
    """Wrap every qbsolv sample in a SATquboResult with its occurrence count."""
    results = []

    samples = list(qbresult.samples())
    occurrences = qbresult.data_vectors["num_occurrences"]

    for i in tqdm(range(len(samples))):
        quboResult = __satQuboResultFromSample(samples[i])
        quboResult.setOccurrences(occurrences[i])
        quboResult.setSatisfiesInstance(satInstance)
        results.append(quboResult)

    return results


def __writeResult(results, resultPath):
    """Write one JSON document per result, separated by blank lines."""
    resultDir = os.path.dirname(resultPath)
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)

    with open(resultPath, "w+") as resultFile:
        for result in tqdm(results):
            resultFile.write(json.dumps(result.toPrimitive()))
            resultFile.write("\n\n")


def __satQuboResultFromSample(sample):
    """Convert a raw sample dict ({binding: 0|1}) into a SATquboResult."""
    result = SATquboResult()
    for binding, value in sample.items():
        result.addBinding(binding, value == 1)
    return result


if __name__ == "__main__":
    main()

+ 33
- 4
run_sampler_on_scope.py View File

@ -16,6 +16,7 @@ from tqdm import tqdm
__QUBO__ = 1 __QUBO__ = 1
__ISING__ = 2 __ISING__ = 2
def main(): def main():
mode = __get_mode() mode = __get_mode()
@ -29,6 +30,18 @@ def main():
__run_siman(ising_qubo_collection, model_type, result_collection) __run_siman(ising_qubo_collection, model_type, result_collection)
elif mode == "QPU": elif mode == "QPU":
__run_qpu(ising_qubo_collection, model_type, result_collection) __run_qpu(ising_qubo_collection, model_type, result_collection)
def __get_negation():
    """Interactively ask whether the QUBO energies should be negated."""
    print("qubo_negation:")
    print("(t)rue ")
    print("(f)alse")

    answer = input()

    # only an explicit "t" selects negation; everything else means False
    return answer == "t"
def __get_mode(): def __get_mode():
print("choose mode:") print("choose mode:")
@ -67,7 +80,11 @@ def __run_siman(ising_qubo_collection, model_type, result_collection):
target_graph.nodes(), target_graph.nodes(),
target_graph.edges()) target_graph.edges())
__run_on_scope(solver_input_query, db[result_collection], chimera_sampler, model_type)
__run_on_scope(solver_input_query,
db[result_collection],
chimera_sampler,
model_type=model_type,
negate=__get_negation())
def __run_qpu(ising_qubo_collection, model_type, result_collection): def __run_qpu(ising_qubo_collection, model_type, result_collection):
@ -88,7 +105,8 @@ def __run_qpu(ising_qubo_collection, model_type, result_collection):
db[result_collection], db[result_collection],
base_solver, base_solver,
model_type, model_type,
solver_args)
negate=__get_negation(),
solver_args=solver_args)
@ -109,6 +127,7 @@ def __run_on_scope(solver_input_query,
result_collection, result_collection,
base_solver, base_solver,
model_type, model_type,
negate=False,
solver_args={}): solver_args={}):
run = int(input("save as run (numbered): ")) run = int(input("save as run (numbered): "))
@ -116,10 +135,12 @@ def __run_on_scope(solver_input_query,
for solver_input in tqdm(solver_input_query): for solver_input in tqdm(solver_input_query):
embedding = solver_input["embeddings"][0] embedding = solver_input["embeddings"][0]
qubo = solver_input["qubo"]
qubo = __negate_qubo(solver_input["qubo"]) if negate else solver_input["qubo"]
solver = FixedEmbeddingComposite(base_solver, embedding) solver = FixedEmbeddingComposite(base_solver, embedding)
res = None
if model_type == __QUBO__: if model_type == __QUBO__:
res = solver.sample_qubo(qubo, **solver_args) res = solver.sample_qubo(qubo, **solver_args)
elif model_type == __ISING__: elif model_type == __ISING__:
@ -132,6 +153,14 @@ def __run_on_scope(solver_input_query,
solver_input, solver_input,
emb_list_index = 0, emb_list_index = 0,
run = run) run = run)
def __negate_qubo(qubo):
    """Return a copy of the QUBO with the sign of every coupler energy flipped."""
    return {coupler: -energy for coupler, energy in qubo.items()}


+ 0
- 290
satUnsatConflictsPerVariableStats.py View File

@ -1,290 +0,0 @@
#!/usr/bin/env python3
"""Statistics over the number of conflicts per variable, split by whether the
instance was satisfiable, unsatisfiable or a false negative."""
import argparse
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
import collections

# NOTE(review): changed from `from script import script as scriptUtils` for
# consistency with the sibling scripts, which all import this helper from the
# util package — confirm no top-level `script` package exists.
from util import script as scriptUtils


def main():
    args = __parseArguments()
    print(args)
    __stats(args["comparisonDir"], args["outputDir"])


def __parseArguments():
    """Declare and parse the CLI arguments."""
    argParser = scriptUtils.ArgParser()

    argParser.addInstanceDirArg()
    argParser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                     help="the directory with all comparison files", type=str)
    argParser.addArg(alias="outputDir", shortFlag="o", longFlag="comparison_stats_dir",
                     help="Directory to store the stats", type=str)

    return argParser.parse()
def __stats(comparisonDir, outputDir):
    """Collect per-instance conflict stats from all *.cmp files and render them."""
    stats = __collectStats(comparisonDir)
    __writeStats(stats, outputDir)


def __collectStats(comparisonDir):
    """Parse every comparison file into a per-instance stats dict."""
    files = glob.glob(os.path.join(comparisonDir, "*.cmp"))

    stats = []
    for path in files:
        comparison = __readComparison(path)
        stats.append(__processSingleInstance(comparison))

    return stats
def __processSingleInstance(comparison):
    """Reduce one comparison document to summary stats of its conflict counts."""
    instanceStats = {}

    conflicts = comparison["conflicts_per_variable"]
    conflictArr = np.array(list(conflicts.values()))

    instanceStats["conflicts_per_variable_mean"] = conflictArr.mean()
    instanceStats["conflicts_per_variable_median"] = np.median(conflictArr)
    instanceStats["conflicts_per_variable_std_dev"] = np.std(conflictArr)
    instanceStats["conflicts_per_variable_max"] = conflictArr.max()
    instanceStats["conflicts_per_variable_min"] = conflictArr.min()
    instanceStats["conflicts_per_instance"] = np.sum(conflictArr)
    instanceStats["raw_conflicts"] = list(conflictArr)
    # BUG FIX: the two arguments were passed in swapped order; the callee is
    # declared as (degreesPerVariable, conflictsPerVariable) and would
    # otherwise divide the degree by the squared half-conflict count.
    instanceStats["conflicts_to_degree_per_variable"] = __calcConflictsToDegree(
        comparison["degrees_of_variables"], conflicts)

    if comparison["minisat_satisfiable"]:
        if __instanceIsFalseNegative(comparison):
            instanceStats["result"] = "false_negative"
        else:
            instanceStats["result"] = "satisfiable"
    else:
        instanceStats["result"] = "unsatisfiable"

    return instanceStats
def __calcConflictsToDegree(degreesPerVariable, conflictsPerVariable):
    """Per variable: conflicts divided by the squared half-degree, keeping
    only ratios that do not exceed 1."""
    ratios = []
    for varLabel, degree in degreesPerVariable.items():
        ratio = conflictsPerVariable[varLabel] / (degree / 2.0) ** 2
        if ratio <= 1:
            ratios.append(ratio)
    return ratios
def __instanceIsFalseNegative(comparison):
    """True iff minisat found the instance satisfiable but the QUBO did not."""
    return (comparison["minisat_satisfiable"],
            comparison["qubo_satisfiable"]) == (True, False)
def __readComparison(path):
    """Load a single comparison JSON document from disk."""
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Render all conflict-statistics figures and save them to outputDir."""
    # robustness: make sure the target directory exists before saving
    os.makedirs(outputDir, exist_ok=True)

    data = __seperateMatchesAndFalseNegatives(stats)

    overviewFig = __createOverviewFig(data)
    meanFig = __createSingleStatFig(data["mean"], "Conflicts per variable mean")
    medianFig = __createSingleStatFig(data["median"], "Conflicts per variable median")
    maxFig = __createSingleStatFig(data["max"], "Conflicts per variable max")
    minFig = __createSingleStatFig(data["min"], "Conflicts per variable min")
    stdDevFig = __createSingleStatFig(data["std_dev"], "Conflicts per variable\nstandard deviation")
    cnflPerInstFig = __createSingleStatFig(data["cnfl_per_inst"], "Conflicts per instance")
    cnflDegFig1 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=False)
    cnflDegFig2 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=True)
    histFig = __createHistogramFig(data, "raw", "Conflict per variable")

    figures = [overviewFig, meanFig, medianFig, maxFig, minFig,
               stdDevFig, cnflPerInstFig, cnflDegFig1, cnflDegFig2]

    __setBatchXticks(figures=figures,
                     ticks=[1, 2, 3],
                     labels=["satisfiable",
                             "false negative",
                             "unsatisfiable"])
    __setBatchXtickLabelRotation(figures=figures, rotation=30)

    overviewFig.savefig(os.path.join(outputDir, "conflicts_overview.png"))
    meanFig.savefig(os.path.join(outputDir, "conflicts_mean.png"))
    medianFig.savefig(os.path.join(outputDir, "conflicts_median.png"))
    maxFig.savefig(os.path.join(outputDir, "conflicts_max.png"))
    minFig.savefig(os.path.join(outputDir, "conflicts_min.png"))
    stdDevFig.savefig(os.path.join(outputDir, "conflicts_std_dev.png"))
    cnflPerInstFig.savefig(os.path.join(outputDir, "conflicts_per_instance.png"))
    histFig.savefig(os.path.join(outputDir, "conflicts_per_var_hist.png"))
    cnflDegFig1.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_1.png"))
    cnflDegFig2.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_2.png"))
def __createOverviewFig(data):
    """Boxplots of mean/median/max/min conflicts, one subplot each, sharing the y axis."""
    fig = plt.figure()

    ax0 = fig.add_subplot(141)
    ax0.boxplot([data["mean"]["satisfiable"],
                 data["mean"]["false_negative"],
                 data["mean"]["unsatisfiable"]])
    ax0.set_title("mean")

    ax1 = fig.add_subplot(142, sharey=ax0)
    ax1.boxplot([data["median"]["satisfiable"],
                 data["median"]["false_negative"],
                 data["median"]["unsatisfiable"]])
    ax1.set_title("median")

    ax2 = fig.add_subplot(143, sharey=ax0)
    ax2.boxplot([data["max"]["satisfiable"],
                 data["max"]["false_negative"],
                 data["max"]["unsatisfiable"]])
    ax2.set_title("max degree")

    ax3 = fig.add_subplot(144, sharey=ax0)
    ax3.boxplot([data["min"]["satisfiable"],
                 data["min"]["false_negative"],
                 data["min"]["unsatisfiable"]])
    ax3.set_title("min degree")

    fig.set_size_inches(12, 8)
    fig.suptitle("Conflicts per variable overview", fontsize=16)

    return fig
def __createHistogramFig(data, subDataSet, title):
    """Histogram + horizontal boxplot per outcome class for one sub-dataset."""
    fig = plt.figure()

    # bin width of roughly 5 conflicts, derived from the satisfiable maximum
    bins = int(max(data[subDataSet]["satisfiable"]) / 5)

    ax0 = fig.add_subplot(321)
    ax0.hist(data[subDataSet]["satisfiable"], bins=bins)
    ax0_2 = fig.add_subplot(322)
    ax0_2.boxplot(data[subDataSet]["satisfiable"], vert=False)

    ax1 = fig.add_subplot(323, sharex=ax0)
    ax1.hist(data[subDataSet]["false_negative"], bins=bins)
    ax1_2 = fig.add_subplot(324, sharex=ax0_2)
    ax1_2.boxplot(data[subDataSet]["false_negative"], vert=False)

    ax2 = fig.add_subplot(325, sharex=ax0)
    ax2.hist(data[subDataSet]["unsatisfiable"], bins=bins)
    ax2_2 = fig.add_subplot(326, sharex=ax0_2)
    ax2_2.boxplot(data[subDataSet]["unsatisfiable"], vert=False)

    fig.set_size_inches(14, 10)
    fig.suptitle(title, fontsize=16)

    return fig
def __createSingleStatFig(subDataset, title, showfliers=True):
    """One boxplot figure comparing the three outcome classes for one metric."""
    fig = plt.figure()

    ax = fig.add_subplot(111)
    ax.boxplot([subDataset["satisfiable"],
                subDataset["false_negative"],
                subDataset["unsatisfiable"]], showfliers=showfliers)

    fig.set_size_inches(3.5, 8)
    fig.suptitle(title, fontsize=16)

    return fig
def __setBatchXticks(figures, ticks, labels):
    """Apply the same x ticks/labels to every axes of every given figure."""
    for fig in figures:
        plt.setp(fig.get_axes(), xticks=ticks, xticklabels=labels)


def __setBatchXtickLabelRotation(figures, rotation):
    """Rotate the x tick labels of every axes of every given figure."""
    for fig in figures:
        for ax in fig.get_axes():
            plt.setp(ax.get_xticklabels(), rotation=rotation)
def __seperateMatchesAndFalseNegatives(stats):
    """Regroup the per-instance stats into per-metric lists keyed by outcome class."""
    classes = ("false_negative", "satisfiable", "unsatisfiable")

    # metric name -> (per-instance key, True if the value is a list to extend)
    layout = {
        "mean": ("conflicts_per_variable_mean", False),
        "median": ("conflicts_per_variable_median", False),
        "std_dev": ("conflicts_per_variable_std_dev", False),
        "max": ("conflicts_per_variable_max", False),
        "min": ("conflicts_per_variable_min", False),
        "cnfl_per_inst": ("conflicts_per_instance", False),
        "raw": ("raw_conflicts", True),
        "cnflDeg": ("conflicts_to_degree_per_variable", True),
    }

    data = {metric: {cls: [] for cls in classes} for metric in layout}

    for instance in stats:
        target = instance["result"]
        for metric, (key, isList) in layout.items():
            if isList:
                data[metric][target].extend(instance[key])
            else:
                data[metric][target].append(instance[key])

    return data


if __name__ == "__main__":
    main()

+ 0
- 256
satUnsatDegreeStats.py View File

@ -1,256 +0,0 @@
#!/usr/bin/env python3
import argparse
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
import collections
def main():
    args = __parseArguments()
    __stats(args["comparisonDir"], args["outputDir"])


def __parseArguments():
    """Read comparison and output directories from the CLI or interactively."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", help="the directory with all comparison files", type=str)
    parser.add_argument("-o", "--output", help="Directory to store the stats", type=str)
    args = parser.parse_args()

    arguments = {}
    print(args)

    arguments["comparisonDir"] = args.directory
    if arguments["comparisonDir"] is None:
        arguments["comparisonDir"] = str(input("Comparison directory: "))
    arguments["comparisonDir"] = os.path.abspath(arguments["comparisonDir"])

    arguments["outputDir"] = args.output
    if arguments["outputDir"] is None:
        arguments["outputDir"] = str(input("Output directory: "))
    arguments["outputDir"] = os.path.abspath(arguments["outputDir"])

    return arguments
def __stats(comparisonDir, outputDir):
    """Collect per-instance degree stats from all *.cmp files and render them."""
    stats = __collectStats(comparisonDir)
    __writeStats(stats, outputDir)


def __collectStats(comparisonDir):
    """Parse every comparison file into a per-instance stats dict."""
    files = glob.glob(os.path.join(comparisonDir, "*.cmp"))

    stats = []
    for path in files:
        comparison = __readComparison(path)
        stats.append(__processSingleInstance(comparison))

    return stats
def __processSingleInstance(comparison):
    """Reduce one comparison document to summary stats of its variable degrees."""
    instanceStats = {}

    degrees = comparison["degrees_of_variables"]
    degreeArr = np.array(list(degrees.values()))

    instanceStats["degree_of_variables_mean"] = degreeArr.mean()
    instanceStats["degree_of_variables_median"] = np.median(degreeArr)
    instanceStats["degree_of_variables_std_dev"] = np.std(degreeArr)
    instanceStats["degree_of_variables_max"] = degreeArr.max()
    instanceStats["degree_of_variables_min"] = degreeArr.min()
    instanceStats["variables_per_degree"] = __getVarsPerDegree(degreeArr)

    if comparison["minisat_satisfiable"]:
        if __instanceIsFalseNegative(comparison):
            instanceStats["result"] = "false_negative"
        else:
            instanceStats["result"] = "satisfiable"
    else:
        instanceStats["result"] = "unsatisfiable"

    return instanceStats
def __instanceIsFalseNegative(comparison):
    """True iff minisat found the instance satisfiable but the QUBO did not."""
    return (comparison["minisat_satisfiable"],
            comparison["qubo_satisfiable"]) == (True, False)
def __getVarsPerDegree(degreeArr):
    """Histogram: degree value -> number of variables having that degree."""
    return dict(collections.Counter(degreeArr))
def __readComparison(path):
    """Load a single comparison JSON document from disk."""
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Render degree statistics as three figures and save them to outputDir."""
    data = __seperateMatchesAndFalseNegatives(stats)

    # figure 1: boxplots of mean/median/max/min degrees per outcome class
    fig1 = plt.figure()

    ax0 = fig1.add_subplot(141)
    ax0.boxplot([data["mean"]["satisfiable"],
                 data["mean"]["false_negative"],
                 data["mean"]["unsatisfiable"]])
    ax0.set_title("mean")

    ax1 = fig1.add_subplot(142, sharey=ax0)
    ax1.boxplot([data["median"]["satisfiable"],
                 data["median"]["false_negative"],
                 data["median"]["unsatisfiable"]])
    ax1.set_title("median")

    ax2 = fig1.add_subplot(143, sharey=ax0)
    ax2.boxplot([data["max"]["satisfiable"],
                 data["max"]["false_negative"],
                 data["max"]["unsatisfiable"]])
    ax2.set_title("max degree")

    ax3 = fig1.add_subplot(144, sharey=ax0)
    ax3.boxplot([data["min"]["satisfiable"],
                 data["min"]["false_negative"],
                 data["min"]["unsatisfiable"]])
    ax3.set_title("min degree")

    # figure 2: standard deviation boxplot
    fig2 = plt.figure()
    ax4 = fig2.add_subplot(111)
    ax4.boxplot([data["std_dev"]["satisfiable"],
                 data["std_dev"]["false_negative"],
                 data["std_dev"]["unsatisfiable"]])
    ax4.set_title("standard deviation")

    # figure 3: degree histograms per outcome class
    _BINS_ = 23
    fig3 = plt.figure()

    ax5 = fig3.add_subplot(311)
    varsPerDegreeSat = __accumulateVarsPerDegree(data["vars_per_degree"]["satisfiable"])
    ax5.hist(varsPerDegreeSat, density=True, bins=_BINS_)

    ax6 = fig3.add_subplot(312, sharex=ax5)
    varsPerDegreeFP = __accumulateVarsPerDegree(data["vars_per_degree"]["false_negative"])
    ax6.hist(varsPerDegreeFP, density=True, bins=_BINS_)

    ax7 = fig3.add_subplot(313, sharex=ax6)
    varsPerDegreeUnsat = __accumulateVarsPerDegree(data["vars_per_degree"]["unsatisfiable"])
    ax7.hist(varsPerDegreeUnsat, density=True, bins=_BINS_)

    plt.setp([ax0, ax1, ax2, ax3, ax4], xticks=[1, 2, 3],
             xticklabels=["satisfiable",
                          "false negative",
                          "unsatisfiable"])
    for ax in (ax0, ax1, ax2, ax3, ax4):
        plt.setp(ax.get_xticklabels(), rotation=45)

    fig1.set_size_inches(12, 8)
    fig1.suptitle("Degrees of variables", fontsize=16)
    fig2.set_size_inches(4, 8)
    fig3.set_size_inches(5, 12)

    fig1.savefig(os.path.join(outputDir, "degrees1.png"))
    fig2.savefig(os.path.join(outputDir, "degrees2.png"))
    fig3.savefig(os.path.join(outputDir, "degrees3.png"))

    plt.show()
def __accumulateVarsPerDegree(listOfVarsPerDegreeDicts):
    """Flatten {degree: count} dicts into one list repeating each degree count times."""
    return [degree
            for varsPerDegree in listOfVarsPerDegreeDicts
            for degree, count in varsPerDegree.items()
            for _ in range(count)]
def __compressVarsPerDegree(listOfVarsPerDegreeDicts):
    """Merge {degree: count} dicts over all instances and normalise the counts
    to frequencies over the total number of variables."""
    compressed = {}
    countOfVars = 0

    for varsPerDegree in listOfVarsPerDegreeDicts:
        for degree, count in varsPerDegree.items():
            compressed[degree] = compressed.get(degree, 0.0) + float(count)
            countOfVars += count

    # sanity check printed to stdout: the frequencies should sum up to 1
    check = 0
    for degree in compressed:
        compressed[degree] /= countOfVars
        check += compressed[degree]

    print("check: ", check)

    return compressed
def __seperateMatchesAndFalseNegatives(stats):
    """Regroup per-instance degree statistics by solver outcome.

    Returns a dict keyed by metric name ("mean", "median", "std_dev",
    "max", "min", "vars_per_degree"); each metric maps an outcome
    ("false_negative", "satisfiable", "unsatisfiable") to the list of that
    metric's values over all instances with that outcome.

    Raises KeyError if an instance's "result" field is not one of the three
    known outcomes (same as the original hand-written grouping).
    """
    outcomes = ("false_negative", "satisfiable", "unsatisfiable")
    # metric name in the returned dict -> field name in each stats record
    metricFields = {"mean": "degree_of_variables_mean",
                    "median": "degree_of_variables_median",
                    "std_dev": "degree_of_variables_std_dev",
                    "max": "degree_of_variables_max",
                    "min": "degree_of_variables_min",
                    "vars_per_degree": "variables_per_degree"}
    data = {metric: {outcome: [] for outcome in outcomes}
            for metric in metricFields}
    for instance in stats:
        outcome = instance["result"]
        for metric, field in metricFields.items():
            data[metric][outcome].append(instance[field])
    return data
# Script entry point: run the statistics collection when executed directly.
if __name__ == "__main__":
    main()

+ 207
- 5
testSAT2QUBO.py View File

@ -4,18 +4,21 @@ from util import SAT2QUBO as s2q
from util import randomSAT as rs from util import randomSAT as rs
from util import queries from util import queries
from util import graph from util import graph
from util import script
import networkx as nx import networkx as nx
import dwave_networkx as dnx import dwave_networkx as dnx
import minorminer import minorminer
from dwave_qbsolv import QBSolv from dwave_qbsolv import QBSolv
import dimod import dimod
from dwave.system.composites import FixedEmbeddingComposite
from neal import SimulatedAnnealingSampler
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import seaborn as sns import seaborn as sns
import numpy as np import numpy as np
import re import re
from tqdm import tqdm
def frange(start, stop, steps): def frange(start, stop, steps):
while start < stop: while start < stop:
@ -24,6 +27,109 @@ def frange(start, stop, steps):
def test_kv_range(): def test_kv_range():
k = 75
v = 40
data = {
"p_node_counts" : [],
"wmis_node_counts" : [],
"p_conn_counts" : [],
"wmis_conn_counts" : [],
"p_avrg_connectivity" : [],
"wmis_avrg_connectivity" : [],
"a_arr" : [],
"k_arr" : []
}
target = dnx.chimera_graph(25, 25, 4)
print(target.number_of_nodes())
for r in range(2):
#for k in range(50, 5001, 50):
for a in frange(0.5, 8, 0.5):
#v = int(k/4)
#a = k/v
#v = int(k/a)
k = int(a*v)
print("k={} v={} k/v={}".format(k, v, k/v))
ksatInstance = rs.generateRandomKSAT(k, v, 3)
p_qubo = s2q.WMISdictQUBO_2(ksatInstance)
wmis_qubo = s2q.WMISdictQUBO(ksatInstance)
qg = nx.Graph()
for coupler, energy in wmis_qubo.items():
if coupler[0] != coupler[1]:
qg.add_edge(coupler[0], coupler[1], weight=energy)
ig = nx.Graph()
for coupler, energy in p_qubo.items():
if coupler[0] != coupler[1]:
ig.add_edge(coupler[0], coupler[1], weight=energy)
p_node_count = ig.number_of_nodes();
p_conn_count = ig.number_of_edges();
wmis_node_count = qg.number_of_nodes();
wmis_conn_count = qg.number_of_edges();
print("p_qubo nodes= {}".format(p_node_count));
print("wwmis qubo nodes= {}".format(wmis_node_count));
print("nodes= {}".format(p_node_count / wmis_node_count));
data["p_node_counts"].append(p_node_count)
data["wmis_node_counts"].append(wmis_node_count)
data["p_conn_counts"].append(p_conn_count)
data["wmis_conn_counts"].append(wmis_conn_count)
print("calculating connectivity...")
data["p_avrg_connectivity"].append(nx.edge_connectivity(ig))
print("calculating connectivity...")
data["wmis_avrg_connectivity"].append(nx.edge_connectivity(qg))
data["a_arr"].append(k/v)
data["k_arr"].append(k)
print()
sns.set()
ax = sns.scatterplot(x="a_arr", y="p_node_counts", data=data, label="p_qubo")
ax.set(xlabel='k/v', ylabel='used verticies')
ax = sns.scatterplot(x="a_arr", y="wmis_node_counts", data=data, ax=ax, label="wmis_qubo")
plt.show()
ax = sns.scatterplot(x="a_arr", y="p_conn_counts", data=data, label="p_qubo")
ax.set(xlabel='k/v', ylabel='used edges')
ax = sns.scatterplot(x="a_arr", y="wmis_conn_counts", data=data, ax=ax, label="wmis_qubo")
plt.show()
ax = sns.scatterplot(x="a_arr", y="p_avrg_connectivity", data=data, label="p_qubo")
ax.set(xlabel='k/v', ylabel='avrg connectivity')
ax = sns.scatterplot(x="a_arr", y="wmis_avrg_connectivity", data=data, ax=ax, label="wmis_qubo")
plt.show()
def test_kv_range_emb():
k = 75 k = 75
data = { data = {
@ -54,7 +160,7 @@ def test_kv_range():
ksatInstance = rs.generateRandomKSAT(k, v, 3) ksatInstance = rs.generateRandomKSAT(k, v, 3)
p_qubo = s2q.primitiveQUBO_5(ksatInstance)
p_qubo = s2q.WMISdictQUBO_2(ksatInstance)
wmis_qubo = s2q.WMISdictQUBO(ksatInstance) wmis_qubo = s2q.WMISdictQUBO(ksatInstance)
@ -441,6 +547,92 @@ def test3_3():
ising[(node_var, node_nvar)], ising[(node_var, node_nvar)],
assignments[var], assignments[var],
assignments[-var])) assignments[-var]))
def test3_4():
sat_count = 0
steps = 200
for i in tqdm(range(steps)):
sat = rs.generateRandomKSAT(35, 10, 3)
ising = s2q.WMISdictQUBO_2(sat)
res = QBSolv().sample_qubo(ising, find_max=False)
#res = dimod.ExactSolver().sample_qubo(ising)
sample = res.first.sample
assignments = {}
vars = set()
for node, energy in sample.items():
if node[0] == "x":
lit = int(node[1:])
vars.add(abs(lit))
assignments[lit] = energy
model = [True for i in range(len(vars))]
for var in vars:
if var in assignments:
model[var - 1] = True if assignments[var] == 1 else False
elif -var in assignments:
model[var - 1] = True if assignments[-var] == 0 else False
if sat.checkAssignment(model):
sat_count += 1
print("sat percentage: {}".format(sat_count / steps))
def test_wmis():
sat_count = 0
#steps = 100
steps = 1
for i in tqdm(range(steps)):
sat = rs.generateRandomKSAT(35, 10, 3)
target = dnx.chimera_graph(16, 16, 4)
qubo = s2q.WMISdictQUBO(sat)
qg = nx.Graph()
for coupler, energy in qubo.items():
if coupler[0] != coupler[1]:
qg.add_edge(coupler[0], coupler[1], weight=energy)
qemb = minorminer.find_embedding(qg.edges(), target.edges(), return_overlap=True)
chimera_sampler = dimod.StructureComposite(SimulatedAnnealingSampler(),
target.nodes(),
target.edges())
solver = FixedEmbeddingComposite(chimera_sampler, qemb[0])
res = solver.sample_qubo(__negate_qubo(qubo))
#res = solver.sample_qubo(qubo)
#res = dimod.ExactSolver().sample_qubo(ising)
sample = res.first.sample
model = script.majority_vote_sample(sample)
print("energy={}".format(res.first.energy))
if sat.checkAssignment(model):
sat_count += 1
print("sat percentage: {}".format(sat_count / steps))
def test4(): def test4():
sat = rs.generateRandomKSAT(50, 13, 3) sat = rs.generateRandomKSAT(50, 13, 3)
@ -497,8 +689,18 @@ def test4():
used_nodes = np.unique(used_nodes) used_nodes = np.unique(used_nodes)
print("used nodes (embedding) = {}".format(len(used_nodes))) print("used nodes (embedding) = {}".format(len(used_nodes)))
def __negate_qubo(qubo):
negative_qubo = {}
for coupler, energy in qubo.items():
negative_qubo[coupler] = -1 * energy
return negative_qubo
#test3_4()
#test2()
#test_kv_range
test_wmis()
#test3_3()
test2()
#test_kv_range()

+ 212
- 43
test_data_extraction.py View File

@ -4,19 +4,24 @@ import util.script as script
import util.queries as queries import util.queries as queries
import dimod import dimod
import random import random
import numpy as np
from tqdm import tqdm from tqdm import tqdm
def main(): def main():
#instance_parameters() #instance_parameters()
#wmis_results() #wmis_results()
wmis_siman_results_alpha_num_of_assignments()
#wmis_siman_results_alpha_num_of_assignments()
#wmis_qpu_results_alpha_num_of_assignments() #wmis_qpu_results_alpha_num_of_assignments()
#primitive_2_siman_results_alpha_num_of_assignments() #primitive_2_siman_results_alpha_num_of_assignments()
#primitive_5_siman_results_alpha_num_of_assignments() #primitive_5_siman_results_alpha_num_of_assignments()
#primitive_5_qpu_results_alpha_num_of_assignments() #primitive_5_qpu_results_alpha_num_of_assignments()
#primitive_8_siman_results_alpha_num_of_assignments() #primitive_8_siman_results_alpha_num_of_assignments()
#äwmis_2_qpu_results()
#minisat_runs() #minisat_runs()
#wmis_engergy()
wmis_2_engergy()
def instance_parameters(): def instance_parameters():
edb = script.connect_to_experimetns_db() edb = script.connect_to_experimetns_db()
@ -65,9 +70,9 @@ def wmis_siman_results_alpha_num_of_assignments():
idb = script.connect_to_instance_pool() idb = script.connect_to_instance_pool()
q = queries.WMIS_result_scope_query_raw(idb) q = queries.WMIS_result_scope_query_raw(idb)
q.query("c42_vLogistic_6", "wmis_qubos_2_siman")
q.query("c42_vLogistic_6", "wmis_siman_results")
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_1_2_siman_results "
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_siman_results "
"(result_id, " "(result_id, "
" run, " " run, "
" instance_id, " " instance_id, "
@ -76,31 +81,35 @@ def wmis_siman_results_alpha_num_of_assignments():
" num_occurrences, " " num_occurrences, "
" energy, " " energy, "
" satisfiable) " " satisfiable) "
"VALUES (%s, %s, %s, %s,wa %s, %s, %s, %s) ")
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ")
run = int(input("run: "))
for result in tqdm(q): for result in tqdm(q):
sample_set = queries.read_raw_wmis_sample_set(result["data"])
data = script.analyze_wmis_sample(sample_set.first)
if "run" in result and result["run"] == run:
#if result["run"] == run:
sample_set = queries.read_raw_wmis_sample_set(result["data"])
sat = queries.get_instance_by_id(idb["instances"], result["instance"])
data = script.analyze_wmis_sample(sample_set.first)
model = script.majority_vote_sample(sample_set.first.sample)
sat = queries.get_instance_by_id(idb["instances"], result["instance"])
isSatisfiable = sat.checkAssignment(model)
model = script.majority_vote_sample(sample_set.first.sample)
isSatisfiable = sat.checkAssignment(model)
edb_cursor.execute(insert_row, (str(result["_id"]),
int(result["run"]),
str(result["instance"]),
int(data["number_of_assignments"]),
float(data["chain_break_fraction"]),
int(data["num_occurrences"]),
int(data["energy"]),
isSatisfiable))
edb_cursor.execute(insert_row, (str(result["_id"]),
int(result["run"]),
str(result["instance"]),
int(data["number_of_assignments"]),
float(data["chain_break_fraction"]),
int(data["num_occurrences"]),
int(data["energy"]),
isSatisfiable))
edb.commit() edb.commit()
edb_cursor.close() edb_cursor.close()
edb.close() edb.close()
def primitive_2_siman_results_alpha_num_of_assignments(): def primitive_2_siman_results_alpha_num_of_assignments():
edb = script.connect_to_experimetns_db() edb = script.connect_to_experimetns_db()
@ -255,9 +264,9 @@ def primitive_8_siman_results_alpha_num_of_assignments():
idb = script.connect_to_instance_pool() idb = script.connect_to_instance_pool()
q = queries.WMIS_result_scope_query_raw(idb) q = queries.WMIS_result_scope_query_raw(idb)
q.query("c42_vLogistic_6", "wmis_2_qubos_siman")
q.query("c42_vLogistic_6", "wmis_4_qubos_siman")
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_2_2_siman_results "
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_4_siman_results "
"(result_id, " "(result_id, "
" run, " " run, "
" instance_id, " " instance_id, "
@ -294,6 +303,65 @@ def primitive_8_siman_results_alpha_num_of_assignments():
edb.commit() edb.commit()
edb_cursor.close() edb_cursor.close()
edb.close() edb.close()
def wmis_2_qpu_results():
edb = script.connect_to_experimetns_db()
edb_cursor = edb.cursor()
idb = script.connect_to_instance_pool()
q = queries.WMIS_result_scope_query_raw(idb)
q.query("c42_vLogistic_6", "wmis_2_qubos_2_qpu")
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_2_vertex_eq_edge_qpu_results "
"(result_id, "
" run, "
" instance_id, "
" chain_break_fraction, "
" num_occurrences, "
" energy, "
" satisfiable, "
" anneal_time, "
" energy_reach, "
" sample_size) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ")
run = int(input("run: "))
for result in tqdm(q):
if True:
#if result["run"] == run:
sample_set = queries.read_raw_primitive_ising_sample_set(result["data"])
data = script.analyze_wmis_sample(sample_set.first)
sat = queries.get_instance_by_id(idb["instances"], result["instance"])
post_process_results = __post_process_prim_5_sample(sat,
sample_set.first.sample)
anneal_time = result["data"]["info"]["timing"]["qpu_anneal_time_per_sample"]
#print(post_process_results)
energy_reach = abs(sample_set.record["energy"].max() -
sample_set.record["energy"].min())
sample_size = np.sum(sample_set.record["num_occurrences"])
edb_cursor.execute(insert_row, (str(result["_id"]),
int(result["run"]),
str(result["instance"]),
float(data["chain_break_fraction"]),
int(data["num_occurrences"]),
int(data["energy"]),
bool(post_process_results["satisfiable"]),
int(anneal_time),
int(energy_reach),
int(sample_size)))
edb.commit()
edb_cursor.close()
edb.close()
def __post_process_prim_5_sample(sat, sample): def __post_process_prim_5_sample(sat, sample):
post_process_results = {} post_process_results = {}
@ -325,24 +393,24 @@ def __post_process_prim_5_sample(sat, sample):
var_list = list(conflicts)
monte_carlo_steps = 0
if len(conflicts) > 0:
for i in range(1000):
rand_var = random.choice(var_list)
if sat.checkAssignment(model):
monte_carlo_steps
break
model[rand_var - 1] = not model[rand_var - 1]
monte_carlo_steps += 1
#var_list = list(conflicts)
#
#monte_carlo_steps = 0
#if len(conflicts) > 0:
# for i in range(1000):
# rand_var = random.choice(var_list)
#
# if sat.checkAssignment(model):
# monte_carlo_steps
# break
#
# model[rand_var - 1] = not model[rand_var - 1]
#
# monte_carlo_steps += 1
post_process_results["conflicts"] = conflicts post_process_results["conflicts"] = conflicts
post_process_results["satisfiable"] = sat.checkAssignment(model) post_process_results["satisfiable"] = sat.checkAssignment(model)
post_process_results["monte_carlo_steps"] = monte_carlo_steps
#post_process_results["monte_carlo_steps"] = monte_carlo_steps
return post_process_results return post_process_results
@ -364,12 +432,15 @@ def wmis_qpu_results_alpha_num_of_assignments():
" num_occurrences, " " num_occurrences, "
" energy, " " energy, "
" satisfiable, " " satisfiable, "
" anneal_time) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ")
" anneal_time, "
" energy_reach, "
" sample_size) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ")
run = int(input("run: ")) run = int(input("run: "))
for result in tqdm(q): for result in tqdm(q):
if result["run"] == run:
#if result["run"] == run:
if True:
sample_set = queries.read_raw_wmis_sample_set(result["data"]) sample_set = queries.read_raw_wmis_sample_set(result["data"])
data = script.analyze_wmis_sample(sample_set.first) data = script.analyze_wmis_sample(sample_set.first)
@ -381,8 +452,12 @@ def wmis_qpu_results_alpha_num_of_assignments():
isSatisfiable = sat.checkAssignment(model) isSatisfiable = sat.checkAssignment(model)
anneal_time = result["data"]["info"]["timing"]["qpu_anneal_time_per_sample"] anneal_time = result["data"]["info"]["timing"]["qpu_anneal_time_per_sample"]
energy_reach = abs(sample_set.record["energy"].max() -
sample_set.record["energy"].min())
sample_size = np.sum(sample_set.record["num_occurrences"])
edb_cursor.execute(insert_row, (str(result["_id"]), edb_cursor.execute(insert_row, (str(result["_id"]),
int(result["run"]), int(result["run"]),
str(result["instance"]), str(result["instance"]),
@ -391,13 +466,108 @@ def wmis_qpu_results_alpha_num_of_assignments():
int(data["num_occurrences"]), int(data["num_occurrences"]),
int(data["energy"]), int(data["energy"]),
isSatisfiable, isSatisfiable,
int(anneal_time)))
int(anneal_time),
int(energy_reach),
int(sample_size)))
edb.commit() edb.commit()
edb_cursor.close() edb_cursor.close()
edb.close() edb.close()
def wmis_engergy():
edb = script.connect_to_experimetns_db()
edb_cursor = edb.cursor()
idb = script.connect_to_instance_pool()
q = queries.WMIS_result_scope_query_raw(idb)
q.query("c42_vLogistic_6", "wmis_qpu_results")
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_qpu_energy "
"(result_id, "
" sample_index, "
" run, "
" instance_id, "
" energy, "
" anneal_time, "
" sample_size) "
"VALUES (%s, %s, %s, %s, %s, %s, %s) ")
run = int(input("run: "))
for result in tqdm(q):
#if result["run"] == run:
if True:
sample_set = queries.read_raw_wmis_sample_set(result["data"])
anneal_time = result["data"]["info"]["timing"]["qpu_anneal_time_per_sample"]
sample_size = np.sum(sample_set.record["num_occurrences"])
count = 1
for energy, in sample_set.data(fields=["energy"], sorted_by=("energy")):
edb_cursor.execute(insert_row, (str(result["_id"]),
int(count),
int(result["run"]),
str(result["instance"]),
int(energy),
int(anneal_time),
int(sample_size)))
count += 1
edb.commit()
edb_cursor.close()
edb.close()
def wmis_2_engergy():
edb = script.connect_to_experimetns_db()
edb_cursor = edb.cursor()
idb = script.connect_to_instance_pool()
q = queries.WMIS_result_scope_query_raw(idb)
q.query("c42_vLogistic_6", "wmis_2_qubos_2_qpu")
insert_row = ("INSERT INTO c42_vLogistic_6_wmis_2_vertex_eq_edge_qpu_energy "
"(result_id, "
" sample_index, "
" run, "
" instance_id, "
" energy, "
" anneal_time, "
" sample_size) "
"VALUES (%s, %s, %s, %s, %s, %s, %s) ")
run = int(input("run: "))
for result in tqdm(q):
if result["run"] == run:
#if True:
sample_set = queries.read_raw_wmis_sample_set(result["data"])
anneal_time = result["data"]["info"]["timing"]["qpu_anneal_time_per_sample"]
sample_size = np.sum(sample_set.record["num_occurrences"])
count = 1
for energy, in sample_set.data(fields=["energy"], sorted_by=("energy")):
edb_cursor.execute(insert_row, (str(result["_id"]),
int(count),
int(result["run"]),
str(result["instance"]),
int(energy),
int(anneal_time),
int(sample_size)))
count += 1
edb.commit()
edb_cursor.close()
edb.close()
def minisat_runs(): def minisat_runs():
edb = script.connect_to_experimetns_db() edb = script.connect_to_experimetns_db()
edb_cursor = edb.cursor(); edb_cursor = edb.cursor();
@ -451,6 +621,5 @@ def wmis_results():
print(sat.checkAssignment(model)) print(sat.checkAssignment(model))
if __name__ == "__main__": if __name__ == "__main__":
main() main()

+ 67
- 2
util/SAT2QUBO.py View File

@ -6,8 +6,11 @@ from tqdm import tqdm
import math import math
import random import random
__VERTEX_WEIGHT__ = -2#-1
__EDGE_WEIGHT__ = 2#2
#__VERTEX_WEIGHT__ = -2
#__EDGE_WEIGHT__ = 2
__VERTEX_WEIGHT__ = 1
__EDGE_WEIGHT__ = -2
def WMISdictQUBO(kSATInstance): def WMISdictQUBO(kSATInstance):
quboInstance = {} quboInstance = {}
@ -55,6 +58,68 @@ def WMISdictQUBO_2(kSATInstance):
for i in range(varIndexInClause + 1, len(clause)):
var2 = abs(clause[i])
aux2 = "z{}_{}".format(clauseIndex, var2)
quboInstance[(aux, aux2)] = __EDGE_WEIGHT__
return quboInstance
def WMISdictQUBO_3(kSATInstance):
quboInstance = {}
for clauseIndex in range(kSATInstance.getNumberOfClauses()):
clause = kSATInstance.getClause(clauseIndex)
# build triangles
for varIndexInClause in range(len(clause)):
lit = clause[varIndexInClause]
var = abs(lit)
aux = "z{}_{}".format(clauseIndex, var)
var_node = "x{}".format(var)
if lit < 0:
quboInstance[(aux, aux)] = __VERTEX_WEIGHT__
quboInstance[(var_node, aux)] = __EDGE_WEIGHT__
else:
quboInstance[(var_node, aux)] = __VERTEX_WEIGHT__
for i in range(varIndexInClause + 1, len(clause)):
var2 = abs(clause[i])
aux2 = "z{}_{}".format(clauseIndex, var2)
quboInstance[(aux, aux2)] = __EDGE_WEIGHT__ * 2
return quboInstance
def WMISdictQUBO_4(kSATInstance):
quboInstance = {}
for clauseIndex in range(kSATInstance.getNumberOfClauses()):
clause = kSATInstance.getClause(clauseIndex)
# build triangles
for varIndexInClause in range(len(clause)):
lit = clause[varIndexInClause]
var = abs(lit)
aux = "z{}_{}".format(clauseIndex, var)
var_node = "x{}".format(var)
if lit < 0:
quboInstance[(aux, aux)] = __VERTEX_WEIGHT__
quboInstance[(var_node, aux)] = __EDGE_WEIGHT__ * 2
else:
quboInstance[(var_node, aux)] = __VERTEX_WEIGHT__
for i in range(varIndexInClause + 1, len(clause)): for i in range(varIndexInClause + 1, len(clause)):
var2 = abs(clause[i]) var2 = abs(clause[i])


+ 7
- 0
util/randomSAT.py View File

@ -77,6 +77,13 @@ def number_of_possible_clauses(number_of_variables, variables_per_clause):
return int(__binom(number_of_variables, variables_per_clause) return int(__binom(number_of_variables, variables_per_clause)
* __number_of_sign_placements(variables_per_clause)) * __number_of_sign_placements(variables_per_clause))
def number_of_possible_instances(number_of_clauses,
number_of_variables,
variables_per_clause):
return int(__binom(number_of_possible_clauses(number_of_variables,
variables_per_clause),
number_of_clauses))
def __binom(n, k): def __binom(n, k):
if n == k: if n == k:
return 1 return 1


+ 21
- 1
util/script.py View File

@ -196,6 +196,25 @@ def create_wmis_2_qubos_for_scope(db, scope):
qubo = SAT2QUBO.WMISdictQUBO_2(instance) qubo = SAT2QUBO.WMISdictQUBO_2(instance)
write_qubo_to_pool_db(db["wmis_2_qubos"], qubo, instance_id) write_qubo_to_pool_db(db["wmis_2_qubos"], qubo, instance_id)
def create_wmis_3_qubos_for_scope(db, scope):
instances = queries.Instance_scope_query(db)
instances.query(scope)
for instance, instance_id in tqdm(instances):
qubo = SAT2QUBO.WMISdictQUBO_3(instance)
write_qubo_to_pool_db(db["wmis_3_qubos"], qubo, instance_id)
def create_wmis_4_qubos_for_scope(db, scope):
instances = queries.Instance_scope_query(db)
instances.query(scope)
for instance, instance_id in tqdm(instances):
qubo = SAT2QUBO.WMISdictQUBO_4(instance)
write_qubo_to_pool_db(db["wmis_4_qubos"], qubo, instance_id)
def create_primitive_isings_for_scope_2(db, scope): def create_primitive_isings_for_scope_2(db, scope):
instances = queries.Instance_scope_query(db) instances = queries.Instance_scope_query(db)
@ -437,7 +456,8 @@ def majority_vote_sample(sample):
for var, a in assignments.items(): for var, a in assignments.items():
assignments[var]["majority"] = 1 if __true_percentage(a["all"]) >= 0.5 else 0 assignments[var]["majority"] = 1 if __true_percentage(a["all"]) >= 0.5 else 0
assignment = [0 for i in range(len(assignments))] assignment = [0 for i in range(len(assignments))]
for var, a in assignments.items(): for var, a in assignments.items():


Loading…
Cancel
Save