#!/usr/bin/env python3

import os
import glob
import json

import numpy as np
import matplotlib.pyplot as plt

from script import script as scriptUtils


def main():
    args = __parseArguments()

    print(args)

    __stats(args["comparisonDir"], args["outputDir"])


def __parseArguments():
    argParser = scriptUtils.ArgParser()

    argParser.addInstanceDirArg()

    argParser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                     help="the directory with all comparison files", type=str)

    argParser.addArg(alias="outputDir", shortFlag="o", longFlag="comparison_stats_dir",
                     help="the directory to store the stats", type=str)

    return argParser.parse()
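

# Example invocation (a sketch; the script name is illustrative, and the exact
# flag set depends on scriptUtils.ArgParser and on what addInstanceDirArg()
# registers in addition to the two flags defined above):
#
#   ./comparison_stats.py -c comparisons/ -o comparison_stats/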


def __stats(comparisonDir, outputDir):
    stats = __collectStats(comparisonDir)

    __writeStats(stats, outputDir)


def __collectStats(comparisonDir):
    files = glob.glob(os.path.join(comparisonDir, "*.cmp"))

    stats = []

    for path in files:
        comparison = __readComparison(path)

        stats.append(__processSingleInstance(comparison))

    return stats


def __processSingleInstance(comparison):
    instanceStats = {}

    conflicts = comparison["conflicts_per_variable"]
    conflictArr = np.array(list(conflicts.values()))

    instanceStats["conflicts_per_variable_mean"] = conflictArr.mean()
    instanceStats["conflicts_per_variable_median"] = np.median(conflictArr)
    instanceStats["conflicts_per_variable_std_dev"] = np.std(conflictArr)
    instanceStats["conflicts_per_variable_max"] = conflictArr.max()
    instanceStats["conflicts_per_variable_min"] = conflictArr.min()
    instanceStats["conflicts_per_instance"] = np.sum(conflictArr)
    instanceStats["raw_conflicts"] = list(conflictArr)
    # degrees first, conflicts second, matching the parameter order of
    # __calcConflictsToDegree
    instanceStats["conflicts_to_degree_per_variable"] = __calcConflictsToDegree(
        comparison["degrees_of_variables"], conflicts)

    if comparison["minisat_satisfiable"]:
        if __instanceIsFalseNegative(comparison):
            instanceStats["result"] = "false_negative"
        else:
            instanceStats["result"] = "satisfiable"
    else:
        instanceStats["result"] = "unsatisfiable"

    return instanceStats


def __calcConflictsToDegree(degreesPerVariable, conflictsPerVariable):
    conflictsToDegreePerVariable = []

    for varLabel, degree in degreesPerVariable.items():
        conflicts = conflictsPerVariable[varLabel]
        cnflToDeg = conflicts / (float(degree) / 2.0)**2

        # keep only ratios of at most 1; larger values are discarded
        if cnflToDeg <= 1:
            conflictsToDegreePerVariable.append(cnflToDeg)

    return conflictsToDegreePerVariable
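
# Worked example (hypothetical numbers): a variable of degree 6 with 4
# conflicts gives 4 / (6/2)**2 = 4/9 ≈ 0.44 and is kept; a variable of
# degree 2 with 3 conflicts gives 3 / (2/2)**2 = 3.0 and is filtered out
# by the <= 1 threshold above.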


def __instanceIsFalseNegative(comparison):
    # minisat proves the instance satisfiable, but the QUBO-based solver
    # reports it as unsatisfiable
    return (comparison["minisat_satisfiable"] and
            not comparison["qubo_satisfiable"])


def __readComparison(path):
    with open(path, "r") as cmpFile:
        comparison = json.load(cmpFile)

    return comparison
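
# A .cmp file is expected to be JSON containing at least the keys consumed by
# __processSingleInstance and __calcConflictsToDegree; the values below are
# illustrative:
#
#   {
#       "conflicts_per_variable": {"x1": 3, ...},
#       "degrees_of_variables":   {"x1": 6, ...},
#       "minisat_satisfiable":    true,
#       "qubo_satisfiable":       false
#   }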


def __writeStats(stats, outputDir):
    data = __separateMatchesAndFalseNegatives(stats)

    overviewFig = __createOverviewFig(data)

    meanFig = __createSingleStatFig(data["mean"], "Conflicts per variable mean")
    medianFig = __createSingleStatFig(data["median"], "Conflicts per variable median")
    maxFig = __createSingleStatFig(data["max"], "Conflicts per variable max")
    minFig = __createSingleStatFig(data["min"], "Conflicts per variable min")
    stdDevFig = __createSingleStatFig(data["std_dev"], "Conflicts per variable\nstandard deviation")
    cnflPerInstFig = __createSingleStatFig(data["cnfl_per_inst"], "Conflicts per instance")
    cnflDegFig1 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=False)
    cnflDegFig2 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=True)

    histFig = __createHistogramFig(data, "raw", "Conflicts per variable")
    #cnflDegHistFig = __createHistogramFig(data, "cnflDeg", "Conflicts in relation to degree")

    allFigs = [overviewFig,
               meanFig,
               medianFig,
               maxFig,
               minFig,
               stdDevFig,
               cnflPerInstFig,
               cnflDegFig1,
               cnflDegFig2]

    __setBatchXticks(figures=allFigs,
                     ticks=[1, 2, 3],
                     labels=["satisfiable",
                             "false negative",
                             "unsatisfiable"])

    __setBatchXtickLabelRotation(figures=allFigs, rotation=30)

    overviewFig.savefig(os.path.join(outputDir, "conflicts_overview.png"))
    meanFig.savefig(os.path.join(outputDir, "conflicts_mean.png"))
    medianFig.savefig(os.path.join(outputDir, "conflicts_median.png"))
    maxFig.savefig(os.path.join(outputDir, "conflicts_max.png"))
    minFig.savefig(os.path.join(outputDir, "conflicts_min.png"))
    stdDevFig.savefig(os.path.join(outputDir, "conflicts_std_dev.png"))
    cnflPerInstFig.savefig(os.path.join(outputDir, "conflicts_per_instance.png"))
    histFig.savefig(os.path.join(outputDir, "conflicts_per_var_hist.png"))
    cnflDegFig1.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_1.png"))
    cnflDegFig2.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_2.png"))

    #plt.show(overviewFig)


def __createOverviewFig(data):
    fig = plt.figure()

    ax0 = fig.add_subplot(141)
    ax0.boxplot([data["mean"]["satisfiable"],
                 data["mean"]["false_negative"],
                 data["mean"]["unsatisfiable"]])
    ax0.set_title("mean")

    ax1 = fig.add_subplot(142, sharey=ax0)
    ax1.boxplot([data["median"]["satisfiable"],
                 data["median"]["false_negative"],
                 data["median"]["unsatisfiable"]])
    ax1.set_title("median")

    ax2 = fig.add_subplot(143, sharey=ax0)
    ax2.boxplot([data["max"]["satisfiable"],
                 data["max"]["false_negative"],
                 data["max"]["unsatisfiable"]])
    ax2.set_title("max")

    ax3 = fig.add_subplot(144, sharey=ax0)
    ax3.boxplot([data["min"]["satisfiable"],
                 data["min"]["false_negative"],
                 data["min"]["unsatisfiable"]])
    ax3.set_title("min")

    fig.set_size_inches(12, 8)
    fig.suptitle("Conflicts per variable overview", fontsize=16)

    return fig


def __createHistogramFig(data, subDataSet, title):
    fig = plt.figure()

    # guard against a bin count of 0, which would make plt.hist fail
    bins = max(int(max(data[subDataSet]["satisfiable"]) / 5), 1)

    # one row per result class: histogram on the left, boxplot on the right
    ax0 = fig.add_subplot(321)
    ax0.hist(data[subDataSet]["satisfiable"], bins=bins)
    ax0_2 = fig.add_subplot(322)
    ax0_2.boxplot(data[subDataSet]["satisfiable"], vert=False)

    ax1 = fig.add_subplot(323, sharex=ax0)
    ax1.hist(data[subDataSet]["false_negative"], bins=bins)
    ax1_2 = fig.add_subplot(324, sharex=ax0_2)
    ax1_2.boxplot(data[subDataSet]["false_negative"], vert=False)

    ax2 = fig.add_subplot(325, sharex=ax0)
    ax2.hist(data[subDataSet]["unsatisfiable"], bins=bins)
    ax2_2 = fig.add_subplot(326, sharex=ax0_2)
    ax2_2.boxplot(data[subDataSet]["unsatisfiable"], vert=False)

    fig.set_size_inches(14, 10)
    fig.suptitle(title, fontsize=16)

    return fig


def __createSingleStatFig(subDataset, title, showfliers=True):
    fig = plt.figure()

    ax = fig.add_subplot(111)
    ax.boxplot([subDataset["satisfiable"],
                subDataset["false_negative"],
                subDataset["unsatisfiable"]], showfliers=showfliers)

    fig.set_size_inches(3.5, 8)
    fig.suptitle(title, fontsize=16)

    return fig


def __setBatchXticks(figures, ticks, labels):
    for fig in figures:
        plt.setp(fig.get_axes(), xticks=ticks, xticklabels=labels)


def __setBatchXtickLabelRotation(figures, rotation):
    for fig in figures:
        for ax in fig.get_axes():
            plt.setp(ax.get_xticklabels(), rotation=rotation)


def __separateMatchesAndFalseNegatives(stats):
    # one bucket per statistic, each split by the instance's result class
    statKeys = ["mean", "median", "std_dev", "max", "min",
                "cnfl_per_inst", "raw", "cnflDeg"]
    data = {key: {"false_negative": [],
                  "satisfiable": [],
                  "unsatisfiable": []}
            for key in statKeys}

    for instance in stats:
        target = instance["result"]

        data["mean"][target].append(instance["conflicts_per_variable_mean"])
        data["median"][target].append(instance["conflicts_per_variable_median"])
        data["std_dev"][target].append(instance["conflicts_per_variable_std_dev"])
        data["max"][target].append(instance["conflicts_per_variable_max"])
        data["min"][target].append(instance["conflicts_per_variable_min"])
        data["cnfl_per_inst"][target].append(instance["conflicts_per_instance"])
        data["raw"][target].extend(instance["raw_conflicts"])
        data["cnflDeg"][target].extend(instance["conflicts_to_degree_per_variable"])

    return data
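
# Shape of the returned data (illustrative): data["mean"]["satisfiable"] holds
# one value per satisfiable instance (append), while data["raw"] and
# data["cnflDeg"] pool the per-variable values of all instances in each result
# class (extend).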


if __name__ == "__main__":
    main()