
Commit

more informed splitter selection
Roman Andriushchenko committed Jan 16, 2024
1 parent 1fbb610 commit d5c08c7
Showing 3 changed files with 57 additions and 24 deletions.
38 changes: 25 additions & 13 deletions paynt/synthesizer/policy_tree.py
@@ -8,6 +8,7 @@
import paynt.quotient.quotient
import paynt.verification.property_result
from paynt.verification.property import Property
import paynt.utils.profiler

import paynt.family.smt
import paynt.synthesizer.conflict_generator.dtmc
@@ -406,9 +407,8 @@ def merge_compatible_policies(self, policy_indices):

def postprocess(self, quotient, prop):

import paynt.utils.profiler
postprocessing_time = paynt.utils.profiler.Timer()
postprocessing_time.start()
postprocessing_timer = paynt.utils.profiler.Timer()
postprocessing_timer.start()
logger.info("post-processing the policy tree...")

logger.info("merging SAT siblings solved by non-exclusively compatible policies...")
@@ -445,9 +445,10 @@ def postprocess(self, quotient, prop):
nodes_removed = nodes_before - self.root.num_nodes()
logger.info("removed {} nodes".format(nodes_removed))

postprocessing_time.stop()
time = int(postprocessing_time.read())
postprocessing_timer.stop()
time = int(postprocessing_timer.read())
logger.debug(f"postprocessing took {time} s")
return time


def extract_policies(self, quotient):
@@ -527,7 +528,7 @@ def solve_game_abstraction(self, family, prop, game_solver):
game_solver.solve(family.selected_choices, prop.maximizing, prop.minimizing)
self.stat.iteration_game(family.mdp.states)
game_value = game_solver.solution_value
game_sat = prop.satisfies_threshold(game_value)
game_sat = prop.satisfies_threshold_within_precision(game_value)
# logger.debug("game solved, value is {}".format(game_value))
game_policy = game_solver.solution_state_to_player1_action
# fix irrelevant choices
@@ -599,7 +600,12 @@ def verify_family(self, family, game_solver, prop):
def choose_splitter(self, family, prop, scheduler_choices, state_values, hole_selection):
inconsistent_assignments = {hole:options for hole,options in enumerate(hole_selection) if len(options) > 1}
if len(inconsistent_assignments)==0:
# pick any hole with multiple options involved in the hole selection
for hole,options in enumerate(hole_selection):
if family.hole_num_options(hole) > 1 and len(options) > 0:
return hole
# pick any hole with multiple options
logger.debug("picking an arbitrary hole...")
for hole in range(family.num_holes):
if family.hole_num_options(hole) > 1:
return hole
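To illustrate the selection order introduced above, here is a minimal, self-contained sketch; the helper pick_splitter and the plain lists standing in for PAYNT's family and hole-selection objects are hypothetical and not part of the repository:

    # prefer a hole that the scheduler actually uses and that still has several
    # options in the family; otherwise fall back to an arbitrary non-trivial hole
    def pick_splitter(num_options_per_hole, hole_selection):
        # num_options_per_hole[h] ... number of options hole h has in the family
        # hole_selection[h]       ... options of hole h used by the game scheduler
        for hole, options in enumerate(hole_selection):
            if num_options_per_hole[hole] > 1 and len(options) > 0:
                return hole  # non-trivial hole involved in the hole selection
        for hole, count in enumerate(num_options_per_hole):
            if count > 1:
                return hole  # arbitrary non-trivial hole
        return None

    # hole 0 is fixed, hole 1 is unused by the scheduler, hole 2 is used -> 2
    print(pick_splitter([1, 3, 4], [[0], [], [2]]))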
@@ -656,12 +662,15 @@ def split(self, family, prop, hole_selection, splitter, policy):
policy_consistent = all([len(options) <= 1 for options in hole_selection])
if policy_consistent:
# associate the branch of the split that contains hole selection with the policy
assert len(used_options) == 1
option = used_options[0]
for subfamily in subfamilies:
if option in subfamily.hole_options(splitter):
subfamily.candidate_policy = policy
break
if len(used_options) != 1:
logger.warning(f"splitting wrt. hole that has {len(used_options)} options within the scheduler" +
"(expected 1); most likely caused by model checking precision")
else:
option = used_options[0]
for subfamily in subfamilies:
if option in subfamily.hole_options(splitter):
subfamily.candidate_policy = policy
break

return suboptions,subfamilies
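The branch above replaces a hard assert: when the splitter hole is used with more than one option by the scheduler (typically because two actions have values within the model checking precision), a warning is logged and the candidate policy is simply not propagated. A small sketch of that behaviour, using plain lists and a hypothetical associate_policy helper instead of PAYNT's family objects:

    import logging
    logger = logging.getLogger(__name__)

    def associate_policy(used_options, subfamily_options, policy, candidate_policies):
        # used_options       ... options of the splitter hole used by the scheduler
        # subfamily_options  ... options of the splitter hole in each subfamily
        # candidate_policies ... per-subfamily candidate policy (None = no candidate)
        if len(used_options) != 1:
            logger.warning("splitting wrt. hole that has %d options within the scheduler "
                "(expected 1); most likely caused by model checking precision", len(used_options))
            return candidate_policies
        option = used_options[0]
        for index, options in enumerate(subfamily_options):
            if option in options:
                candidate_policies[index] = policy
                break
        return candidate_policies

    print(associate_policy([2], [[0, 1], [2, 3]], "policy", [None, None]))  # [None, 'policy']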

@@ -709,10 +718,13 @@ def evaluate_all(self, family, prop, keep_value_only=False):
self.stat.num_mdps_total = self.quotient.design_space.size
self.stat.num_mdps_sat = sum([n.family.size for n in policy_tree.collect_sat()])
self.stat.num_nodes = len(policy_tree.collect_all())
self.stat.num_leaves = len(policy_tree.collect_leaves())
self.stat.num_policies = len(policy_tree.policies)
policy_tree.postprocess(self.quotient, prop)
postprocessing_time = policy_tree.postprocess(self.quotient, prop)
policy_tree.print_stats()
self.stat.postprocessing_time = postprocessing_time
self.stat.num_nodes_merged = len(policy_tree.collect_all())
self.stat.num_leaves_merged = len(policy_tree.collect_leaves())
self.stat.num_policies_merged = len(policy_tree.policies)
self.policy_tree = policy_tree

36 changes: 25 additions & 11 deletions paynt/synthesizer/statistic.py
@@ -114,7 +114,7 @@ def status(self):
fraction_explored = (self.synthesizer.explored + discarded) / self.family_size
time_estimate = safe_division(self.synthesis_timer.read(), fraction_explored)
percentage_explored = int(fraction_explored * 100000) / 1000.0
ret_str += f"Progress {percentage_explored}%"
ret_str += f"progress {percentage_explored}%"

time_elapsed = int(self.synthesis_timer.read())
ret_str += f", elapsed {time_elapsed} s"
@@ -253,26 +253,40 @@ def print_mdp_family_table_entries(self):
print(self.num_mdps_total,end=" ")
print(self.quotient.quotient_mdp.nr_states*self.num_mdps_total,end=" ")
print(self.num_mdps_sat,end=" ")
sat_by_total_percentage = round(self.num_mdps_sat/self.num_mdps_total*100,1)
sat_by_total_percentage = round(self.num_mdps_sat/self.num_mdps_total*100,2)
print(sat_by_total_percentage)


synt_stats_header = "synt info:\t"
synt_stats_header += "\t".join(["time","nodes","nodes (merged)","policies","policies (merged)","policies(merged) / SAT %","game iters","MDP iters", "iters/MDPs"])

headers = [
"time","nodes","nodes (merged)","leaves","leaves (merged)","leaves (merged) / MDPs %",
"policies","policies (merged)","policies (merged) / SAT %","pp time","pp time %",
"game iters","MDP iters","iters/MDPs %"]
synt_stats_header = "synthesis info:\t" + "\t".join(headers)
print(synt_stats_header)

# print("\t\t",end="")
synthesis_timer = int(self.synthesis_timer.time)
print(synthesis_timer,end=" ")
synthesis_time = int(self.synthesis_timer.time)
print(synthesis_time,end=" ")
print(self.num_nodes,end=" ")
print(self.num_nodes_merged,end=" ")

print(self.num_leaves,end=" ")
print(self.num_leaves_merged,end=" ")
leaves_by_mdps = round(self.num_leaves_merged/self.num_mdps_total*100,2)
print(leaves_by_mdps,end=" ")

print(self.num_policies,end=" ")
print(self.num_policies_merged,end=" ")
merged_by_sat_percentage = "N/A"
policies_by_sat = "N/A"
if self.num_mdps_sat > 0:
merged_by_sat_percentage = round(self.num_policies_merged/self.num_mdps_sat*100,1)
print(merged_by_sat_percentage,end=" ")
policies_by_sat = round(self.num_policies_merged/self.num_mdps_sat*100,2)
print(policies_by_sat,end=" ")
print(self.postprocessing_time,end=" ")
postprocessing_time_percentage = round(self.postprocessing_time/synthesis_time*100,2)
print(postprocessing_time_percentage,end=" ")

print(self.iterations_game,end=" ")
print(self.iterations_mdp,end=" ")
iters_by_mdp = round((self.iterations_game+self.iterations_mdp)/self.num_mdps_total,1)
iters_by_mdp = round((self.iterations_game+self.iterations_mdp)/self.num_mdps_total*100,2)
print(iters_by_mdp)
print()
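For reference, the new percentage columns of the table entry are computed as shown below; the concrete counters are made up for illustration and are not taken from any benchmark:

    # made-up counters, only to show how the new columns relate to the raw values
    num_mdps_total = 1000
    num_mdps_sat = 640
    num_leaves_merged = 52
    num_policies_merged = 17
    postprocessing_time = 3        # seconds
    synthesis_time = 120           # seconds
    iterations_game = 410
    iterations_mdp = 95

    leaves_by_mdps = round(num_leaves_merged / num_mdps_total * 100, 2)                 # 5.2
    policies_by_sat = round(num_policies_merged / num_mdps_sat * 100, 2)                # 2.66
    pp_time_percentage = round(postprocessing_time / synthesis_time * 100, 2)           # 2.5
    iters_by_mdp = round((iterations_game + iterations_mdp) / num_mdps_total * 100, 2)  # 50.5

    print(leaves_by_mdps, policies_by_sat, pp_time_percentage, iters_by_mdp)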
7 changes: 7 additions & 0 deletions paynt/verification/property.py
@@ -69,6 +69,10 @@ def __init__(self, prop, discount_factor=1):

# set threshold
self.threshold = rf.threshold_expr.evaluate_as_double()
if self.minimizing:
self.threshold_plus_precision = self.threshold + Property.model_checking_precision
else:
self.threshold_plus_precision = self.threshold - Property.model_checking_precision

# construct quantitative formula (without bound) for explicit model checking
# set optimality type
@@ -131,6 +135,9 @@ def result_valid(self, value):
def satisfies_threshold(self, value):
return self.result_valid(value) and self.op(value, self.threshold)

def satisfies_threshold_within_precision(self, value):
return self.result_valid(value) and self.op(value, self.threshold_plus_precision)

@property
def can_be_improved(self):
return False
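To make the new check concrete, here is a standalone sketch of the relaxed comparison; the precision value and the non-strict operators are assumptions standing in for Property.model_checking_precision and the property's own op:

    import operator

    model_checking_precision = 1e-4   # assumed value; PAYNT configures its own

    def satisfies_threshold_within_precision(value, threshold, minimizing):
        # minimizing property (e.g. P<=t): relax the bound upwards,
        # maximizing property (e.g. P>=t): relax it downwards
        op = operator.le if minimizing else operator.ge
        relaxed = threshold + model_checking_precision if minimizing \
            else threshold - model_checking_precision
        return op(value, relaxed)

    # minimizing property with threshold 0.3:
    print(satisfies_threshold_within_precision(0.30005, 0.3, minimizing=True))  # True
    print(satisfies_threshold_within_precision(0.301, 0.3, minimizing=True))    # False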
