45 changes: 32 additions & 13 deletions var_elim/algorithms/replace.py
@@ -32,6 +32,7 @@
)
from pyomo.contrib.incidence_analysis import IncidenceGraphInterface
from pyomo.contrib.incidence_analysis.config import IncidenceMethod
import pyomo.contrib.fbbt.fbbt as fbbt
from pyomo.common.modeling import unique_component_name
from pyomo.common.timing import HierarchicalTimer

@@ -132,21 +133,27 @@ def add_bounds_to_expr(var, var_expr):
constraints on the expression if the variables replaced were bounded

Each constraint added to the bound_cons list is indexed by var_name_ub or
- var_name_lb depending upon whihc bound it adds to the expression
var_name_lb depending upon which bound it adds to the expression
"""
- if var.ub is None and var.lb is None:
-     lb_expr = None
-     ub_expr = None
- elif var.lb is not None and var.ub is None:
-     lb_expr = var_expr >= var.lb
-     ub_expr = None
- elif var.ub is not None and var.lb is None:
-     lb_expr = None
-     ub_expr = var_expr <= var.ub
lb, ub = fbbt.compute_bounds_on_expr(var_expr)
# TODO: Use a tolerance for bound equivalence here?
if var.lb is not None and (lb is None or lb < var.lb):
# We add a lower bound constraint if the variable has a lower bound
# and it is not dominated by the bounds on its defining expression.
add_lb_con = True
else:
-     lb_expr = var_expr >= var.lb
-     ub_expr = var_expr <= var.ub

add_lb_con = False
if var.ub is not None and (ub is None or ub > var.ub):
add_ub_con = True
else:
add_ub_con = False

# TODO: If our expression is a linear (monotonic) function of a single
# variable, we can propagate its bound back to the independent variable.

ub_expr = var_expr <= var.ub if add_ub_con else None
lb_expr = var_expr >= var.lb if add_lb_con else None

return lb_expr, ub_expr
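
The new dominance check relies on interval arithmetic from pyomo.contrib.fbbt. As a reviewer sketch (not part of this diff), here is what compute_bounds_on_expr returns for the expression used in the new test_dominated_bounds test below, and how those bounds drive the add_lb_con / add_ub_con decisions:

```python
import pyomo.environ as pyo
import pyomo.contrib.fbbt.fbbt as fbbt

m = pyo.ConcreteModel()
m.x = pyo.Var([1, 2, 3])
m.x[2].setlb(-2)
m.x[2].setub(2)
m.x[3].setlb(-1)
m.x[3].setub(4)
var_expr = m.x[2] + m.x[3]**2

# Interval arithmetic on the defining expression; should give (-2.0, 18.0) here.
lb, ub = fbbt.compute_bounds_on_expr(var_expr)

# Bounds on the variable being eliminated.
m.x[1].setlb(-2)
m.x[1].setub(20)

# Same tests as in add_bounds_to_expr: neither variable bound is tighter than
# the implied bounds on the expression, so no _lb/_ub constraint is needed.
add_lb_con = m.x[1].lb is not None and (lb is None or lb < m.x[1].lb)  # False
add_ub_con = m.x[1].ub is not None and (ub is None or ub > m.x[1].ub)  # False
```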


@@ -355,10 +362,19 @@ def eliminate_variables(
# Update data structures for eliminated variables
for var in var_order:
var_expr = substitution_map[id(var)]
# Here, we should return None if adding inequalities was not necessary.
# In these cases, we either (a) do nothing (the bounds are dominated by
# existing bounds on dependent variables) or (b) translate the bounds
# to the (single, linear) defining variable.
# If we're updating bounds on defining variables in-place, I need to make
# sure these bounds are accounted for in later steps.
lb_expr, ub_expr = add_bounds_to_expr(var, var_expr)
lb_name = var.name + "_lb"
ub_name = var.name + "_ub"
if lb_expr is not None and type(lb_expr) is not bool:
# Checking for trivial constraints here seems unnecessary. But in this
# way we're masking an infeasibility. I guess we've determined that these
# don't happen in the problems we care about?
if lb_expr is False:
raise RuntimeError("Lower bound resolved to trivial infeasible constraint")
bound_con_set.add(lb_name)
@@ -382,6 +398,9 @@
# iterate over variables-to-eliminate, and perform the substitution for each
# (new) constraint that they're adjacent to. This way we don't check every
# constraint if we're only eliminating a small number of variables.
#
# This loop replaces variables in equality and inequality constraints, but
# doesn't convert bounds on eliminated variables to inequalities.
for con in igraph.constraints:
if (
id(con) not in elim_con_set
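On the `type(lb_expr) is not bool` guard and the comment added above it: this presumably covers the case where the eliminated variable's defining expression has already reduced to a plain number, so the comparison is evaluated by Python itself instead of building a Pyomo relational expression. A minimal sketch (reviewer illustration; the values are hypothetical, not taken from the code):

```python
# Hypothetical: the eliminated variable's defining expression simplified to a constant.
var_expr = 5.0
var_lb, var_ub = 7.0, 10.0  # hypothetical bounds on the eliminated variable

lb_expr = var_expr >= var_lb  # False -> trivially infeasible, hence the RuntimeError above
ub_expr = var_expr <= var_ub  # True  -> trivially satisfied, safe to skip

assert isinstance(lb_expr, bool) and isinstance(ub_expr, bool)
```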
27 changes: 26 additions & 1 deletion var_elim/algorithms/tests/test_replacement.py
@@ -187,6 +187,8 @@ def test_constraints_are_added(self):
vars_to_elim = [m.x[1], m.x[2]]
cons_to_elim = [m.eq1, m.eq2]

m.x[1].setlb(1)

var_order, con_order = define_elimination_order(vars_to_elim, cons_to_elim)
eliminate_variables(m, var_order, con_order)

@@ -229,6 +231,27 @@ def test_same_solution(self):
pyo.value(x2_lb_con.lower),
)

def test_dominated_bounds(self):
m = pyo.ConcreteModel()
m.x = pyo.Var([1, 2, 3])
m.eq = pyo.Constraint(pyo.PositiveIntegers)
m.eq[1] = m.x[1] == m.x[2] + m.x[3]**2
m.obj = pyo.Objective(expr=m.x[1]**2 + m.x[2]**2 + m.x[3]**2)
e = m.x[2] + m.x[3]**2
m.x[2].setlb(-2)
m.x[2].setub(2)
m.x[3].setlb(-1)
m.x[3].setub(4)
# Bounds on expression are (-2, 18)
m.x[1].setlb(-2)
m.x[1].setub(20)

var_elim = [m.x[1]]
con_elim = [m.eq[1]]
var_exprs, var_lb_map, var_ub_map = eliminate_variables(m, var_elim, con_elim)
assert len(m.replaced_variable_bounds_set) == 0
assert len(m.replaced_variable_bounds) == 0
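
A complementary test could exercise the case where a bound is not dominated. The sketch below (reviewer suggestion, not part of this diff) reuses the model above but tightens x[1]'s upper bound; the expected counts follow the new dominance logic, and the component names and import path are assumed from the existing test module.

```python
import pyomo.environ as pyo
from var_elim.algorithms.replace import eliminate_variables  # import path assumed

def test_non_dominated_upper_bound():
    m = pyo.ConcreteModel()
    m.x = pyo.Var([1, 2, 3])
    m.eq = pyo.Constraint(pyo.PositiveIntegers)
    m.eq[1] = m.x[1] == m.x[2] + m.x[3]**2
    m.obj = pyo.Objective(expr=m.x[1]**2 + m.x[2]**2 + m.x[3]**2)
    m.x[2].setlb(-2)
    m.x[2].setub(2)
    m.x[3].setlb(-1)
    m.x[3].setub(4)
    # Bounds on expression are (-2, 18)
    m.x[1].setlb(-2)   # equal to the implied lower bound -> dominated
    m.x[1].setub(10)   # tighter than 18 -> not dominated

    eliminate_variables(m, [m.x[1]], [m.eq[1]])
    # Only the upper bound should be converted to an inequality constraint.
    assert len(m.replaced_variable_bounds_set) == 1
    assert len(m.replaced_variable_bounds) == 1
```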


class TestReplacementInInequalities:
def _make_simple_model(self):
@@ -260,7 +283,9 @@ def test_simple_replacement(self):
# Make sure new model has correct number of constraints
new_igraph = IncidenceGraphInterface(m, include_inequality=True)

- assert len(new_igraph.constraints) == 6
# New constraints only get added for upper bounds, as lower bounds are
# redundant.
assert len(new_igraph.constraints) == 4

# Make sure proper replacement happened here
assert ComponentSet(identify_variables(m.eq4.expr)) == ComponentSet(m.y[:])
16 changes: 15 additions & 1 deletion var_elim/scripts/analyze_solvetime.py
@@ -79,6 +79,14 @@ def solve_reduced(m, tee=True, callback=matching_elim_callback):
return m


def get_objective_value(m):
objs = list(m.component_data_objects(pyo.Objective, active=True))
if len(objs) > 1:
raise RuntimeError(f"Model has {len(objs)} objectives")
else:
return pyo.value(objs[0].expr)


def main(args):
if args.model is not None:
models = [(args.model, config.CONSTRUCTOR_LOOKUP[args.model])]
@@ -114,6 +122,7 @@ def main(args):
"success": [],
"feasible": [],
"max-infeasibility": [],
"objective-value": [],
"elim-time": [],
"solve-time": [],
"init-time": [],
@@ -152,7 +161,8 @@ def main(args):
# We need to re-set the callback each time we solve a model
htimer.start("solver")
solver.config.intermediate_callback = Callback()
- res = solver.solve(model, tee=False, timer=htimer)
solver.config.options["print_user_options"] = "yes"
res = solver.solve(model, tee=args.tee, timer=htimer)
htimer.stop("solver")

timer.toc("Solve model")
@@ -248,12 +258,15 @@ def main(args):
print(f"Time spent initializing solver: {init_time}")
print()

objval = get_objective_value(model)

data["model"].append(mname)
data["method"].append(elim_name)
data["elim-time"].append(elim_time)
data["success"].append(success)
data["feasible"].append(valid)
data["max-infeasibility"].append(max_infeas)
data["objective-value"].append(objval)
data["solve-time"].append(solve_time)
data["init-time"].append(init_time)
# This is time to build the model, and has nothing to do with the elimination
@@ -294,6 +307,7 @@ def main(args):
default=None,
help="Basename for file to write results to",
)
argparser.add_argument("--tee", action="store_true", help="Stream solver log to stdout")
# HACK: We change the default of the argparser so we can handle it specially
# if --method or --model are used.
# It's unclear whether this hack will be worth the convenience, but let's try it.
40 changes: 25 additions & 15 deletions var_elim/scripts/collect_results.py
@@ -54,7 +54,8 @@ def main(args):
fnames = [basename + "-" + s + ".csv" for s in suffixes]
filedir = os.path.dirname(__file__)
fpaths = [
- os.path.join(filedir, "results", args.result_type, fname)
#os.path.join(filedir, "results", args.result_type, fname)
os.path.join(args.results_dir, args.result_type, fname)
for fname in fnames
]
print()
@@ -71,7 +72,7 @@ def main(args):
# output-suffix overrides suffix
suff_str = f"-{args.output_suffix}"
output_fname = args.result_type + suff_str + ".csv"
- output_fpath = os.path.join(config.get_results_dir(), output_fname)
output_fpath = os.path.join(args.results_dir, output_fname)

dfs = [pd.read_csv(fpath) for fpath in fpaths]
output_df = pd.concat(dfs, ignore_index=True, join="inner")
@@ -101,25 +102,34 @@ def main(args):
),
default=None,
)
# We now append results_type to results_dir, so this isn't necessary.
#argparser.add_argument(
# "--output-results-dir",
# help=(
# "Results dir to store collected output"
# " (different from results dir where we look for results to collect)"
# ),
# default=config.get_results_dir(),
#)

# HACK: We change the default of the argparser so we can handle it specially
# if --method or --model are used.
# It's unclear whether this hack will be worth the convenience, but let's try it.
- argparser.set_defaults(results_dir=None)
#argparser.set_defaults(results_dir=None)

args = argparser.parse_args()

- if args.results_dir is None:
-     if args.method is None and args.model is None:
-         # If neither method nor model is used (we are collecting all results)
-         # we put results in the top-level results directory.
-         args.results_dir = config.get_results_dir()
-     else:
-         # If either method or model is used, we put the results in the
-         # results/structure subdirectory. This is because we don't want the
-         # top-level results getting polluted with a bunch of files.
-         resdir = os.path.join(config.get_results_dir(), "solvetime")
-         config.validate_dir(resdir)
-         args.results_dir = resdir
#if args.results_dir is None:
# if args.method is None and args.model is None:
# # If neither method nor model is used (we are collecting all results)
# # we put results in the top-level results directory.
# args.results_dir = config.get_results_dir()
# else:
# # If either method or model is used, we put the results in the
# # results/structure subdirectory. This is because we don't want the
# # top-level results getting polluted with a bunch of files.
# resdir = os.path.join(config.get_results_dir(), args.results_dir)
# config.validate_dir(resdir)
# args.results_dir = resdir

main(args)
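
Since the set_defaults() hack is commented out and args.results_dir is now used directly, --results-dir presumably carries a real default so that os.path.join(args.results_dir, ...) always receives a string. A hypothetical sketch of that definition (the actual one is outside this hunk; the config import path is assumed):

```python
import argparse
from var_elim.scripts import config  # import path assumed

argparser = argparse.ArgumentParser()
argparser.add_argument(
    "--results-dir",
    default=config.get_results_dir(),
    help="Directory to look for per-run CSV files and to write the collected CSV",
)
```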
8 changes: 8 additions & 0 deletions var_elim/scripts/write_command_lines.py
@@ -75,6 +75,13 @@ def main(args):
if args.suffix is not None:
for cl in cl_lists:
cl.append(f"--suffix={args.suffix}")
if args.results_dir != config.get_results_dir():
for cl in cl_lists:
cl.append(f"--results-dir={args.results_dir}")
if args.tee:
for cl in cl_lists:
cl.append("--tee")

else:
results_dir = os.path.join(os.path.dirname(__file__), "results", "sweep")
suff_str = "" if args.suffix is None else f"-{args.suffix}"
@@ -113,5 +120,6 @@
help="Parallelize by model, method, or both. Default=both",
default="both",
)
argparser.add_argument("--tee", action="store_true", help="Solver log to stdout")
args = argparser.parse_args()
main(args)
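
For illustration, one entry of cl_lists after the new --results-dir and --tee handling might look like the following; the model, method, suffix, and path are hypothetical placeholders, not values from the repository.

```python
cl = ["python", "analyze_solvetime.py", "--model=distill", "--method=matching"]
cl.append("--suffix=trial1")
cl.append("--results-dir=/scratch/var_elim/results")
cl.append("--tee")
print(" ".join(cl))
# python analyze_solvetime.py --model=distill --method=matching --suffix=trial1 --results-dir=/scratch/var_elim/results --tee
```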
8 changes: 6 additions & 2 deletions var_elim/scripts/write_sweep_command_lines.py
@@ -66,9 +66,11 @@ def main(args):
# Note that sample arg (and output filename) is base-1
for i in range(1, nsamples_total + 1)
]
- if args.suffix is not None:
-     for cmd in sample_commands:
for cmd in sample_commands:
if args.suffix is not None:
cmd.append(f"--suffix={args.suffix}")
if args.results_dir is not None:
cmd.append(f"--results-dir={args.results_dir}")

sample_commands_str = [" ".join(cmd) + "\n" for cmd in sample_commands]

@@ -95,6 +97,8 @@
# suffix will be used to identify the right input files, while
# output-suffix will be used for the output file.
collect_cmd.append(f"--output_suffix={args.output_suffix}")
if args.results_dir is not None:
collect_cmd.append(f"--results-dir={args.results_dir}")
collect_commands.append(collect_cmd)

# TODO: Maybe send other arguments to the plotting command?