Add polishing step option: do_polishing_step (#25)
* make function _print_iter_stats()

* draft polishing step

* add ind_comp: indices of complementarity constraints

* fix ind_comp

* work on polish step, add statistics, fix timing plot

* add test_polishing

* polish step good enough

* cleanup

* oscilator example: cleanup and add polishing example

* fix rebase
FreyJo authored Jan 21, 2023
1 parent 4e9789a commit 4bdad94
Showing 6 changed files with 168 additions and 23 deletions.
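
For reference, the new option is enabled on a NosnocOpts instance before the solver is built. The snippet below is a condensed sketch of the new main_polishing() example from this commit; the import of get_oscilator_model assumes the repository root is on the Python path, everything else follows the diff.

import numpy as np
import nosnoc
from examples.oscilator_example import get_oscilator_model  # assumption: repo root on sys.path

opts = nosnoc.NosnocOpts()
opts.comp_tol = 1e-4
opts.do_polishing_step = True          # new option introduced by this commit
opts.homotopy_update_slope = 0.1
opts.terminal_time = np.pi / 2 / 29    # one simulation step, as in main_polishing()

model = get_oscilator_model()
solver = nosnoc.NosnocSolver(opts, model)
looper = nosnoc.NosnocSimLooper(solver, model.x0, 29)
looper.run()
results = looper.get_results()
print(f"max cost_val = {max(results['cost_vals']):.2e}")
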
41 changes: 36 additions & 5 deletions examples/oscilator_example.py
@@ -40,7 +40,6 @@ def get_oscilator_model(use_g_Stewart=False):

def main(use_g_Stewart=False):
opts = nosnoc.NosnocOpts()
# opts.irk_representation = "differential"
opts.use_fesd = True
comp_tol = 1e-6
opts.comp_tol = comp_tol
@@ -74,7 +73,6 @@ def main(use_g_Stewart=False):
# json.dump(results['w_sim'], f, indent=4, sort_keys=True, default=make_object_json_dumpable)
# print(f"saved results in {json_file}")


def main_least_squares():

# load reference solution
@@ -84,11 +82,9 @@ def main_least_squares():
# w_sim_ref = json.load(f)

opts = nosnoc.NosnocOpts()
# opts.irk_representation = "differential"
opts.use_fesd = True
comp_tol = 1e-7
opts.comp_tol = comp_tol
# opts.homotopy_update_slope = 0.9 # decrease rate
opts.N_finite_elements = 2
opts.n_s = 2
opts.print_level = 3
@@ -128,8 +124,42 @@ def main_least_squares():
# breakpoint()


def plot_oscilator(X_sim, t_grid, latexify=True):
def main_polishing():

opts = nosnoc.NosnocOpts()
opts.comp_tol = 1e-4
opts.print_level = 3

opts.cross_comp_mode = nosnoc.CrossComplementarityMode.COMPLEMENT_ALL_STAGE_VALUES_WITH_EACH_OTHER
opts.step_equilibration = nosnoc.StepEquilibrationMode.DIRECT
# opts.constraint_handling = nosnoc.ConstraintHandling.LEAST_SQUARES
# opts.mpcc_mode = nosnoc.MpccMode.FISCHER_BURMEISTER
opts.do_polishing_step = True
opts.homotopy_update_slope = 0.1

model = get_oscilator_model()

Tsim = np.pi / 2
Nsim = 29
Tstep = Tsim / Nsim

opts.terminal_time = Tstep

solver = nosnoc.NosnocSolver(opts, model)
solver.print_problem()
# loop
looper = nosnoc.NosnocSimLooper(solver, model.x0, Nsim)
looper.run()
results = looper.get_results()
print(f"max cost_val = {max(results['cost_vals']):.2e}")

plot_oscilator(results["X_sim"], results["t_grid"])
nosnoc.plot_timings(results["cpu_nlp"])


def plot_oscilator(X_sim, t_grid, latexify=True):
if latexify:
nosnoc.latexify_plot()
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(t_grid, X_sim)
@@ -170,3 +200,4 @@ def make_object_json_dumpable(input):
if __name__ == "__main__":
main(len(argv) > 1)
# main_least_squares()
# main_polishing()
5 changes: 2 additions & 3 deletions nosnoc/helpers.py
@@ -39,7 +39,7 @@ def __init__(self, solver: NosnocSolver, x0: np.ndarray, Nsim: int, p_values: Op
self.cost_vals = []
self.w_init = w_init

self.cpu_nlp = np.zeros((Nsim, solver.opts.max_iter_homotopy))
self.cpu_nlp = np.zeros((Nsim, solver.opts.max_iter_homotopy+1))

def run(self) -> None:
"""Run the simulation loop."""
@@ -67,10 +67,9 @@ def run(self) -> None:

def get_results(self) -> dict:
self.t_grid = np.concatenate((np.array([0.0]), np.cumsum(self.time_steps)))

results = {
"X_sim": self.X_sim,
"cpu_nlp": self.cpu_nlp,
"cpu_nlp": np.nan_to_num(self.cpu_nlp),
"time_steps": self.time_steps,
"t_grid": self.t_grid,
"theta_sim": self.theta_sim,
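
The extra column in cpu_nlp makes room for the timing of the optional polishing NLP. Slots that never run stay None in the solver's timing list and turn into NaN once written into the preallocated float array, which appears to be what the new np.nan_to_num call guards against. A small standalone numpy illustration with made-up timings:

import numpy as np

max_iter_homotopy = 3
cpu_nlp = np.zeros((2, max_iter_homotopy + 1))  # Nsim = 2, plus one slot for the polishing NLP
cpu_nlp[0, :] = np.array([0.020, 0.015, 0.010, None], dtype=float)  # unused slot (None) becomes NaN
cpu_nlp[1, :] = [0.021, 0.014, 0.011, 0.050]                        # polishing step ran here

print(np.nan_to_num(cpu_nlp).sum(axis=1))  # NaN -> 0.0, so per-step totals stay finite
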
123 changes: 110 additions & 13 deletions nosnoc/nosnoc.py
@@ -426,7 +426,7 @@ def add_variable(self,
index[stage] = new_indices
return

def add_constraint(self, symbolic: SX, lb=None, ub=None):
def add_constraint(self, symbolic: SX, lb=None, ub=None, index: Optional[list]=None):
n = casadi_length(symbolic)
if lb is None:
lb = np.zeros((n,))
@@ -435,6 +435,11 @@ def add_constraint(self, symbolic: SX, lb=None, ub=None):
if len(lb) != n or len(ub) != n:
raise Exception(f'add_constraint, inconsistent dimension: {symbolic=}, {lb=}, {ub=}')

if index is not None:
ng = casadi_length(self.g)
new_indices = list(range(ng, ng + n))
index.append(new_indices)

self.g = vertcat(self.g, symbolic)
self.lbg = np.concatenate((self.lbg, lb))
self.ubg = np.concatenate((self.ubg, ub))
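
The new optional index argument records which rows of g a constraint block occupies; this is how ind_comp is filled for the complementarity constraints further down. A standalone sketch of the pattern, using plain Python lists instead of CasADi expressions and casadi_length:

g = []         # stand-in for the growing constraint vector
ind_comp = []  # collects one index list per tracked block

def add_block(exprs, index=None):
    if index is not None:
        start = len(g)
        index.append(list(range(start, start + len(exprs))))
    g.extend(exprs)

add_block(["lam0 * theta0", "lam1 * theta1"], index=ind_comp)  # tracked block
add_block(["h_sum - T"])                                       # untracked block
add_block(["lam2 * theta2"], index=ind_comp)                   # tracked block

print(ind_comp)  # [[0, 1], [3]]
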
@@ -527,6 +532,8 @@ def __init__(self,
self.ind_lambda_p = create_empty_list_matrix((n_s + end_allowance, dims.n_sys))
self.ind_h = []

self.ind_comp = []

# create variables
h = SX.sym(f'h_{ctrl_idx}_{fe_idx}')
h_ctrl_stage = opts.terminal_time / opts.N_stages
@@ -786,7 +793,7 @@ def create_complementarity(self, x: List[SX], y: SX, sigma: SX, tau: SX) -> None
lb_comp = 0 * np.ones((n_comp,))
ub_comp = 0 * np.ones((n_comp,))

self.add_constraint(g_comp, lb=lb_comp, ub=ub_comp)
self.add_constraint(g_comp, lb=lb_comp, ub=ub_comp, index=self.ind_comp)

return

@@ -920,6 +927,14 @@ def _add_primal_vector(self, symbolic: SX, lb: np.array, ub, initial):
self.w0 = np.concatenate((self.w0, initial))
return

def add_fe_constraints(self, fe: FiniteElement, ctrl_idx: int):
g_len = casadi_length(self.g)
self.add_constraint(fe.g, fe.lbg, fe.ubg)
# constraint indices
self.ind_comp[ctrl_idx].append(increment_indices(fe.ind_comp, g_len))
return


def __init__(self, opts: NosnocOpts, model: NosnocModel, ocp: Optional[NosnocOcp] = None):

super().__init__()
@@ -937,7 +952,7 @@ def __init__(self, opts: NosnocOpts, model: NosnocModel, ocp: Optional[NosnocOcp
h_ctrl_stage = opts.terminal_time / opts.N_stages
self.stages: list[list[FiniteElement]] = []

# Index vectors
# Index vectors of optimization variables
self.ind_x = create_empty_list_matrix((opts.N_stages,))
self.ind_x_cont = create_empty_list_matrix((opts.N_stages,))
self.ind_v = create_empty_list_matrix((opts.N_stages,))
@@ -952,6 +967,9 @@ def __init__(self, opts: NosnocOpts, model: NosnocModel, ocp: Optional[NosnocOcp
self.ind_h = []
self.ind_v_global = []

# Index vectors within constraints g
self.ind_comp = create_empty_list_matrix((opts.N_stages,))

# setup parameters, lambda00 is added later:
sigma_p = SX.sym('sigma_p') # homotopy parameter
tau = SX.sym('tau') # homotopy parameter
@@ -965,8 +983,8 @@ def __init__(self, opts: NosnocOpts, model: NosnocModel, ocp: Optional[NosnocOcp
if opts.time_freezing:
t0 = model.t_fun(self.fe0.w[self.fe0.ind_x[-1]])

for k, stage in enumerate(self.stages):
Uk = self.w[self.ind_u[k]]
for ctrl_idx, stage in enumerate(self.stages):
Uk = self.w[self.ind_u[ctrl_idx]]
for _, fe in enumerate(stage):

# 1) Stewart Runge-Kutta discretization
@@ -980,14 +998,14 @@ def __init__(self, opts: NosnocOpts, model: NosnocModel, ocp: Optional[NosnocOcp

# 4) add cost and constraints from FE to problem
self.cost += fe.cost
self.add_constraint(fe.g, fe.lbg, fe.ubg)
self.add_fe_constraints(fe, ctrl_idx)

if opts.use_fesd and opts.equidistant_control_grid:
self.add_constraint(sum([fe.h() for fe in stage]) - h_ctrl_stage)

if opts.time_freezing and opts.equidistant_control_grid:
# TODO: make t0 dynamic (since now it needs to be 0!)
t_now = opts.terminal_time / opts.N_stages * (k + 1) + t0
t_now = opts.terminal_time / opts.N_stages * (ctrl_idx + 1) + t0
Xk_end = stage[-1].w[stage[-1].ind_x[-1]]
self.add_constraint(model.t_fun(Xk_end) - t_now,
[-opts.time_freezing_tolerance],
@@ -1242,6 +1260,15 @@ def initialize(self):
missing_indices = sorted(set(range(len(prob.w0))) - set(db_updated_indices))
# print(f"{missing_indices=}")

def _print_iter_stats(self, sigma_k,
complementarity_residual,
nlp_res,
cost_val,
cpu_time_nlp,
nlp_iter,
status):
print(f'{sigma_k:.1e} \t {complementarity_residual:.2e} \t {nlp_res:.2e}' +
f'\t {cost_val:.2e} \t {cpu_time_nlp:3f} \t {nlp_iter} \t {status}')

class NosnocSolver(NosnocSolverBase):
"""
@@ -1278,9 +1305,10 @@ def solve(self) -> dict:
w0 = prob.w0

w_all = [w0.copy()]
complementarity_stats = opts.max_iter_homotopy * [None]
cpu_time_nlp = opts.max_iter_homotopy * [None]
nlp_iter = opts.max_iter_homotopy * [None]
n_iter_polish = opts.max_iter_homotopy + 1
complementarity_stats = n_iter_polish * [None]
cpu_time_nlp = n_iter_polish * [None]
nlp_iter = n_iter_polish * [None]

if opts.print_level:
print('-------------------------------------------')
@@ -1323,9 +1351,8 @@ def solve(self) -> dict:
complementarity_stats[ii] = complementarity_residual

if opts.print_level:
print(
f'{sigma_k:.1e} \t {complementarity_residual:.2e} \t {nlp_res:.2e} \t {cost_val:.2e} \t {cpu_time_nlp[ii]:3f} \t {nlp_iter[ii]} \t {status}'
)
self._print_iter_stats(sigma_k, complementarity_residual, nlp_res,
cost_val, cpu_time_nlp[ii], nlp_iter[ii], status)
if status not in ['Solve_Succeeded', 'Solved_To_Acceptable_Level']:
print(f"Warning: IPOPT exited with status {status}")

@@ -1344,6 +1371,9 @@ def solve(self) -> dict:
min(opts.homotopy_update_slope * sigma_k,
sigma_k**opts.homotopy_update_exponent))

if opts.do_polishing_step:
w_opt, cpu_time_nlp[n_iter_polish-1], nlp_iter[n_iter_polish-1] = self.polish_solution(w_opt, lambda00, x0)

# collect results
results = get_results_from_primal_vector(prob, w_opt)

@@ -1370,3 +1400,70 @@ def solve(self) -> dict:
# print(f"w{i}: {prob.w[i]} = {w_opt[i]}")
return results

def polish_solution(self, w_guess, lambda00, x0):
opts = self.opts
if opts.pss_mode != PssMode.STEWART:
raise NotImplementedError()
prob = self.problem

eps_sigma = 1e1 * opts.comp_tol

ind_set = flatten(prob.ind_lam + prob.ind_lambda_n + prob.ind_lambda_p + prob.ind_alpha + prob.ind_theta + prob.ind_mu)
ind_dont_set = flatten(prob.ind_h + prob.ind_u + prob.ind_x + prob.ind_v_global + prob.ind_v)
# sanity check
ind_all = ind_set + ind_dont_set
for iw in range(len(w_guess)):
if iw not in ind_all:
raise Exception(f"w[{iw}] = {prob.w[iw]} not handled proprerly")

w_fix_zero = w_guess < eps_sigma
w_fix_zero[ind_dont_set] = False
ind_fix_zero = np.where(w_fix_zero)[0].tolist()

w_fix_one = np.abs(w_guess - 1.0) < eps_sigma
w_fix_one[ind_dont_set] = False
ind_fix_one = np.where(w_fix_one)[0].tolist()

lbw = prob.lbw.copy()
ubw = prob.ubw.copy()
lbw[ind_fix_zero] = 0.0
ubw[ind_fix_zero] = 0.0
lbw[ind_fix_one] = 1.0
ubw[ind_fix_one] = 1.0

# fix some variables
if opts.print_level:
print(f"polishing step: setting {len(ind_fix_zero)} variables to 0.0, {len(ind_fix_one)} to 1.0.")
for i_ctrl in range(opts.N_stages):
for i_fe in range(opts.Nfe_list[i_ctrl]):
w_guess[prob.ind_theta[i_ctrl][i_fe][:]]

sigma_k, tau_val = 0.0, 0.0
p_val = np.concatenate((prob.model.p_val_ctrl_stages.flatten(), np.array([sigma_k, tau_val]), lambda00, x0))

# solve NLP
t = time.time()
sol = self.solver(x0=w_guess,
lbg=prob.lbg,
ubg=prob.ubg,
lbx=lbw,
ubx=ubw,
p=p_val)
cpu_time_nlp = time.time() - t

# print and process solution
solver_stats = self.solver.stats()
status = solver_stats['return_status']
nlp_iter = solver_stats['iter_count']
nlp_res = norm_inf(sol['g']).full()[0][0]
cost_val = norm_inf(sol['f']).full()[0][0]
w_opt = sol['x'].full().flatten()

complementarity_residual = prob.comp_res(w_opt, p_val).full()[0][0]
if opts.print_level:
self._print_iter_stats(sigma_k, complementarity_residual, nlp_res,
cost_val, cpu_time_nlp, nlp_iter, status)
if status not in ['Solve_Succeeded', 'Solved_To_Acceptable_Level']:
print(f"Warning: IPOPT exited with status {status}")

return w_opt, cpu_time_nlp, nlp_iter
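
In words, polish_solution takes the homotopy solution, fixes every algebraic variable that has essentially converged to 0 or 1 by collapsing its bounds, and re-solves the NLP once with sigma = tau = 0. A simplified standalone sketch of the thresholding with made-up numbers (the real code works on index lists and never touches h, u, x, v or v_global):

import numpy as np

comp_tol = 1e-4
eps_sigma = 1e1 * comp_tol

w_guess = np.array([0.99999, 3e-6, 0.2, 1e-7, 1.0000002])
ind_dont_set = [2]  # e.g. a state entry: never fixed

w_fix_zero = w_guess < eps_sigma               # driven to ~0 by the homotopy
w_fix_zero[ind_dont_set] = False
w_fix_one = np.abs(w_guess - 1.0) < eps_sigma  # driven to ~1
w_fix_one[ind_dont_set] = False

lbw = np.full_like(w_guess, -np.inf)
ubw = np.full_like(w_guess, np.inf)
lbw[w_fix_zero] = 0.0
ubw[w_fix_zero] = 0.0
lbw[w_fix_one] = 1.0
ubw[w_fix_one] = 1.0
print(lbw, ubw)  # fixed entries now have equal lower and upper bounds
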
3 changes: 3 additions & 0 deletions nosnoc/nosnoc_opts.py
@@ -60,6 +60,9 @@ class NosnocOpts:
step_equilibration_sigma: float = 0.1
rho_h: float = 1.0

# polishing step
do_polishing_step = False

# OCP only
N_stages: int = 1
equidistant_control_grid: bool = True # NOTE: tested in test_ocp
4 changes: 2 additions & 2 deletions nosnoc/plot_utils.py
@@ -24,7 +24,7 @@ def latexify_plot():
matplotlib.rcParams.update(params)


def plot_timings(timings, latexify=True, title='', figure_filename=''):
def plot_timings(timings: np.ndarray, latexify=True, title='', figure_filename=''):
# latexify plot
if latexify:
latexify_plot()
@@ -43,7 +43,7 @@ def plot_timings(timings, latexify=True, title='', figure_filename=''):
mean_cpu = np.mean(np.sum(timings, axis=1))
plt.plot(x_range,
mean_cpu * np.ones(2,),
label=f'mean {mean_cpu:.3f}',
label=f'mean/step {mean_cpu:.3f}',
linestyle=':',
color='black')
plt.ylabel('CPU time [s]')
15 changes: 15 additions & 0 deletions test/simple_sim_tests.py
@@ -180,6 +180,21 @@ def test_initializations(self):
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")

def test_polishing(self):
model = get_simplest_model_switch()

opts = get_default_options()
opts.print_level = 1
opts.do_polishing_step = True
opts.comp_tol = 1e-3
try:
test_opts(opts, model=model)
except:
raise Exception(f"Test failed with setting:\n {opts=} \n{model=}")


if __name__ == "__main__":
unittest.main()
# uncomment to run single test locally
# simple_test = SimpleTests()
# simple_test.test_polishing()
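
A possible way to run only the new test locally, assuming the working directory is the test/ folder so that simple_sim_tests is importable:

import unittest
from simple_sim_tests import SimpleTests

suite = unittest.TestSuite()
suite.addTest(SimpleTests("test_polishing"))
unittest.TextTestRunner(verbosity=2).run(suite)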
