Grammar / spelling for incremental chapter

This commit is contained in:
Jip J. Dekker 2021-04-27 10:08:54 +10:00
parent 1ab2442c8d
commit ce5c3c8e58
No known key found for this signature in database
GPG Key ID: 517DF4A00618C9C3
14 changed files with 206 additions and 174 deletions

View File

@ -1,9 +1,10 @@
\newacronym[see={[Glossary:]{gls-cbls}}]{cbls}{CBLS\glsadd{gls-cbls}}{Constraint-Based Local Search} \newacronym[see={[Glossary:]{gls-cbls}}]{cbls}{CBLS\glsadd{gls-cbls}}{Constraint-Based Local Search}
\newacronym[see={[Glossary:]{gls-clp}}]{clp}{CLP\glsadd{gls-clp}}{Constraint Logic Programming} \newacronym[see={[Glossary:]{gls-clp}}]{clp}{CLP\glsadd{gls-clp}}{Constraint Logic Programming}
\newacronym[see={[Glossary:]{gls-cp}}]{cp}{CP\glsadd{gls-cp}}{Constraint Programming} \newacronym[see={[Glossary:]{gls-cp}}]{cp}{CP\glsadd{gls-cp}}{Constraint Programming}
\newacronym[see={[Glossary:]{gls-cse}}]{cse}{CSE\glsadd{gls-cse}}{Common Subexpression Elimination} \newacronym[see={[Glossary:]{gls-cse}}]{cse}{CSE\glsadd{gls-cse}}{Common Sub-expression Elimination}
\newacronym[see={[Glossary:]{gls-csp}}]{csp}{CSP\glsadd{gls-csp}}{Constraint Satisfaction Problem} \newacronym[see={[Glossary:]{gls-csp}}]{csp}{CSP\glsadd{gls-csp}}{Constraint Satisfaction Problem}
\newacronym[see={[Glossary:]{gls-cop}}]{cop}{COP\glsadd{gls-cop}}{Constraint Optimisation Problem} \newacronym[see={[Glossary:]{gls-cop}}]{cop}{COP\glsadd{gls-cop}}{Constraint Optimisation Problem}
\newacronym[see={[Glossary:]{gls-gbac}}]{gbac}{GBAC\glsadd{gls-gbac}}{Generalised Balanced Academic Curriculum}
\newacronym[see={[Glossary:]{gls-lcg}}]{lcg}{LCG\glsadd{gls-lcg}}{Lazy Clause Generation} \newacronym[see={[Glossary:]{gls-lcg}}]{lcg}{LCG\glsadd{gls-lcg}}{Lazy Clause Generation}
\newacronym[see={[Glossary:]{gls-lns}}]{lns}{LNS\glsadd{gls-lns}}{Large Neighbourhood Search} \newacronym[see={[Glossary:]{gls-lns}}]{lns}{LNS\glsadd{gls-lns}}{Large Neighbourhood Search}
\newacronym[see={[Glossary:]{gls-mip}}]{mip}{MIP\glsadd{gls-mip}}{Mixed Integer Programming} \newacronym[see={[Glossary:]{gls-mip}}]{mip}{MIP\glsadd{gls-mip}}{Mixed Integer Programming}

View File

@ -88,6 +88,12 @@
name={Gecode}, name={Gecode},
description={}, description={},
} }
\newglossaryentry{gls-gbac}{
name={Generalised Balanced Academic Curriculum},
description={},
}
\newglossaryentry{generator}{ \newglossaryentry{generator}{
name={generator}, name={generator},
description={}, description={},

View File

@ -1,4 +0,0 @@
c @$\mapsto$@ true @$\sep$@ []
x @$\mapsto$@ mkvar(-10..10) @$\sep$@ []
y @$\mapsto$@ mkvar(-10..10) @$\sep$@ []
true @$\mapsto$@ true @$\sep$@ []

View File

@ -1,14 +0,0 @@
% Posted c
true @$\lhd$@ -[c]
% Propagated c = true
c @$\mapsfrom$@ mkvar(0,1) @$\sep$@ []
true @$\lhd$@ +[c]
% Simplified bool_or(b1, true) = true
b2 @$\mapsfrom$@ bool_or(b1, c) @$\sep$@ []
true @$\lhd$@ +[b2]
% b1 became unused...
b1 @$\mapsfrom$@ int_gt(t, y) @$\sep$@ []
% causing t, then b0 and finally z to become unused
t @$\mapsfrom$@ z @$\sep$@ [b0]
b0 @$\mapsfrom$@ int_abs(x, z) @$\sep$@ []
z @$\mapsfrom$@ mkvar(-infinity,infinity) @$\sep$@ []

View File

@ -1 +0,0 @@
predicate basic_lns(var bool: nbh) = (status()!=START -> nbh);

View File

@ -1,10 +1,6 @@
% TODO: We probably need to unify these (at least for the thesis)
predicate random_allocation(array[int] of int: sol) =
forall(i in courses) (
(uniform(0,99) < 80) -> (period_of[i] == sol[i])
);
predicate free_period() = predicate free_period() =
let { int: period = uniform(periods) } in let {
forall(i in courses where sol(period_of[i]) != period) int: period = uniform(periods)
(period_of[i] = sol(period_of[i])); } in forall(i in courses where sol(period_of[i]) != period) (
period_of[i] = sol(period_of[i])
);

View File

@ -1 +0,0 @@
predicate hill_climbing() = status() != START -> _objective < sol(_objective);

View File

@ -1,2 +0,0 @@
constraint x < 10;
constraint y < x;

View File

@ -1,25 +0,0 @@
% Lexicographic minimisation as a restart-based meta-search:
% minimise o[1] to optimality, then o[2] subject to the optimal o[1], etc.
% Uses the restart builtins status()/sol()/lastval()/complete().
predicate lex_minimize(array[int] of var int: o) =
  let {
    var index_set(o): stage; % index of the objective currently being minimised
    array[index_set(o)] of var int: best; % best value found for each finished stage
  } in if status() = START then
    stage = min(index_set(o))
  else
    if status() = UNSAT then
      % current stage proven optimal: advance to the next objective
      if lastval(stage) < max(index_set(o)) then
        stage = lastval(stage) + 1
      else
        complete() % we are finished
      endif
    else
      stage = lastval(stage)
      /\ best[stage] = sol(_objective)
    endif
    % fix all earlier objectives to their proven-optimal values
    /\ for(i in min(index_set(o))..stage-1) (
      o[i] = lastval(best[i])
    )
    % after a solution, force strict improvement of the current objective
    /\ if status() = SAT then
      o[stage] < sol(_objective)
    endif
    /\ _objective = o[stage]
  endif;

View File

@ -1,19 +0,0 @@
% Pareto-front enumeration for two objectives via restarts: record each
% solution, then require any new solution to improve on at least one
% objective w.r.t. every recorded solution.
predicate pareto_optimal(var int: obj1, var int: obj2) =
  let {
    int: ms = 1000; % maximum number of solutions we can store
    var 0..ms: nsol; % number of solutions found so far
    set of int: SOL = 1..ms;
    array[SOL] of var lb(obj1)..ub(obj1): s1; % recorded obj1 values
    array[SOL] of var lb(obj2)..ub(obj2): s2; % recorded obj2 values
  } in if status() = START then
    nsol = 0
  elseif status() = UNSAT then
    complete() % we are finished!
  else % a solution was found in the previous restart: record it
    nsol = sol(nsol) + 1 /\
    s1[nsol] = sol(obj1) /\
    s2[nsol] = sol(obj2)
  endif
  % exclude any solution dominated by a recorded solution
  /\ for(i in 1..nsol) (
    obj1 < lastval(s1[i]) \/ obj2 < lastval(s2[i])
  );

View File

@ -1,9 +0,0 @@
% Simulated annealing as a restart-based meta-search: the acceptance bound
% on the objective loosens with temperature, which decays geometrically
% each restart. Uses lastval (consistent with the other predicates here;
% the original wrote last_val, which is not the builtin used elsewhere).
predicate simulated_annealing(float: init_temp, float: cooling_rate) =
  let {
    var float: temp; % current temperature
  } in if status() = START then
    temp = init_temp
  else
    temp = lastval(temp) * (1 - cooling_rate) % cool down
    /\ _objective < sol(_objective) - ceil(log(uniform(0.0, 1.0)) * temp)
  endif;

View File

@ -1 +0,0 @@
b3 -> x[1] = sol(x[1])

View File

@ -1 +0,0 @@
(status() != START /\ uniform(0.0,1.0) > 0.2) -> x[1] = sol(x[1])

View File

@ -270,7 +270,9 @@ parametric over the neighbourhoods they should apply. For example, since
strategy \mzninline{basic_lns} that applies a neighbourhood only if the current strategy \mzninline{basic_lns} that applies a neighbourhood only if the current
status is not \mzninline{START}: status is not \mzninline{START}:
\mznfile{assets/mzn/6_basic_lns.mzn} \begin{mzn}
predicate basic_lns(var bool: nbh) = (status()!=START -> nbh);
\end{mzn}
In order to use this predicate with the \mzninline{on_restart} annotation, we In order to use this predicate with the \mzninline{on_restart} annotation, we
cannot simply pass \mzninline{basic_lns(uniform_neighbourhood(x, 0.2))}. Calling \mzninline{uniform_neighbourhood} like that would result in a cannot simply pass \mzninline{basic_lns(uniform_neighbourhood(x, 0.2))}. Calling \mzninline{uniform_neighbourhood} like that would result in a
@ -337,7 +339,9 @@ With \mzninline{restart_without_objective}, the restart predicate is now
responsible for constraining the objective function. Note that a simple responsible for constraining the objective function. Note that a simple
hill-climbing (for minimisation) can still be defined easily in this context as: hill-climbing (for minimisation) can still be defined easily in this context as:
\mznfile{assets/mzn/6_hill_climbing.mzn} \begin{mzn}
predicate hill_climbing() = status() != START -> _objective < sol(_objective);
\end{mzn}
It takes advantage of the fact that the declared objective function is available It takes advantage of the fact that the declared objective function is available
through the built-in variable \mzninline{_objective}. A more interesting example through the built-in variable \mzninline{_objective}. A more interesting example
@ -350,7 +354,17 @@ solution needs to improve until we are just looking for any improvements. This
thereby reaching the optimal solution quicker. This strategy is also easy to thereby reaching the optimal solution quicker. This strategy is also easy to
express using our restart-based modelling: express using our restart-based modelling:
\mznfile{assets/mzn/6_simulated_annealing.mzn} \begin{mzn}
predicate simulated_annealing(float: init_temp, float: cooling_rate) =
let {
var float: temp;
} in if status() = START then
temp = init_temp
else
temp = lastval(temp) * (1 - cooling_rate) % cool down
/\ _objective < sol(_objective) - ceil(log(uniform(0.0, 1.0)) * temp)
endif;
\end{mzn}
Using the same methods it is also possible to describe optimisation strategies Using the same methods it is also possible to describe optimisation strategies
with multiple objectives. An example of such a strategy is lexicographic search. with multiple objectives. An example of such a strategy is lexicographic search.
@ -361,7 +375,33 @@ same value for the first objective and improve the second objective, or have the
same value for the first two objectives and improve the third objective, and so same value for the first two objectives and improve the third objective, and so
on. We can model this strategy using restarts as such: on. We can model this strategy using restarts as such:
\mznfile{assets/mzn/6_lex_minimize.mzn} \begin{mzn}
predicate lex_minimize(array[int] of var int: o) =
let {
var index_set(o): stage;
array[index_set(o)] of var int: best;
} in if status() = START then
stage = min(index_set(o))
else
if status() = UNSAT then
if lastval(stage) < max(index_set(o)) then
stage = lastval(stage) + 1
else
complete() % we are finished
endif
else
stage = lastval(stage)
/\ best[stage] = sol(_objective)
endif
/\ for(i in min(index_set(o))..stage-1) (
o[i] = lastval(best[i])
)
/\ if status() = SAT then
o[stage] < sol(_objective)
endif
/\ _objective = o[stage]
endif;
\end{mzn}
The lexicographic objective changes the objective at each stage in the The lexicographic objective changes the objective at each stage in the
evaluation. Initially the stage is 1. Otherwise, if we have an unsatisfiable
@ -378,7 +418,27 @@ problem. In these cases we might instead look for a number of diverse solutions
and allow the user to pick the most acceptable options. The following fragment and allow the user to pick the most acceptable options. The following fragment
shows a \gls{meta-search} for the Pareto optimality of a pair of objectives: shows a \gls{meta-search} for the Pareto optimality of a pair of objectives:
\mznfile{assets/mzn/6_pareto_optimal.mzn} \begin{mzn}
predicate pareto_optimal(var int: obj1, var int: obj2) =
let {
int: ms = 1000; % max solutions
var 0..ms: nsol; % number of solutions
set of int: SOL = 1..ms;
array[SOL] of var lb(obj1)..ub(obj1): s1;
array[SOL] of var lb(obj2)..ub(obj2): s2;
} in if status() = START then
nsol = 0
elseif status() = UNSAT then
complete() % we are finished!
else
nsol = sol(nsol) + 1 /\
s1[nsol] = sol(obj1) /\
s2[nsol] = sol(obj2)
endif
/\ for(i in 1..nsol) (
obj1 < lastval(s1[i]) \/ obj2 < lastval(s2[i])
);
\end{mzn}
In this implementation we keep track of the number of solutions found so far In this implementation we keep track of the number of solutions found so far
using \mzninline{nsol}. There is a maximum number we can handle using \mzninline{nsol}. There is a maximum number we can handle
@ -568,7 +628,9 @@ For example, consider the model from \cref{lst:6-basic-complete} again.
The second block of code (\lrefrange{line:6:x1:start}{line:6:x1:end}) represents The second block of code (\lrefrange{line:6:x1:start}{line:6:x1:end}) represents
the decomposition of the expression the decomposition of the expression
\mznfile{assets/mzn/6_transformed_partial.mzn} \begin{mzn}
(status() != START /\ uniform(0.0,1.0) > 0.2) -> x[1] = sol(x[1])
\end{mzn}
which is the result of merging the implication from the \mzninline{basic_lns} which is the result of merging the implication from the \mzninline{basic_lns}
predicate with the \mzninline{if} expression from predicate with the \mzninline{if} expression from
@ -580,7 +642,9 @@ is constrained to be true if-and-only-if the random number is greater than
in the previous solution. Finally, the half-reified constraint in in the previous solution. Finally, the half-reified constraint in
\lref{line:6:x1:end} implements \lref{line:6:x1:end} implements
\mznfile{assets/mzn/6_transformed_half_reif.mzn} \begin{mzn}
b3 -> x[1] = sol(x[1])
\end{mzn}
We have omitted the similar code generated for \mzninline{x[2]} to We have omitted the similar code generated for \mzninline{x[2]} to
\mzninline{x[n]}. Note that the \flatzinc\ shown here has been simplified for \mzninline{x[n]}. Note that the \flatzinc\ shown here has been simplified for
@ -631,7 +695,10 @@ propagation, \gls{cse} and other simplifications.
\begin{example}\label{ex:6-incremental} \begin{example}\label{ex:6-incremental}
Consider the following \minizinc\ fragment: Consider the following \minizinc\ fragment:
\mznfile{assets/mzn/6_incremental.mzn} \begin{mzn}
constraint x < 10;
constraint y < x;
\end{mzn}
After evaluating the first constraint, the domain of \mzninline{x} is changed to After evaluating the first constraint, the domain of \mzninline{x} is changed to
be less than 10. Evaluating the second constraint causes the domain of be less than 10. Evaluating the second constraint causes the domain of
@ -668,7 +735,12 @@ trailing.
\begin{example}\label{ex:6-trail} \begin{example}\label{ex:6-trail}
Let us look again at the resulting \nanozinc\ code from \cref{ex:4-absreif}: Let us look again at the resulting \nanozinc\ code from \cref{ex:4-absreif}:
% \mznfile{assets/mzn/6_abs_reif_result.mzn} \begin{nzn}
c @$\mapsto$@ true @$\sep$@ []
x @$\mapsto$@ mkvar(-10..10) @$\sep$@ []
y @$\mapsto$@ mkvar(-10..10) @$\sep$@ []
true @$\mapsto$@ true @$\sep$@ []
\end{nzn}
Assume that we added a choice point before posting the constraint Assume that we added a choice point before posting the constraint
\mzninline{c}. Then the trail stores the \emph{inverse} of all modifications \mzninline{c}. Then the trail stores the \emph{inverse} of all modifications
@ -676,7 +748,22 @@ trailing.
\(\mapsfrom\) denotes restoring an identifier, and \(\lhd\) \texttt{+}/\texttt{-} \(\mapsfrom\) denotes restoring an identifier, and \(\lhd\) \texttt{+}/\texttt{-}
respectively denote attaching and detaching constraints): respectively denote attaching and detaching constraints):
% \mznfile{assets/mzn/6_abs_reif_trail.mzn} \begin{nzn}
% Posted c
true @$\lhd$@ -[c]
% Propagated c = true
c @$\mapsfrom$@ mkvar(0,1) @$\sep$@ []
true @$\lhd$@ +[c]
% Simplified bool_or(b1, true) = true
b2 @$\mapsfrom$@ bool_or(b1, c) @$\sep$@ []
true @$\lhd$@ +[b2]
% b1 became unused...
b1 @$\mapsfrom$@ int_gt(t, y) @$\sep$@ []
% causing t, then b0 and finally z to become unused
t @$\mapsfrom$@ z @$\sep$@ [b0]
b0 @$\mapsfrom$@ int_abs(x, z) @$\sep$@ []
z @$\mapsfrom$@ mkvar(-infinity,infinity) @$\sep$@ []
\end{nzn}
To reconstruct the \nanozinc\ program at the choice point, we simply apply To reconstruct the \nanozinc\ program at the choice point, we simply apply
the changes recorded in the trail, in reverse order. the changes recorded in the trail, in reverse order.
@ -710,8 +797,8 @@ therefore support solvers with different levels of an incremental interface:
\section{Experiments}\label{sec:6-experiments} \section{Experiments}\label{sec:6-experiments}
We have created a prototype implementation of the architecture presented in the We have created a prototype implementation of the architecture presented in the
preceding sections. It consists of a compiler from \minizinc\ to \microzinc, and preceding sections. It consists of a compiler from \minizinc\ to \microzinc{}, and
an incremental \microzinc\ interpreter producing \nanozinc. The system supports an incremental \microzinc\ interpreter producing \nanozinc{}. The system supports
a significant subset of the full \minizinc\ language; notable features that are a significant subset of the full \minizinc\ language; notable features that are
missing are support for set and float variables, option types, and compilation missing are support for set and float variables, option types, and compilation
of model output expressions and annotations. We will release our implementation of model output expressions and annotations. We will release our implementation
@ -732,29 +819,40 @@ offers, we present a runtime evaluation of two meta-heuristics implemented using
our prototype interpreter. For both meta-heuristics, we evaluate the performance our prototype interpreter. For both meta-heuristics, we evaluate the performance
of fully re-evaluating and solving the instance from scratch, compared to the of fully re-evaluating and solving the instance from scratch, compared to the
fully incremental evaluation and solving. The solving in both tests is performed fully incremental evaluation and solving. The solving in both tests is performed
by the Gecode solver, version 6.1.2, connected using the fully incremental by the \gls{gecode} \gls{solver}, version 6.1.2, connected using the fully
API\@. incremental API\@.
\paragraph{GBAC} \paragraph{\glsentrytext{gbac}} %
The Generalised Balanced Academic Curriculum (GBAC) problem The \glsaccesslong{gbac} problem \autocite{chiarandini-2012-gbac} consists of
\autocite{chiarandini-2012-gbac} is comprised of scheduling the courses in a scheduling the courses in a curriculum subject to load limits on the number of
curriculum subject to load limits on the number of courses for each period, courses for each period, prerequisites for courses, and preferences of teaching
prerequisites for courses, and preferences of teaching periods by teaching periods by teaching staff. It has been shown~\autocite{dekker-2018-mzn-lns} that
staff. It has been shown~\autocite{dekker-2018-mzn-lns} that Large Neighbourhood Large Neighbourhood Search (\gls{lns}) is a useful meta-heuristic for quickly
Search (\gls{lns}) is a useful meta-heuristic for quickly finding high quality finding high quality solutions to this problem. In \gls{lns}, once an initial
solutions to this problem. In \gls{lns}, once an initial (sub-optimal) solution is (sub-optimal) solution is found, constraints are added to the problem that
found, constraints are added to the problem that restrict the search space to a restrict the search space to a \textit{neighbourhood} of the previous solution.
\textit{neighbourhood} of the previous solution. After this neighbourhood has After this neighbourhood has been explored, the constraints are removed, and
been explored, the constraints are removed, and constraints for a different constraints for a different neighbourhood are added. This is repeated until a
neighbourhood are added. This is repeated until a sufficiently high solution sufficiently high solution quality has been reached.
quality has been reached.
We can model a neighbourhood in \minizinc\ as a predicate that, given the values We can model a neighbourhood in \minizinc\ as a predicate that, given the values
of the variables in the previous solution, posts constraints to restrict the of the variables in the previous solution, posts constraints to restrict the
search. The following predicate defines a suitable neighbourhood for the GBAC search. The following predicate defines a suitable neighbourhood for the
problem: \gls{gbac} problem:
\mznfile{assets/mzn/6_gbac_neighbourhood.mzn} \begin{mzn}
predicate random_allocation(array[int] of int: sol) =
forall(i in courses) (
(uniform(0,99) < 80) -> (period_of[i] == sol[i])
);
predicate free_period() =
let {
int: period = uniform(periods)
} in forall(i in courses where sol(period_of[i]) != period) (
period_of[i] = sol(period_of[i])
);
\end{mzn}
When this predicate is called with a previous solution \mzninline{sol}, then When this predicate is called with a previous solution \mzninline{sol}, then
every \mzninline{period_of} variable has an \(80\%\) chance to be fixed to its every \mzninline{period_of} variable has an \(80\%\) chance to be fixed to its
@ -762,14 +860,14 @@ value in the previous solution. With the remaining \(20\%\), the variable is
unconstrained and will be part of the search for a better solution. unconstrained and will be part of the search for a better solution.
In a non-incremental architecture, we would re-flatten the original model plus In a non-incremental architecture, we would re-flatten the original model plus
the neighbourhood constraint for each iteration of the \gls{lns}. In the incremental the neighbourhood constraint for each iteration of the \gls{lns}. In the
\nanozinc\ architecture, we can easily express \gls{lns} as a repeated addition and incremental \nanozinc\ architecture, we can easily express \gls{lns} as a
retraction of the neighbourhood constraints. We implemented both approaches repeated addition and retraction of the neighbourhood constraints. We
using the \nanozinc\ prototype, with the results shown in \Cref{fig:6-gbac}. The implemented both approaches using the \nanozinc\ prototype, with the results
incremental \nanozinc\ translation shows a 12x speedup compared to re-compiling shown in \Cref{fig:6-gbac}. The incremental \nanozinc\ translation shows a 12x
the model from scratch in each iteration. For this particular problem, speedup compared to re-compiling the model from scratch in each iteration. For
incrementality in the target solver (Gecode) does not lead to a significant this particular problem, incrementally instructing the target solver
reduction in runtime. (\gls{gecode}) does not lead to a significant reduction in runtime.
\begin{figure} \begin{figure}
\centering \centering
@ -791,16 +889,21 @@ important than the second. The problem therefore has a lexicographical
objective: a solution is better if it requires a strictly shorter exposure time, objective: a solution is better if it requires a strictly shorter exposure time,
or the same exposure time but a lower number of ``shots''. or the same exposure time but a lower number of ``shots''.
\minizinc\ solvers do not support lexicographical objectives directly, but we \minizinc\ \glspl{solver} do not support lexicographical objectives directly,
can instead repeatedly solve a model instance and add a constraint to ensure but we can instead repeatedly solve a model instance and add a constraint to
that the lexicographical objective improves. When the solver proves that no ensure that the lexicographical objective improves. When the solver proves that
better solution can be found, the last solution is known to be optimal. Given no better solution can be found, the last solution is known to be optimal. Given
two variables \mzninline{exposure} and \mzninline{shots}, once we have found a two variables \mzninline{exposure} and \mzninline{shots}, once we have found a
solution with \mzninline{exposure=e} and \mzninline{shots=s}, we can add the solution with \mzninline{exposure=e} and \mzninline{shots=s}, we can add the
constraint \mzninline{exposure < e \/ (exposure = e /\ shots < s)}, expressing constraint
the lexicographic ordering, and continue the search. Since each added
lexicographic constraint is strictly stronger than the previous one, we never \begin{mzn}
have to retract previous constraints. constraint exposure < e \/ (exposure = e /\ shots < s)
\end{mzn}
expressing the lexicographic ordering, and continue the search. Since each
added lexicographic constraint is strictly stronger than the previous one, we
never have to retract previous constraints.
\begin{figure} \begin{figure}
\centering \centering
@ -836,26 +939,28 @@ specifications can (a) be effective and (b) incur only a small overhead compared
to a dedicated implementation of the neighbourhoods. to a dedicated implementation of the neighbourhoods.
To measure the overhead, we implemented our new approach in To measure the overhead, we implemented our new approach in
Gecode~\autocite{gecode-2021-gecode}. The resulting solver (\gecodeMzn\ in the tables \gls{gecode}~\autocite{gecode-2021-gecode}. The resulting solver (\gecodeMzn{} in
below) has been instrumented to also output the domains of all model variables the tables below) has been instrumented to also output the domains of all model
after propagating the new special constraints. We implemented another extension variables after propagating the new special constraints. We implemented another
to Gecode (\gecodeReplay) that simply reads the stream of variable domains for extension to \gls{gecode} (\gecodeReplay) that simply reads the stream of variable
each restart, essentially replaying the \gls{lns} of \gecodeMzn\ without incurring any domains for each restart, essentially replaying the \gls{lns} of \gecodeMzn\
overhead for evaluating the neighbourhoods or handling the additional variables without incurring any overhead for evaluating the neighbourhoods or handling the
and constraints. Note that this is a conservative estimate of the overhead: additional variables and constraints. Note that this is a conservative estimate
\gecodeReplay\ has to perform \emph{less} work than any real \gls{lns} implementation. of the overhead: \gecodeReplay\ has to perform \emph{less} work than any real
\gls{lns} implementation.
In addition, we also present benchmark results for the standard release of In addition, we also present benchmark results for the standard release of
Gecode 6.0 without \gls{lns} (\gecodeStd); as well as \chuffedStd, the development \gls{gecode} 6.0 without \gls{lns} (\gecodeStd); as well as \chuffedStd{}, the
version of Chuffed; and \chuffedMzn, Chuffed performing \gls{lns} with FlatZinc development version of Chuffed; and \chuffedMzn{}, Chuffed performing \gls{lns}
neighbourhoods. These experiments illustrate that the \gls{lns} implementations indeed with \flatzinc\ neighbourhoods. These experiments illustrate that the \gls{lns}
perform well compared to the standard solvers.\footnote{Our implementations are implementations indeed perform well compared to the standard
available at solvers.\footnote{Our implementations are available at
\texttt{\justify{}https://github.com/Dekker1/\{libminizinc,gecode,chuffed\}} on branches \texttt{\justify{}https://github.com/Dekker1/\{libminizinc,gecode,chuffed\}} on
containing the keyword \texttt{on\_restart}.} All experiments were run on a branches containing the keyword \texttt{on\_restart}.} All experiments were run
single core of an Intel Core i5 CPU @ 3.4 GHz with 4 cores and 16 GB RAM running on a single core of an Intel Core i5 CPU @ 3.4 GHz with 4 cores and 16 GB RAM
MacOS High Sierra. \gls{lns} benchmarks are repeated with 10 different random seeds running macOS High Sierra. \gls{lns} benchmarks are repeated with 10 different
and the average is shown. The overall timeout for each run is 120 seconds. random seeds and the average is shown. The overall timeout for each run is 120
seconds.
We ran experiments for three models from the MiniZinc We ran experiments for three models from the MiniZinc
challenge~\autocite{stuckey-2010-challenge, stuckey-2014-challenge} (\texttt{gbac}, challenge~\autocite{stuckey-2010-challenge, stuckey-2014-challenge} (\texttt{gbac},
@ -869,7 +974,7 @@ percentage (\%), which is shown as the superscript on \(\minobj\) when running
\gls{lns}. \gls{lns}.
%and the average number of nodes per one second (\nodesec). %and the average number of nodes per one second (\nodesec).
The underlying search strategy used is the fixed search strategy defined in the The underlying search strategy used is the fixed search strategy defined in the
model. For each model we use a round robin evaluation (\cref{lst:6-round-robin}) model. For each model we use a round-robin evaluation (\cref{lst:6-round-robin})
of two neighbourhoods: a neighbourhood that destroys \(20\%\) of the main decision of two neighbourhoods: a neighbourhood that destroys \(20\%\) of the main decision
variables (\cref{lst:6-lns-minisearch-pred}) and a structured neighbourhood for variables (\cref{lst:6-lns-minisearch-pred}) and a structured neighbourhood for
the model (described below). The restart strategy is the model (described below). The restart strategy is
@ -884,14 +989,14 @@ the model (described below). The restart strategy is
courses in a period.} courses in a period.}
\end{listing} \end{listing}
The Generalised Balanced Academic Curriculum problem comprises courses having a The \gls{gbac} problem comprises courses having a specified number of credits
specified number of credits and lasting a certain number of periods, load limits and lasting a certain number of periods, load limits of courses for each period,
of courses for each period, prerequisites for courses, and preferences of prerequisites for courses, and preferences of teaching periods for professors. A
teaching periods for professors. A detailed description of the problem is given detailed description of the problem is given
in~\autocite{chiarandini-2012-gbac}. The main decisions are to assign courses to in~\autocite{chiarandini-2012-gbac}. The main decisions are to assign courses to
periods, which is done via the variables \mzninline{period_of} in the model. periods, which is done via the variables \mzninline{period_of} in the model.
\cref{lst:6-free-period} shows the neighbourhood chosen, which randomly picks one \cref{lst:6-free-period} shows the neighbourhood chosen, which randomly picks
period and frees all courses that are assigned to it. one period and frees all courses that are assigned to it.
\begin{table} \begin{table}
\centering \centering
@ -900,10 +1005,11 @@ period and frees all courses that are assigned to it.
\end{table} \end{table}
The results for \texttt{gbac} in \cref{tab:6-gbac} show that the overhead The results for \texttt{gbac} in \cref{tab:6-gbac} show that the overhead
introduced by \gecodeMzn\ w.r.t.~\gecodeReplay\ is quite low, and both their introduced by \gecodeMzn\ w.r.t. \gecodeReplay{} is quite low, and both their
results are much better than the baseline \gecodeStd. Since learning is not very results are much better than the baseline \gecodeStd{}. Since learning is not
effective for \texttt{gbac}, the performance of \chuffedStd\ is inferior to very effective for \gls{gbac}, the performance of \chuffedStd\ is inferior to
Gecode. However, \gls{lns} again significantly improves over standard Chuffed. \gls{gecode}. However, \gls{lns} again significantly improves over standard
Chuffed.
\subsubsection{\texttt{steelmillslab}} \subsubsection{\texttt{steelmillslab}}
@ -918,10 +1024,10 @@ so that all orders are fulfilled while minimising the wastage. The steel mill
only produces slabs of certain sizes, and orders have both a size and a colour. only produces slabs of certain sizes, and orders have both a size and a colour.
We have to assign orders to slabs, with at most two different colours on each We have to assign orders to slabs, with at most two different colours on each
slab. The model uses the variables \mzninline{assign} for deciding which order slab. The model uses the variables \mzninline{assign} for deciding which order
is assigned to which slab. \cref{lst:6-free-bin} shows a structured neighbourhood is assigned to which slab. \cref{lst:6-free-bin} shows a structured
that randomly selects a slab and frees the orders assigned to it in the neighbourhood that randomly selects a slab and frees the orders assigned to it
incumbent solution. These orders can then be freely reassigned to any other in the incumbent solution. These orders can then be freely reassigned to any
slab. other slab.
\begin{table} \begin{table}
\centering \centering
@ -935,7 +1041,7 @@ optimal solutions. As \cref{tab:6-steelmillslab} shows, \gecodeMzn\ is again
slightly slower than \gecodeReplay\ (the integral is slightly larger). While slightly slower than \gecodeReplay\ (the integral is slightly larger). While
\chuffedStd\ significantly outperforms \gecodeStd\ on this problem, once we use \chuffedStd\ significantly outperforms \gecodeStd\ on this problem, once we use
\gls{lns}, the learning in \chuffedMzn\ is not advantageous compared to \gls{lns}, the learning in \chuffedMzn\ is not advantageous compared to
\gecodeMzn\ or \gecodeReplay. Still, \chuffedMzn\ outperforms \chuffedStd\ by \gecodeMzn\ or \gecodeReplay{}. Still, \chuffedMzn\ outperforms \chuffedStd\ by
always finding an optimal solution. always finding an optimal solution.
% RCPSP/wet % RCPSP/wet
@ -960,15 +1066,15 @@ that time interval, which allows a reshuffling of these tasks.
\begin{table}[b] \begin{table}[b]
\centering \centering
\input{assets/table/6_rcpsp-wet} \input{assets/table/6_rcpsp-wet}
\caption{\label{tab:6-rcpsp-wet}\texttt{rcpsp-wet} benchmarks} \caption{\label{tab:6-rcpsp-wet}\texttt{rcpsp-wet} benchmarks.}
\end{table} \end{table}
\cref{tab:6-rcpsp-wet} shows that \gecodeReplay\ and \gecodeMzn\ perform almost \cref{tab:6-rcpsp-wet} shows that \gecodeReplay\ and \gecodeMzn\ perform almost
identically, and substantially better than baseline \gecodeStd\ for these identically, and substantially better than baseline \gecodeStd\ for these
instances. The baseline learning solver \chuffedStd\ is best overall on the easy instances. The baseline learning solver, \chuffedStd{}, is the best overall on
examples, but \gls{lns} makes it much more robust. The poor performance of the easy examples, but \gls{lns} makes it much more robust. The poor performance
\chuffedMzn\ on the last instance is due to the fixed search, which limits the of \chuffedMzn\ on the last instance is due to the fixed search, which limits
usefulness of nogood learning. the usefulness of no-good learning.
\subsubsection{Summary} \subsubsection{Summary}
The results show that \gls{lns} outperforms the baseline solvers, except for The results show that \gls{lns} outperforms the baseline solvers, except for
@ -978,8 +1084,8 @@ However, the main result from these experiments is that the overhead introduced
by our \flatzinc\ interface, when compared to an optimal \gls{lns} by our \flatzinc\ interface, when compared to an optimal \gls{lns}
implementation, is relatively small. We have additionally calculated the rate of implementation, is relatively small. We have additionally calculated the rate of
search nodes explored per second and, across all experiments, \gecodeMzn\ search nodes explored per second and, across all experiments, \gecodeMzn\
achieves around 3\% fewer nodes per second than \gecodeReplay. This overhead is achieves around 3\% fewer nodes per second than \gecodeReplay{}. This overhead is
caused by propagating the additional constraints in \gecodeMzn. Overall, the caused by propagating the additional constraints in \gecodeMzn{}. Overall, the
experiments demonstrate that the compilation approach is an effective and experiments demonstrate that the compilation approach is an effective and
efficient way of adding \gls{lns} to a modelling language with minimal changes efficient way of adding \gls{lns} to a modelling language with minimal changes
to the solver. to the solver.