Some work on the incremental chapter

This commit is contained in:
Jip J. Dekker 2021-02-22 15:37:13 +11:00
parent 9475102e5a
commit ab6adde55c
No known key found for this signature in database
GPG Key ID: 517DF4A00618C9C3
11 changed files with 270 additions and 148 deletions

View File

@ -5,7 +5,7 @@ PY_LISTINGS := $(addsuffix tex, $(wildcard assets/py/*.py) )
.PHONY: $(PROJECT).pdf clean clobber
$(PROJECT).pdf: $(PROJECT).tex
$(PROJECT).pdf: $(PROJECT).tex listings
latexmk -use-make $<
listings: $(MZN_LISTINGS) $(PY_LISTINGS)

View File

@ -1 +1,2 @@
\newacronym[see={[Glossary:]{gls-cp}}]{cp}{CP}{Constraint Programming\glsadd{gls-cp}}
\newacronym[see={[Glossary:]{gls-lns}}]{lns}{LNS}{Large Neighbourhood Search\glsadd{gls-lns}}

View File

@ -32,6 +32,25 @@
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{ek-2020-online,
author = {Alexander Ek and Maria Garcia de la Banda and Andreas
Schutt and Peter J. Stuckey and Guido Tack},
title = {Modelling and Solving Online Optimisation Problems},
booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial
Intelligence, {AAAI} 2020, The Thirty-Second Innovative
Applications of Artificial Intelligence Conference, {IAAI}
2020, The Tenth {AAAI} Symposium on Educational Advances in
Artificial Intelligence, {EAAI} 2020, New York, NY, USA,
February 7-12, 2020},
pages = {1477--1485},
publisher = {{AAAI} Press},
year = 2020,
url = {https://aaai.org/ojs/index.php/AAAI/article/view/5506},
timestamp = {Tue, 02 Feb 2021 08:00:20 +0100},
biburl = {https://dblp.org/rec/conf/aaai/EkBSST20.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{freuder-1997-holygrail,
author = {Eugene C. Freuder},
title = {In Pursuit of the Holy Grail},
@ -73,6 +92,25 @@
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{ingmar-2020-diverse,
author = {Linnea Ingmar and Maria Garcia de la Banda and Peter J.
Stuckey and Guido Tack},
title = {Modelling Diversity of Solutions},
booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial
Intelligence, {AAAI} 2020, The Thirty-Second Innovative
Applications of Artificial Intelligence Conference, {IAAI}
2020, The Tenth {AAAI} Symposium on Educational Advances in
Artificial Intelligence, {EAAI} 2020, New York, NY, USA,
February 7-12, 2020},
pages = {1528--1535},
publisher = {{AAAI} Press},
year = 2020,
url = {https://aaai.org/ojs/index.php/AAAI/article/view/5512},
timestamp = {Tue, 02 Feb 2021 08:00:14 +0100},
biburl = {https://dblp.org/rec/conf/aaai/IngmarBST20.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@book{jaillet-2021-online,
title = {Online Optimization},
author = {Jaillet, P. and Wagner, M.R.},

View File

@ -11,41 +11,107 @@
\newglossaryentry{constraint}{
name={constraint},
description={TODO},
description={A constraint is a relationship between two or more decision
variables or problem parameters that has to be present in any valid solution to a
problem},
}
\newglossaryentry{constraint-modelling}{
name={constraint modelling},
description={Constraint modelling is a technique used to describe
combinatorial problems. In this paradigm the problem is described in terms of
\glspl{decision-variable} that have an unknown value, but are potentially
subject to certain \glspl{constraint}},
}
\newglossaryentry{gls-cp}{
name={constraint programming},
description={},
description={Constraint Programming (CP) is a paradigm used to solve
combinatorial problems. Its distinctive features are the declarative way in
which the user creates a problem description, in this thesis referred to as
\gls{constraint-modelling}, and its backtracking search that employs
\gls{propagation} and customisable search heuristics},
}
\newglossaryentry{decision-variable}{
name={decision variable},
description={TODO},
description={A decision variable is a value that is yet to be determined. A
problem defined as a constraint model is solved by assigning a value to each
variable that does not violate any constraints in the model and, in case of an
optimisation problem, optimises the objective function}
}
\newglossaryentry{flatzinc}{
name={Flat\-Zinc},
description={TODO},
description={A subset of the \minizinc\ syntax that is used as input for
\glspl{solver}},
}
\newglossaryentry{global}{
name={global constraint},
description={A global constraint is a common \gls{constraint} pattern that can
be described using simpler \glspl{constraint}. \Glspl{solver} sometimes provide
dedicated algorithms or rewriting rules to better handle the global constraint},
}
\newglossaryentry{gls-lns}{
name={large neighbourhood search},
description={Large Neighbourhood Search (LNS) is a meta-search algorithm that
repeatedly restricts the search space, applying a \gls{neighbourhood}, to
quickly find better solutions to a problem},
}
\newglossaryentry{meta-search}{
name={meta-search},
description={A search approach that repeatedly solves constraint models},
}
\newglossaryentry{microzinc}{
name={Micro\-Zinc},
description={TODO},
}
\newglossaryentry{minisearch}{
name={Mini\-Search},
description={TODO},
}
\newglossaryentry{minizinc}{
name={Mini\-Zinc},
description={TODO},
description={A high-level \gls{constraint-modelling} language with an
extensive library of \glspl{global}},
}
\newglossaryentry{nanozinc}{
name={Nano\-Zinc},
description={TODO},
}
\newglossaryentry{neighbourhood}{
name={neighbourhood},
description={A neighbourhood is a restriction of the search space of the
\gls{solver}},
}
\newglossaryentry{solver}{
name={solver},
description={TODO},
description={A solver is a dedicated program or algorithm that can be used to
solve combinatorial problems, or a subset thereof},
}
\newglossaryentry{problem-parameter}{
name={problem parameter},
description={TODO},
description={A problem parameter is a constant value that helps define the
problem. Its value can differ among different problem instances. Its exact value
must be known when rewriting a constraint model, but is not required when
compiling a constraint model into an executable program},
}
\newglossaryentry{propagation}{
name={constraint propagation},
description={Constraint propagation is the inference that
\glspl{decision-variable} can no longer take certain values, lest they
violate a \gls{constraint}},
}

View File

@ -1,8 +1,8 @@
function ann: lns(var int: obj, array[int] of var int: vars,
int: iterations, float: destrRate, int: exploreTime) =
int: iterations, float: destr_rate, int: explore_time) =
repeat (i in 1..iterations) ( scope(
if has_sol() then post(uniformNeighbourhood(vars,destrRate))
if has_sol() then post(uniform_neighbourhood(vars, destr_rate))
else true endif /\
time_limit(exploreTime, minimize_bab(obj)) /\
time_limit(explore_time, minimize_bab(obj)) /\
commit() /\ print()
) /\ post(obj < sol(obj)) );

View File

@ -1,10 +1,10 @@
\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1,codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8},xleftmargin=5mm]
\PY{k}{function}\PY{l+s}{ }\PY{k+kt}{ann}\PY{p}{:}\PY{l+s}{ }\PY{n+nf}{lns}\PY{p}{(}\PY{k+kt}{var}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{obj}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{array}\PY{p}{[}\PY{k+kt}{int}\PY{p}{]}\PY{l+s}{ }\PY{k+kt}{of}\PY{l+s}{ }\PY{k+kt}{var}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{vars}\PY{p}{,}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{iterations}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{float}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{destrRate}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{exploreTime}\PY{g+gr}{)}\PY{l+s}{ }\PY{o}{=}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{iterations}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{float}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{destr\PYZus{}rate}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{explore\PYZus{}time}\PY{g+gr}{)}\PY{l+s}{ }\PY{o}{=}
\PY{l+s}{ }\PY{l+s}{ }\PY{n+nv}{repeat}\PY{l+s}{ }\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{l+m}{1}\PY{o}{..}\PY{n+nv}{iterations}\PY{p}{)}\PY{l+s}{ }\PY{p}{(}\PY{l+s}{ }\PY{n+nf}{scope}\PY{p}{(}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{if}\PY{l+s}{ }\PY{n+nf}{has\PYZus{}sol}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{k}{then}\PY{l+s}{ }\PY{n+nf}{post}\PY{p}{(}\PY{n+nf}{uniformNeighbourhood}\PY{p}{(}\PY{n+nv}{vars}\PY{p}{,}\PY{n+nv}{destrRate}\PY{p}{)}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{if}\PY{l+s}{ }\PY{n+nf}{has\PYZus{}sol}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{k}{then}\PY{l+s}{ }\PY{n+nf}{post}\PY{p}{(}\PY{n+nf}{uniform\PYZus{}neighbourhood}\PY{p}{(}\PY{n+nv}{vars}\PY{p}{,}\PY{l+s}{ }\PY{n+nv}{destr\PYZus{}rate}\PY{p}{)}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{else}\PY{l+s}{ }\PY{l}{true}\PY{l+s}{ }\PY{k}{endif}\PY{l+s}{ }\PY{o}{/\PYZbs{}}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{n+nf}{time\PYZus{}limit}\PY{p}{(}\PY{n+nv}{exploreTime}\PY{p}{,}\PY{l+s}{ }\PY{n+nf}{minimize\PYZus{}bab}\PY{p}{(}\PY{n+nv}{obj}\PY{p}{)}\PY{p}{)}\PY{l+s}{ }\PY{o}{/\PYZbs{}}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{n+nf}{time\PYZus{}limit}\PY{p}{(}\PY{n+nv}{explore\PYZus{}time}\PY{p}{,}\PY{l+s}{ }\PY{n+nf}{minimize\PYZus{}bab}\PY{p}{(}\PY{n+nv}{obj}\PY{p}{)}\PY{p}{)}\PY{l+s}{ }\PY{o}{/\PYZbs{}}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{n+nf}{commit}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{o}{/\PYZbs{}}\PY{l+s}{ }\PY{n+nf}{print}\PY{p}{(}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{g+gr}{)}\PY{l+s}{ }\PY{o}{/\PYZbs{}}\PY{l+s}{ }\PY{n+nf}{post}\PY{p}{(}\PY{n+nv}{obj}\PY{l+s}{ }\PY{o}{\PYZlt{}}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{obj}\PY{p}{)}\PY{p}{)}\PY{l+s}{ }\PY{g+gr}{)}\PY{p}{;}
\end{Verbatim}

View File

@ -1,3 +1,3 @@
predicate uniformNeighbourhood(array[int] of var int: x, float: destrRate) =
predicate uniform_neighbourhood(array[int] of var int: x, float: destr_rate) =
forall(i in index_set(x))
(if uniform(0.0,1.0) > destr_rate then x[i] = sol(x[i]) else true endif);

View File

@ -1,5 +1,5 @@
\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1,codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8},xleftmargin=5mm]
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{uniformNeighbourhood}\PY{p}{(}\PY{k+kt}{array}\PY{p}{[}\PY{k+kt}{int}\PY{p}{]}\PY{l+s}{ }\PY{k+kt}{of}\PY{l+s}{ }\PY{k+kt}{var}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{x}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{float}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{destrRate}\PY{p}{)}\PY{l+s}{ }\PY{o}{=}
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{uniform\PYZus{}neighbourhood}\PY{p}{(}\PY{k+kt}{array}\PY{p}{[}\PY{k+kt}{int}\PY{p}{]}\PY{l+s}{ }\PY{k+kt}{of}\PY{l+s}{ }\PY{k+kt}{var}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{x}\PY{p}{,}\PY{l+s}{ }\PY{k+kt}{float}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{destr\PYZus{}rate}\PY{p}{)}\PY{l+s}{ }\PY{o}{=}
\PY{l+s}{ }\PY{l+s}{ }\PY{k}{forall}\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{n+nb}{index\PYZus{}set}\PY{p}{(}\PY{n+nv}{x}\PY{p}{)}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{k}{if}\PY{l+s}{ }\PY{n+nf}{uniform}\PY{p}{(}\PY{l+m}{0.0}\PY{p}{,}\PY{l+m}{1.0}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZgt{}}\PY{l+s}{ }\PY{n+nv}{destr\PYZus{}rate}\PY{l+s}{ }\PY{k}{then}\PY{l+s}{ }\PY{n+nv}{x}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{x}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}\PY{l+s}{ }\PY{k}{else}\PY{l+s}{ }\PY{l}{true}\PY{l+s}{ }\PY{k}{endif}\PY{p}{)}\PY{p}{;}
\end{Verbatim}

View File

@ -1,6 +1,9 @@
\newcommand{\eg}{e.g.}
\newcommand{\eg}{e.g.,}
\newcommand{\ie}{i.e.,}
\newcommand{\flatzinc}{\gls{flatzinc}}
\newcommand{\microzinc}{\gls{microzinc}}
\newcommand{\minisearch}{\gls{minisearch}}
\newcommand{\minizinc}{\gls{minizinc}}
\newcommand{\nanozinc}{\gls{nanozinc}}
\newcommand{\cml}{\gls{constraint-modelling} language}
\newcommand{\cmls}{\gls{constraint-modelling} languages}

View File

@ -5,7 +5,7 @@
A goal shared between all programming languages is to provide a certain level of
abstraction: an assembly language allows you to abstract from the binary
instructions and memory positions; low-level imperative languages, like FORTRAN,
were the first to allow you to abstract from the processor architecture of the
were the first to allow you to abstract from the processor architecture of the
target machine; and nowadays writing a program requires little knowledge of the
actual workings of the hardware. Freuder states that the ``Holy Grail'' of
programming languages would be where the user merely states the problem, and the

View File

@ -1,167 +1,173 @@
\chapter{Incremental Processing}\label{ch:incremental}
%************************************************
Many applications require solving almost the same combinatorial problem
repeatedly, with only slight modifications, thousands of times. For example:
In previous chapters we explored the compilation of constraint models for a
\gls{solver} as a definitive linear process, but to solve real-world problems
\gls{meta-search} algorithms are often used. These methods usually require
solving almost the same combinatorial problem repeatedly, with only slight
modifications, thousands of times. Examples of these methods are:
\begin{itemize}
\item Multi-objective problems~\autocite{jones-2002-multi-objective} are often
not supported directly in solvers. They can be solved using a
meta-search approach: find a solution to a (single-objective) problem,
then add more constraints to the original problem and repeat.
\item Large Neighbourhood Search~\autocite{shaw-1998-local-search} is a very
successful meta-heuristic. After finding a (sub-optimal) solution to a
problem, constraints are added to restrict the search in the
neighbourhood of that solution. When a new solution is found, the
constraints are removed, and constraints for a new neighbourhood are
added.
\item In Online Optimisation~\autocite{jaillet-2021-online}, a problem
instance is continuously updated with new data, such as newly available
jobs to be scheduled or customer requests to be processed.
\item Diverse Solution Search~\autocite{hebrard-2005-diverse} aims at
providing a set of solutions that are sufficiently different from each
other, in order to give human decision makers an overview of the
solution space. Diversity can be achieved by repeatedly solving a
problem instance with different objectives.
\item In Interactive Search~\autocite{}, a user provides feedback
on decisions made by the solver. The feedback is added back into the
problem, and a new solution is generated. Users may also take back some
earlier feedback and explore different aspects of the problem.
\item Multi-objective search \autocite{jones-2002-multi-objective}. Optimising
multiple objectives is often not supported directly in solvers. Instead
it can be solved using a \gls{meta-search} approach: find a solution to
a (single-objective) problem, then add more constraints to the original
problem and repeat.
\item \gls{lns} \autocite{shaw-1998-local-search}. This is a very successful
\gls{meta-search} algorithm to quickly improve solution quality. After
finding a (sub-optimal) solution to a problem, constraints are added to
restrict the search in the \gls{neighbourhood} of that solution. When a
new solution is found, the constraints are removed, and constraints for
a new \gls{neighbourhood} are added.
\item Online Optimisation \autocite{jaillet-2021-online}. These techniques can
be employed when the problem rapidly changes. A problem instance is
continuously updated with new data, such as newly available jobs to be
scheduled or customer requests to be processed.
\item Diverse Solution Search \autocite{hebrard-2005-diverse}. Here we aim to
provide a set of solutions that are sufficiently different from each
other in order to give human decision makers an overview of the solution
space. Diversity can be achieved by repeatedly solving a problem
instance with different objectives.
% \item In Interactive Search \autocite{}, a user provides feedback on decisions
% made by the solver. The feedback is added back into the problem, and a
% new solution is generated. Users may also take back some earlier
% feedback and explore different aspects of the problem.
\end{itemize}
All of these examples have in common that a problem instance is solved, new
constraints are added, the resulting instance is solved again, and constraints
may be removed again.
The usage of these methods is not new to \minizinc\ and they have proven to be
very useful \autocite{rendl-2015-minisearch, schrijvers-2013-combinators,
dekker-2018-mzn-lns, schiendorfer-2018-minibrass}. In its most basic form, a
simple scripting language is sufficient to implement these methods, by
repeatedly calling on \minizinc\ to flatten and solve the updated problems.
While the techniques presented so far in this paper should already improve the
performance of these approaches, the overhead of re-flattening an almost
identical model may still prove prohibitive, warranting direct support for
adding and removing constraints in the \minizinc\ abstract machine. In this
section, we will see that our proposed architecture can be made
\emph{incremental}, significantly improving efficiency for these iterative
solving approaches.
The usage of these methods is not new to \gls{constraint-modelling} and they
have proven to be very useful \autocite{schrijvers-2013-combinators,
rendl-2015-minisearch, schiendorfer-2018-minibrass, ek-2020-online,
ingmar-2020-diverse}. In its most basic form, a simple scripting language is
sufficient to implement these methods, by repeatedly calling on the
\gls{constraint-modelling} infrastructure to compile and solve the adjusted
constraint models. While improvements of the compilation of constraint models,
such as the ones discussed in previous chapters, can increase the performance of
these approaches, the overhead of re-compiling an almost identical model may
still prove prohibitive, warranting direct support from the
\gls{constraint-modelling} infrastructure. In this chapter we introduce two
methods to provide this support:
\begin{itemize}
\item We can add an interface for adding and removing constraints in the
\gls{constraint-modelling} infrastructure and avoid recompilation where
possible.
\item With a slight extension of existing solvers, we can compile
\gls{meta-search} algorithms into efficient solver-level specifications,
avoiding recompilation all-together.
\end{itemize}
\section{Modelling of Neighbourhoods and Meta-heuristics}
\label{section:2-modelling-nbhs}
%
% Start with a brief review of most common neighbourhoods and then explain:
% \begin{itemize}
% \item Random built in with integers
% \begin{itemize}
% \item Explain the built in
% \item Give an example of use (use model)
% \item Limitations if any
% \end{itemize}
The rest of the chapter is organised as follows. \Cref{sec:6-minisearch}
discusses \minisearch\ as a basis for extending \cmls\ with \gls{meta-search}
capabilities. \Cref{sec:6-modelling} discusses how to extend a \cml\ to model
the changes to be made by a \gls{meta-search} algorithm.
\Cref{sec:6-incremental-compilation} introduces the method that extends the
\gls{constraint-modelling} infrastructure with an interface to add and remove
constraints from an existing model while avoiding recompilation.
\Cref{sec:6-solver-extension} introduces the method that can compile some
\gls{meta-search} algorithms into efficient solver-level specifications that
only require a small extension of existing \glspl{solver}.
\Cref{sec:6-experiments} reports on the experimental results of both approaches.
Finally, \Cref{sec:6-conclusion} presents the conclusions.
% \item Solution based one
% \begin{itemize}
% \item Explain the built in
% \item Give an example of use (use model)
% \item Limitations if any
% \end{itemize}
% \end{itemize}
\section{Meta-Search in \glsentrytext{minisearch}}
\label{sec:6-minisearch}
% End with future work for other built ins (hint which ones would be useful).
% Most LNS literature discusses neighbourhoods in terms of ``destroying'' part of
% a solution that is later repaired. However, from a declarative modelling point
% of view, it is more natural to see neighbourhoods as adding new constraints and
% variables that need to be applied to the base model, \eg\ forcing variables to
% take the same value as in the previous solution.
Most LNS literature discusses neighbourhoods in terms of ``destroying'' part of
a solution that is later repaired. However, from a declarative modelling point
of view, it is more natural to see neighbourhoods as adding new constraints and
variables that need to be applied to the base model, \eg forcing variables to
take the same value as in the previous solution.
\minisearch\ \autocite{rendl-2015-minisearch} introduced a \minizinc\ extension
that enables modellers to express meta-searches inside a \minizinc\ model. A
meta-search in \minisearch\ typically solves a given \minizinc\ model, performs
some calculations on the solution, adds new constraints and then solves again.
This section introduces a \minizinc\ extension that enables modellers to define
neighbourhoods using the $\mathit{nbh(a)}$ approach described above. This
extension is based on the constructs introduced in
\minisearch\ \autocite{rendl-2015-minisearch}, as summarised below.
\subsection{LNS in \glsentrytext{minisearch}}
\minisearch\ introduced a \minizinc\ extension that enables modellers to express
meta-searches inside a \minizinc\ model. A meta-search in \minisearch\ typically
solves a given \minizinc\ model, performs some calculations on the solution,
adds new constraints and then solves again.
An LNS definition in \minisearch\ consists of two parts. The first part is a
declarative definition of a neighbourhood as a \minizinc\ predicate that posts
the constraints that should be added with respect to a previous solution. This
makes use of the \minisearch\ function: \mzninline{function int: sol(var int:
x)}, which returns the value that variable \mzninline{x} was assigned to in
the previous solution (similar functions are defined for Boolean, float and set
variables). In addition, a neighbourhood predicate will typically make use of
the random number generators available in the \minizinc\ standard library.
Most \gls{meta-search} definitions in \minisearch\ consist of two parts. The
first part is a declarative definition of any restriction to the search space
that the \gls{meta-search} algorithm might apply, called a \gls{neighbourhood}.
In \minisearch\ these definitions can make use of the function:
\mzninline{function int: sol(var int: x)}, which returns the value that variable
\mzninline{x} was assigned to in the previous solution (similar functions are
defined for Boolean, float and set variables). This allows the
\gls{neighbourhood} to be defined in terms of the previous solution. In addition,
a neighbourhood predicate will typically make use of the random number
generators available in the \minizinc\ standard library.
\Cref{lst:6-lns-minisearch-pred} shows a simple random neighbourhood. For each
decision variable \mzninline{x[i]}, it draws a random number from a uniform
distribution and, if it exceeds threshold \mzninline{destrRate}, posts
distribution and, if it exceeds threshold \mzninline{destr_rate}, posts
constraints forcing \mzninline{x[i]} to take the same value as in the previous
solution. For example, \mzninline{uniformNeighbourhood(x, 0.2)} would result in
solution. For example, \mzninline{uniform_neighbourhood(x, 0.2)} would result in
each variable in the array \mzninline{x} having a 20\% chance of being
unconstrained, and an 80\% chance of being assigned to the value it had in the
previous solution.
\begin{listing}
\highlightfile{assets/mzn/6_lns_minisearch_pred.mzn}
\caption{\label{lst:6-lns-minisearch-pred} A simple random LNS predicate
\caption{\label{lst:6-lns-minisearch-pred} A simple random \gls{lns} predicate
implemented in \minisearch{}}
\end{listing}
\begin{listing}
\highlightfile{assets/mzn/6_lns_minisearch.mzn}
\caption{\label{lst:6-lns-minisearch} A simple LNS metaheuristic implemented
in \minisearch{}}
\caption{\label{lst:6-lns-minisearch} A simple \gls{lns} \gls{meta-search}
implemented in \minisearch{}}
\end{listing}
The second part of a \minisearch\ LNS is the meta-search itself. The most basic
example is that of function \mzninline{lns} in \cref{lst:6-lns-minisearch}. It
The second part of a \minisearch\ \gls{meta-search} is the \gls{meta-search}
algorithm itself. \Cref{lst:6-lns-minisearch} shows a \minisearch\
implementation of a basic \gls{lns} algorithm, called \mzninline{lns}. It
performs a fixed number of iterations, each invoking the neighbourhood predicate
\mzninline{uniformNeighbourhood} in a fresh scope (so that the constraints only
\mzninline{uniform_neighbourhood} in a fresh scope (so that the constraints only
affect the current loop iteration). It then searches for a solution
(\mzninline{minimize_bab}) with a given timeout, and if the search does return a
new solution, it commits to that solution (so that it becomes available to the
\mzninline{sol} function in subsequent iterations). The \texttt{lns} function
\mzninline{sol} function in subsequent iterations). The \mzninline{lns} function
also posts the constraint \mzninline{obj < sol(obj)}, ensuring the objective
value in the next iteration is strictly better than that of the current
solution.
\paragraph{Limitations of the \minisearch\ approach.}
Although \minisearch\ enables the modeller to express \glspl{neighbourhood} in a
declarative way, the definition of the \gls{meta-search} algorithms is rather
unintuitive and difficult to debug, leading to unwieldy code for defining even
simple restarting strategies.
Although \minisearch\ enables the modeller to express \emph{neighbourhoods} in a
declarative way, the definition of the \emph{meta-search} is rather unintuitive
and difficult to debug, leading to unwieldy code for defining simple restarting
strategies. Furthermore, the \minisearch\ implementation requires either a close
integration of the backend solver into the \minisearch\ system, or it drives the
solver through the regular text-file based \flatzinc\ interface, leading to a
significant communication overhead.
\textbf{TODO:} Furthermore, the \minisearch\ implementation requires either a
close integration of the backend solver into the \minisearch\ system, or it
drives the solver through the regular text-file based \flatzinc\ interface,
leading to a significant communication overhead.
To address these two issues for LNS, we propose to keep modelling neighbourhoods
as predicates, but define a small number of additional \minizinc\ built-in
annotations and functions that (a) allow us to express important aspects of the
meta-search in a more convenient way, and (b) enable a simple compilation scheme
that requires no additional communication with and only small, simple extensions
of the backend solver.
% To address these two issues, we propose to keep modelling neighbourhoods as
% predicates, but define a small number of additional \minizinc\ built-in
% annotations and functions that (a) allow us to express important aspects of the
% meta-search in a more convenient way, and (b) enable a simple compilation scheme
% that requires no additional communication with and only small, simple extensions
% of the backend solver.
The approach we follow here is therefore to \textbf{extend \flatzinc}, such that
the definition of neighbourhoods can be communicated to the solver together with
the problem instance. This maintains the loose coupling of \minizinc\ and
solver, while avoiding the costly communication and cold-starting of the
black-box approach.
% The approach we follow here is therefore to \textbf{extend \flatzinc}, such that
% the definition of neighbourhoods can be communicated to the solver together with
% the problem instance. This maintains the loose coupling of \minizinc\ and
% solver, while avoiding the costly communication and cold-starting of the
% black-box approach.
\subsection{Restart annotations}
\section{Modelling of Meta-Search}
\label{sec:6-modelling}
Instead of the complex \minisearch\ definitions, we propose to add support for
simple meta-searches that are purely based on the notion of \emph{restarts}. A
restart happens when a solver abandons its current search efforts, returns to
the root node of the search tree, and begins a new exploration. Many CP solvers
already provide support for controlling their restarting behaviour, e.g.\ they
can periodically restart after a certain number of nodes, or restart for every
solution. Typically, solvers also support posting additional constraints upon
restarting (e.g Comet~\autocite{michel-2005-comet}) that are only valid for the
particular restart (i.e., they are ``retracted'' for the next restart).
the root node of the search tree, and begins a new exploration. Many \gls{cp}
solvers already provide support for controlling their restarting behaviour,
e.g.\ they can periodically restart after a certain number of nodes, or restart
for every solution. Typically, solvers also support posting additional
constraints upon restarting (\eg\ Comet \autocite{michel-2005-comet}) that are
only valid for the particular restart (\ie\ they are ``retracted'' for the next
restart).
In its simplest form, we can therefore implement LNS by specifying a
neighbourhood predicate, and annotating the \mzninline{solve} item to indicate
@ -178,7 +184,7 @@ restart as a string (see the definition of the new \mzninline{on_restart}
annotation in \cref{lst:6-restart-ann}).
The second component of our LNS definition is the \emph{restarting strategy},
defining how much effort the solver should put into each neighbourhood (i.e.,
defining how much effort the solver should put into each neighbourhood (\ie\
restart), and when to stop the overall search.
We propose adding new search annotations to \minizinc\ to control this behaviour
@ -210,7 +216,7 @@ round-robin fashion.
In \minisearch\, adaptive or round-robin approaches can be implemented using
\emph{state variables}, which support destructive update (overwriting the value
they store). In this way, the \minisearch\ strategy can store values to be used
in later iterations. We use the \emph{solver state} instead, i.e., normal
in later iterations. We use the \emph{solver state} instead, \ie\ normal
decision variables, and define two simple built-in functions to access the
solver state \emph{of the previous restart}. This approach is sufficient for
expressing neighbourhood selection strategies, and its implementation is much
@ -236,9 +242,9 @@ undefined if \mzninline{status()=START}).
In order to be able to initialise the variables used for state access, we
reinterpret \mzninline{on_restart} so that the predicate is also called for the
initial search (i.e., before the first ``real'' restart) with the same
semantics, that is, any constraint posted by the predicate will be retracted for
the next restart.
initial search (\ie\ before the first ``real'' restart) with the same semantics,
that is, any constraint posted by the predicate will be retracted for the next
restart.
\paragraph{Parametric neighbourhood selection predicates}
@ -330,7 +336,9 @@ express:
\highlightfile{assets/mzn/6_simulated_annealing.mzn}
\section{Incremental Flattening}
\section{An Incremental Interface for Constraint Modelling Languages}
\label{sec:6-incremental-compilation}
In order to support incremental flattening, the \nanozinc\ interpreter must be
able to process \nanozinc\ calls \emph{added} to an existing \nanozinc\ program,
the changes recorded in the trail, in reverse order.
\end{example}
\subsection{Incremental Solving}
Ideally, the incremental changes made by the interpreter would also be applied
incrementally to the solver. This requires the solver to support both the
point.
\end{itemize}
\section{Solver Executable Meta-Search}
\label{sec:6-solver-extension}
The neighbourhoods defined in the previous section can be executed with
\minisearch\ by adding support for the \mzninline{status} and
The \minisearch{} evaluator will then call a solver to produce a solution, and
evaluate the neighbourhood predicate, incrementally producing new \flatzinc\ to
be added to the next round of solving.
%
While this is a viable approach, our goal is to keep the compiler and solver
separate, by embedding the entire LNS specification into the \flatzinc\ that is
passed to the solver.
%
This section introduces such a compilation approach. It only requires simple
modifications of the \minizinc\ compiler, and the compiled \flatzinc\ can be
executed by standard \gls{cp} solvers with a small set of simple extensions.
\subsection{Compilation overview}
against being invoked before \mzninline{status()!=START}, since no
solution has been recorded yet, but we use this simple example to illustrate
how these Boolean conditions are compiled and evaluated.
\section{Experiments}
\label{sec:6-experiments}
We have created a prototype implementation of the architecture presented in the
preceding sections. It consists of a compiler from \minizinc\ to \microzinc, and
specifications can (a) be effective and (b) incur only a small overhead compared
to a dedicated implementation of the neighbourhoods.
To measure the overhead, we implemented our new approach in
Gecode~\autocite{gecode-2021-gecode}. The resulting solver (\gecodeMzn in the tables
below) has been instrumented to also output the domains of all model variables
after propagating the new special constraints. We implemented another extension
to Gecode (\gecodeReplay) that simply reads the stream of variable domains for
MacOS High Sierra. LNS benchmarks are repeated with 10 different random seeds
and the average is shown. The overall timeout for each run is 120 seconds.
We ran experiments for three models from the MiniZinc
challenge~\autocite{stuckey-2010-challenge, stuckey-2014-challenge} (\texttt{gbac},
\texttt{steelmillslab}, and \texttt{rcpsp-wet}). The best objective found during
the \minizinc\ Challenge is shown for every instance (\emph{best known}).
The Generalised Balanced Academic Curriculum problem comprises courses having a
specified number of credits and lasting a certain number of periods, load limits
of courses for each period, prerequisites for courses, and preferences of
teaching periods for professors. A detailed description of the problem is given
in~\autocite{chiarandini-2012-gbac}. The main decisions are to assign courses to
periods, which is done via the variables \mzninline{period_of} in the model.
\cref{lst:6-free-period} shows the neighbourhood chosen, which randomly picks one
period and frees all courses that are assigned to it.
fewer nodes per second than \gecodeReplay. This overhead is caused by
propagating the additional constraints in \gecodeMzn. Overall, the experiments
demonstrate that the compilation approach is an effective and efficient way of
adding LNS to a modelling language with minimal changes to the solver.
\section{Conclusions}
\label{sec:6-conclusion}