Reorganise incremental chapter and add other experiments

Jip J. Dekker 2021-02-11 14:43:11 +11:00
parent 6289555fa7
commit 9475102e5a
No known key found for this signature in database
GPG Key ID: 517DF4A00618C9C3
15 changed files with 531 additions and 218 deletions

View File

@ -1 +1 @@
% \newacronym[see={[Glossary:]{gls-api}}]{api}{API}{Application Programming Interface\glsadd{gls-api}}
\newacronym[see={[Glossary:]{gls-cp}}]{cp}{CP}{Constraint Programming\glsadd{gls-cp}}

View File

@ -47,6 +47,13 @@
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{gecode-2021-gecode,
author = {{Gecode Team}},
title = {Gecode: A Generic Constraint Development Environment},
year = 2021,
url = {http://www.gecode.org}
}
@inproceedings{hebrard-2005-diverse,
author = {Emmanuel Hebrard and Brahim Hnich and Barry O'Sullivan and
Toby Walsh},
@ -239,6 +246,21 @@
address = {USA}
}
@article{stuckey-2010-challenge,
author = {Peter J. Stuckey and Ralph Becket and Julien Fischer},
title = {Philosophy of the MiniZinc challenge},
journal = {Constraints An Int. J.},
volume = 15,
number = 3,
pages = {307--316},
year = 2010,
url = {https://doi.org/10.1007/s10601-010-9093-0},
doi = {10.1007/s10601-010-9093-0},
timestamp = {Fri, 13 Mar 2020 10:58:27 +0100},
biburl = {https://dblp.org/rec/journals/constraints/StuckeyBF10.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{stuckey-2013-functions,
author = {Peter J. Stuckey and Guido Tack},
editor = {Carla P. Gomes and Meinolf Sellmann},
@ -259,6 +281,22 @@
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{stuckey-2014-challenge,
author = {Peter J. Stuckey and Thibaut Feydy and Andreas Schutt and
Guido Tack and Julien Fischer},
title = {The MiniZinc Challenge 2008--2013},
journal = {{AI} Mag.},
volume = 35,
number = 2,
pages = {55--60},
year = 2014,
url = {https://doi.org/10.1609/aimag.v35i2.2539},
doi = {10.1609/aimag.v35i2.2539},
timestamp = {Tue, 25 Aug 2020 16:50:56 +0200},
biburl = {https://dblp.org/rec/journals/aim/StuckeyFSTF14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{warren-1983-wam,
title = {An abstract Prolog instruction set},
author = {Warren, David HD},

View File

@ -13,6 +13,10 @@
name={constraint},
description={TODO},
}
\newglossaryentry{gls-cp}{
name={constraint programming},
description={TODO},
}
\newglossaryentry{decision-variable}{
name={decision variable},
description={TODO},

View File

@ -1,4 +1,10 @@
% TODO: We probably need to unify these (at least for the thesis)
predicate random_allocation(array[int] of int: sol) =
forall(i in courses) (
(uniform(0,99) < 80) -> (period_of[i] == sol[i])
);
forall(i in courses) (
(uniform(0,99) < 80) -> (period_of[i] == sol[i])
);
predicate free_period() =
let { int: period = uniform(periods) } in
forall(i in courses where sol(period_of[i]) != period)
(period_of[i] = sol(period_of[i]));

View File

@ -1,6 +1,12 @@
\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1,codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8},xleftmargin=5mm]
\PY{c}{\PYZpc{} TODO: We probably need to unify these (at least for the thesis)}
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{random\PYZus{}allocation}\PY{p}{(}\PY{k+kt}{array}\PY{p}{[}\PY{k+kt}{int}\PY{p}{]}\PY{l+s}{ }\PY{k+kt}{of}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{sol}\PY{p}{)}\PY{l+s}{ }\PY{o}{=}
\PY{k}{forall}\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{n+nv}{courses}\PY{p}{)}\PY{l+s}{ }\PY{p}{(}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{n+nf}{uniform}\PY{p}{(}\PY{l+m}{0,99}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZlt{}}\PY{l+s}{ }\PY{l+m}{80}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZhy{}\PYZgt{}}\PY{l+s}{ }\PY{p}{(}\PY{n+nv}{period\PYZus{}of}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{l+s}{ }\PY{o}{==}\PY{l+s}{ }\PY{n+nv}{sol}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}
\PY{p}{)}\PY{p}{;}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{forall}\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{n+nv}{courses}\PY{p}{)}\PY{l+s}{ }\PY{p}{(}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{n+nf}{uniform}\PY{p}{(}\PY{l+m}{0,99}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZlt{}}\PY{l+s}{ }\PY{l+m}{80}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZhy{}\PYZgt{}}\PY{l+s}{ }\PY{p}{(}\PY{n+nv}{period\PYZus{}of}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{l+s}{ }\PY{o}{==}\PY{l+s}{ }\PY{n+nv}{sol}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{)}\PY{p}{;}
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{free\PYZus{}period}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{o}{=}
\PY{k}{let}\PY{l+s}{ }\PY{p}{\PYZob{}}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{period}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{uniform}\PY{p}{(}\PY{n+nv}{periods}\PY{p}{)}\PY{l+s}{ }\PY{p}{\PYZcb{}}\PY{l+s}{ }\PY{o}{in}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{forall}\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{n+nv}{courses}\PY{l+s}{ }\PY{k}{where}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{period\PYZus{}of}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}\PY{l+s}{ }\PY{o}{!=}\PY{l+s}{ }\PY{n+nv}{period}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{n+nv}{period\PYZus{}of}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{period\PYZus{}of}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{;}
\end{Verbatim}

View File

@ -0,0 +1,5 @@
predicate free_timeslot() =
let { int: slot = max(Times) div 10;
int: time = uniform(min(Times), max(Times) - slot); } in
forall(t in Tasks)
((sol(s[t]) < time \/ sol(s[t]) > time+slot) -> s[t] = sol(s[t]));

View File

@ -0,0 +1,7 @@
\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1,codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8},xleftmargin=5mm]
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{free\PYZus{}timeslot}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{l+s}{ }\PY{o}{=}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{let}\PY{l+s}{ }\PY{p}{\PYZob{}}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{slot}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nb}{max}\PY{p}{(}\PY{n+nv}{Times}\PY{p}{)}\PY{l+s}{ }\PY{o}{div}\PY{l+s}{ }\PY{l+m}{10}\PY{p}{;}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{time}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{uniform}\PY{p}{(}\PY{n+nb}{min}\PY{p}{(}\PY{n+nv}{Times}\PY{p}{)}\PY{p}{,}\PY{l+s}{ }\PY{n+nb}{max}\PY{p}{(}\PY{n+nv}{Times}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZhy{}}\PY{l+s}{ }\PY{n+nv}{slot}\PY{p}{)}\PY{p}{;}\PY{l+s}{ }\PY{p}{\PYZcb{}}\PY{l+s}{ }\PY{o}{in}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{forall}\PY{p}{(}\PY{n+nv}{t}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{n+nv}{Tasks}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{p}{(}\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{s}\PY{p}{[}\PY{n+nv}{t}\PY{p}{]}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZlt{}}\PY{l+s}{ }\PY{n+nv}{time}\PY{l+s}{ }\PY{o}{\PYZbs{}/}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{s}\PY{p}{[}\PY{n+nv}{t}\PY{p}{]}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZgt{}}\PY{l+s}{ }\PY{n+nv}{time}\PY{o}{+}\PY{n+nv}{slot}\PY{p}{)}\PY{l+s}{ }\PY{o}{\PYZhy{}\PYZgt{}}\PY{l+s}{ }\PY{n+nv}{s}\PY{p}{[}\PY{n+nv}{t}\PY{p}{]}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{s}\PY{p}{[}\PY{n+nv}{t}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{;}
\end{Verbatim}

View File

@ -0,0 +1,4 @@
predicate free_slab() =
let { int: slab = uniform(1, nbSlabs) } in
forall(i in 1..nbSlabs where slab != sol(assign[i]))
(assign[i] = sol(assign[i]));

View File

@ -0,0 +1,6 @@
\begin{Verbatim}[commandchars=\\\{\},numbers=left,firstnumber=1,stepnumber=1,codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8},xleftmargin=5mm]
\PY{k}{predicate}\PY{l+s}{ }\PY{n+nf}{free\PYZus{}slab}\PY{p}{(}\PY{p}{)}\PY{l+s}{ }\PY{o}{=}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{let}\PY{l+s}{ }\PY{p}{\PYZob{}}\PY{l+s}{ }\PY{k+kt}{int}\PY{p}{:}\PY{l+s}{ }\PY{n+nv}{slab}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{uniform}\PY{p}{(}\PY{l+m}{1}\PY{p}{,}\PY{l+s}{ }\PY{n+nv}{nbSlabs}\PY{p}{)}\PY{l+s}{ }\PY{p}{\PYZcb{}}\PY{l+s}{ }\PY{o}{in}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{k}{forall}\PY{p}{(}\PY{n+nv}{i}\PY{l+s}{ }\PY{o}{in}\PY{l+s}{ }\PY{l+m}{1}\PY{o}{..}\PY{n+nv}{nbSlabs}\PY{l+s}{ }\PY{k}{where}\PY{l+s}{ }\PY{n+nv}{slab}\PY{l+s}{ }\PY{o}{!=}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{assign}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{l+s}{ }\PY{p}{(}\PY{n+nv}{assign}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{l+s}{ }\PY{o}{=}\PY{l+s}{ }\PY{n+nf}{sol}\PY{p}{(}\PY{n+nv}{assign}\PY{p}{[}\PY{n+nv}{i}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{;}
\end{Verbatim}

View File

@ -49,6 +49,7 @@ style=apa,
% Glossary / Acronyms
\usepackage[acronym,toc]{glossaries}
\usepackage{titlecaps}
\glsdisablehyper{}
\defglsentryfmt[main]{\ifglsused{\glslabel}{\glsgenentryfmt}{\textit{\glsgenentryfmt}}}
\makeglossaries{}
@ -66,17 +67,13 @@ style=apa,
\usepackage{fancyvrb}
\usepackage{color}
\input{assets/pygments_header.tex}
\newcommand{\highlightfile}[1]{\input{#1tex}}
\newcommand{\highlightfile}[1]{{\scriptsize\input{#1tex}}}
\DeclareNewTOC[
type=listing,
float,
name=Listing,
counterwithin=chapter,
listname={List of Source Listings},
atbegin={
\centering
\scriptsize
}
]{listing}
\crefname{listing}{listing}{listings}

assets/table/6_gbac.tex Normal file
View File

@ -0,0 +1,26 @@
\begin{tabular}{|l||r|||r|r||r|r||r|r|||r|r||r|r|}
\hline
& best known
& \multicolumn{2}{c||}{\gecodeStd}
& \multicolumn{2}{c||}{\gecodeMzn}
& \multicolumn{2}{c|||}{\gecodeReplay}
& \multicolumn{2}{c||}{\chuffedStd}
& \multicolumn{2}{c|}{\chuffedMzn}
\\
\hline
Instance
& $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
\\
\hline
UD2-gbac & \textbf{146} & 1502k & 12515 & 93k & ${376}^{16}$ & \textbf{92k} & $\mathbf{362}^{15}$ & 1494k & 12344 & \textbf{207k} & $\mathbf{598}^{54}$ \\
UD4-gbac & \textbf{396} & 1517k & 12645 & 121k & $\mathbf{932}^{24}$ & \textbf{120k} & $\mathbf{932}^{24}$ & 1151k & 9267 & \textbf{160k} & $\mathbf{1142}^{5}$ \\
UD5-gbac & \textbf{222} & 2765k & 23028 & 283k & $\mathbf{2007}^{39}$ & \textbf{281k} & $\mathbf{2007}^{39}$ & 2569k & 21233 & \textbf{483k} & $\mathbf{2572}^{22}$ \\
UD8-gbac & \textbf{40} & 1195k & 9611 & 21k & $\mathbf{53}^{26}$ & \textbf{20k} & $\mathbf{53}^{26}$ & 1173k & 9559 & \textbf{114k} & $\mathbf{76}^{26}$ \\
{\scriptsize reduced\_UD4}& \textbf{949} & 629k & 4917 & \textbf{114k} & $\mathbf{950}^{0}$ & \textbf{114k} & $\mathbf{950}^{0}$ & 715k & 5491 & \textbf{117k} & $\mathbf{950}^{0}$ \\
\hline
\end{tabular}

View File

@ -0,0 +1,26 @@
\begin{tabular}{|l||r|||r|r||r|r||r|r|||r|r||r|r|}
\hline
& best known
& \multicolumn{2}{c||}{\gecodeStd}
& \multicolumn{2}{c||}{\gecodeMzn}
& \multicolumn{2}{c|||}{\gecodeReplay}
& \multicolumn{2}{c||}{\chuffedStd}
& \multicolumn{2}{c|}{\chuffedMzn}
\\
\hline
Instance
& $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
\\
\hline
j30\_1\_3-wet & \textbf{93} & 20k & 161 & \textbf{11k} & $\mathbf{93}^{0}$ & \textbf{11k} & $\mathbf{93}^{0}$ & \textbf{3k} & \textbf{93} & 13k & $\mathbf{93}^{0}$ \\
j30\_43\_10-wet & \textbf{121} & 19k & 158 & 15k & $\mathbf{121}^{0}$ & \textbf{14k} & $\mathbf{121}^{0}$ & \textbf{10k} & \textbf{121} & 15k & $\mathbf{121}^{0}$ \\
j60\_19\_6-wet & \textbf{227} & 54k & 441 & \textbf{29k} & $\mathbf{235}^{3}$ & \textbf{29k} & $\mathbf{235}^{3}$ & 63k & 487 & \textbf{29k} & $\mathbf{227}^{0}$ \\
j60\_28\_3-wet & \textbf{266} & 94k & 770 & \textbf{33k} & $\mathbf{273}^{0}$ & \textbf{33k} & $\mathbf{273}^{0}$ & 79k & 604 & \textbf{35k} & $\mathbf{272}^{1}$ \\
j90\_48\_4-wet & \textbf{513} & 199k & 1653 & 72k & $\mathbf{535}^{2}$ & \textbf{71k} & $\mathbf{535}^{2}$ & 201k & 1638 & \textbf{109k} & $\mathbf{587}^{2}$ \\
\hline
\end{tabular}

View File

@ -0,0 +1,26 @@
\begin{tabular}{|l||r|||r|r||r|r||r|r|||r|r||r|r|}
\hline
& best known
& \multicolumn{2}{c||}{\gecodeStd}
& \multicolumn{2}{c||}{\gecodeMzn}
& \multicolumn{2}{c|||}{\gecodeReplay}
& \multicolumn{2}{c||}{\chuffedStd}
& \multicolumn{2}{c|}{\chuffedMzn}
\\
\hline
Instance
& $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
& $\intobj$ & $\minobj$
\\
\hline
bench\_13\_0 & \textbf{0} & 3247 & 27 & 20 & $\mathbf{0}^{0}$ & \textbf{19} & $\mathbf{0}^{0}$ & 1315 & 9 & \textbf{50} & $\mathbf{0}^{0}$ \\
bench\_14\_1 & \textbf{0} & 1248 & \textbf{0} & 32 & $\mathbf{0}^{0}$ & \textbf{31} & $\mathbf{0}^{0}$ & \textbf{72} & \textbf{0} & 79 & $\mathbf{0}^{0}$ \\
bench\_15\_11 & \textbf{0} & 4458 & 30 & 27 & $\mathbf{0}^{0}$ & \textbf{26} & $\mathbf{0}^{0}$ & 143 & \textbf{0} & \textbf{65} & $\mathbf{0}^{0}$ \\
bench\_16\_10 & \textbf{0} & 2446 & \textbf{0} & \textbf{19} & $\mathbf{0}^{0}$ & \textbf{19} & $\mathbf{0}^{0}$ & 122 & \textbf{0} & \textbf{51} & $\mathbf{0}^{0}$ \\
bench\_19\_5 & \textbf{0} & 3380 & 28 & 12 & $\mathbf{0}^{0}$ & \textbf{11} & $\mathbf{0}^{0}$ & 3040 & 19 & \textbf{31} & $\mathbf{0}^{0}$ \\
\hline
\end{tabular}

View File

@ -46,204 +46,6 @@ section, we will see that our proposed architecture can be made
\emph{incremental}, significantly improving efficiency for these iterative
solving approaches.
\section{Incremental Flattening}
In order to support incremental flattening, the \nanozinc\ interpreter must be
able to process \nanozinc\ calls \emph{added} to an existing \nanozinc\ program,
as well as to \emph{remove} calls from an existing \nanozinc\ program. Adding new
calls is straightforward, since \nanozinc\ is already processed call-by-call.
Removing a call, however, is not so simple. When we remove a call, all effects
the call had on the \nanozinc\ program have to be undone, including results of
propagation, CSE and other simplifications.
\begin{example}\label{ex:6-incremental}
Consider the following \minizinc\ fragment:
\highlightfile{assets/mzn/6_incremental.mzn}
After evaluating the first constraint, the domain of \mzninline{x} is changed to
be less than 10. Evaluating the second constraint causes the domain of
\mzninline{y} to be less than 9. If we now, however, try to remove the first
constraint, it is not just the direct inference on the domain of \mzninline{x}
that has to be undone, but also any further effects of those changes -- in this
case, the changes to the domain of \mzninline{y}.
\end{example}
Due to this complex interaction between calls, we only support the removal of
calls in reverse chronological order, also known as \textit{backtracking}. The
common way of implementing backtracking is using a \textit{trail} data
structure~\autocite{warren-1983-wam}. The trail records all changes to the
\nanozinc\ program:
\begin{itemize}
\item the addition or removal of new variables or constraints,
\item changes made to the domains of variables,
\item additions to the CSE table, and
\item substitutions made due to equality propagation.
\end{itemize}
These changes can be caused by the evaluation of a call, propagation, or CSE.
When a call is removed, the corresponding changes can now be undone by
reversing any action recorded on the trail up to the point where the call was
added.
In order to limit the amount of trailing required, the programmer must create
explicit \textit{choice points} to which the system state can be reset. In
particular, this means that if no choice point was created before the initial
model was flattened, then this flattening can be performed without any
trailing.
\begin{example}\label{ex:6-trail}
Let us look again at the resulting \nanozinc\ code from \Cref{ex:absreif}:
% \highlightfile{assets/mzn/6_abs_reif_result.mzn}
Assume that we added a choice point before posting the constraint
\mzninline{c}. Then the trail stores the \emph{inverse} of all modifications
that were made to the \nanozinc\ as a result of \mzninline{c} (where
$\mapsfrom$ denotes restoring an identifier, and $\lhd$ \texttt{+}/\texttt{-}
respectively denote attaching and detaching constraints):
% \highlightfile{assets/mzn/6_abs_reif_trail.mzn}
To reconstruct the \nanozinc\ program at the choice point, we simply apply
the changes recorded in the trail, in reverse order.
\end{example}
\section{Incremental Solving}
Ideally, the incremental changes made by the interpreter would also be applied
incrementally to the solver. This requires the solver to support both the
dynamic addition and removal of variables and constraints. While some solvers
can support this functionality, most solvers have limitations. The system can
therefore support solvers with different levels of an incremental interface:
\begin{itemize}
\item Using a non-incremental interface, the solver is reinitialised with the
updated \nanozinc\ program every time. In this case, we still get a
performance benefit from the improved flattening time, but not from
incremental solving.
\item Using a \textit{warm-starting} interface, the solver is reinitialised
with the updated program as above, but it is also given a previous solution
to initialise some internal data structures. In particular for mathematical
programming solvers, this can result in dramatic performance gains compared
to ``cold-starting'' the solver every time.
\item Using a fully incremental interface, the solver is instructed to apply
the changes made by the interpreter. In this case, the trail data structure
is used to compute the set of \nanozinc\ changes since the last choice
point.
\end{itemize}
\section{Experiments}
We have created a prototype implementation of the architecture presented in the
preceding sections. It consists of a compiler from \minizinc\ to \microzinc, and
an incremental \microzinc\ interpreter producing \nanozinc. The system supports
a significant subset of the full \minizinc\ language; notable features that are
missing are support for set and float variables, option types, and compilation
of model output expressions and annotations. We will release our implementation
under an open-source license.
The implementation is not optimised for performance yet, but was created as a
faithful implementation of the developed concepts, in order to evaluate their
suitability and provide a solid baseline for future improvements. In the
following we present experimental results on basic flattening performance as
well as incremental flattening and solving that demonstrate the efficiency
gains that are possible thanks to the new architecture.
\subsection{Incremental Flattening and Solving}
To demonstrate the advantage that the incremental processing of \minizinc\
offers, we present a runtime evaluation of two meta-heuristics implemented using
our prototype interpreter. For both meta-heuristics, we evaluate the performance
of fully re-evaluating and solving the instance from scratch, compared to the
fully incremental evaluation and solving. The solving in both tests is performed
by the Gecode solver, version 6.1.2, connected using the fully incremental API.
\paragraph{GBAC}
The Generalised Balanced Academic Curriculum (GBAC) problem
\autocite{chiarandini-2012-gbac} consists of scheduling the courses in a
curriculum subject to load limits on the number of courses for each period,
prerequisites for courses, and preferences of teaching periods by teaching
staff. It has been shown~\autocite{dekker-2018-mzn-lns} that Large Neighbourhood
Search (LNS) is a useful meta-heuristic for quickly finding high quality
solutions to this problem. In LNS, once an initial (sub-optimal) solution is
found, constraints are added to the problem that restrict the search space to a
\textit{neighbourhood} of the previous solution. After this neighbourhood has
been explored, the constraints are removed, and constraints for a different
neighbourhood are added. This is repeated until a sufficiently high solution
quality has been reached.
We can model a neighbourhood in \minizinc\ as a predicate that, given the values
of the variables in the previous solution, posts constraints to restrict the
search. The following predicate defines a suitable neighbourhood for the GBAC
problem:
\highlightfile{assets/mzn/6_gbac_neighbourhood.mzn}
When this predicate is called with a previous solution \mzninline{sol}, then
every \mzninline{period_of} variable has an $80\%$ chance to be fixed to its
value in the previous solution. With the remaining $20\%$, the variable is
unconstrained and will be part of the search for a better solution.
In a non-incremental architecture, we would re-flatten the original model plus
the neighbourhood constraint for each iteration of the LNS. In the incremental
\nanozinc\ architecture, we can easily express LNS as a repeated addition and
retraction of the neighbourhood constraints. We implemented both approaches
using the \nanozinc\ prototype, with the results shown in \Cref{fig:gbac}. The
incremental \nanozinc\ translation shows a 12x speedup compared to re-compiling
the model from scratch in each iteration. For this particular problem,
incrementality in the target solver (Gecode) does not lead to a significant
reduction in runtime.
\begin{figure}
\centering
\includegraphics[width=0.5\columnwidth]{assets/img/6_gbac}
\caption{\label{fig:gbac}A run-time performance comparison between incremental
processing (Incr.) and re-evaluation (Redo) of 5 GBAC \minizinc\ instances
in the application of LNS on a 3.4 GHz Quad-Core Intel Core i5 using the
Gecode 6.1.2 solver. Each run consisted of 2500 iterations of applying
neighbourhood predicates. Reported times are averages of 10 runs.}
\end{figure}
\paragraph{Radiation}
Our second experiment is based on a problem of planning cancer radiation therapy
treatment using multi-leaf collimators \autocite{baatar-2011-radiation}. Two
characteristics mark the quality of a solution: the amount of time the patient
is exposed to radiation, and the number of ``shots'' or different angles the
treatment requires. However, the first characteristic is considered more
important than the second. The problem therefore has a lexicographical
objective: a solution is better if it requires a strictly shorter exposure time,
or the same exposure time but a lower number of ``shots''.
\minizinc\ solvers do not support lexicographical objectives directly, but we
can instead repeatedly solve a model instance and add a constraint to ensure
that the lexicographical objective improves. When the solver proves that no
better solution can be found, the last solution is known to be optimal. Given
two variables \mzninline{exposure} and \mzninline{shots}, once we have found a
solution with \mzninline{exposure=e} and \mzninline{shots=s}, we can add the
constraint \mzninline{exposure < e \/ (exposure = e /\ shots < s)}, expressing
the lexicographic ordering, and continue the search. Since each added
lexicographic constraint is strictly stronger than the previous one, we never
have to retract previous constraints.
\begin{figure}
\centering
\includegraphics[width=0.5\columnwidth]{assets/img/6_radiation}
\caption{\label{fig:radiation}A run-time performance comparison between
incremental processing (Incr.) and re-evaluation (Redo) of 9 Radiation
\minizinc\ instances in the application of Lexicographic objectives on a 3.4
GHz Quad-Core Intel Core i5 using the Gecode 6.1.2 solver. Each test was run
to optimality and was conducted 20 times to provide an average.}
\end{figure}
As shown in \cref{fig:radiation}, the incremental processing of the added
\mzninline{lex_less} calls is a clear improvement over the re-evaluation of the
whole model. The translation shows a 13x speedup on average, and even the time
spent solving is reduced by 33\%.
\section{Modelling of Neighbourhoods and Meta-heuristics}
\label{section:2-modelling-nbhs}
@ -276,7 +78,7 @@ take the same value as in the previous solution.
This section introduces a \minizinc\ extension that enables modellers to define
neighbourhoods using the $\mathit{nbh(a)}$ approach described above. This
extension is based on the constructs introduced in
\minisearch\~\autocite{rendl-2015-minisearch}, as summarised below.
\minisearch\ \autocite{rendl-2015-minisearch}, as summarised below.
\subsection{LNS in \glsentrytext{minisearch}}
@ -516,7 +318,6 @@ responsible for constraining the objective function. Note that a simple
hill-climbing (for minimisation) can still be defined easily in this context as:
{
\centering
\scriptsize
\highlightfile{assets/mzn/6_hill_climbing.mzn}
}
@ -527,11 +328,97 @@ through the built-in variable \mzninline{_objective}.
A simulated annealing strategy is also easy to
express:
{
\centering
\scriptsize
\highlightfile{assets/mzn/6_simulated_annealing.mzn}
}
\highlightfile{assets/mzn/6_simulated_annealing.mzn}
\section{Incremental Flattening}
In order to support incremental flattening, the \nanozinc\ interpreter must be
able to process \nanozinc\ calls \emph{added} to an existing \nanozinc\ program,
as well as to \emph{remove} calls from an existing \nanozinc\ program. Adding new
calls is straightforward, since \nanozinc\ is already processed call-by-call.
Removing a call, however, is not so simple. When we remove a call, all effects
the call had on the \nanozinc\ program have to be undone, including results of
propagation, CSE and other simplifications.
\begin{example}\label{ex:6-incremental}
Consider the following \minizinc\ fragment:
\highlightfile{assets/mzn/6_incremental.mzn}
After evaluating the first constraint, the domain of \mzninline{x} is changed to
be less than 10. Evaluating the second constraint causes the domain of
\mzninline{y} to be less than 9. If we now, however, try to remove the first
constraint, it is not just the direct inference on the domain of \mzninline{x}
that has to be undone, but also any further effects of those changes -- in this
case, the changes to the domain of \mzninline{y}.
\end{example}
Due to this complex interaction between calls, we only support the removal of
calls in reverse chronological order, also known as \textit{backtracking}. The
common way of implementing backtracking is using a \textit{trail} data
structure~\autocite{warren-1983-wam}. The trail records all changes to the
\nanozinc\ program:
\begin{itemize}
\item the addition or removal of new variables or constraints,
\item changes made to the domains of variables,
\item additions to the CSE table, and
\item substitutions made due to equality propagation.
\end{itemize}
These changes can be caused by the evaluation of a call, propagation, or CSE.
When a call is removed, the corresponding changes can now be undone by
reversing any action recorded on the trail up to the point where the call was
added.
In order to limit the amount of trailing required, the programmer must create
explicit \textit{choice points} to which the system state can be reset. In
particular, this means that if no choice point was created before the initial
model was flattened, then this flattening can be performed without any
trailing.
\begin{example}\label{ex:6-trail}
Let us look again at the resulting \nanozinc\ code from \Cref{ex:absreif}:
% \highlightfile{assets/mzn/6_abs_reif_result.mzn}
Assume that we added a choice point before posting the constraint
\mzninline{c}. Then the trail stores the \emph{inverse} of all modifications
that were made to the \nanozinc\ as a result of \mzninline{c} (where
$\mapsfrom$ denotes restoring an identifier, and $\lhd$ \texttt{+}/\texttt{-}
respectively denote attaching and detaching constraints):
% \highlightfile{assets/mzn/6_abs_reif_trail.mzn}
To reconstruct the \nanozinc\ program at the choice point, we simply apply
the changes recorded in the trail, in reverse order.
\end{example}
\section{Incremental Solving}
Ideally, the incremental changes made by the interpreter would also be applied
incrementally to the solver. This requires the solver to support both the
dynamic addition and removal of variables and constraints. While some solvers
can support this functionality, most solvers have limitations. The system can
therefore support solvers with different levels of an incremental interface:
\begin{itemize}
\item Using a non-incremental interface, the solver is reinitialised with the
updated \nanozinc\ program every time. In this case, we still get a
performance benefit from the improved flattening time, but not from
incremental solving.
\item Using a \textit{warm-starting} interface, the solver is reinitialised
with the updated program as above, but it is also given a previous solution
to initialise some internal data structures. In particular for mathematical
programming solvers, this can result in dramatic performance gains compared
to ``cold-starting'' the solver every time.
\item Using a fully incremental interface, the solver is instructed to apply
the changes made by the interpreter. In this case, the trail data structure
is used to compute the set of \nanozinc\ changes since the last choice
point.
\end{itemize}
\section{Compilation of Neighbourhoods} \label{section:compilation}
@ -755,3 +642,277 @@ against being invoked before \mzninline{status()!=START}, since the
\mzninline{sol} constraints will simply not propagate anything in case no
solution has been recorded yet, but we use this simple example to illustrate
how these Boolean conditions are compiled and evaluated.
\section{Experiments}
We have created a prototype implementation of the architecture presented in the
preceding sections. It consists of a compiler from \minizinc\ to \microzinc, and
an incremental \microzinc\ interpreter producing \nanozinc. The system supports
a significant subset of the full \minizinc\ language; notable features that are
missing are support for set and float variables, option types, and compilation
of model output expressions and annotations. We will release our implementation
under an open-source license.
The implementation is not optimised for performance yet, but was created as a
faithful implementation of the developed concepts, in order to evaluate their
suitability and provide a solid baseline for future improvements. In the
following we present experimental results on basic flattening performance as
well as incremental flattening and solving that demonstrate the efficiency
gains that are possible thanks to the new architecture.
\subsection{Incremental Flattening and Solving}
To demonstrate the advantage that the incremental processing of \minizinc\
offers, we present a runtime evaluation of two meta-heuristics implemented using
our prototype interpreter. For both meta-heuristics, we evaluate the performance
of fully re-evaluating and solving the instance from scratch, compared to the
fully incremental evaluation and solving. The solving in both tests is performed
by the Gecode solver, version 6.1.2, connected using the fully incremental API.
\paragraph{GBAC}
The Generalised Balanced Academic Curriculum (GBAC) problem
\autocite{chiarandini-2012-gbac} consists of scheduling the courses in a
curriculum subject to load limits on the number of courses for each period,
prerequisites for courses, and preferences of teaching periods by teaching
staff. It has been shown~\autocite{dekker-2018-mzn-lns} that Large Neighbourhood
Search (LNS) is a useful meta-heuristic for quickly finding high quality
solutions to this problem. In LNS, once an initial (sub-optimal) solution is
found, constraints are added to the problem that restrict the search space to a
\textit{neighbourhood} of the previous solution. After this neighbourhood has
been explored, the constraints are removed, and constraints for a different
neighbourhood are added. This is repeated until a sufficiently high solution
quality has been reached.
We can model a neighbourhood in \minizinc\ as a predicate that, given the values
of the variables in the previous solution, posts constraints to restrict the
search. The following predicate defines a suitable neighbourhood for the GBAC
problem:
\highlightfile{assets/mzn/6_gbac_neighbourhood.mzn}
When this predicate is called with a previous solution \mzninline{sol}, then
every \mzninline{period_of} variable has an $80\%$ chance to be fixed to its
value in the previous solution. With the remaining $20\%$, the variable is
unconstrained and will be part of the search for a better solution.
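An LNS iteration then amounts to posting a single call against the incumbent
solution, and retracting it again afterwards. For example (a sketch;
\mzninline{last_sol} is a hypothetical parameter array holding the
\mzninline{period_of} values of the previous solution):

\begin{Verbatim}[xleftmargin=5mm]
constraint random_allocation(last_sol);
\end{Verbatim}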
In a non-incremental architecture, we would re-flatten the original model plus
the neighbourhood constraint for each iteration of the LNS. In the incremental
\nanozinc\ architecture, we can easily express LNS as a repeated addition and
retraction of the neighbourhood constraints. We implemented both approaches
using the \nanozinc\ prototype, with the results shown in \Cref{fig:6-gbac}. The
incremental \nanozinc\ translation shows a 12x speedup compared to re-compiling
the model from scratch in each iteration. For this particular problem,
incrementality in the target solver (Gecode) does not lead to a significant
reduction in runtime.
\begin{figure}
\centering
\includegraphics[width=0.5\columnwidth]{assets/img/6_gbac}
\caption{\label{fig:6-gbac}A run-time performance comparison between incremental
processing (Incr.) and re-evaluation (Redo) of 5 GBAC \minizinc\ instances
in the application of LNS on a 3.4 GHz Quad-Core Intel Core i5 using the
Gecode 6.1.2 solver. Each run consisted of 2500 iterations of applying
neighbourhood predicates. Reported times are averages of 10 runs.}
\end{figure}
\paragraph{Radiation}
Our second experiment is based on a problem of planning cancer radiation therapy
treatment using multi-leaf collimators \autocite{baatar-2011-radiation}. Two
characteristics mark the quality of a solution: the amount of time the patient
is exposed to radiation, and the number of ``shots'' or different angles the
treatment requires. However, the first characteristic is considered more
important than the second. The problem therefore has a lexicographical
objective: a solution is better if it requires a strictly shorter exposure time,
or the same exposure time but a lower number of ``shots''.
\minizinc\ solvers do not support lexicographical objectives directly, but we
can instead repeatedly solve a model instance and add a constraint to ensure
that the lexicographical objective improves. When the solver proves that no
better solution can be found, the last solution is known to be optimal. Given
two variables \mzninline{exposure} and \mzninline{shots}, once we have found a
solution with \mzninline{exposure=e} and \mzninline{shots=s}, we can add the
constraint \mzninline{exposure < e \/ (exposure = e /\ shots < s)}, expressing
the lexicographic ordering, and continue the search. Since each added
lexicographic constraint is strictly stronger than the previous one, we never
have to retract previous constraints.
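Using the \mzninline{sol} function from our neighbourhood constructs, the same
improvement step can also be phrased as a predicate that needs no explicit
constants \mzninline{e} and \mzninline{s} (a sketch; the predicate name is
ours):

\begin{Verbatim}[xleftmargin=5mm]
predicate lex_improve() =
    exposure < sol(exposure) \/
    (exposure = sol(exposure) /\ shots < sol(shots));
\end{Verbatim}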
\begin{figure}
\centering
\includegraphics[width=0.5\columnwidth]{assets/img/6_radiation}
\caption{\label{fig:6-radiation}A run-time performance comparison between
incremental processing (Incr.) and re-evaluation (Redo) of 9 Radiation
\minizinc\ instances in the application of Lexicographic objectives on a 3.4
GHz Quad-Core Intel Core i5 using the Gecode 6.1.2 solver. Each test was run
to optimality and was conducted 20 times to provide an average.}
\end{figure}
As shown in \cref{fig:6-radiation}, the incremental processing of the added
\mzninline{lex_less} calls is a clear improvement over the re-evaluation of the
whole model. The translation shows a 13x speedup on average, and even the time
spent solving is reduced by 33\%.
\subsection{Compiling Neighbourhoods}
% TODO: Decide what to do with these
% Table column headings
\newcommand{\intobj}{\int}
\newcommand{\minobj}{\min}
\newcommand{\devobj}{\sigma}
\newcommand{\nodesec}{n/s}
\newcommand{\gecodeStd}{\textsf{gecode}}
\newcommand{\gecodeReplay}{\textsf{gecode-replay}}
\newcommand{\gecodeMzn}{\textsf{gecode-fzn}}
\newcommand{\chuffedStd}{\textsf{chuffed}}
\newcommand{\chuffedMzn}{\textsf{chuffed-fzn}}
We will now show that a solver that evaluates the compiled \flatzinc\ LNS
specifications can (a) be effective and (b) incur only a small overhead compared
to a dedicated implementation of the neighbourhoods.
To measure the overhead, we implemented our new approach in
Gecode~\autocite{gecode-2021-gecode}. The resulting solver (\gecodeMzn in the tables
below) has been instrumented to also output the domains of all model variables
after propagating the new special constraints. We implemented another extension
to Gecode (\gecodeReplay) that simply reads the stream of variable domains for
each restart, essentially replaying the LNS of \gecodeMzn without incurring any
overhead for evaluating the neighbourhoods or handling the additional variables
and constraints. Note that this is a conservative estimate of the overhead:
\gecodeReplay has to perform \emph{less} work than any real LNS implementation.
In addition, we also present benchmark results for the standard release of
Gecode 6.0 without LNS (\gecodeStd); as well as \chuffedStd, the development
version of Chuffed; and \chuffedMzn, Chuffed performing LNS with \flatzinc\ neighbourhoods.
These experiments illustrate that the LNS implementations indeed
perform well compared to the standard solvers.\footnote{Our implementations are
available at
\texttt{\justify{}https://github.com/Dekker1/\{libminizinc,gecode,chuffed\}} on branches
containing the keyword \texttt{on\_restart}.} All experiments were run on a
single core of an Intel Core i5 CPU @ 3.4 GHz with 4 cores and 16 GB RAM running
macOS High Sierra. LNS benchmarks are repeated with 10 different random seeds
and the average is shown. The overall timeout for each run is 120 seconds.
We ran experiments for three models from the
\minizinc\ Challenge~\autocite{stuckey-2010-challenge, stuckey-2014-challenge} (\texttt{gbac},
\texttt{steelmillslab}, and \texttt{rcpsp-wet}). The best objective found during
the \minizinc\ Challenge is shown for every instance (\emph{best known}).
For each solving method we measured the average integral of the model objective
after finding the initial solution ($\intobj$), the average best objective found
($\minobj$), and the standard deviation of the best objective found as a
percentage (\%), which is shown as the superscript on $\minobj$ when running
LNS.
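To make the integral metric precise (this is our formalisation of the measure):
writing $o_1,\dots,o_n$ for the incumbent objective values found at times
$t_1 \le \dots \le t_n$ and $T$ for the timeout, we take
\[
  \intobj \;=\; \sum_{i=1}^{n} o_i \cdot (t_{i+1} - t_i), \qquad t_{n+1} = T,
\]
so a run that reaches good objective values early accumulates a smaller
integral.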
%and the average number of nodes per one second (\nodesec).
The underlying search strategy used is the fixed search strategy defined in the
model. For each model we use a round-robin evaluation (\cref{lst:6-round-robin})
of two neighbourhoods: a neighbourhood that destroys $20\%$ of the main decision
variables (\cref{lst:6-lns-minisearch-pred}) and a structured neighbourhood for
the model (described below). The restart strategy is
\mzninline{::restart_constant(250)} \mzninline{::restart_on_solution}.
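In \minizinc\ terms, the solve item of each instance is thus annotated along
the following lines (a sketch; \mzninline{objective} stands for the model's
objective variable, and the round-robin neighbourhood predicate is attached to
the solve item as described in \cref{section:compilation}):

\begin{Verbatim}[xleftmargin=5mm]
solve ::restart_constant(250) ::restart_on_solution
      minimize objective;
\end{Verbatim}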
\subsubsection{\texttt{gbac}}
% GBAC
\begin{listing}[b]
\highlightfile{assets/mzn/6_gbac_neighbourhood.mzn}
\caption{\label{lst:6-free-period}\texttt{gbac}: neighbourhood freeing all
courses in a period.}
\end{listing}
The Generalised Balanced Academic Curriculum problem comprises courses having a
specified number of credits and lasting a certain number of periods, load limits
on the number of courses in each period, prerequisites between courses, and
professors' preferences for teaching periods. A detailed description of the
problem is given in~\autocite{chiarandini-2012-gbac}. The main decisions are to assign courses to
periods, which is done via the variables \mzninline{period_of} in the model.
\cref{lst:6-free-period} shows the neighbourhood chosen, which randomly picks one
period and frees all courses that are assigned to it.
\begin{table}[t]
\centering
\input{assets/table/6_gbac}
\caption{\label{tab:6-gbac}\texttt{gbac} benchmarks}
\end{table}
The results for \texttt{gbac} in \cref{tab:6-gbac} show that the overhead
introduced by \gecodeMzn w.r.t.~\gecodeReplay is quite low, and both their
results are much better than the baseline \gecodeStd. Since learning is not very
effective for \texttt{gbac}, the performance of \chuffedStd is inferior to
Gecode. However, LNS again significantly improves over standard Chuffed.
\subsubsection{\texttt{steelmillslab}}
\begin{listing}[t]
\highlightfile{assets/mzn/6_steelmillslab_neighbourhood.mzn}
\caption{\label{lst:6-free-bin}\texttt{steelmillslab}: Neighbourhood that frees
all orders assigned to a selected slab.}
\end{listing}
The Steel Mill Slab design problem consists of cutting slabs into smaller ones,
so that all orders are fulfilled while minimising the wastage. The steel mill
only produces slabs of certain sizes, and orders have both a size and a colour.
We have to assign orders to slabs, with at most two different colours on each
slab. The model uses the variables \mzninline{assign} for deciding which order
is assigned to which slab. \cref{lst:6-free-bin} shows a structured neighbourhood
that randomly selects a slab and frees the orders assigned to it in the
incumbent solution. These orders can then be freely reassigned to any other
slab.
\begin{table}[t]
\centering
\input{assets/table/6_steelmillslab}
\caption{\label{tab:6-steelmillslab}\texttt{steelmillslab} benchmarks}
\end{table}
For this problem a solution with zero wastage is always optimal. The use of LNS
makes these instances easy, as all the LNS approaches find optimal solutions. As
\cref{tab:6-steelmillslab} shows, \gecodeMzn is again slightly slower than
\gecodeReplay (the integral is slightly larger). While \chuffedStd significantly
outperforms \gecodeStd on this problem, once we use LNS, the learning in
\chuffedMzn is not advantageous compared to \gecodeMzn or \gecodeReplay. Still,
\chuffedMzn outperforms \chuffedStd by always finding an optimal solution.
% RCPSP/wet
\subsubsection{\texttt{rcpsp-wet}}
\begin{listing}[t]
\highlightfile{assets/mzn/6_rcpsp_neighbourhood.mzn}
\caption{\label{lst:6-free-timeslot}\texttt{rcpsp-wet}: Neighbourhood freeing
all tasks starting in the drawn interval.}
\end{listing}
The Resource-Constrained Project Scheduling problem with Weighted Earliness and
Tardiness cost is a classic scheduling problem in which tasks need to be
scheduled subject to precedence constraints and cumulative resource
restrictions. The objective is to find an optimal schedule that minimises the
weighted cost of the earliness and tardiness for tasks that are not completed by
their proposed deadline. The decision variables in array \mzninline{s} represent
the start times of each task in the model. \cref{lst:6-free-timeslot} shows our
structured neighbourhood for this model. It randomly selects a time interval of
one-tenth the length of the planning horizon and frees all tasks starting in
that time interval, which allows a reshuffling of these tasks.
\begin{table}[b]
\centering
\input{assets/table/6_rcpsp-wet}
\caption{\label{tab:6-rcpsp-wet}\texttt{rcpsp-wet} benchmarks}
\end{table}
\cref{tab:6-rcpsp-wet} shows that \gecodeReplay and \gecodeMzn perform almost
identically, and substantially better than baseline \gecodeStd for these
instances. The baseline learning solver \chuffedStd is best overall on the easy
examples, but LNS makes it much more robust. The poor performance of \chuffedMzn
on the last instance is due to the fixed search, which limits the usefulness of
nogood learning.
\subsubsection{Summary}
The results show that LNS outperforms the baseline solvers, except for
benchmarks where we can quickly find and prove optimality.
However, the main result from these experiments is that the overhead introduced
by our \flatzinc\ interface, when compared to an optimal LNS implementation, is
relatively small. We have additionally calculated the rate of search nodes
explored per second and, across all experiments, \gecodeMzn achieves around 3\%
fewer nodes per second than \gecodeReplay. This overhead is caused by
propagating the additional constraints in \gecodeMzn. Overall, the experiments
demonstrate that the compilation approach is an effective and efficient way of
adding LNS to a modelling language with minimal changes to the solver.

View File

@ -49,6 +49,7 @@ following publication:
\printbibliography{}
\printglossary[type=\acronymtype]{}
\printglossary{}
\renewcommand{\glsnamefont}[1]{\titlecap{#1}}
\printglossary[nonumberlist]{}
\end{document}