Fix indentation using latexindent

Jip J. Dekker 2021-05-21 14:30:42 +10:00
parent 097d359724
commit fb565cf28c
No known key found for this signature in database
GPG Key ID: 517DF4A00618C9C3
10 changed files with 1981 additions and 1985 deletions

File diff suppressed because it is too large.


@ -1,3 +1,5 @@
\usepackage{silence}
\WarningFilter{biblatex}{File 'australian-apa.lbx'}
\hfuzz=1.5pt
\usepackage{fvextra}
\usepackage{csquotes}
@ -10,36 +12,36 @@
\usepackage{fontspec}
%% Main font
\setmainfont{IBMPlexSerif}[
Path=assets/fonts/IBM-Plex-Serif/,
Extension=.otf,
UprightFont=*-Regular,
BoldFont=*-Bold,
ItalicFont=*-Italic,
BoldItalicFont=*-BoldItalic,
]
%% Title font
\newfontfamily{\ibmplexsanscondensed}{IBMPlexSansCondensed}[
Path=assets/fonts/IBM-Plex-Sans-Condensed/,
Extension=.otf,
UprightFont=*-Regular,
BoldFont=*-Bold,
ItalicFont=*-Italic,
BoldItalicFont=*-BoldItalic,
]
\newfontfamily{\satisfyfont}{Satisfy}[
Path=assets/fonts/Satisfy/,
Extension=.ttf,
UprightFont=*-Regular,
]
%% Monospace font
\setmonofont{IBMPlexMono}[
Ligatures=TeX,
Path=assets/fonts/IBM-Plex-Mono/,
Extension=.otf,
UprightFont=*-Regular,
BoldFont=*-Bold,
ItalicFont=*-Italic,
BoldItalicFont=*-BoldItalic,
]
%% Mathematical font
\usepackage{amsmath}
@ -48,7 +50,7 @@ BoldItalicFont=*-BoldItalic,
% References
\usepackage[
style=apa,
]{biblatex}
\usepackage[noabbrev]{cleveref}
@ -115,17 +117,17 @@ style=apa,
attach boxed title to top center,
boxed title style={empty,arc=0pt,outer arc=0pt,boxrule=0pt},
underlay boxed title={
\draw[#1] (title.north west) -- (title.north east) -- +(\tcboxedtitleheight,-\tcboxedtitleheight)
-- (title.south west) -- +(-\tcboxedtitleheight,0)-- cycle;
},
overlay last={
\draw[#1] ([xshift=-80pt]frame.south) -- ++ (160pt,0pt);
\fill[#1] ([xshift=-30pt,yshift=-1pt]frame.south) rectangle +(60pt,2pt);
},
overlay unbroken={
\draw[#1] ([xshift=-80pt]frame.south) -- ++ (160pt,0pt);
\fill[#1] ([xshift=-30pt,yshift=-1pt]frame.south) rectangle +(60pt,2pt);
},
title={Example \theexample{}},
colback=white,
coltitle=black,
@ -134,9 +136,9 @@ style=apa,
% Code formatting
\usepackage[
chapter,
cachedir=listings,
outputdir=build,
]{minted}
\usemintedstyle{borland}
@ -147,24 +149,24 @@ outputdir=build,
boxrule=0pt,
outer arc=0pt,
overlay first={
\draw[#1] (frame.north west) -- ++ (0,-5pt);
\draw[#1] (frame.north west) -- ++ (25pt, 0pt);
},
overlay last={
\draw[#1] (frame.south west) -- ++ (0, 5pt);
\draw[#1] (frame.south west) -- ++ (25pt,0pt);
},
overlay unbroken={
\draw[#1] (frame.north west) -- ++ (0,-5pt);
\draw[#1] (frame.north west) -- ++ (25pt, 0pt);
\draw[#1] (frame.south west) -- ++ (0, 5pt);
\draw[#1] (frame.south west) -- ++ (25pt,0pt);
},
colback=white,
colframe=white,
}
\BeforeBeginEnvironment{minted}{\begin{listingbox}}
\AfterEndEnvironment{minted}{\end{listingbox}}
\newcommand{\ptinline}[1]{{\texttt{\small {#1}}}}


@ -2,7 +2,7 @@
\chapter{Introduction}\label{ch:introduction}
%************************************************
High-level \cmls{}, like \minizinc{}, were originally designed as a convenient input
language for \gls{cp} solvers \autocite{marriott-1998-clp}. A user would write a
model consisting of a few loops or comprehensions; given a data file for the
parameters, this would be rewritten into a relatively small set of constraints
@ -14,13 +14,13 @@ acceptable: models (both before and after translation) were small, translation
was fast, and the expected results of translation were obvious. The current
architecture is illustrated in \Cref{sfig:4-oldarch}.
But over time, the uses of high-level \cmls{} have expanded greatly from this
original vision. It is now used to generate input for wildly different solver
technologies: not just \gls{cp}, but also \gls{mip} \autocite{wolsey-1988-mip},
\gls{sat} \autocite{davis-1962-dpll} and \gls{cbls}
\autocite{bjordal-2015-fzn2oscarcbls} solvers. Crucially, the same constraint
model can be used with any of these solvers, which is achieved through the use
of solver-specific libraries of constraint definitions. In \minizinc{}, these
solver libraries are written in the same language.
As such, \minizinc\ turned out to be much more expressive than expected, so more
@ -35,42 +35,43 @@ However, as they have become more common, these extended uses have revealed
weaknesses of the existing \minizinc\ tool chain. In particular:
\begin{itemize}
\item The \minizinc\ compiler is inefficient. It does a surprisingly large
amount of work for each expression (especially resolving sub-typing
and overloading), which may be repeated many times --- for example,
inside the body of a comprehension. And as models generated for
other solver technologies (particularly \gls{mip}) can be quite
large, the resulting flattening procedure can be intolerably slow.
As the model transformations implemented in \minizinc\ become more
sophisticated, these performance problems are simply magnified.
\item The generated models often contain unnecessary constraints. During
the transformation, functional expressions are replaced with
constraints. But this breaks the functional dependencies: if the
original expression later becomes redundant (due to model
simplifications), \minizinc\ may fail to detect that the constraint
can be removed.
\item Monolithic flattening is wasteful. When \minizinc\ is used for
multi-shot solving, there is typically a large base model common to
all sub-problems, and a small set of constraints which are added or
removed in each iteration. With the existing \minizinc\ architecture,
the whole model must be fully re-flattened each time. This means that
a large (sometimes dominant) portion of the runtime is spent
flattening the same base model over and over again, and it also
prevents \emph{the solver} from carrying over anything it learnt from
one problem to the next, closely related, problem.
\end{itemize}
In this thesis, we revisit the rewriting of high-level \cmls\ into solver-level
constraint models and describe an architecture that allows us to:
\begin{itemize}
\item easily integrate a range of \textbf{optimisation and simplification}
techniques,
\item effectively \textbf{detect and eliminate dead code} introduced by
functional definitions, and
\item support \textbf{incremental flattening and solving}, and better
integration with solvers providing native incremental features.
\end{itemize}


@ -1,5 +1,5 @@
%************************************************
\chapter{Background}\label{ch:background}
%************************************************
A goal shared between all programming languages is to provide a certain level of
@ -34,12 +34,6 @@ solution, these models can generally be given to a dedicated solving program, or
\solver{} for short, that can find a solution that fits the requirements of the
model.
\begin{example}%
\label{ex:back-knapsack}
@ -78,6 +72,12 @@ model.
\end{example}
\begin{listing}
\pyfile{assets/py/2_dyn_knapsack.py}
\caption{\label{lst:2-dyn-knapsack} A Python program that solves a 0-1 knapsack
problem using dynamic programming}
\end{listing}
In the remainder of this chapter we first introduce \minizinc{}, the leading
\cml\ used within this thesis, in \cref{sec:back-minizinc}. In
\cref{sec:back-solving} we discuss how \gls{cp} can be used to solve a
@ -445,14 +445,13 @@ resulting definition. There are three main purposes for \glspl{let}:
multiplication constraint \mzninline{pred_int_times}.
\end{enumerate}
An important detail in flattening \glspl{let} is that any \variables{} that are
introduced might need to be renamed in the resulting solver level model.
Different from top-level definitions, the \variables{} declared in \glspl{let}
can be flattened multiple times when used in loops, function definitions (that
are called multiple times), and \gls{array} \glspl{comprehension}. In these
cases the flattener must assign any \variables{} in the \gls{let} a new name and
use this name in any subsequent definitions and in the resulting expression.
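As an illustrative sketch (the array \mzninline{x} and parameter \mzninline{n}
are hypothetical and assumed to be declared with suitable values), consider:

\begin{mzn}
array[1..n] of var int: x;
constraint forall (i in 1..n-1) (
  let { var int: d = x[i+1] - x[i]; } in
  d >= 0 /\ d <= 5
);
\end{mzn}

Every instantiation of the generator introduces its own copy of \mzninline{d},
so the flattener must give each copy a fresh name before its definition and the
attached \constraints{} are added to the solver level model.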
\subsection{Reification}%
\label{subsec:back-reification}
@ -460,8 +459,8 @@ definitions and in the resulting expression.
With the rich expression language in \minizinc{}, \constraints{} can consist of
complex expressions that do not translate to a single constraint at the
\solver{} level. Instead, different parts of a complex expression will be
translated into different \constraints{}. Not all of these \constraints{} will
be globally enforced by the solver. \constraints{} stemming from sub-expressions
will typically be \emph{reified} into a Boolean variable. \Gls{reification}
means that a variable \mzninline{b} is constrained to be true if and only if a
corresponding constraint \mzninline{c(...)} holds.
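As a small illustrative sketch (\mzninline{x} and \mzninline{y} are assumed to
be integer \variables{} declared elsewhere), a disjunction such as the one below
cannot enforce either inequality globally; each side is therefore reified into
its own Boolean variable and only the disjunction of those Booleans is enforced:

\begin{mzn}
constraint (x + y <= 10) \/ (x - y >= 3);
% conceptually rewritten into
%   b1 <-> (x + y <= 10),  b2 <-> (x - y >= 3),  b1 \/ b2
\end{mzn}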
@ -509,8 +508,8 @@ Some expressions in the \cmls\ do not always have a well-defined result.
the constraint should be trivially true.
\end{example}
Part of the semantics of a \cml{} is the choice of how to treat these
partial functions. Examples of such expressions in \minizinc\ are:
\begin{itemize}
\item Division (or modulus) when the divisor is zero:


@ -22,12 +22,12 @@ larger.
In this chapter, we revisit the rewriting of high-level \cmls\ into solver-level
constraint models. We describe a new \textbf{systematic view of the execution of
\minizinc{}} and build on this to propose a new tool chain. We show how this
tool chain allows us to:
\begin{itemize}
\item efficiently rewrite high-level constraint models with \textbf{minimal
overhead},
\item easily integrate a range of \textbf{optimisation and simplification}
techniques,
\item and effectively \textbf{detect and eliminate dead code} introduced by
@ -100,20 +100,20 @@ and where expressions have \mzninline{par} type.
<literal> ::= <constant> | <ident>
<exp> ::= <literal>
\alt <ident>"(" <exp> ["," <exp>]* ")"
\alt "let" "{" <item>* "}" "in" <literal>
\alt "if" <exp> "then" <exp> "else" <exp> "endif"
\alt "[" <exp> "|" <gen-exp> "where" <exp> "]"
\lat <ident>"[" <literal> "]"
\alt <ident>"(" <exp> ["," <exp>]* ")"
\alt "let" "{" <item>* "}" "in" <literal>
\alt "if" <exp> "then" <exp> "else" <exp> "endif"
\alt "[" <exp> "|" <gen-exp> "where" <exp> "]"
\alt <ident>"[" <literal> "]"
<item> ::= <param> [ "=" <exp> ]";"
\alt "tuple(" <type-inst> ["," <type-inst>]* "):" <ident> = "(" <exp> ["," <exp>]* ")";
\alt "(" <param> ["," <param>]* ")" "=" <exp>";"
\alt "constraint" <exp>";"
\alt "tuple(" <type-inst> ["," <type-inst>]* "):" <ident> = "(" <exp> ["," <exp>]* ")";
\alt "(" <param> ["," <param>]* ")" "=" <exp>";"
\alt "constraint" <exp>";"
<param> ::= <type-inst>":" <ident>
<gen-exp> ::= <ident> "in" <exp> ["," <ident> "in" <exp> ]*
\end{grammar}
\caption{\label{fig:4-uzn-syntax}Syntax of \microzinc{}.}
\end{figure}
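As a flavour of what \microzinc\ definitions look like, the following sketch
shows how a functional \minizinc\ expression such as \mzninline{x + y * z} could
be written relationally (using primitives in the style of \mzninline{int_plus}
and \mzninline{pred_int_times}; the names and signatures of the actual library
primitives may differ):

\begin{mzn}
function var int: plus_times(var int: x, var int: y, var int: z) =
  let {
    var int: m;
    constraint pred_int_times(y, z, m);
    var int: r;
    constraint int_plus(x, m, r);
  } in r;
\end{mzn}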
@ -127,18 +127,18 @@ explained in detail in \cref{sec:4-nanozinc}, for now you can ignore them).
\begin{figure}
\begin{grammar}
<nzn> ::= <nzn-item>*
<nzn-item> ::= <nzn-def> | <nzn-con>
<nzn-con> ::= "constraint" <ident>"(" <literal> ["," <literal>]* ")"";"
<nzn-var> ::= "var" <nzn-dom> ":" <ident> ";" <nzn-bind>*
<nzn-dom> ::= <constant> ".." <constant> | <constant>
\alt "bool" | "int" | "float" | "set of int"
<nzn-bind> ::= "↳" <nzn-con>
\end{grammar}
\caption{\label{fig:4-nzn-syntax}Syntax of \nanozinc{}.}
\end{figure}
@ -154,9 +154,9 @@ The translation from \minizinc\ to \microzinc{} involves the following steps:
expressions like \mzninline{x+y} by function calls
\mzninline{int_plus(x,y)}. As mentioned above, more complex rules for
rewriting conditionals with variables, such as \mzninline{if x then A
else B endif} and \mzninline{A[x]} where \mzninline{x} is a variable,
are also replaced by function calls (\eg\ \mzninline{if_then_else([x,
true], [A, B])} and \mzninline{element(a, x)} respectively).
\item Replace optional variables into non-optional forms. As shown by Mears et
al., optional variable types can be represented using a variable of the
@ -189,8 +189,8 @@ The translation from \minizinc\ to \microzinc{} involves the following steps:
is only defined when \mzninline{x in index_set(a)}. A total version to
be used in \microzinc\ could look like\footnote{For brevity's sake, we
will still use \minizinc\ operators to write the \microzinc\
definition.}:
\begin{mzn}
function tuple(var int, var bool): element_safe(array of int: a, var int: x) =
@ -409,25 +409,25 @@ identifiers are fresh (\ie\ not used in the rest of the \nanozinc\ program), by
suitable alpha renaming.
\begin{figure*}
\centering
\begin{prooftree}
\hypo{\texttt{function} ~t\texttt{:}~\ptinline{F(\(p_1, \ldots{}, p_k\)) = E;} \in{} \Prog{} \text{~where the~} p_i \text{~are fresh}}
\infer[no rule]1{\Sem{\(\ptinline{E}_{[p_i \mapsto a_i, \forall{} 1 \leq{} i \leq{} k]}\)}{\Prog, \Env} \Rightarrow{} \tuple{v, \Env'}}
\infer1[(Call)]{\Sem{F(\(a_1, \ldots, a_k\))}{\Prog, \Env} \Rightarrow{} \tuple{v, \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\ptinline{F} \in \text{Builtins}}
\infer1[(CallBuiltin)]{\Sem{F(\(a_1, \ldots, a_k\))}{\Prog, \Env} \Rightarrow{} \tuple{\ensuremath{\mathit{eval}(\ptinline{F}(a_1,\ldots, a_k))}, \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\texttt{predicate}~\ptinline{F(\(p_1, \ldots, p_k\));} \in \Prog}
\infer1[(CallPredicate)]{\Sem{F(\(a_1, \ldots, a_k\))}{\Prog, \Env} \Rightarrow{}
\tuple{\ensuremath{\texttt{constraint}~\ptinline{F}(a_1, \ldots, a_k), \Env}}}
\end{prooftree}
\caption{\label{fig:4-rewrite-to-nzn-calls} Rewriting rules for partial evaluation
of \microzinc\ calls to \nanozinc{}.}
\end{figure*}
The rules in \cref{fig:4-rewrite-to-nzn-calls} handle function calls. The first
@ -449,47 +449,47 @@ considered primitives, and as such simply need to be transferred into the
\nanozinc\ program.
\begin{figure*}
\centering
\begin{prooftree}
\hypo{\Sem{\(\mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env, \emptyset} \Rightarrow \tuple{x, \Env'}}
\infer1[(Let)]{\Sem{let \{ \(\mathbf{I}\) \} in \(\mathbf{X}\)}{\Prog, \Env} \Rightarrow\ \tuple{x, \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(\mathbf{X}\)}{\Prog, \{ t : x \} \cup{} \Env} \Rightarrow \tuple{x, \{ t: x \} \cup{} \Env'}}
\infer1[(Item0)]{\Sem{\(\epsilon{} \mid{} \mathbf{X}\)}{\Prog, \{ t: x \}\cup{}\Env, \Ctx} \Rightarrow{} \tuple{x, \{ t: x \texttt{~↳~} \Ctx{} \} \cup{} \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(\mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env\ \cup\ \{t : x\}, \Ctx} \Rightarrow \tuple{x, \Env'}}
\infer1[(ItemT)]{\Sem{\(t\texttt{:}~x\texttt{;} \mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env, \Ctx} \Rightarrow{} \tuple{x, \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(E\)}{\Prog, \Env} \Rightarrow \tuple{v, \Env'}}
\hypo{\Sem{\(\mathbf{I}_{[x \mapsto v]} \mid{} \mathbf{X}_{[x \mapsto v]}\)}{\Prog, \Env', \Ctx} \Rightarrow\ \tuple{x, \Env''}}
\infer2[(ItemTE)]{\Sem{\(t\texttt{:}~x = E\texttt{;} \mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env, \Ctx} \Rightarrow\ \tuple{x, \Env''}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(E_{i}\)}{\Prog,\Env^{i-1}} \Rightarrow \tuple{v_{i}, \Env^{i}}, \forall{} 1 \leq{} i \leq{} k}
\infer[no rule]1{\Sem{\(\mathbf{I}_{[x_{\langle{}i\rangle} \mapsto v_{i}, \forall{} 1 \leq{} i \leq{} k]} \mid{} \mathbf{X}_{[x_{\langle{}i\rangle} \mapsto v_{i}, \forall{} 1 \leq{} i \leq{} k]}\)}{\Prog, \Env^{k}, \Ctx} \Rightarrow\ \tuple{x, \Env'}}
\infer1[(ItemTupC)]{\Sem{\( \texttt{tuple(}t_{1}, \ldots, t_{k}\texttt{):}~x = \left(E_{1}, \ldots, E_{k}\right)\)}{\Prog, \Env^{0}, \Ctx} \Rightarrow{} \tuple{x, \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(E\)}{\Prog, \Env} \Rightarrow \tuple{\left(v_{\langle{}1\rangle}, \ldots, v_{\langle{}k\rangle}\right), \Env'}}
\infer[no rule]1{\Sem{\(\mathbf{I}_{[x_{i} \mapsto v_{\langle{}i\rangle}, \forall{} 1 \leq{} i \leq{} k]} \mid{} \mathbf{X}_{[x_{i} \mapsto v_{\langle{}i\rangle}, \forall{} 1 \leq{} i \leq{} k]}\)}{\Prog, \Env', \Ctx} \Rightarrow\ \tuple{x, \Env''}}
\infer1[(ItemTupD)]{\Sem{\(\left(t_{1}\texttt{:}~x_{1}, \ldots, t_{k}\texttt{:}~x_{k}\right) = E\texttt{;} \mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env, \Ctx} \Rightarrow\ \tuple{x, \Env''}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(C\)}{\Prog, \Env} \Rightarrow \tuple{c, \Env'}}
\hypo{\Sem{\(\mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env', \{c\}\cup\Ctx} \Rightarrow \tuple{x, \Env''}}
\infer2[(ItemC)]{\Sem{\(\mbox{constraint}~C\texttt{;} \mathbf{I} \mid{} \mathbf{X}\)}{\Prog, \Env, \Ctx} \Rightarrow\ \tuple{x, \Env''}}
\end{prooftree}
\caption{\label{fig:4-rewrite-to-nzn-let} Rewriting rules for partial evaluation
of \microzinc\ let expressions to \nanozinc{}.}
\end{figure*}
The rules in \cref{fig:4-rewrite-to-nzn-let} consider let expressions. Starting
@ -506,54 +506,54 @@ to mark the \(i^{\text{th}}\) member of the tuple \(x\) in our substitution
notation.
\begin{figure*}
\centering
\begin{prooftree}
\hypo{x \in \langle \text{ident} \rangle}
\hypo{\{t: x \texttt{~↳~} \phi\ \} \in \Env}
\infer2[(IdX)]{\Sem{\(x\)}{\Prog, \Env} \Rightarrow{} \tuple{x, \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(x\)}{\Prog, \Env} \Rightarrow \tuple{[x_{1}, \ldots, x_{n}], \Env'}}
\hypo{\Sem{\(i\)}{\Prog, \Env'} \Rightarrow \tuple{v, \Env''}}
\hypo{v \in [1 \ldots{} n]}
\infer3[(Access)]{\Sem{\(x\texttt{[}i\texttt{]}\)}{\Prog, \Env} \Rightarrow{} \tuple{x_{v}, \Env''}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{c \text{~constant}}
\infer1[(Const)]{\Sem{\(c\)}{\Prog, \Env} \Rightarrow{} \tuple{c, \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(C\)}{\Prog, \Env} \Rightarrow \tuple{\ptinline{true}, \_}}
\infer1[(If\(_T\))]{\Sem{if \(C\) then \(E_t\) else \(E_e\) endif}{\Prog, \Env} \Rightarrow{} \Sem{\(E_t\)}{\Prog, \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(C\)}{\Prog, \Env} \Rightarrow \tuple{\ptinline{false}, \_}}
\infer1[(If\(_F\))]{\Sem{if \(C\) then \(E_t\) else \(E_e\) endif}{\Prog, \Env} \Rightarrow{} \Sem{\(E_e\)}{\Prog, \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{G}{\Prog,\Env} \Rightarrow \tuple{ \ptinline{true}, \_}}
\hypo{\Sem{\(E\)}{\Prog, \Env} \Rightarrow \tuple {v, \Env'}}
\infer2[(WhereT)]{\Sem{\([E ~|~ \mbox{where~} G]\)}{\Prog, \Env} \Rightarrow{} \tuple{[v], \Env'}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{G}{\Prog,\Env} \Rightarrow \tuple{\ptinline{false}, \_}}
\infer1[(WhereF)]{\Sem{\([E ~|~ \mbox{where~} G]\)}{\Prog, \Env} \Rightarrow{} \tuple{[], \Env}}
\end{prooftree} \\
\bigskip
\begin{prooftree}
\hypo{\Sem{\(\mathit{PE}\)}{\Prog,\Env} \Rightarrow \tuple{S, \_}}
\infer[no rule]1{\Sem{\([E ~|~ \mathit{GE} \mbox{~where~} G]\)}{\Prog, \Env_{[x \mapsto{} v]}} \Rightarrow{} \tuple{A_v, \Env_{[x \mapsto{} v]} \cup{} \Env{}_{v}}, \forall{} v \in{} S }
\infer1[(ListG)]{\Sem{\([E~|~ x \mbox{~in~} \mathit{PE}, \mathit{GE} \mbox{~where~} G]\)}{\Prog, \Env} \Rightarrow{}
\tuple{\mbox{concat} [ A_v~|~v \in{} S ], \Env{} \cup{} \bigcup{}_{v \in{} S} \Env{}_{v}}}
\end{prooftree}
\caption{\label{fig:4-rewrite-to-nzn-other} Rewriting rules for partial evaluation
of other \microzinc\ expressions to \nanozinc{}.}
\end{figure*}
Finally, the rules in \cref{fig:4-rewrite-to-nzn-other} handle the evaluation of
@ -1028,7 +1028,7 @@ Complex \minizinc\ expression can sometimes result in the creation of many new
variables that represent intermediate results. This is particularly true for
linear and Boolean equations that are generally written using \minizinc\
operators. For example the evaluation of the linear constraint \mzninline{x +
2*y <= z} will result in the following \nanozinc:
\begin{nzn}
var int: x;
@ -1045,7 +1045,7 @@ These \nanozinc\ definitions are correct, but, at least for \gls{mip} solvers,
the existence of the intermediate variables is likely to have a negative impact
on the \gls{solver}'s performance. These \glspl{solver} would likely perform
better had they received the linear constraint \mzninline{int_lin_le([1,2,-1],
[x,y,z], 0)} directly. Since many solvers support linear constraints, it is
often an additional burden to have intermediate values that have to be given a
value in the solution.
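For comparison, the aggregated form quoted above, which many \gls{mip}
\glspl{solver} would rather receive directly, is a single linear constraint:

\begin{mzn}
constraint int_lin_le([1, 2, -1], [x, y, z], 0); % i.e., x + 2*y - z <= 0
\end{mzn}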
@ -1145,22 +1145,22 @@ unoptimised prototype to a mature piece of software.
]{assets/table/rew_functional.csv}\rewFuncData
\begin{tikzpicture}
\begin{axis}[
xbar,
reverse legend,
legend style={at={(0.82,0.8)},
anchor=west,legend columns=1},
xlabel={Run-time (ms)},
symbolic y coords={{A(3,6)},{Fib(23)},{Tak(18,12,6)}},
ytick=data,
% cycle list name=exotic,
nodes near coords,
nodes near coords align={horizontal},
width = \columnwidth,
height = 8cm,
enlarge y limits={0.3},
enlarge x limits={0.03},
xmin = 0,
xmax = 130,
]
\addplot[fill=cb1, postaction={pattern=grid}] table [y={test}, x={MiniZinc}]{\rewFuncData};
\addplot[fill=cb2, postaction={pattern=north east lines}] table [y={test}, x={Python}]{\rewFuncData};


@ -6,29 +6,29 @@ In this chapter we study the usage of \gls{half-reif}. When a constraint \mzninl
\begin{itemize}
\item Flattening with half reification can naturally produce the relational
semantics when flattening partial functions in positive contexts.
\item Half reified constraints add no burden to the solver writer; if they
have a propagator for constraint \mzninline{pred(....)} then they can
straightforwardly construct a half reified propagator for \mzninline{b
-> pred(...)}.
\item Half reified constraints \mzninline{b -> pred(...)} can implement fully
reified constraints without any loss of propagation strength (assuming
reified constraints are negatable).
\item Flattening with half reification can produce more efficient propagation
when flattening complex constraints.
\item Flattening with half reification can produce smaller linear models when
used with a mixed integer programming solver.
\end{itemize}
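To make the distinction concrete, the following sketch contrasts a full
\gls{reification} with a half reification of the same constraint (the
names \mzninline{b}, \mzninline{x}, and \mzninline{y} are illustrative):

\begin{mzn}
% full reification: b is true if and only if the constraint holds
constraint b <-> (x + y <= 10);
% half reification: fixing b to true forces the constraint,
% while b = false leaves it unconstrained
constraint b -> (x + y <= 10);
\end{mzn}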
\section{Propagation and Half Reification}%
\label{sec:half-propagation}
\begin{itemize}
\item custom implemented half-reified propagations.
\item benefits
\item downside
\item experimental results
\end{itemize}
A propagation engine gains certain advantages from half-reification, but also
@ -40,10 +40,10 @@ the propagator, and hence make its propagation faster.
When full reification is applicable (where we are not using half reified
predicates) an alternative to half reification is to implement full reification
\mzninline{x <-> pred(...)} by two half reified propagators \mzninline{x ->
pred(...)}, \mzninline{y \half \neg pred(...)}, \mzninline{x <-> not y}. This
does not lose propagation strength. For Booleans appearing in a positive context
we can make the propagator \mzninline{y -> not pred(...)} run at the lowest
priority, since it will never cause failure. Similarly in negative contexts we
can make the propagator \mzninline{b -> pred(...)} run at the lowest priority.
This means that Boolean variables are still fixed at the same time, but there is
@ -56,30 +56,30 @@ less overhead.
\label{sec:half-context}
\begin{tabular}{ccc}
\(
\begin{array}{lcl}
\changepos \rootc & = & \posc \\
\changepos \posc & = & \posc \\
\changepos \negc & = & \negc \\
\changepos \mixc & = & \mixc
\end{array}
\)
& ~ &
\(
\begin{array}{lcl}
\changeneg \rootc & = & \negc \\
\changeneg \posc & = & \negc \\
\changeneg \negc & = & \posc \\
\changeneg \mixc & = & \mixc
\end{array}
\)
\end{tabular}
\begin{itemize}
\item What (again) are the contexts?
\item Why are they important
\item When can we use half-reification
\item How does the analysis work?
\end{itemize}
@ -87,7 +87,7 @@ less overhead.
\label{sec:half-flattening}
\begin{itemize}
\item cse
\item chain compression
\end{itemize}


@ -43,8 +43,8 @@ may be removed again.
The usage of these methods is not new to \gls{constraint-modelling}, and they
have proven to be very useful \autocite{schrijvers-2013-combinators,
rendl-2015-minisearch, schiendorfer-2018-minibrass, ek-2020-online,
ingmar-2020-diverse}. In its most basic form, a simple scripting language is
sufficient to implement these methods, by repeatedly calling on the
\gls{constraint-modelling} infrastructure to compile and solve the adjusted
constraint models. While improvements of the compilation of constraint models,
@ -566,9 +566,9 @@ The definition of these new functions follows the same pattern as for
\mzninline{sol}, \mzninline{status}, and \mzninline{last_val}. The MiniZinc
definition of the \mzninline{uniform_nbh} function is shown in
\Cref{lst:6-int-rnd}. \footnote{Random number functions need to be marked as
\mzninline{::impure} for the compiler not to apply \gls{cse}
\autocite{stuckey-2013-functions} if they are called multiple times with the
same arguments.} Note that the function accepts variable arguments \mzninline{l}
and \mzninline{u}, so that it can be used in combination with other functions,
such as \mzninline{sol}.
@ -693,19 +693,19 @@ the call had on the \nanozinc\ program have to be undone, including results of
propagation, \gls{cse} and other simplifications.
\begin{example}\label{ex:6-incremental}
Consider the following \minizinc\ fragment:
\begin{mzn}
\begin{mzn}
constraint x < 10;
constraint y < x;
\end{mzn}
After evaluating the first constraint, the domain of \mzninline{x} is changed to
be less than 10. Evaluating the second constraint causes the domain of
\mzninline{y} to be less than 9. If we now, however, try to remove the first
constraint, it is not just the direct inference on the domain of \mzninline{x}
that has to be undone, but also any further effects of those changes --- in this
case, the changes to the domain of \mzninline{y}.
\end{example}
Due to this complex interaction between calls, we only support the removal of
@ -779,18 +779,18 @@ therefore support solvers with different levels of an incremental interface:
\begin{itemize}
\item Using a non-incremental interface, the solver is reinitialised with the
updated \nanozinc\ program every time. In this case, we still get a
performance benefit from the improved flattening time, but not from
incremental solving.
\item Using a \textit{warm-starting} interface, the solver is reinitialised
with the updated program as above, but it is also given a previous solution
to initialise some internal data structures. In particular for mathematical
programming solvers, this can result in dramatic performance gains compared
to ``cold-starting'' the solver every time.
\item Using a fully incremental interface, the solver is instructed to apply
the changes made by the interpreter. In this case, the trail data structure
is used to compute the set of \nanozinc\ changes since the last choice
point.
\end{itemize}
@ -955,8 +955,8 @@ development version of Chuffed; and \chuffedMzn{}, Chuffed performing \gls{lns}
with \flatzinc\ neighbourhoods. These experiments illustrate that the \gls{lns}
implementations indeed perform well compared to the standard
solvers.\footnote{Our implementations are available at
\texttt{\justify{}https://github.com/Dekker1/\{libminizinc,gecode,chuffed\}} on
branches containing the keyword \texttt{on\_restart}.} All experiments were run
on a single core of an Intel Core i5 CPU @ 3.4 GHz with 4 cores and 16 GB RAM
running macOS High Sierra. \gls{lns} benchmarks are repeated with 10 different
random seeds and the average is shown. The overall timeout for each run is 120
@ -999,9 +999,9 @@ periods, which is done via the variables \mzninline{period_of} in the model.
one period and frees all courses that are assigned to it.
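A sketch of what such a neighbourhood could look like, using the \mzninline{sol}
function discussed above together with a \mzninline{uniform(l, u)} random-number
function, is shown below (\mzninline{period_of} is the model's variable array
mentioned above; \mzninline{courses} and \mzninline{n_periods} are hypothetical
names for its index set and the number of periods, and the formulation is only
indicative, omitting the \mzninline{status} check a real neighbourhood predicate
would need on the first restart):

\begin{mzn}
predicate free_one_period() =
  let { int: p = uniform(1, n_periods) } in
  forall (c in courses) (
    if sol(period_of[c]) != p
    then period_of[c] = sol(period_of[c])
    else true endif
  );
\end{mzn}

Each restart thus fixes every course outside the randomly chosen period to its
value in the incumbent solution, leaving only the courses of that period free.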
\begin{table}
\centering
\input{assets/table/6_gbac}
\caption{\label{tab:6-gbac}\texttt{gbac} benchmarks}
\end{table}
The results for \texttt{gbac} in \cref{tab:6-gbac} show that the overhead
@ -1030,9 +1030,9 @@ in the incumbent solution. These orders can then be freely reassigned to any
other slab.
\begin{table}
\centering
\input{assets/table/6_steelmillslab}
\caption{\label{tab:6-steelmillslab}\texttt{steelmillslab} benchmarks}
\end{table}
For this problem a solution with zero wastage is always optimal. The use of
@ -1064,9 +1064,9 @@ structured neighbourhood for this model. It randomly selects a time interval of
one-tenth the length of the planning horizon and frees all tasks starting in
that time interval, which allows a reshuffling of these tasks.
\begin{table}[b]
\centering
\input{assets/table/6_rcpsp-wet}
\caption{\label{tab:6-rcpsp-wet}\texttt{rcpsp-wet} benchmarks.}
\end{table}
\cref{tab:6-rcpsp-wet} shows that \gecodeReplay\ and \gecodeMzn\ perform almost


@ -25,16 +25,16 @@ the rules for identifiers and annotations.
% Items
<item> ::= <include-item>
\alt <var-decl-item>
\alt <enum-item>
\alt <assign-item>
\alt <constraint-item>
\alt <solve-item>
\alt <output-item>
\alt <predicate-item>
\alt <test-item>
\alt <function-item>
\alt <annotation-item>
<ti-expr-and-id> ::= <type-inst> ":" <ident>
@ -64,20 +64,20 @@ the rules for identifiers and annotations.
\alt "solve" <annotations> "minimize" <expr>
\alt "solve" <annotations> "maximize" <expr>
% Output items
<output-item> ::= "output" <expr>
% Annotation items
<annotation-item> ::= "annotation" <ident> <params>
% Predicate, test and function items
<predicate-item> ::= "predicate" <operation-item-tail>
<test-item> ::= "test" <operation-item-tail>
<function-item> ::= "function" <type-inst> ":" <operation-item-tail>
<operation-item-tail> ::= <ident> <params> <annotations> [ "=" <expr> ]
<params> ::= [ ( <ti-expr-and-id> "," ... ) ]
@ -236,8 +236,8 @@ the rules for identifiers and annotations.
% Annotation literals
<ann-literal> ::= <ident> [ "(" <expr> "," ... ")" ]
% If-then-else expressions
<if-then-else-expr> ::= "if" <expr> "then" <expr> [ "elseif" <expr> "then" <expr> ]* "else" <expr> "endif"
% Call expressions
<call-expr> ::= <ident-or-quoted-op> [ "(" <expr> "," ... ")" ]
@ -259,7 +259,7 @@ the rules for identifiers and annotations.
% % Miscellaneous
% Identifiers
<ident> ::= \(regexp\left(\texttt{[A-Za-z][A-Za-z0-9\_]*}\right)\)
\alt \(regexp\left(\verb|'[^'\xa\xd\x0]*'|\right)\)
% Identifiers and quoted operators
<ident-or-quoted-op> ::= <ident>

File diff suppressed because it is too large.


@ -94,7 +94,7 @@ following publication:
\printbibliography[heading=none]
\end{refsection}
\chapter{Acknowledgements}
\mainmatter{}