diff --git a/paper/.vscode/ltex.dictionary.en-US.txt b/paper/.vscode/ltex.dictionary.en-US.txt new file mode 100644 index 0000000..e2c9f51 --- /dev/null +++ b/paper/.vscode/ltex.dictionary.en-US.txt @@ -0,0 +1,19 @@ +LinearAlgebra +BenchmarkTools +@benchmark +mul +BigInt +SumOfSquares +JuMP +MutableArithmetics +Dowson +Lubin +MultivariatePolynomials +jl +gcd +num +BangBang +MathOptInterface +@rewrite +JuMP-specific +MPZ diff --git a/paper/code/axiom.jl b/paper/code/axiom.jl new file mode 100644 index 0000000..0b11b4b --- /dev/null +++ b/paper/code/axiom.jl @@ -0,0 +1,17 @@ +function operate!!(op, args...) + T = typeof.(args) + if mutability(T[1], op, T...) isa IsMutable + return operate!(op, args...) + else + return op(args...) + end +end +function operate_to!!(output, op, args...) + O = typeof(output) + T = typeof.(args) + if mutability(O, op, T...) isa IsMutable + return operate_to!(output, op, args...) + else + return op(args...) + end +end diff --git a/paper/code/matmul.jl b/paper/code/matmul.jl new file mode 100644 index 0000000..b651783 --- /dev/null +++ b/paper/code/matmul.jl @@ -0,0 +1,4 @@ +function Base.:*(A::Matrix{S}, b::Vector{T}) where {S,T} + c = Vector{U}(undef, size(A, 1)) # What is U ? 
+ return mul_to!(c, A, b) +end diff --git a/paper/code/rational.jl b/paper/code/rational.jl new file mode 100644 index 0000000..535efa2 --- /dev/null +++ b/paper/code/rational.jl @@ -0,0 +1,4 @@ +struct Rational{T} + num::T + den::T +end diff --git a/paper/code/sum.jl b/paper/code/sum.jl new file mode 100644 index 0000000..8812902 --- /dev/null +++ b/paper/code/sum.jl @@ -0,0 +1,7 @@ +function sum(x::Vector) + acc = zero(eltype(x)) + for el in x + acc = acc + el + end + return acc +end diff --git a/paper/code/term.jl b/paper/code/term.jl new file mode 100644 index 0000000..39a146d --- /dev/null +++ b/paper/code/term.jl @@ -0,0 +1,9 @@ +struct Term{T} + coef::T + sym::SymbolicVariable +end +struct Sum{T} + terms::Vector{Term{T}} +end +Base.:+(s::Sum, t::Term) = Sum(push!(copy(s.terms), t)) +Base.zero(::Type{Term{T}}) where {T} = Sum(Term{T}[]) diff --git a/paper/code/termsum.jl b/paper/code/termsum.jl new file mode 100644 index 0000000..896ee41 --- /dev/null +++ b/paper/code/termsum.jl @@ -0,0 +1,13 @@ +function sum(x) + acc = zero(eltype(x)) + for el in x + acc = add!!(acc, el) + end + return acc +end +add!!(a, b) = a + b # default fallback +add!!(a::BigInt, b::BigInt) = Base.GMP.MPZ.add!(a, b) +function add!!(s::Sum, t::Term) + push!(s.terms, t) + return s +end diff --git a/paper/code/verify.jl b/paper/code/verify.jl new file mode 100644 index 0000000..1800f86 --- /dev/null +++ b/paper/code/verify.jl @@ -0,0 +1,10 @@ +module Verify +using MutableArithmetics +struct SymbolicVariable end +for file in readdir(@__DIR__) + path = joinpath(@__DIR__, file) + if path != (@__FILE__) + include(file) + end +end +end diff --git a/paper/header.tex b/paper/header.tex index 032c6a0..8f40c90 100644 --- a/paper/header.tex +++ b/paper/header.tex @@ -3,14 +3,13 @@ \title{MutableArithmetics: An API for mutable operations} \author[1]{Benoît Legat} -\affil[1]{Massachusetts Institute of Technology} +\affil[1]{KU Leuven} \keywords{Julia, Optimization, Performance, Interface} \hypersetup{ 
pdftitle = {MutableArithmetics: An API for mutable operations}, -pdfsubject = {JuliaCon 2019 Proceedings}, +pdfsubject = {JuliaCon 2021 Proceedings}, pdfauthor = {Benoît Legat}, pdfkeywords = {Julia, Optimization, Performance, Interface}, } - diff --git a/paper/paper.tex b/paper/paper.tex index 889a939..20de2e8 100644 --- a/paper/paper.tex +++ b/paper/paper.tex @@ -1,6 +1,7 @@ \documentclass{juliacon} \setcounter{page}{1} +\usepackage{amsmath} \usepackage{cleveref} \crefformat{footnote}{#2\footnotemark[#1]#3} @@ -8,6 +9,8 @@ \addlitjlmacros{@rewrite}{@rewrite}{8} +\hyphenation{Mutable-Arithmetics} + \begin{document} \input{header} @@ -19,7 +22,7 @@ Arithmetic operations defined in Julia do not modify their arguments. However, in many situations, a variable represents an accumulator that can be modified in-place to contain the result, e.g., when summing the elements of an array. Moreover, for types that support mutation, mutating the value may have a significant performance benefit over creating a new instance. -This talk presents an interface that allows algorithms to exploit mutability in arithmetic operations in a generic manner. +This paper presents an interface that allows algorithms to exploit mutability in arithmetic operations in a generic manner. \end{abstract} @@ -28,10 +31,10 @@ \section{Introduction} Julia enables developers to write generic algorithms that work with arbitrary number types, as long as the types implement the needed operations such as \lstinline|+|, \lstinline|*|, \lstinline|-|, \lstinline|zero|, \lstinline|one|, ... The implementations of these arithmetic operations in Julia do not modify their arguments. Instead, they return a new instance of the type as the result. 
-However, in many situations, a variable represents an accumulator that can be mutated\footnote{\label{foot:mutate}In this paper, the terminology ``mutate $x$ to $y$'' means modifying $x$ in-place in such a way that its value after the modification is equal to $y$} to the result, e.g., -when summing the elements of an array or when implementing array multiplication. +However, in many situations, a variable represents an accumulator that can be mutated\footnote{\label{foot:mutate}In this paper, the terminology ``mutate $x$ to $y$'' means modifying $x$ in-place in such a way that its value after the modification is equal to $y$.} to the result, e.g., +when summing the elements of an array of \lstinline|BigInt| or when implementing array multiplication. Moreover, for types that support mutation, mutating the value may have a significant performance benefit over creating a new instance. -Examples of types that implement arithmetic operations and support mutation include \lstinline|Array|s, multiple precision numbers, JuMP~\cite{dunning2017jump} expressions, MathOptInterface (MOI)~\cite{legat2021mathoptinterface} functions, and polynomials (univariate~\cite{verzani2021polynomials} or multivariate~\cite{legat2021multivariatepolynomials}). +Examples of types that implement arithmetic operations and support mutation include \lstinline|Array|s, multiple-precision numbers, JuMP~\cite{dunning2017jump} expressions, MathOptInterface (MOI)~\cite{legat2021mathoptinterface} functions, and polynomials (univariate~\cite{verzani2021polynomials} or multivariate~\cite{legat2023multivariate}). This paper introduces an interface called \ma{}. It allows mutable types to implement an arithmetic exploiting their mutability, and for algorithms to @@ -40,18 +43,19 @@ \section{Introduction} \begin{enumerate} \item \label{item:reimplement} - it re-implements part of the Julia standard library on top of the API to allow mutable types to use a more efficient version than the default one. 
+ MutableArithmetics re-implements part of the Julia standard library on top of the API to allow mutable types to use a more efficient version than the default one. \item \label{item:rewrite} - it defines a \lstinline|@rewrite| macro that rewrites an expression using the standard operations (e.g \lstinline|+|, \lstinline|*|, ...) into an expression that exploits the mutability of the intermediate values created when evaluating the expression. + MutableArithmetics defines a \lstinline|@rewrite| macro that rewrites an expression using the standard operations (e.g., \lstinline|+|, \lstinline|*|, ...) into an expression that exploits the mutability of the intermediate values created when evaluating the expression. \end{enumerate} JuMP~\cite{dunning2017jump} used to have its own API for mutable operations on JuMP expressions and -its own JuMP-specific implementation of \ref{item:reimplement} and \ref{item:rewrite}. +its own JuMP-specific implementation of \eqref{item:reimplement} and \eqref{item:rewrite}. These two features are one of the key reasons why JuMP is competitive in performance with commercial algebraic modeling languages~\cite[Section~3--4]{dunning2017jump}. These features were refactored into \ma{}, generalizing them to arbitrary mutable types. Starting from JuMP v0.21, JuMP expressions and MOI functions implement the \ma{} API, and -the JuMP-specific implementations of \ref{item:reimplement} and \ref{item:rewrite} were replaced by the generic versions implemented in \ma{} on top of the \ma{} API. +the JuMP-specific implementations of \eqref{item:reimplement} and \eqref{item:rewrite} were +replaced by the generic versions implemented in \ma{}. \section{Design consideration} This section provides concrete examples that motivated the design of \ma{}. @@ -60,16 +64,8 @@ \section{Design consideration} \subsection{May mutate} \label{sec:may_mutate} Consider the task of summing the elements of a vector. 
-By default, Julia \lstinline|sum| function will compute the sum with a code equivalent to the following: -\begin{jllisting} -function sum(x::Vector) - acc = zero(eltype(x)) - for el in x - acc = acc + el - end - return acc -end -\end{jllisting} +By default, Julia's \lstinline|sum| function will compute the sum with a code equivalent to the following: +\jlinputlisting{code/sum.jl} If the type of the elements of \lstinline|x| is \lstinline|BigInt|, it is more efficient to replace the line \lstinline|acc = acc + el| by the line \lstinline|Base.GMP.MPZ.add!(acc, el)|. @@ -78,22 +74,14 @@ \subsection{May mutate} On the other hand, \lstinline|Base.GMP.MPZ.add!| mutates\cref{foot:mutate} \lstinline|acc| to the result. Even if using \lstinline|Base.GMP.MPZ.add!| provides a significant performance improvement, -the time complexity order is identical: $\Theta(nm)$ in both cases where $n$ is the number of elements and $m$ is the number of bits of an element. +the time complexity order is identical: $\Theta(nm)$ in both cases, +where $n$ is the number of elements and $m$ is the number of bits of an element. We now consider a mutable element type for which exploiting mutability affects the time complexity. -Consider a type \lstinline|SymbolicVariable| representing a symbolic variable and the following types representing a linear combinations of these variables with coefficients of type \lstinline|T|. -This example encapsulates for instance JuMP affine expressions~\cite{dunning2017jump}, MOI affine functions~\cite{legat2021mathoptinterface}, polynomials (univariate~\cite{verzani2021polynomials} or multivariate~\cite{legat2021multivariatepolynomials}) or symbolic sums~\cite{gowda2021high}. 
-\begin{jllisting} -struct Term{T} - coef::T - sym::SymbolicVariable -end -struct Sum{T} - terms::Vector{Term{T}} -end -Base.:+(s::Sum, t::Term) = Sum(push!(copy(s.terms), t)) -Base.zero(::Type{Term{T}}) where {T} = Sum(Term{T}[]) -\end{jllisting} +Consider a type \lstinline|SymbolicVariable| representing a symbolic variable +and the following types representing linear combinations of these variables with coefficients of type \lstinline|T|. +This example encapsulates for instance JuMP affine expressions~\cite{dunning2017jump}, MOI affine functions~\cite{legat2021mathoptinterface}, polynomials (univariate~\cite{verzani2021polynomials} or multivariate~\cite{legat2023multivariate}) or symbolic sums~\cite{gowda2021high}. +\jlinputlisting{code/term.jl} Calling \lstinline|sum| on a vector of $n$ \lstinline|Term{T}| has a time complexity $\Theta(n^2)$. Indeed, when calling \lstinline|acc + el| where \lstinline|acc| contains the sum of the first \lstinline|k| terms and \lstinline|el| is the $(k+1)$th term, the result cannot mutate \lstinline|acc.terms| and the copy of \lstinline|acc.terms| has time complexity $\Theta(k)$. @@ -101,25 +89,11 @@ \subsection{May mutate} A possible mutable interface would be to define an \lstinline|add!!| function that is similar to \lstinline|+| with the only difference being that it is allowed to modify its first argument. -By default, \lstinline|add!!| would fallback to calling \lstinline|+| +By default, \lstinline|add!!| would fall back to calling \lstinline|+| so that a method calling \lstinline|add!!| would both exploit the mutability of mutable types but would also work for non-mutable types. 
For our example, an implementation could be: -\begin{jllisting} -function sum(x) - acc = zero(eltype(x)) - for el in x - acc = add!!(acc, el) - end - return acc -end -add!!(a, b) = a + b # default fallback -add!!(a::BigInt, b::BigInt) = Base.GMP.MPZ.add!(a, b) -function add!!(s::Sum, t::Term) - push!(s.terms, t) - return s -end -\end{jllisting} +\jlinputlisting{code/termsum.jl} Note that the time complexity of the sum of $n$ \lstinline|Term| is now $\Theta(n)$. Julia implements a specialized method for computing the sum of \lstinline|BigInt|s that uses \lstinline|Base.GMP.MPZ.add!|. @@ -132,16 +106,13 @@ \subsection{May mutate} as well as the \lstinline|destructive_add!| function in JuMP v0.20. \subsection{Should mutate} -When writing a code that should mutate the first argument of an operation, -an API that silently returns the result without modifying the first argument is not appropriate. +In situations where the correctness of a code depends on +the mutation of the first argument of an operation, +an API that allows an implementation to silently return the +result without modifying the first argument is not appropriate. -To motivate this, consider the rational Julia type: -\begin{jllisting} -struct Rational{T} - num::T - den::T -end -\end{jllisting} +To motivate this, consider the \lstinline|Rational| Julia type: +\jlinputlisting{code/rational.jl} Suppose we want to mutate\cref{foot:mutate} some rational \lstinline|a::Rational| to the product of \lstinline|a::Rational| and some other rational \lstinline|b::Rational| (ignoring the simplification with \lstinline|gcd| for simplicity). @@ -159,8 +130,8 @@ \subsection{Mutability} A third useful feature for users of a mutable API is the ability to determine whether objects of a given type can be mutated\cref{foot:mutate} to the result of a mutable operation. -To motivate this, consider again the multiplication of rational number introduces in the previous section.
-An implementation \lstinline|mul!!| (where \lstinline|mul!!| may mutate its first argument and \lstinline|mul!| should mutate its first argument) +To motivate this, consider again the multiplication of rational numbers introduced in the previous section. +An implementation \lstinline|mul!!| (where \lstinline|mul!!| \emph{may} mutate its first argument and \lstinline|mul!| \emph{should} mutate its first argument) for rational numbers could be: \begin{jllisting} function mul!!(a::Rational{S}, b::Rational{T}) @@ -186,20 +157,15 @@ \subsection{Promotion} %For the generic sum implementation to work, %\lstinline|zero(::Type{SymbolicVariable})| may return \lstinline|zero(Sum{Int})|. -Consider the following matrix-vector multiplication implementation with \lstinline|SymbolicVariable| +Consider the following matrix-vector multiplication implementation where \lstinline|mul_to!| mutates\cref{foot:mutate} \lstinline|c| to \lstinline|A * b|. -\begin{jllisting} -function Base.:*(A::Matrix{S}, b::Vector{T}) - c = Vector{U}(undef, size(A, 1)) # What is U ? - return mul_to!(c, A, b) -end -\end{jllisting} +\jlinputlisting{code/matmul.jl} What should be the element type \lstinline|U| of the accumulator \lstinline|c| ? For instance, if \lstinline|S| is \lstinline|Float64| and \lstinline|T| is \lstinline|SymbolicVariable| then \lstinline|U| should be \lstinline|Sum{Float64}|. -LinearAlgebra uses \lstinline|Base.promote_op| for this which relies on Julia inference -to determine the type of a sum of products of elements of type \lstinline|S| and \lstinline|T|. +LinearAlgebra uses \lstinline|Base.promote_op| for this which relies on Julia's inference +to determine the type of the sum of products of elements of type \lstinline|S| and \lstinline|T|. In the summing example introduced in \cref{sec:may_mutate}, the type of the accumulator should also be determined as the type of the sum of elements of the vector. 
@@ -212,7 +178,7 @@ \section{Implementing the interface} \ma{} defines the following four functions that provides the features motivated in the corresponding four subsections of the previous section. \begin{enumerate} \item \lstinline|operate!!(op::Function, args...)| (resp. \lstinline|operate_to!!(output, op::Function, args...)|) returns the result of \lstinline|op(args...)| and may mutate \lstinline|args[1]| (resp. \lstinline|output|). - \item \lstinline|operate!(op::Function, args...)| (resp. \lstinline|operate_to!(output, op::Function, args...)|) mutate\cref{foot:mutate} \lstinline|args[1]| (resp. \lstinline|output|) to the result of \lstinline|op(args...)| and returns it. + \item \lstinline|operate!(op::Function, args...)| (resp. \lstinline|operate_to!(output, op::Function, args...)|) mutates\cref{foot:mutate} \lstinline|args[1]| (resp. \lstinline|output|) to the result of \lstinline|op(args...)| and returns it. \item \lstinline|mutability(T::Type, op::Function, args::Type...)| is a trait returning \lstinline|IsMutable()| if objects of type \lstinline|T| can be mutated\cref{foot:mutate} to the result of \lstinline|op(::args[1], ::args[2], ...)| and \lstinline|IsNotMutable()| otherwise. \item \lstinline|promote_operation(op::Function, args::Type...)| returns the return type of \lstinline|op(::args...)|. \end{enumerate} @@ -227,36 +193,19 @@ \subsection{Promotion fallback} defaults to \lstinline|typeof(zero(S) + zero(T))| which is correct if \lstinline|+(::S, ::T)| is type-stable. There are two cases for which this default implementation of \lstinline|promote_operation| is not sufficient. -First, as we will see below, \lstinline|promote_operation| is at the core of many operations so it is important that it is efficient. +First, as we will see below, \lstinline|promote_operation| is at the core of many operations, so it is important that it is efficient. Julia may be able to compute the result of \lstinline|typeof(zero(S) + zero(T))| at compile time. 
However, if the body of \lstinline|promote_operation| is not evaluated at compile-time, this can cause performance issues. This is amplified for mutable types as \lstinline|zero(S) + zero(T)| may allocate. Second, if \lstinline|zero(S) + zero(T)| ends up calling \lstinline|promote_operation(+, S, T)|, this default implementation will not terminate. -In both of these cases, \lstinline|promote_operation| should have a specialized implementation, e.g., by hardcoding the result for each pairs of concrete types \lstinline|S| and \lstinline|T|. +In both of these cases, \lstinline|promote_operation| should have a specialized implementation, e.g., by hardcoding the result for each pair of concrete types \lstinline|S| and \lstinline|T|. -Note that implementing \lstinline|promote_operation| should be significantly easier than implementing the actual operation where the actual value of the result need to be computed, not just its type. Hence this should not consitute a burden for the implementation. +Note that implementing \lstinline|promote_operation| should be significantly easier than implementing the actual operation where the actual value of the result needs to be computed, not just its type. +Hence this should not constitute a burden for the implementation. \subsection{May mutate fallback} -We have the following default implementations of \lstinline|operate!!| (resp. \lstinline|operate_to!!|). -\begin{jllisting} -function operate!!(op, args...) - T = typeof.(args) - if mutability(T[1], op, T...) isa IsMutable - return operate!(op, args...) - else - return op(args...) - end -end -function operate_to!!(output, op, args...) - O = typeof(output) - T = typeof.(args) - if mutability(O, op, T...) isa IsMutable - return operate_to!(output, op, args...) - else - return op(args...) - end -end -\end{jllisting} +We have the following default implementations of \lstinline|operate!!| and \lstinline|operate_to!!|. 
+\jlinputlisting{code/axiom.jl} Note that this default implementation should have optimal performance in case \lstinline|mutability| is evaluated at compile-time and the \lstinline|if| statement is optimized out by the compiler. Indeed, suppose that another implementation is faster than this default one. @@ -268,7 +217,7 @@ \subsection{Mutability fallback} It turns out that all types considered at the moment fall into two categories. The first category is made of the types \lstinline|T| for which \lstinline|mutability(T, ...)| always return \lstinline|IsNotMutable()|. -These are typically the non-mutable type, e.g., \lstinline|Int|, \lstinline|Float64|, \lstinline|Rational{Int}|, ... +These are typically the non-mutable types, e.g., \lstinline|Int|, \lstinline|Float64|, \lstinline|Rational{Int}|, ... In the second category are the types \lstinline|T| for which \lstinline|mutability(T, op, args...)| returns \lstinline|IsMutable()| if and only if \lstinline|T == promote_operation(op, args...)|. @@ -278,11 +227,7 @@ \subsection{Mutability fallback} Then we have the following fallback for \lstinline|mutability|: \begin{jllisting} mutability(::Type) = IsNotMutable() -function mutability( - T::Type, - op::Function, - args::Type..., -) +function mutability(T::Type, op::Function, args::Type...) if mutability(T) isa IsMutable && T == promote_operation(op, args...) return IsMutable() @@ -302,13 +247,8 @@ \subsection{Minimal interface} (we assume the operation is \lstinline|+| and the result type is \lstinline|Foo|), \begin{jllisting} -function promote_operation( - ::typeof(+), - ::Type{Foo}, - ::Type{Foo}, -) - return Foo -end +promote_operation(::typeof(+), ::Type{Foo}, ::Type{Foo}) = + Foo function operate!(::typeof(+), a::Foo, b::Foo) # ... 
return a @@ -326,10 +266,8 @@ \subsection{Minimal interface} Then \begin{unnumlist} \item \lstinline|mutability(::Foo, +, Foo, Foo)|, - \item \lstinline|operate!!(+, ::Foo, ::Foo)|, - \item \lstinline|operate_to!!(::Foo, +, ::Foo, ::Foo)|, - \item \lstinline|add!(::Foo, ::Foo)|, - \item \lstinline|add_to!(::Foo, ::Foo, ::Foo)|, + \item \lstinline|operate!!(+, ::Foo, ::Foo)| and + \item \lstinline|operate_to!!(::Foo, +, ::Foo, ::Foo)| \item \lstinline|add!!(::Foo, ::Foo)| and \item \lstinline|add_to!!(::Foo, ::Foo, ::Foo)| \end{unnumlist} @@ -338,9 +276,11 @@ \subsection{Minimal interface} \section{Rewriting macro} As mentioned in the introduction, \ma{} implements a \lstinline|@rewrite| macro that rewrites: +\begin{minipage}{\linewidth} \begin{jllisting} -@rewrite(a * b + c * d - e * f * g - sum(i * y[i]^2 for i in 2:n)) +@rewrite(a * b + c * d - e * f * g - sum(i * y[i] for i in 2:n)) \end{jllisting} +\end{minipage} into \begin{jllisting} acc0 = Zero() @@ -414,7 +354,7 @@ \subsection{Matrix-vector product} \end{jllisting} In order to avoid allocating $n^2$ new \lstinline|BigInt|s, \ma{} enables operations to communicate the buffers they need to allocate through the \lstinline|buffer_for| function. -The buffer can then be reused between multiple occurence of the same operation with \lstinline|buffered_operate!|. +The buffer can then be reused between multiple occurrences of the same operation with \lstinline|buffered_operate!|. 
By default, \lstinline|buffer_for| returns \lstinline|nothing| and \lstinline|buffered_operate!| has the following fallback: \begin{jllisting} @@ -434,33 +374,29 @@ \subsection{Matrix-vector product} a::BigInt, x::BigInt, y::BigInt, -) where {N} +) operate_to!(buffer, *, x, y) return operate!(+, a, buffer) end - \end{jllisting} Then, the matrix multiplication can create the buffer only once with \begin{jllisting} -buf = buffer_for( - add_mul, - eltype(c), - eltype(A), - eltype(b), -) +buf = buffer_for(add_mul, eltype(c), eltype(A), eltype(b)) \end{jllisting} and then call \begin{jllisting} buffered_operate!(buf, add_mul, c[i], A[i, j], b[j]) \end{jllisting} -This explains why there is only 48 bytes allocated in the benchmark result above, this is the -allocation of a single \lstinline|BigInt()|. +This explains why there are only 48 bytes allocated in the benchmark result above, +which corresponds to the allocation of a single \lstinline|BigInt()|. In fact, a buffer needed for a low-level operation can even be communicated -at the level of higher level operations. +at the level of higher-level operations. This allows for instance to allocate the buffer only once even if several matrix products are computed: + +%\begin{minipage}{\linewidth} \begin{jllisting} buf = buffer_for( add_mul, @@ -474,6 +410,7 @@ \subsection{Matrix-vector product} 0 \end{jllisting} +%\end{minipage} \subsection{Mutability layers} Mutable states in objects can form a hierarchy of mutable layers. @@ -514,11 +451,11 @@ \subsection{Mutability layers} 0 \end{jllisting} -As a matter of fact, one of the motivation for \ma{} -was to improve the performance of SumOfSquares~\cite{legat2020sumofsquares}. +As a matter of fact, one of the motivations for \ma{} +was to improve the performance of SumOfSquares~\cite{weisser2019polynomial}. SumOfSquares is using multivariate polynomials with JuMP expressions or MOI functions as coefficients. 
-JuMP had a interface for exploiting the mutability of its expressions +JuMP had an interface for exploiting the mutability of its expressions, but MultivariatePolynomials was not exploiting it. MultivariatePolynomials now implements \ma{} and also exploits the mutability of its coefficients, whether they are \lstinline|BigInt|, JuMP expressions, MOI functions or any other types implementing \ma{}. @@ -527,10 +464,10 @@ \section{Conclusion} \ma{} provides an interface for mutable operations. As detailed in this paper, the design of the interface provides both an extensive set of features for the user without sacrificing -the ease of implementation of the interface. +the ease of implementing the interface. Moreover, it provides a zero-cost abstraction so that a single generic implementation can handle mutable and non-mutable inputs. -As the same API is used for arrays, functions, numbers, ... multi-layered mutability can be exploited efficiently +As the same API is used for arrays, functions, numbers, ... multi-layered mutability can be exploited efficiently, and the intermediate allocations needed by inner layers can be preallocated from the outside layers using a buffer API. @@ -540,8 +477,10 @@ \section*{Acknowledgments} Moreover, we would like to thank the authors of JuMP: Iain Dunning, Joey Huchette and Miles Lubin for the implementations of the mutability code in JuMP from which MutableArithmetics was largely inspired. Finally, we would like to thank all others contributors to the packages for their valuable inputs\footnote{The full list of contributors is available \href{https://github.com/jump-dev/MutableArithmetics.jl/graphs/contributors}{on Github}.}. -The first author is a post-doctoral fellow of the Belgian American Educational Foundation. -His work is partially supported by the National Science Foundation under Grant No. OAC-1835443. 
+The author was supported by +a Belgian American Educational Foundation (BAEF) postdoctoral fellowship, +the National Science Foundation (NSF) under Grant No. OAC-1835443 and +the European Commission (ERC Adv. Grant) under Grant 885682. \input{bib.tex} diff --git a/paper/paper.yml b/paper/paper.yml index 1eddef3..418283f 100644 --- a/paper/paper.yml +++ b/paper/paper.yml @@ -9,7 +9,7 @@ authors: orcid: 0000-0002-5921-368X affiliation: 1 affiliations: - - name: Massachusetts Institute of Technology + - name: KU Leuven index: 1 date: 29 July 2021 bibliography: ref.bib diff --git a/paper/ref.bib b/paper/ref.bib index 4c508bc..533248b 100644 --- a/paper/ref.bib +++ b/paper/ref.bib @@ -39,25 +39,16 @@ @Article{dunning2017jump publisher = {SIAM}, } -@Article{legat2021mathoptinterface, - author = {Legat, Beno\^{\i}t and Dowson, Oscar and Garcia, Joaquim Dias and Lubin, Miles}, - journal = {INFORMS Journal on Computing}, - title = {{MathOptInterface}: a data structure for mathematical optimization problems}, - year = {2021}, - note = {In press}, -} - -@software{legat2021multivariatepolynomials, - author = {Benoît Legat and - Sascha Timme and - Robin Deits}, - title = {{JuliaAlgebra/MultivariatePolynomials.jl: v0.3.18}}, - month = jul, - year = 2021, - publisher = {Zenodo}, - version = {v0.3.18}, - doi = {10.5281/zenodo.5083796}, - url = {https://doi.org/10.5281/zenodo.5083796} +@article{legat2021mathoptinterface, + title={{MathOptInterface}: a data structure for mathematical optimization problems}, + author={Legat, Beno{\^\i}t and Dowson, Oscar and Dias Garcia, Joaquim and Lubin, Miles}, + journal={INFORMS Journal on Computing}, + year={2021}, + volume={34}, + number={2}, + pages={672--689}, + doi={10.1287/ijoc.2021.1067}, + publisher={INFORMS} } @software{verzani2021polynomials, @@ -65,23 +56,37 @@ @software{verzani2021polynomials Miles Lucas and Zdeněk Hurák and Jameson Nash}, - title = {{JuliaMath/Polynomials.jl: v2.0.14}}, - version = {v2.0.14}, + title = 
{{JuliaMath/Polynomials.jl: v4.0.5}}, + version = {v4.0.5}, url = {https://github.com/JuliaMath/Polynomials.jl} } @software{takafumi2021bangbang, author = {Takafumi Arakaki}, - title = {{JuliaFolds/BangBang.jl: v0.3.31}}, - version = {v0.3.31}, - url = {https://github.com/JuliaFolds/BangBang.jl} + title = {{JuliaFolds2/BangBang.jl: v0.4.1}}, + version = {v0.4.1}, + url = {https://github.com/JuliaFolds2/BangBang.jl} } @article{gowda2021high, - title={High-performance symbolic-numerics via multiple dispatch}, - author={Gowda, Shashi and Ma, Yingbo and Cheli, Alessandro and Gwozdz, Maja and Shah, Viral B and Edelman, Alan and Rackauckas, Christopher}, - journal={arXiv preprint arXiv:2105.03949}, - year={2021} + author = {Shashi Gowda and + Yingbo Ma and + Alessandro Cheli and + Maja Gw{\'{o}}zdz and + Viral B. Shah and + Alan Edelman and + Christopher Rackauckas}, + title = {High-performance symbolic-numerics via multiple dispatch}, + journal = {{ACM} Commun. Comput. Algebra}, + volume = {55}, + number = {3}, + pages = {92--96}, + year = {2021}, + url = {https://doi.org/10.1145/3511528.3511535}, + doi = {10.1145/3511528.3511535}, + timestamp = {Sat, 28 Jan 2023 18:54:19 +0100}, + biburl = {https://dblp.org/rec/journals/cca/GowdaMCGSER21.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} } @inproceedings{AbstractAlgebra.jl-2017, @@ -113,9 +118,20 @@ @ARTICLE{BenchmarkTools.jl-2016 adsnote = {Provided by the SAO/NASA Astrophysics Data System} } -@Conference{legat2020sumofsquares, - author = {Legat, Beno{\^\i}t and Weisser, Tillmann}, - title = {SumOfSquares: A Julia package for Polynomial Optimization}, - booktitle = {INFORMS Annual Meeting}, - year = {2020}, +@Conference{weisser2019polynomial, + author = {Weisser, Tillmann and Legat, Beno{\^\i}t and Coey, Chris and Kapelevich, Lea and Vielma, Juan Pablo}, + title = {Polynomial and Moment Optimization in Julia and JuMP}, + booktitle = {JuliaCon}, + year = {2019}, + url = 
{https://pretalx.com/juliacon2019/talk/QZBKAU/}, } + +@Conference{legat2023multivariate, + author = {Legat, Beno{\^\i}t}, + title = {Multivariate polynomials in {Julia}}, + month = jul, + year = 2022, + booktitle = {JuliaCon}, + year = {2022}, + url = {https://pretalx.com/juliacon-2022/talk/TRFSJY/}, +} \ No newline at end of file