%% Copyright 2021-2023 Ian Jauslin
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
\documentclass[subsection_in_all]{ian}
\usepackage{code}
\usepackage{dsfont}
\usepackage{largearray}
\usepackage{dlmf}
\def\Eta{H}
\begin{document}
\hbox{}
\hfil{\bf\Large
{\tt simplesolv}\par
\medskip
\hfil \large v0.4
}
\vfill
\tableofcontents
\vfill
\eject
\setcounter{page}1
\pagestyle{plain}
\indent {\tt simplesolv} is a tool to compute the solution of the equations of the ``Simplified approach'' to the repulsive Bose gas introduced in\-~\cite{CJL20,CJL21,CHe21}.
This approach provides an approximation to various observables of the ground state of the Hamiltonian
\begin{equation}
H_N=-\frac12\sum_{i=1}^N\Delta_i+\sum_{1\leqslant i<j\leqslant N}v(|x_i-x_j|)
\end{equation}
in three dimensions, with periodic boundary conditions, in the thermodynamic limit $N\to\infty$ at fixed density $\rho$.
\bigskip
\indent {\tt simplesolv} is written in {\tt julia}.
The source code is located in the {\tt src} directory of this bundle.
Throughout the documentation, we will refer to the directory containing the {\tt src} directory as the ``installation directory'', and will denote it by the bash variable {\tt\$SIMPLESOLV} (so that the main julia file, for instance, is located at {\tt \$SIMPLESOLV/src/main.jl}).
\vskip20pt
\section{Basic usage}
\indent Denoting the location of the installation directory by {\tt\$SIMPLESOLV}, {\tt simplesolv} is run by calling
\begin{code}
julia \$SIMPLESOLV/src/main.jl [args]
\end{code}
where {\tt [args]} takes the form {\tt [-p params] [-U potential] [-M method] [-s savefile] command}, in which the bracketed arguments are optional.
\bigskip
\indent A few commands support multithreaded execution.
To enable {\tt julia} to run on several processors, it should be run with the {\tt -p} option.
For example, to run on 8 CPUs, run
\begin{code}
julia -p 8 \$SIMPLESOLV/src/main.jl [args]
\end{code}
\bigskip
\indent {\tt command} specifies which computation is to be carried out, such as {\tt energy} to compute the ground state energy, or {\tt condensate\_fraction} for the uncondensed fraction.
The list of available commands depends on the \refname{sec:methods}{{\tt method}} argument, which specifies one of the available methods to solve the equation at hand.
The available methods are (see section\-~\ref{sec:methods} for further details)
\begin{itemize}
\item \refname{sec:easyeq}{{\tt easyeq}} ({\bf default}) for the Simple or Medium equation, or any interpolation between them, with a soft potential, using the Newton algorithm,
\item \refname{sec:anyeq}{{\tt anyeq}} for any equation in the ``Simplified approach'' using the Newton algorithm,
\item \refname{sec:simpleq-Kv}{{\tt simpleq-Kv}} for the Simple equation using explicit expressions involving $\mathfrak Kv$ (see\-~(\ref{Kv})),
\item \refname{sec:simpleq-hardcore}{{\tt simpleq-hardcore}} for the Simple equation with a hard core potential using the Newton algorithm,
\item \refname{sec:simpleq-iteration}{{\tt simpleq-iteration}} for the Simple equation with a soft potential using the iteration defined in\-~\cite{CJL20}.
\end{itemize}
Each method is described in detail below, along with the list of commands ({\tt command}) and parameters ({\tt params}) compatible with it.
In addition, the scattering length can be displayed using the {\tt scattering\_length} command:
\begin{code}
julia \$SIMPLESOLV/src/main.jl scattering\_length
\end{code}
\bigskip
\indent {\tt params} should be a `{\tt;}' separated list of entries, each of which is of the form {\tt key=value}.
For example {\tt -p "rho=1e-6;v\_a=2"}.
(Note that you should not end the list of parameters with a `{\tt;}', otherwise {\tt simplesolv} will interpret that as there being an empty parameter entry, which it cannot split into a key and value, and will fail.)
\bigskip
\indent \refname{sec:potentials}{{\tt potential}} specifies which potential $v$ should be used, from the following list (see section\-~\ref{sec:potentials} for further details).
\begin{itemize}
\item \refname{sec:exp}{{\tt exp}} ({\bf default}) for $v(|x|)=ae^{-|x|}$ with $a>0$,
\item \refname{sec:tent}{{\tt tent}} for the tent potential,
\item as well as the other potentials described in section\-~\ref{sec:potentials}.
\end{itemize}
\vskip20pt
\section{Methods}\label{sec:methods}
\subsection{\tt easyeq}\label{sec:easyeq}
\indent This method solves the family of equations
\begin{equation}
-\Delta u=(1-u)v-2\rho K+\rho^2L
\label{easyeq}
\end{equation}
with
\begin{equation}
K:=\beta_Ku\ast S+(1-\beta_K)\frac{2e}\rho u
,\quad
L:=\beta_Lu\ast u\ast S+(1-\beta_L)\frac{2e}\rho u\ast u
\label{easyeq_KL}
\end{equation}
\begin{equation}
S:=(1-u)v
,\quad
e:=\frac\rho2\int dx\ (1-u(|x|))v(|x|)
\label{easyeq_Se}
\end{equation}
for $\beta_K,\beta_L\in[0,1]$.
\bigskip
\indent \makelink{eq:easyeq_simpleq}{} \makelink{eq:easyeq_medeq}{}
The special choice $\beta_K=\beta_L=0$ is called the Simple equation ({\tt simpleq}), and the choice $\beta_K=\beta_L=1$ is called the Medium equation ({\tt medeq}).
\bigskip
\subsubsection{Usage}
\indent Unless otherwise noted, this method takes the following parameters (specified via the {\tt [-p params]} flag, as a `{\tt;}' separated list of entries).
\begin{itemize}
\item\makelink{param:easyeq_rho}{} {\tt rho} ({\tt Float64}, default: $10^{-6}$): density $\rho$.
\item\makelink{param:easyeq_tolerance}{} {\tt tolerance} ({\tt Float64}, default: $10^{-11}$): maximal size of final step in Newton iteration.
\item\makelink{param:easyeq_maxiter}{} {\tt maxiter} ({\tt Int64}, default: 21): maximal number of iterations of the Newton algorithm before giving up.
\item\makelink{param:easyeq_order}{} {\tt order} ({\tt Int64}, default: 100): order used for all Gauss quadratures (denoted by $N$ below).
\item\makelink{param:easyeq_minlrho_init}{} {\tt minlrho\_init} ({\tt Float64}, default: $-6$): to initialize the Newton algorithm, we first compute the solution for smaller values of $\rho$; {\tt minlrho\_init} is the minimal value of $\log_{10}\rho$ used to start this initialization process.
\item\makelink{param:easyeq_nlrho_init}{} {\tt nlrho\_init} ({\tt Int64}, default: 0): number of steps in the initialization process described above.
Set to 0 to disable the incremental initialization process.
\item\makelink{param:easyeq_bK}{} {\tt bK}, {\tt bL} ({\tt Float64}, default: 1, 1): the values of $\beta_K$ and $\beta_L$.
\item\makelink{param:easyeq_eq}{} {\tt eq} ({\tt String}, default: ``{\tt simpleq}'', acceptable values: ``{\tt simpleq}'', ``{\tt medeq}''): A shortcut to select either the \refname{eq:easyeq_simpleq}{Simple equation} ($\beta_K=\beta_L=0$) or the \refname{eq:easyeq_medeq}{Medium equation} ($\beta_L=\beta_K=1$).
When this option is set, neither {\tt bK} nor {\tt bL} should be set.
\end{itemize}
\bigskip
The available {\tt commands} are the following.
\begin{itemize}
\item {\tt energy}\makelink{command:easyeq_energy}{}: compute the energy $e$ at a given $\rho$.\par
\underline{Output}: [$e$] [Newton error $\epsilon$].
\item {\tt energy\_rho}\makelink{command:easyeq_energy_rho}{}: compute the energy $e$ as a function of $\rho$.
The Newton algorithm is initialized with the hardcore scattering solution (\ref{easyeq_init}) for the lowest $\rho$, and with the solution at the previously computed $\rho$ for the larger densities.\par
\underline{Disabled parameters}: \refname{param:easyeq_rho}{{\tt rho}}.\par
\underline{Extra parameters}:
\begin{itemize}
\item\makelink{param:easyeq_minlrho}{} {\tt minlrho} ({\tt Float64}, default: $-6$): minimal value for $\log_{10}\rho$.
\item\makelink{param:easyeq_maxlrho}{} {\tt maxlrho} ({\tt Float64}, default: $2$): maximal value for $\log_{10}\rho$.
\item\makelink{param:easyeq_nlrho}{} {\tt nlrho} ({\tt Int64}, default: $100$): number of values for $\rho$ (spaced logarithmically).
\item\makelink{param:easyeq_minrho}{} {\tt minrho} ({\tt Float64}, default: $10^{-6}$): minimal value for $\rho$.
\item\makelink{param:easyeq_maxrho}{} {\tt maxrho} ({\tt Float64}, default: $10^{2}$): maximal value for $\rho$.
\item\makelink{param:easyeq_nrho}{} {\tt nrho} ({\tt Int64}, default: $0$): number of values for $\rho$ (spaced linearly).
If {\tt nrho} is $\neq0$, then the linear spacing will be used, and \refname{param:easyeq_minlrho}{{\tt minlrho}}, \refname{param:easyeq_maxlrho}{{\tt maxlrho}}, \refname{param:easyeq_nlrho}{{\tt nlrho}} will be ignored.
Otherwise, the logarithmic spacing will be used and \refname{param:easyeq_minrho}{{\tt minrho}}, \refname{param:easyeq_maxrho}{{\tt maxrho}} will be ignored.
\item\makelink{param:easyeq_rhos}{} {\tt rhos} ({\tt Array\{Float64\}}): list of values for $\rho$, specified as a `{\tt,}' separated list.
This parameter takes precedence over \refname{param:easyeq_minlrho}{{\tt minlrho}}, \refname{param:easyeq_maxlrho}{{\tt maxlrho}}, \refname{param:easyeq_nlrho}{{\tt nlrho}}, \refname{param:easyeq_minrho}{{\tt minrho}}, \refname{param:easyeq_maxrho}{{\tt maxrho}}, \refname{param:easyeq_nrho}{{\tt nrho}}.
\end{itemize}
\underline{Output} (one line for each value of $\rho$): [$\rho$] [$e$] [Newton error $\epsilon$].
\item\makelink{command:easyeq_condensate_fraction}{} {\tt condensate\_fraction}: compute the uncondensed fraction $\eta$ at a given $\rho$.\par
\underline{Output}: [$\eta$] [Newton error $\epsilon$].
\item\makelink{command:easyeq_condensate_fraction_rho}{} {\tt condensate\_fraction\_rho}: compute the uncondensed fraction $\eta$ as a function of $\rho$.
The Newton algorithm is initialized with the hardcore scattering solution (\ref{easyeq_init}) for the lowest $\rho$, and with the solution at the previously computed $\rho$ for the larger densities.\par
\underline{Disabled parameters}: same as \refname{command:easyeq_energy_rho}{{\tt energy\_rho}}.\par
\underline{Extra parameters}: same as \refname{command:easyeq_energy_rho}{{\tt energy\_rho}}.\par
\underline{Output} (one line for each value of $\rho$): [$\rho$] [$\eta$] [Newton error $\epsilon$].
\item\makelink{command:easyeq_uk}{} {\tt uk}: compute the Fourier transform $\hat u(|k|)$.
The values $|k|$ at which $\hat u$ is computed are those coming from the Gauss quadratures, and cannot be set.\par \underline{Output} (one line for each value of $|k|$): [$|k|$] [$\hat u(|k|)$] \item\makelink{command:easyeq_ux}{} {\tt ux}: compute $u$ as a function of $|x|$.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:easyeq_xmin}{} {\tt xmin} ({\tt Float64}, default: 0): minimum of the range of $|x|$ to be printed. \item\makelink{param:easyeq_xmax}{} {\tt xmax} ({\tt Float64}, default: 100): maximum of the range of $|x|$ to be printed. \item\makelink{param:easyeq_nx}{} {\tt nx} ({\tt Int64}, default: 100): number of points to print (linearly spaced). \end{itemize} \underline{Output} (one line for each value of $x$): [$|x|$] [$u(|x|)$] \item\makelink{command:easyeq_uux}{} {\tt uux}: compute $2u-\rho u\ast u$ as a function of $|x|$.\par \underline{Extra parameters}: Same as \refname{command:easyeq_ux}{{\tt ux}}.\par \underline{Output} (one line for each value of $x$): [$|x|$] [$2u(|x|)-\rho u\ast u(|x|)$] \item\makelink{command:easyeq_2pt}{} {\tt 2pt}: compute the spherically averaged two-point correlation function $C_2(|x|)$ at a given $\rho$.\par \underline{Extra parameters}: same as \refname{command:easyeq_ux}{{\tt ux}}, plus\par \begin{itemize} \item\makelink{param:easyeq_window_L}{} {\tt window\_L} ({\tt Float64}, default: $10^3$): size of the Hann window used to numerically invert the Fourier transform in the computation of the two-point correlation function, see\-~(\ref{hann}). \end{itemize} \underline{Output} (one line for each value of $|x|$): [$|x|$] [$C_2(|x|)$] \item\makelink{command:easyeq_2pt_max}{} {\tt 2pt\_max}: compute the maximum of the spherically averaged two-point correlation function $C_2(|x|)$ at a given $\rho$.\par \underline{Extra parameters}: \refname{param:easyeq_window_L}{{\tt window\_L}} plus \begin{itemize} \item\makelink{param:easyeq_dx}{} {\tt dx} ({\tt Float64}, default: $10^{-7}$): step used to numerically approximate derivatives. \item\makelink{param:easyeq_x0}{} {\tt x0} ({\tt Float64}, default: $1$): initial guess for the maximum is $\rho^{-1/3}\mathrm{\tt x0}$. \item\makelink{param:easyeq_maxstep}{} {\tt maxstep} ({\tt Float64}, default: $\infty$): maximal size of single step in maximization algorithm. \item\makelink{param:easyeq_tolerance_max}{} {\tt tolerance\_max} ({\tt Float64}, default: \refname{param:easyeq_tolerance}{{\tt tolerance}}): same as \refname{param:easyeq_tolerance}{{\tt tolerance}}, used for the Newton algorithm underlying the maximization algorithm. \end{itemize} \underline{Output}: [$|x_{\mathrm{max}}|$] [$C_2(|x_{\mathrm{max}}|)$] \item\makelink{command:easyeq_2pt_max_rho}{} {\tt 2pt\_max\_rho}: compute the maximum of the spherically averaged two-point correlation function $C_2(|x|)$ for a range of $\rho$.\par \underline{Extra parameters}: same as \refname{command:easyeq_2pt_max}{{\tt easyeq\_2pt\_max}} plus those of \refname{command:easyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$|x_{\mathrm{max}}|$] [$C_2(|x_{\mathrm{max}}|)$]\par \underline{Multithread support}: yes, different values of $\rho$ split up among workers. 
\item\makelink{command:easyeq_2pt_fourier}{} {\tt 2pt\_fourier}: compute the spherically averaged Fourier transform of the two-point correlation function $\hat C_2(|k|)$ at a given $\rho$.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:easyeq_kmin}{} {\tt kmin} ({\tt Float64}, default: 0): minimum of the range of $|k|$ to be printed. \item\makelink{param:easyeq_kmax}{} {\tt kmax} ({\tt Float64}, default: 10): maximum of the range of $|k|$ to be printed. \item\makelink{param:easyeq_nk}{} {\tt nk} ({\tt Int64}, default: 100): number of $|k|$'s to be printed. \item\makelink{param:easyeq_2pt_fourier_window_L}{} {\tt window\_L} ({\tt Float64}, default: 1000): what is actually computed is $\hat C_2$ convolved with a Gaussian of variance $1/\sqrt L$ with $L=\mathrm{\tt window\_L}$, see\-~(\ref{easyeq_gaussian}). \end{itemize} \underline{Output} (one line for each value of $|k|$): [$|k|$] [$\hat C_2(|k|)$].\par \underline{Multithread support}: yes, different values of $k$ are split up among workers. \item\makelink{command:easyeq_2pt_fourier_max}{} {\tt 2pt\_fourier\_max}: compute the maximum of the spherically averaged Fourier transformed two-point correlation function $\hat C_2(|k|)$.\par \underline{Extra parameters}: \refname{param:easyeq_2pt_fourier_window_L}{{\tt window\_L}}, \refname{param:easyeq_maxstep}{{\tt maxstep}} plus \begin{itemize} \item\makelink{param:easyeq_dk}{} {\tt dk} ({\tt Float64}, default: $10^{-7}$): step used to numerically approximate derivatives. \item\makelink{param:easyeq_k0}{} {\tt k0} ({\tt Float64}, default: $1$): initial guess for the maximum is $\rho^{1/3}\mathrm{\tt k0}$. \end{itemize} \underline{Output}: [$|k_{\mathrm{max}}|$] [$\hat C_2(|k_{\mathrm{max}}|)$]\par \item\makelink{command:easyeq_2pt_fourier_max_rho}{} {\tt 2pt\_fourier\_max\_rho}: compute the maximum of the spherically averaged Fourier transformed two-point correlation function $\hat C_2(|k|)$ for a range of $\rho$.\par \underline{Extra parameters}: same as those of \refname{command:easyeq_2pt_fourier_max}{{\tt 2pt\_fourier\_max}} plus those of \refname{command:easyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$|k_{\mathrm{max}}|$] [$\hat C_2(|k_{\mathrm{max}}|)$]\par \underline{Multithread support}: yes, different values of $\rho$ split up among workers. \item\makelink{command:easyeq_momentum_distribution}{} {\tt momentum\_distribution}: compute the momentum distribution $\mathcal M(|k|)$ at a given $\rho$. The momentum distribution is computed for $k=k_{l,j}$ (see\-~(\ref{klj})).\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:easyeq_momentum_kmin}{} {\tt kmin} ({\tt Float64}, default: 0): minimum of the range of $|k|$ to be printed. \item\makelink{param:easyeq_momentum_kmax}{} {\tt kmax} ({\tt Float64}, default: 10): maximum of the range of $|k|$ to be printed. \item\makelink{param:easyeq_momentum_window_L}{} {\tt window\_L} ({\tt Float64}, default: 1000): what is actually computed is $\mathfrak M_k$ convolved with a Gaussian of variance $1/\sqrt L$ where $L=\sqrt{\mathrm{\tt window\_L}}/k^2$, see\-~(\ref{easyeq_gaussian_momt}). \end{itemize} \underline{Output} (one line for each value of $|k|$): [$|k|$] [$\mathcal M(|k|)$] \end{itemize} \subsubsection{Description} \subsubsubsection{Fourier space formulation} The computation is carried out in Fourier space. 
We take the convention that the Fourier transform of a function $f(|x|)$ is
\begin{equation}
\hat f(|k|)=\int_{\mathbb R^3} dx\ e^{ikx}f(|x|)
=\frac{4\pi}{|k|}\int_0^\infty dr\ r\sin(|k|r)f(r)
.
\end{equation}
In Fourier space, (\ref{easyeq}) becomes
\begin{equation}
k^2\hat u=\hat S-2\rho\hat A_K\hat u+\rho^2\hat A_L\hat u^2
\end{equation}
\begin{equation}
\hat A_K:=\beta_K\hat S+(1-\beta_K)\frac{2e}\rho
,\quad
\hat A_L:=\beta_L\hat S+(1-\beta_L)\frac{2e}\rho
\end{equation}
with
\begin{equation}
\hat S(|k|)=\int_{\mathbb R^3} dx\ e^{ikx}(1-u(|x|))v(|x|)
=\hat v(k)-\hat u\hat\ast\hat v(k)
,\quad
\hat f\hat\ast\hat g(|k|):=\int_{\mathbb R^3}\frac{dp}{(2\pi)^3}\ \hat f(|k-p|)\hat g(|p|)
.
\label{hatS_easyeq}
\end{equation}
We write this as a quadratic equation for $\hat u$, and solve it, keeping the solution that decays as $|k|\to\infty$:
\begin{equation}
\rho\hat u(|k|)=
\frac{A_K(|k|)}{A_L(|k|)}\left(\xi(|k|)+1-\sqrt{(\xi(|k|)+1)^2-\frac{A_L(|k|)}{A_K^2(|k|)}\hat S(|k|)}\right)
\end{equation}
that is,
\begin{equation}
\rho\hat u(|k|)=
\frac{\hat S(|k|)}{2A_K(|k|)(\xi(|k|)+1)}\Phi\left({\textstyle\frac{A_L(|k|)}{A_K^2(|k|)}\frac{\hat S(|k|)}{(\xi(|k|)+1)^2}}\right)
\label{uk_easyeq}
\end{equation}
with
\begin{equation}
\xi(|k|):=\frac{k^2}{2\rho\hat A_K}
,\quad
\Phi(x):=\frac{2(1-\sqrt{1-x})}x
.
\end{equation}
We can write this as a root finding problem:
\begin{equation}
\Omega(u):=
\rho\hat u(|k|)-
\frac{\hat S(|k|)}{2A_K(|k|)(\xi(|k|)+1)}\Phi\left({\textstyle\frac{A_L(|k|)}{A_K^2(|k|)}\frac{\hat S(|k|)}{(\xi(|k|)+1)^2}}\right)
=0
.
\label{Omega_easyeq}
\end{equation}
Furthermore, using bipolar coordinates (see lemma\-~\ref{lemma:bipolar}), we write $\hat S$ as
\begin{equation}
\hat S(|k|)=\hat v(|k|)-\frac1{8\pi^3}\int_0^\infty dt\ t\hat u(t)\Eta(|k|,t)
,\quad
\Eta(y,t):=\frac{2\pi}y\int_{|y-t|}^{y+t}ds\ s\hat v(s)
.
\label{easyeq_Eta}
\end{equation}
By a simple change of variables,
\begin{equation}
\Eta(y,t)=
4\pi\left(\mathds 1_{y>t}\frac{t}y+\mathds 1_{y\leqslant t}\right)
\int_0^1 ds\ ((y+t)s+|y-t|(1-s))\hat v((y+t)s+|y-t|(1-s))
.
\label{Eta}
\end{equation}
\bigskip
\subsubsubsection{Evaluating integrals}
To compute these integrals numerically, we will use Gauss-Legendre quadratures:
\begin{equation}
\int_0^1 dt\ f(t)\approx\frac12\sum_{i=1}^N w_if\left({\textstyle\frac{r_i+1}2}\right)
\end{equation}
where $w_i$ and $r_i$ are the Gauss-Legendre {\it weights} and {\it abscissas}.
The order $N$ corresponds to the parameter \refname{param:easyeq_order}{{\tt order}}.
The error made by the quadrature is estimated in appendix\-~\ref{appGL}.
We compactify the integrals to the interval $(0,1)$ by changing variables to $y:=\frac1{t+1}$:
\begin{equation}
\hat S(|k|)=\hat v(|k|)-\frac1{8\pi^3}\int_0^1 dy\ \frac{(1-y)\hat u(\frac{1-y}y)\Eta(|k|,\frac{1-y}y)}{y^3}
\end{equation}
so, using the Gauss-Legendre quadrature, we approximate
\begin{equation}
\hat S(|k|)\approx\hat v(|k|)-\frac1{16\pi^3}\sum_{j=1}^N w_j \frac{(1-y_j)\hat u(\frac{1-y_j}{y_j})\Eta(|k|,\frac{1-y_j}{y_j})}{y_j^3}
,\quad
y_j:=\frac{r_j+1}2
.
\end{equation}
This suggests a natural discretization of Fourier space: let
\begin{equation}
k_i:=\frac{1-r_i}{1+r_i}\equiv\frac{1-y_i}{y_i}
.
\label{easyeq_ki}
\end{equation}
Thus, defining
\begin{equation}
\mathbb U_i:=\rho\hat u(k_i)
,\quad
\hat v_i:=\hat v(k_i)
\end{equation}
we approximate\-~(\ref{uk_easyeq}) as
\begin{equation}
\mathbb U_i=\frac{\mathbb T_i}{2(\mathbb X_i+1)}\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
\label{bbU_easyeq}
\end{equation}
with
\begin{equation}
\mathbb A_{K,i}:=\beta_K\mathbb S_i+(1-\beta_K)\mathbb E
,\quad
\mathbb B_{i}:=\frac{\beta_L\mathbb S_i+(1-\beta_L)\mathbb E}{\mathbb A_{K,i}}
,\quad
\mathbb T_i:=\frac{\mathbb S_i}{\mathbb A_{K,i}}
\end{equation}
\begin{equation}
\mathbb S_i:=\hat v_i-\frac1{16\pi^3\rho}\sum_{j=1}^N w_j\frac{(1-y_j)\mathbb U_j\Eta(k_i,k_j)}{y_j^3}
,\quad
\mathbb E:=\hat v(0)-\frac1{16\pi^3\rho}\sum_{j=1}^N w_j\frac{(1-y_j)\mathbb U_j\Eta(0,k_j)}{y_j^3}
\label{bbSE_easyeq}
\end{equation}
\begin{equation}
\mathbb X_i:=\frac{k_i^2}{2\rho\mathbb A_{K,i}}
.
\end{equation}
This is a discrete equation for the vector $(\mathbb U_i)_{i=1}^N$.
\bigskip
\subsubsubsection{Main algorithm to compute $\mathbb U$}\par\penalty10000
\medskip\penalty10000
\point We rewrite\-~(\ref{bbU_easyeq}) as a root finding problem:
\begin{equation}
\Xi_i(\mathbb U):=\mathbb U_i-\frac{\mathbb T_i}{2(\mathbb X_i+1)}\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
=0
\label{root_easyeq}
\end{equation}
which we solve using the Newton algorithm, that is, we define a sequence of $\mathbb U$'s:
\begin{equation}
\mathbb U^{(n+1)}=\mathbb U^{(n)}-(D\Xi(\mathbb U^{(n)}))^{-1}\Xi(\mathbb U^{(n)})
\end{equation}
where $D\Xi$ is the Jacobian of $\Xi$:
\begin{equation}
(D\Xi)_{i,j}:=\frac{\partial\Xi_i}{\partial\mathbb U_j}
.
\end{equation}
\bigskip
\point For small values of $\rho$, we initialize the algorithm with the hardcore scattering solution
\begin{equation}
\hat u_0(k)=\frac{4\pi a_0}{k^2}
\label{easyeq_init}
\end{equation}
where $a_0$ is the scattering length of the potential $v$ (or an approximation thereof, which need not be very good).
Thus,
\begin{equation}
\mathbb U^{(0)}_i=\frac{4\pi a_0}{k_i^2}
.
\end{equation}
This is a good approximation for small $\rho$.
For larger $\rho$, we choose $\mathbb U^{(0)}$ as the solution of {\tt easyeq} for a slightly smaller $\rho$, and proceed inductively (using the parameters \refname{param:easyeq_minlrho_init}{{\tt minlrho\_init}} and \refname{param:easyeq_nlrho_init}{{\tt nlrho\_init}}).
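\bigskip
\indent To make the structure of this step concrete, the following is a minimal {\tt julia} sketch of the Newton loop (it is not the actual {\tt simplesolv} implementation; the functions {\tt Xi} and {\tt DXi}, which evaluate $\Xi$ and its Jacobian $D\Xi$ computed below, the vector {\tt k} of momenta\-~(\ref{easyeq_ki}), and the approximate scattering length {\tt a0} are assumed to be provided; the defaults mirror the \refname{param:easyeq_tolerance}{{\tt tolerance}} and \refname{param:easyeq_maxiter}{{\tt maxiter}} parameters):
\begin{code}
using LinearAlgebra   \# for norm

function newton(Xi, DXi, k, a0; tol=1e-11, maxiter=21)
  U = [4pi*a0/(ki*ki) for ki in k]     \# hardcore initialization (easyeq\_init)
  err = Inf
  for n in 1:maxiter
    Unew = U - inv(DXi(U))*Xi(U)       \# Newton step
    err = norm(Unew - U)/norm(U)       \# relative Newton error
    U = Unew
    err < tol && break
  end
  return U, err
end
\end{code}
In practice, one would solve the linear system $D\Xi\,\delta=\Xi$ (for instance with an LU decomposition) rather than forming the inverse explicitly; the sketch only illustrates the flow of the algorithm.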
\bigskip
\point We are left with computing the Jacobian of $\Xi$:
\begin{equation}
\begin{largearray}
\frac{\partial\Xi_i}{\partial\mathbb U_j}
=\delta_{j,i}
-\frac1{2(\mathbb X_i+1)}\left(
\partial_j\mathbb T_i-\frac{\mathbb T_i\partial_j\mathbb X_i}{\mathbb X_i+1}
\right)\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
\\[0.5cm]\hfill
-\frac{\mathbb T_i}{2(\mathbb X_i+1)^3}\left(
\mathbb B_i\partial_j\mathbb T_i+\mathbb T_i\partial_j\mathbb B_i-2\frac{\mathbb B_i\mathbb T_i\partial_j\mathbb X_i}{\mathbb X_i+1}
\right)\partial\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
\end{largearray}
\label{jacobian_easyeq}
\end{equation}
with
\begin{equation}
\partial_j\mathbb B_i=
(\beta_L(1-\beta_K)-\beta_K(1-\beta_L))
\frac{\mathbb E\partial_j\mathbb S_i-\mathbb S_i\partial_j\mathbb E}{(\beta_K\mathbb S_i+(1-\beta_K)\mathbb E)^2}
\end{equation}
\begin{equation}
\partial_j\mathbb T_i=
(1-\beta_K)\frac{\mathbb E\partial_j\mathbb S_i-\mathbb S_i\partial_j\mathbb E}{(\beta_K\mathbb S_i+(1-\beta_K)\mathbb E)^2}
,\quad
\partial_j\mathbb A_{K,i}=\beta_K\partial_j\mathbb S_i+(1-\beta_K)\partial_j\mathbb E
\end{equation}
\begin{equation}
\partial_j\mathbb S_i:=-\frac1{16\pi^3\rho}w_j\frac{(1-y_j)\Eta(k_i,k_j)}{y_j^3}
,\quad
\partial_j\mathbb E:=-\frac1{16\pi^3\rho}w_j\frac{(1-y_j)\Eta(0,k_j)}{y_j^3}
\end{equation}
\begin{equation}
\partial_j\mathbb X_i:=-\frac{k_i^2}{2\rho\mathbb A_{K,i}^2}\partial_j\mathbb A_{K,i}
.
\end{equation}
\bigskip
\point We iterate the Newton algorithm until the Newton relative error $\epsilon$ becomes smaller than the \refname{param:easyeq_tolerance}{{\tt tolerance}} parameter.
The Newton error is defined as
\begin{equation}
\epsilon=\frac{\|\mathbb U^{(n+1)}-\mathbb U^{(n)}\|_2}{\|\mathbb U^{(n)}\|_2}
\end{equation}
where $\|\cdot\|_2$ is the $l_2$ norm.
The energy thus obtained is
\begin{equation}
e=\frac\rho2\mathbb E
\end{equation}
the Fourier transform $\hat u$ of the solution is
\begin{equation}
\hat u(k_i)\approx\frac{\mathbb U_i}\rho
\end{equation}
and the solution $u$ in real space is obtained by inverting the Fourier transform
\begin{equation}
\begin{array}{>\displaystyle r@{\ }>\displaystyle l}
u(|x|)=\int\frac{dk}{8\pi^3}\ e^{-ikx}\hat u(|k|)
=&\frac1{2\pi^2|x|}\int_0^1dy\ \frac{(1-y)\sin(|x|\frac{1-y}y)\hat u(\frac{1-y}y)}{y^3}
\\[0.5cm]
\approx&
\frac1{4\pi^2|x|}\sum_{j=1}^Nw_j (1+k_j)^2k_j\sin(|x|k_j)\hat u(k_j)
.
\end{array}
\label{ux}
\end{equation}
To compute $2u-\rho u\ast u$, we replace $\hat u$ with $2\hat u-\rho\hat u^2$ in the previous equation.
\bigskip
\subsubsubsection{Condensate fraction}
Finally, to compute the uncondensed fraction, we solve the modified {\tt easyeq} (see\-~\cite{CJL21})
\begin{equation}
(-\Delta+2\mu)u_\mu=v(1-u_\mu)-2\rho K+\rho^2L
\label{easyeq_mu}
\end{equation}
where $K$ and $L$ are defined as in\-~(\ref{easyeq_KL})-(\ref{easyeq_Se}) in which $u$ is replaced with $u_\mu$.
The uncondensed fraction is then
\begin{equation}
\eta=\partial_\mu e|_{\mu=0}
=-\frac\rho2\int dx\ v(|x|)\partial_\mu u_\mu(|x|)|_{\mu=0}
.
\end{equation}
To compute the energy in the presence of the parameter $\mu$, we proceed in the same way as for $\mu=0$, the only difference being that $k^2$ should formally be replaced by $k^2+2\mu$.
In other words, we consider $\mathbb U_i=\rho\hat u_\mu(k_i)$ and define $\Xi(\mathbb U,\mu)$ in the same way as in\-~(\ref{root_easyeq}), except that $\mathbb X_i$ should be replaced by
\begin{equation}
\frac{k_i^2+2\mu}{2\rho\mathbb A_{K,i}}
.
\end{equation}
We then solve
\begin{equation}
\Xi(\mathbb U,\mu)=0
.
\end{equation}
By differentiating this identity with respect to $\mu$, we find $\partial_\mu u_\mu$:
\begin{equation}
D\Xi\partial_\mu \mathbb U=-\partial_\mu\Xi
\end{equation}
and the uncondensed fraction is
\begin{equation}
\eta
=\frac1{2(2\pi)^3}\int dk\ \hat v(|k|)\left((D\Xi)^{-1}\partial_\mu\Xi|_{\mu=0}\right)(|k|)
\end{equation}
thus
\begin{equation}
\eta=\frac1{16\pi^3}\int_0^1 dy\ \frac{(1-y)\Eta(0,\frac{1-y}y)(D\Xi)^{-1}\partial_\mu\Xi|_{\mu=0}(\frac{1-y}y)}{y^3}
\end{equation}
which we approximate using a Gauss-Legendre quadrature:
\begin{equation}
\eta\approx\frac1{32\pi^3}\sum_{j=1}^N w_j(1+k_j)^2k_j\Eta(0,k_j)\sum_{i=1}^N(D\Xi)^{-1}_{j,i}\partial_\mu\Xi_i|_{\mu=0}
.
\label{eta_easyeq}
\end{equation}
We then compute, using\-~(\ref{jacobian_easyeq}),
\begin{equation}
\partial_\mu\Xi_i|_{\mu=0}
=
\frac{\mathbb T_i}{2\rho\mathbb A_{K,i}(\mathbb X_i+1)^2}\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
+\frac{\mathbb B_i\mathbb T_i^2}{\rho\mathbb A_{K,i}(\mathbb X_i+1)^4}\partial\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
.
\end{equation}
\subsubsubsection{Correlation function (spherical average)}
The two-point correlation function is
\begin{equation}
c_2(x):=
2\rho\frac{\delta e}{\delta v(x)}
\end{equation}
and its spherical average is
\begin{equation}
C_2(|x|):=\frac1{4\pi|x|^2}\int dy\ \delta(|x|-|y|)c_2(y)
.
\end{equation}
In Fourier space,
\begin{equation}
c_2(x)=
2\rho
\int dk\ e^{ikx}\frac{\delta e}{\delta\hat v(k)}
\end{equation}
so
\begin{equation}
C_2(|x|)=
2\rho\int dk\ \left(\frac1{4\pi|x|^2}\int dy\ \delta(|x|-|y|)e^{iky}\right)\frac{\delta e}{\delta\hat v(k)}
=2\rho\int dk\ \frac{\sin(|k||x|)}{|k||x|}\frac{\delta e}{\delta\hat v(k)}
.
\label{easyeq_C2fourier}
\end{equation}
\bigskip
\point We can compute this quantity by considering a modified {\tt easyeq} in Fourier space, by formally replacing $\hat v$ with
\begin{equation}
\hat v+\lambda g(|k|)
,\quad
g(|k|):=\frac{\sin(|k||x|)}{|k||x|}
.
\label{2pt_addv}
\end{equation}
Indeed, if $e_\lambda$ denotes the energy of this modified equation,
\begin{equation}
\partial_\lambda e_\lambda|_{\lambda=0}
=\int dk\ \frac{\delta e}{\delta\hat v(k)}\partial_\lambda({\textstyle \hat v(k)+\lambda g(|k|)})
=\int dk\ g(|k|)\frac{\delta e}{\delta\hat v(k)}
.
\end{equation}
So, denoting the solution of the modified equation by $u_\lambda$,
\begin{equation}
C_2(x)=2\rho\partial_\lambda e_\lambda|_{\lambda=0}
=\rho^2g(0)-\rho^2\int\frac{dk}{(2\pi)^3}\ (g(k)\hat u(k)+\hat v(k)\partial_\lambda\hat u_\lambda(k)|_{\lambda=0})
.
\end{equation}
We compute $\partial_\lambda u_\lambda|_{\lambda=0}$ in the same way as the uncondensed fraction: we define $\Xi(\mathbb U,\lambda)$ by formally adding $\lambda g(|k|)$ to $\hat v$, solve $\Xi(\mathbb U,\lambda)=0$, and differentiate:
\begin{equation}
\partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0}
.
\end{equation}
\bigskip
\point We compute $\partial_\lambda\Xi|_{\lambda=0}$:
\begin{equation}
\begin{largearray}
\partial_\lambda\Xi_i
=
-\frac1{2(\mathbb X_i+1)}\left(
\partial_\lambda\mathbb T_i-\frac{\mathbb T_i\partial_\lambda\mathbb X_i}{\mathbb X_i+1}
\right)\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
\\[0.5cm]\hfill
-\frac{\mathbb T_i}{2(\mathbb X_i+1)^3}\left(
\mathbb B_i\partial_\lambda\mathbb T_i+\mathbb T_i\partial_\lambda\mathbb B_i-2\frac{\mathbb B_i\mathbb T_i\partial_\lambda\mathbb X_i}{\mathbb X_i+1}
\right)\partial\Phi\left({\textstyle\mathbb B_i\frac{\mathbb T_i}{(\mathbb X_i+1)^2}}\right)
\end{largearray}
\label{dXi_2pt_easyeq}
\end{equation}
with
\begin{equation}
\partial_\lambda\mathbb B_i=
(\beta_L(1-\beta_K)-\beta_K(1-\beta_L))
\frac{\mathbb E\partial_\lambda\mathbb S_i-\mathbb S_i\partial_\lambda\mathbb E}{(\beta_K\mathbb S_i+(1-\beta_K)\mathbb E)^2}
\end{equation}
\begin{equation}
\partial_\lambda\mathbb T_i=
(1-\beta_K)\frac{\mathbb E\partial_\lambda\mathbb S_i-\mathbb S_i\partial_\lambda\mathbb E}{(\beta_K\mathbb S_i+(1-\beta_K)\mathbb E)^2}
,\quad
\partial_\lambda\mathbb A_{K,i}=\beta_K\partial_\lambda\mathbb S_i+(1-\beta_K)\partial_\lambda\mathbb E
\end{equation}
\begin{equation}
\partial_\lambda\mathbb S_i:=g(k_i)
-\frac1{16\pi^3\rho}\sum_{j=1}^N w_j\frac{(1-y_j)\mathbb U_j\partial_\lambda\Eta(k_i,k_j)}{y_j^3}
\end{equation}
\begin{equation}
\partial_\lambda \mathbb E:=g(0)-\frac1{16\pi^3\rho}\sum_{j=1}^N w_j\frac{(1-y_j)\mathbb U_j\partial_\lambda\Eta(0,k_j)}{y_j^3}
\end{equation}
\begin{equation}
\partial_\lambda\mathbb X_i:=-\frac{k_i^2}{2\rho\mathbb A_{K,i}^2}\partial_\lambda\mathbb A_{K,i}
\end{equation}
where $\partial_\lambda\Eta$ is computed similarly to $\Eta$\-~(\ref{Eta}) but with $\hat v$ replaced by $g$:
\begin{equation}
\partial_\lambda\Eta(y,t)=
4\pi\left(\mathds 1_{y>t}\frac{t}y+\mathds 1_{y\leqslant t}\right)
\int_0^1 ds\ ((y+t)s+|y-t|(1-s))g((y+t)s+|y-t|(1-s))
.
\label{GG}
\end{equation}
\bigskip
\point In order to invert the Fourier transform in\-~(\ref{easyeq_C2fourier}) numerically, we will use a Hann window (see appendix\-~\ref{appendix:hann})
\begin{equation}
H_L(k):=\mathds 1_{|k|<\frac L2}\cos^2({\textstyle\frac{\pi|k|}{L}})
.
\label{hann}
\end{equation}
The parameter $L$ is set using \refname{param:easyeq_window_L}{{\tt window\_L}}.
The computation is changed only in that $g$ is changed to $H_L(k)\frac{\sin(|k||x|)}{|k||x|}$.
\bigskip
\point To compute the maximum of $C_2$, we use a modified Newton algorithm.
The initial guess for the maximum is $|x_0|=\rho^{-\frac13}$\refname{param:easyeq_x0}{{\tt x0}}.
The modified Newton algorithm is an iteration:
\begin{equation}
x_{n+1}=x_n+\frac{\partial C_2(|x_n|)}{|\partial^2C_2(|x_n|)|}
\label{easyeq_newton_2pt}
\end{equation}
in which the derivatives are approximated using finite differences:
\begin{equation}
\partial C_2(x)\approx
\frac{C_2(|x|+dx)-C_2(|x|)}{dx}
,\quad
\partial^2 C_2(x)\approx
\frac{C_2(|x|+dx)+C_2(|x|-dx)-2C_2(|x|)}{dx^2}
.
\label{easyeq_dx_2pt}
\end{equation}
This is a modification of the usual Newton iteration $x_{n+1}=x_n-\partial C_2/\partial^2C_2$; the modification is designed to follow the direction of the gradient, and thus to move toward a local maximum.
In addition, if $|\partial C_2|/|\partial^2 C_2|$ is larger than \refname{param:easyeq_maxstep}{{\tt maxstep}}, then the step is replaced with $\pm$\refname{param:easyeq_maxstep}{{\tt maxstep}}.
This prevents the algorithm from stepping over a maximum and landing on another one, further away.
This is useful if one has a good idea of where the global maximum is, and does not want to get trapped in a smaller local maximum.
\bigskip
\indent The algorithm is run for a maximum of \refname{param:easyeq_maxiter}{{\tt maxiter}} iterations, or until $|x_{n+1}-x_n|$ is smaller than \refname{param:easyeq_tolerance}{{\tt tolerance}}.
If the maximal number of iterations is reached, or if the solution found is not a local maximum, then the algorithm fails, and returns $+\infty$.
The point thus computed is therefore a local maximum, but it is not guaranteed to be the global maximum.
\subsubsubsection{Fourier transform of two-point correlation (spherical average)}
The Fourier transform of the two-point correlation function is
\begin{equation}
\hat c_2(q):=
2\rho\frac{\delta e}{\delta\hat v(q)}
\end{equation}
and its spherical average is
\begin{equation}
\hat C_2(|q|):=\frac1{4\pi|q|^2}\int dk\ \delta(|q|-|k|)\hat c_2(k)
=
\frac\rho{2\pi|q|^2}\int dk\ \delta(|q|-|k|)\frac{\delta e}{\delta\hat v(k)}
.
\end{equation}
\bigskip
\point To compute $\frac{\delta e}{\delta\hat v(q)}$, one idea would be to proceed in the same way as for the two-point correlation function, by replacing $\hat v$ with
\begin{equation}
\hat v+\lambda g(|k|)
,\quad
g(|k|):=\frac1{4\pi|q|^2}\delta(|q|-|k|)
\end{equation}
where $\delta$ is the Dirac-delta function distribution (compare this with\-~(\ref{2pt_addv})).
However, the $\delta$ function causes all sorts of problems with the quadratures.
\bigskip
\point Instead, we approximate $\hat C_2$ by convolving it with a normalized Gaussian: let
\begin{equation}
\Gamma_L(|k|):=\left(\frac{L}{2\pi}\right)^{\frac32}e^{-\frac L2k^2}
\label{easyeq_gaussian}
\end{equation}
\begin{equation}
\hat{\mathfrak C}_2(|q|)
:=
\int dp\ \hat C_2(|q-p|)\Gamma_L(|p|)
=\int dk\ \int dp\ \frac\rho{2\pi|q-p|^2}\delta(|q-p|-|k|)\frac{\delta e}{\delta\hat v(k)}\Gamma_L(|p|)
\end{equation}
which by lemma\-~\ref{lemma:bipolar} is
\begin{equation}
\hat{\mathfrak C}_2(|q|)
=\int dk\ \frac\rho{|q|}
\frac{\delta e}{\delta\hat v(k)}
\int_0^\infty dt\ \int_{||q|-t|}^{|q|+t}ds\ s\frac{\delta(t-|k|)}t\Gamma_L(s)
\end{equation}
that is
\begin{equation}
\hat{\mathfrak C}_2(|q|)
=\int dk\ \frac{\delta e}{\delta\hat v(k)}
\frac{\rho}{|q||k|}
\int_{||q|-|k||}^{|q|+|k|}ds\ s\Gamma_L(s)
\end{equation}
which is the directional derivative of $e$ with respect to $\hat v$ in the direction of $2\rho g$ with
\begin{equation}
g(|k|):=\frac1{2|q||k|}\int_{||q|-|k||}^{|q|+|k|}ds\ s\Gamma_L(s)
=
\frac1{2|k||q|L}(\Gamma_L(|k|-|q|)-\Gamma_L(|k|+|q|))
.
\label{easyeq_2pt_fourier_g}
\end{equation}
Note that
\begin{equation}
g(0)=\Gamma_L(|q|)
.
\end{equation}
To compute this derivative, we replace $\hat v$ with
\begin{equation}
\hat v+\lambda g(|k|)
\end{equation}
so, denoting the solution of the modified equation by $u_\lambda$, for $q\neq 0$,
\begin{equation}
\hat{\mathfrak C}_2(|q|)=2\rho\partial_\lambda e_\lambda|_{\lambda=0}
=\rho^2\left(
-\int\frac{dk}{(2\pi)^3}\ g(|k|)\hat u(|k|)
-\int\frac{dk}{(2\pi)^3}\ \hat v(|k|)\partial_\lambda\hat u_\lambda(|k|)|_{\lambda=0}
\right)
.
\end{equation}
To compute $\partial_\lambda\hat u_\lambda|_{\lambda=0}$, we differentiate $\Xi(\mathbb U,\lambda)=0$:
\begin{equation}
\partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0}
.
\end{equation}
The computation of $\partial_\lambda\Xi|_{\lambda=0}$ is identical to\-~(\ref{dXi_2pt_easyeq}), but with the $g$ defined in\-~(\ref{easyeq_2pt_fourier_g}).
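\bigskip
\indent As an illustration of the smearing kernel\-~(\ref{easyeq_gaussian}) and\-~(\ref{easyeq_2pt_fourier_g}), here is a minimal {\tt julia} sketch (the names {\tt GammaL} and {\tt gkernel} are ours, not part of {\tt simplesolv}); the closed form follows from the fact that the antiderivative of $s\Gamma_L(s)$ is $-\Gamma_L(s)/L$:
\begin{code}
\# normalized Gaussian of (easyeq\_gaussian)
GammaL(s, L) = (L/(2pi))*sqrt(L/(2pi))*exp(-L*s*s/2)

\# smearing kernel g of (easyeq\_2pt\_fourier\_g); q is the point at which hat C2 is evaluated
gkernel(k, q, L) = k == 0 ? GammaL(q, L) : (GammaL(k - q, L) - GammaL(k + q, L))/(2*k*q*L)
\end{code}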
\bigskip
\point To compute the maximum of $\hat C_2$, we proceed as for $C_2$, see\-~(\ref{easyeq_newton_2pt})-(\ref{easyeq_dx_2pt}).
The only difference is that the algorithm is initialized with $|k_0|=\rho^{\frac13}$\refname{param:easyeq_k0}{{\tt k0}}.
\subsubsubsection{Momentum distribution}
To compute the momentum distribution (see\-~\cite{CHe21}), we add a parameter $\lambda$ to {\tt easyeq}:
\begin{equation}
-\Delta u_\lambda(|x|)
=
(1-u_\lambda(|x|))v(|x|)-2\rho K(|x|)+\rho^2 L(|x|)
-2\lambda \hat u_0(q)\cos(q\cdot x)
\end{equation}
($\hat u_0\equiv\hat u_\lambda|_{\lambda=0}$).
The momentum distribution is then
\begin{equation}
\mathcal M(q)=\partial_\lambda e|_{\lambda=0}
=-\frac\rho2\int\frac{dk}{(2\pi)^3}\ \hat v(k)\partial_\lambda\hat u_\lambda(k)|_{\lambda=0}
.
\end{equation}
\bigskip
\point Note that the Fourier transform of $-2\lambda\hat u_0(q)\cos(q\cdot x)$ is
\begin{equation}
-(2\pi)^3\lambda\hat u_0(q)(\delta(q+k)+\delta(q-k))
.
\label{fouriermomentum}
\end{equation}
The presence of delta functions does not play well with the quadratures.
To get around this, we instead compute a regularization of $\mathcal M(q)$ by convolving it with a peaked spherically symmetric function.
Let $\Gamma_L$ denote the Gaussian with variance $1/\sqrt L$
\begin{equation}
\Gamma_L(|k|):=\left(\frac{L}{2\pi}\right)^{\frac32}e^{-\frac L2k^2}
.
\label{easyeq_gaussian_momt}
\end{equation}
In fact, we will scale $L$ with $k$, and set $L$ to
\begin{equation}
L=\sqrt{\mathrm{\refname{param:easyeq_momentum_window_L}{{\tt window\_L}}}}/k^2
.
\end{equation}
To compute
\begin{equation}
\mathfrak M(q):=\mathcal M\ast\Gamma_L(q)
\end{equation}
we solve the equation
\begin{equation}
-\Delta u_\lambda(|x|)
=
(1-u_\lambda(|x|))v(|x|)-2\rho K(|x|)+\rho^2 L(|x|)
-2\lambda\int dk\ \hat u_0(k)\cos(k\cdot x)\Gamma_L(q-k)
.
\end{equation}
Note that the Fourier transform of
\begin{equation}
-2\lambda\int dk\ \hat u_0(k)\cos(k\cdot x)\Gamma_L(q-k)
\end{equation}
is
\begin{equation}
-(2\pi)^3\lambda\hat u_0(q)(\Gamma_L(k+q)+\Gamma_L(k-q))
.
\end{equation}
Since the ground state is unique, $\mathcal M$ is spherically symmetric.
The term $\Gamma_L(k\pm q)$ is not, so we take its spherical average (which will not change the final result): by lemma\-~\ref{lemma:bipolar},
\begin{equation}
-\frac1{4\pi r^2}\int dq\ \delta(|q|-r)(2\pi)^3\lambda\hat u_0(q)(\Gamma_L(k+q)+\Gamma_L(k-q))
=
-\frac{(2\pi)^3}{|k|r}\lambda\hat u_0(r)\int_{||k|-r|}^{|k|+r} ds\ s\Gamma_L(s)
.
\end{equation}
In this setup, the approximation of the delta function is thus
\begin{equation}
\tilde\delta(|k|,r):=
\frac1{2|k|r}\int_{||k|-r|}^{|k|+r} ds\ s\Gamma_L(s)
=
\frac{1}{2|k|rL}(\Gamma_L(|k|-r)-\Gamma_L(|k|+r))
.
\end{equation}
\bigskip
\point To compute the momentum distribution at $q$, we define $\Xi(\mathbb U,\lambda)$ by replacing $\mathbb T$ with
\begin{equation}
\mathbb T_{i}=
\frac1{\mathbb A_{K,i}}
\left(
\mathbb S_i
-2(2\pi)^3\lambda
\hat u(|q|)\tilde\delta(k_i,|q|)
\right)
.
\end{equation}
Then we solve $\Xi(\mathbb U,\lambda)=0$, and differentiate:
\begin{equation}
\partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0}
.
\end{equation} Finally, \begin{equation} \partial_\lambda\Xi_{i}|_{\lambda=0} = -\partial_\lambda\mathbb T_{i}|_{\lambda=0}\left( \frac1{2(\mathbb X_{i}+1)} \Phi\left(\mathbb B_i{\textstyle\frac{\mathbb T_{i}}{(\mathbb X_{i}+1)^2}}\right) +\frac{\mathbb B_i\mathbb T_{i}}{2(\mathbb X_{i}+1)^3} \partial\Phi\left(\mathbb B_i{\textstyle\frac{\mathbb T_{i}}{(\mathbb X_{l,j}+1)^2}}\right) \right) \end{equation} with \begin{equation} \partial_\lambda\mathbb T_{i}|_{\lambda=0}= -\frac{2(2\pi)^3}{\mathbb A_{K,i}} \hat u(|q|)\tilde\delta(k_{i},|q|) . \end{equation} \bigskip \subsection{\tt anyeq}\label{sec:anyeq} \indent This method is used to solve any of the equations in the Simplified approach. Specifically, it solves the equation \begin{equation} -\Delta u = v(1-u)-2\rho K+\rho^2 L \label{anyeq} \end{equation} with \begin{equation} K=\gamma_K(1-\alpha_Ku)\left(\beta_K u\ast S+(1-\beta_K)\frac{2e}\rho u\right) \label{anyeq_K} \end{equation} and \begin{equation} L:=L_1+L_2+L_3 \end{equation} \begin{equation} L_1:=(1-\alpha_{L,1}u)\left(\beta_{L,1}u\ast u\ast S+(1-\beta_{L,1})\frac{2e}\rho u\ast u\right) \end{equation} \begin{equation} L_2:=-\gamma_{L,2}(1-\alpha_{L,2}u)\left(\beta_{L,2}2u\ast(u(u\ast S))+(1-\beta_{L,2})\frac{4e}\rho u\ast u^2\right) \end{equation} \begin{equation} L_3:=\gamma_{L,3}(1-\alpha_{L,3}u)\left(\beta_{L,3}\frac12\int dydz\ u(y)u(z-x)u(z)u(y-x)S(z-y)+(1-\beta_{L,3})\frac e\rho u^2\ast u^2\right) \end{equation} \begin{equation} e=\frac\rho2\int dx\ (1-u(|x|))v(|x|) . \label{anyeq_e} \end{equation} The parameters $\alpha_\cdot$, $\beta_\cdot$ and $\gamma_\cdot$ can be set to turn\-~(\ref{anyeq}) into any of the approximations of the Simplified approach. For ease of use, there are several predefined equations, given in the following table. \bigskip \def\arraystretch{1.5} \setlength\tabcolsep{5pt} \makelink{table:anyeq_eqs}{} \hfil\begin{tabular}{|r|c|c|c|c|c|c|c|c|c|c|c|c|} \hline &$\alpha_K$ &$\beta_K$ &$\gamma_K$ &$\alpha_{L,1}$ &$\beta_{L,1}$ &$\alpha_{L,2}$ &$\beta_{L,2}$ &$\gamma_{L,2}$ &$\alpha_{L,3}$ &$\beta_{L,3}$ &$\gamma_{L,3}$\\ \hline \makelink{eq:anyeq_compleq}{}{\tt compleq}&1&1&1&1&1&1&1&1&1&1&1\\ \makelink{eq:anyeq_bigeq}{} {\tt bigeq }&1&1&1&1&1&1&1&1&-&-&0\\ \makelink{eq:anyeq_fulleq}{} {\tt fulleq }&1&1&1&1&1&1&1&1&1&0&1\\ \makelink{eq:anyeq_medeq}{} {\tt medeq }&0&1&1&0&1&-&-&0&-&-&0\\ \makelink{eq:anyeq_simpleq}{}{\tt simpleq}&0&0&1&0&0&-&-&0&-&-&0\\ \hline \end{tabular} \bigskip Note that there is no $\gamma_{L,1}$, whose computation would be rather different. Note, in addition, that {\tt simpleq} and {\tt medeq} coincide with their definitions in\-~(\ref{easyeq}). The method used to solve this equation is very different from \refname{sec:easyeq}{{\tt easyeq}}, and is significantly longer to run. \bigskip \subsubsection{Usage} \indent Unless otherwise noted, this method takes the following parameters (specified via the {\tt [-p params]} flag, as a `{\tt;}' separated list of entries). \begin{itemize} \item\makelink{param:anyeq_rho}{} {\tt rho} ({\tt Float64}, default: $10^{-6}$): density $\rho$. \item\makelink{param:anyeq_tolerance}{} {\tt tolerance} ({\tt Float64}, default: $10^{-11}$): maximal size of final step in Newton iteration. \item\makelink{param:anyeq_maxiter}{} {\tt maxiter} ({\tt Int64}, default: 21): maximal number of iterations of the Newton algorithm before giving up. \item\makelink{param:anyeq_P}{} {\tt P} ({\tt Int64}, default: 11): order of all Chebyshev polynomial expansions (denoted by $P$ below). 
\item\makelink{param:anyeq_N}{} {\tt N} ({\tt Int64}, default: 12): order of all Gauss quadratures (denoted by $N$ below).
\item\makelink{param:anyeq_J}{} {\tt J} ({\tt Int64}, default: 10): number of splines (denoted by $J$ below).
\item\makelink{param:anyeq_nlrho_init}{} {\tt nlrho\_init} ({\tt Int64}, default: 0): we initialize the Newton algorithm using the solution of \refname{eq:easyeq_medeq}{{\tt medeq}}, computed using the methods in \refname{sec:easyeq}{{\tt easyeq}}.
If {\tt nlrho\_init} is $\neq 0$, then the solution of \refname{eq:easyeq_medeq}{{\tt medeq}} is first computed for {\tt nlrho\_init} smaller values of $\rho$ starting from $10^{\mathrm{\refname{param:anyeq_minlrho_init}{{\tt minlrho\_init}}}}$.
This is useful when $\rho$ is too large for the solution of \refname{eq:easyeq_medeq}{{\tt medeq}} to be computed directly.
\item\makelink{param:anyeq_minlrho_init}{} {\tt minlrho\_init} ({\tt Float64}, default: $-6$): see \refname{param:anyeq_nlrho_init}{{\tt nlrho\_init}}.
\item\makelink{param:anyeq_aK}{} {\tt aK}, {\tt bK}, {\tt gK}, {\tt aL1}, {\tt bL1}, {\tt aL2}, {\tt bL2}, {\tt gL2}, {\tt aL3}, {\tt bL3}, {\tt gL3} ({\tt Float64}, default: 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0): the values of $\alpha_K$, $\beta_K$, $\gamma_K$, $\alpha_{L,1}$, $\beta_{L,1}$, $\alpha_{L,2}$, $\beta_{L,2}$, $\gamma_{L,2}$, $\alpha_{L,3}$, $\beta_{L,3}$, $\gamma_{L,3}$.
\item\makelink{param:anyeq_eq}{} {\tt eq} ({\tt String}, default: ``{\tt bigeq}'', acceptable values: ``{\tt compleq}'', ``{\tt bigeq}'', ``{\tt fulleq}'', ``{\tt medeq}'', ``{\tt simpleq}''): A shortcut to select any of the equations defined in the \refname{table:anyeq_eqs}{table above}.
When this option is set, none of \refname{param:anyeq_aK}{\tt aK}, \refname{param:anyeq_aK}{\tt bK}, \refname{param:anyeq_aK}{\tt gK}, \refname{param:anyeq_aK}{\tt aL1}, \refname{param:anyeq_aK}{\tt bL1}, \refname{param:anyeq_aK}{\tt aL2}, \refname{param:anyeq_aK}{\tt bL2}, \refname{param:anyeq_aK}{\tt gL2}, \refname{param:anyeq_aK}{\tt aL3}, \refname{param:anyeq_aK}{\tt bL3}, \refname{param:anyeq_aK}{\tt gL3} should be set.
\end{itemize}
\bigskip
The available {\tt commands} are the following.
\begin{itemize}
\item\makelink{command:anyeq_energy}{} {\tt energy}: compute the energy $e$ at a given $\rho$.\par
\underline{Output}: [$e$] [Newton error $\epsilon$].
\item\makelink{command:anyeq_energy_rho}{} {\tt energy\_rho}: compute the energy $e$ as a function of $\rho$.
The Newton algorithm is initialized with the solution of \refname{eq:easyeq_medeq}{{\tt medeq}}.\par
\underline{Disabled parameters}: \refname{param:anyeq_rho}{{\tt rho}}.\par
\underline{Extra parameters}:
\begin{itemize}
\item\makelink{param:anyeq_minlrho}{} {\tt minlrho} ({\tt Float64}, default: $-6$): minimal value for $\log_{10}\rho$.
\item\makelink{param:anyeq_maxlrho}{} {\tt maxlrho} ({\tt Float64}, default: $2$): maximal value for $\log_{10}\rho$.
\item\makelink{param:anyeq_nlrho}{} {\tt nlrho} ({\tt Int64}, default: $100$): number of values for $\rho$ (spaced logarithmically).
\item\makelink{param:anyeq_minrho}{} {\tt minrho} ({\tt Float64}, default: $10^{-6}$): minimal value for $\rho$.
\item\makelink{param:anyeq_maxrho}{} {\tt maxrho} ({\tt Float64}, default: $10^{2}$): maximal value for $\rho$.
\item\makelink{param:anyeq_nrho}{} {\tt nrho} ({\tt Int64}, default: $0$): number of values for $\rho$ (spaced linearly).
If {\tt nrho} is $\neq0$, then the linear spacing will be used, and \refname{param:anyeq_minlrho}{{\tt minlrho}}, \refname{param:anyeq_maxlrho}{{\tt maxlrho}}, \refname{param:anyeq_nlrho}{{\tt nlrho}} will be ignored. Otherwise, the logarithmic spacing will be used and \refname{param:anyeq_minrho}{{\tt minrho}}, \refname{param:anyeq_maxrho}{{\tt maxrho}} will be ignored. \item\makelink{param:anyeq_rhos}{} {\tt rhos} ({\tt Array\{Float64\}}): list of values for $\rho$, specified as a `{\tt,}' separated list. This parameter takes precedence over \refname{param:anyeq_minlrho}{{\tt minlrho}}, \refname{param:anyeq_maxlrho}{{\tt maxlrho}}, \refname{param:anyeq_nlrho}{{\tt nlrho}}, \refname{param:anyeq_minrho}{{\tt minrho}}, \refname{param:anyeq_maxrho}{{\tt maxrho}}, \refname{param:anyeq_nrho}{{\tt nrho}}. \end{itemize} \underline{Output} (one line for each value of $\rho$): [$\rho$] [$e$] [Newton error $\epsilon$].\par \underline{Multithread support}: yes, different values of $\rho$ split up among workers. \item\makelink{command:anyeq_energy_rho_init_prevrho}{} {\tt energy\_rho\_init\_prevrho}: compute the energy $e$ as a function of $\rho$. The Newton algorithm is initialized with the solution of \refname{eq:easyeq_medeq}{{\tt medeq}} for the lowest $\rho$, and with the previously computed $\rho$ for the larger densities.\par \underline{Disabled parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Extra parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$e$] [Newton error $\epsilon$]. \item\makelink{command:anyeq_energy_rho_init_nextrho}{} {\tt energy\_rho\_init\_nextrho}: same as \refname{command:anyeq_energy_rho_init_prevrho}{{\tt energy\_rho\_init\_prevrho}} except that the energy is computed for decreasing densities instead of increasing ones. The Newton algorithm is initialized with the solution of \refname{eq:easyeq_medeq}{{\tt medeq}} for the largest $\rho$, and with the previously computed $\rho$ for the smaller densities.\par \underline{Disabled parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Extra parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$e$] [Newton error $\epsilon$]. \item\makelink{command:anyeq_condensate_fraction}{} {\tt condensate\_fraction}: compute the uncondensed fraction $\eta$ at a given $\rho$.\par \underline{Output}: [$\eta$] [Newton error $\epsilon$]. \item\makelink{command:anyeq_condensate_fraction_rho}{} {\tt condensate\_fraction\_rho}: compute the uncondensed fraction $\eta$ as a function of $\rho$. The Newton algorithm is initialized with the solution of \refname{eq:easyeq_medeq}{{\tt medeq}}.\par \underline{Disabled parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Extra parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$\eta$] [Newton error $\epsilon$].\par \underline{Multithread support}: yes, different values of $\rho$ split up among workers. \item\makelink{command:anyeq_uk}{} {\tt uk}: compute the Fourier transform $\hat u(|k|)$. 
The values $|k|$ at which $\hat u$ is computed are those coming from the Gauss quadratures, and cannot be set.\par
\underline{Output} (one line for each value of $|k|$): [$|k|$] [$\hat u(|k|)$]
\item\makelink{command:anyeq_ux}{} {\tt ux}: compute $u$ as a function of $|x|$.\par
\underline{Extra parameters}:
\begin{itemize}
\item\makelink{param:anyeq_xmin}{} {\tt xmin} ({\tt Float64}, default: 0): minimum of the range of $|x|$ to be printed.
\item\makelink{param:anyeq_xmax}{} {\tt xmax} ({\tt Float64}, default: 100): maximum of the range of $|x|$ to be printed.
\item\makelink{param:anyeq_nx}{} {\tt nx} ({\tt Int64}, default: 100): number of points to print (linearly spaced).
\end{itemize}
\underline{Output} (one line for each value of $x$): [$|x|$] [$u(|x|)$]
\item\makelink{command:anyeq_2pt}{} {\tt 2pt}: compute the spherically averaged two-point correlation function $C_2(|x|)$ at a given $\rho$.\par
\underline{Extra parameters}: same as \refname{command:anyeq_ux}{{\tt ux}}, plus\par
\begin{itemize}
\item\makelink{param:anyeq_window_L}{} {\tt window\_L} ({\tt Float64}, default: $10^3$): size of the Hann window used to numerically invert the Fourier transform in the computation of the two-point correlation function, see\-~(\ref{hann}).
\end{itemize}
\underline{Output} (one line for each value of $|x|$): [$|x|$] [$C_2(|x|)$]
\item\makelink{command:anyeq_2pt_max}{} {\tt 2pt\_max}: compute the maximum of the spherically averaged two-point correlation function $C_2(|x|)$ at a given $\rho$.\par
\underline{Extra parameters}: \refname{param:anyeq_window_L}{{\tt window\_L}} plus
\begin{itemize}
\item\makelink{param:anyeq_dx}{} {\tt dx} ({\tt Float64}, default: $10^{-7}$): step used to numerically approximate derivatives.
\item\makelink{param:anyeq_x0}{} {\tt x0} ({\tt Float64}, default: $1$): initial guess for the maximum is $\rho^{-1/3}\mathrm{\tt x0}$.
\item\makelink{param:anyeq_maxstep}{} {\tt maxstep} ({\tt Float64}, default: $\infty$): maximal size of single step in maximization algorithm.
\item\makelink{param:anyeq_tolerance_max}{} {\tt tolerance\_max} ({\tt Float64}, default: \refname{param:anyeq_tolerance}{{\tt tolerance}}): same as \refname{param:anyeq_tolerance}{{\tt tolerance}}, used for the Newton algorithm underlying the maximization algorithm.
\end{itemize}
\underline{Output}: [$|x_{\mathrm{max}}|$] [$C_2(|x_{\mathrm{max}}|)$]
\item\makelink{command:anyeq_2pt_max_rho}{} {\tt 2pt\_max\_rho}: compute the maximum of the spherically averaged two-point correlation function $C_2(|x|)$ for a range of $\rho$.\par
\underline{Extra parameters}: same as \refname{command:anyeq_2pt_max}{{\tt 2pt\_max}} plus those of \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par
\underline{Output} (one line for each value of $\rho$): [$\rho$] [$|x_{\mathrm{max}}|$] [$C_2(|x_{\mathrm{max}}|)$]\par
\underline{Multithread support}: yes, different values of $\rho$ split up among workers.
\item\makelink{command:anyeq_2pt_fourier}{} {\tt 2pt\_fourier}: compute the spherically averaged Fourier transform of the two-point correlation function $\hat C_2(|k|)$ at a given $\rho$.\par
\underline{Extra parameters}:
\begin{itemize}
\item\makelink{param:anyeq_kmin}{} {\tt kmin} ({\tt Float64}, default: 0): minimum of the range of $|k|$ to be printed.
\item\makelink{param:anyeq_kmax}{} {\tt kmax} ({\tt Float64}, default: 10): maximum of the range of $|k|$ to be printed.
\item\makelink{param:anyeq_nk}{} {\tt nk} ({\tt Int64}, default: 100): number of $|k|$'s to be printed.
\item\makelink{param:anyeq_2pt_fourier_window_L}{} {\tt window\_L} ({\tt Float64}, default: 1000): what is actually computed is $\hat C_2$ convolved with a Gaussian of variance $1/\sqrt L$ where $L=\sqrt{\mathrm{\tt window\_L}}$, see\-~(\ref{anyeq_gaussian}). \end{itemize} \underline{Output} (one line for each value of $|k|$): [$|k|$] [$\hat C_2(|k|)$].\par \underline{Multithread support}: yes, different values of $k$ are split up among workers. \item\makelink{command:anyeq_2pt_fourier_max}{} {\tt 2pt\_fourier\_max}: compute the maximum of the spherically averaged Fourier transformed two-point correlation function $\hat C_2(|k|)$.\par \underline{Extra parameters}: \refname{param:anyeq_2pt_fourier_window_L}{{\tt window\_L}}, \refname{param:anyeq_maxstep}{{\tt maxstep}} plus \begin{itemize} \item\makelink{param:anyeq_dk}{} {\tt dk} ({\tt Float64}, default: $10^{-7}$): step used to numerically approximate derivatives. \item\makelink{param:anyeq_k0}{} {\tt k0} ({\tt Float64}, default: $1$): initial guess for the maximum is $\rho^{1/3}\mathrm{\tt k0}$. \end{itemize} \underline{Output}: [$|k_{\mathrm{max}}|$] [$\hat C_2(|k_{\mathrm{max}}|)$]\par \item\makelink{command:anyeq_2pt_fourier_max_rho}{} {\tt 2pt\_fourier\_max\_rho}: compute the maximum of the spherically averaged Fourier transformed two-point correlation function $\hat C_2(|k|)$ for a range of $\rho$.\par \underline{Extra parameters}: same as those of \refname{command:anyeq_2pt_fourier_max}{{\tt 2pt\_fourier\_max}} plus those of \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$|k_{\mathrm{max}}|$] [$\hat C_2(|k_{\mathrm{max}}|)$]\par \underline{Multithread support}: yes, different values of $\rho$ split up among workers. \item\makelink{command:anyeq_momentum_distribution}{} {\tt momentum\_distribution}: compute the momentum distribution $\mathcal M(|k|)$ at a given $\rho$. The momentum distribution is computed for $k=k_{l,j}$ (see\-~(\ref{klj})).\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:anyeq_momentum_kmin}{} {\tt kmin} ({\tt Float64}, default: 0): minimum of the range of $|k|$ to be printed. \item\makelink{param:anyeq_momentum_kmax}{} {\tt kmax} ({\tt Float64}, default: 10): maximum of the range of $|k|$ to be printed. \item\makelink{param:anyeq_momentum_window_L}{} {\tt window\_L} ({\tt Float64}, default: 1000): what is actually computed is $\mathfrak M_k$ convolved with a Gaussian of variance $1/\sqrt L$ where $L=\sqrt{\mathrm{\tt window\_L}}/k^2$, see\-~(\ref{anyeq_gaussian_momt}). \end{itemize} \underline{Output} (one line for each value of $|k|$): [$|k|$] [$\mathcal M(|k|)$] \item\makelink{command:anyeq_compressibility_rho}{} {\tt compressibility\_rho}: compute the compressibility $\chi$ as a function of $\rho$. The Newton algorithm is initialized with the solution of \refname{eq:easyeq_medeq}{{\tt medeq}}. \underline{Disabled parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Extra parameters}: same as \refname{command:anyeq_energy_rho}{{\tt energy\_rho}}.\par \underline{Output} (one line for each value of $\rho$): [$\rho$] [$\chi$]. \item \makelink{command:anyeq_save_Abar}{} {\tt save\_Abar}: compute the matrix $\bar A$. This matrix is used to compute observables for \refname{eq:anyeq_compleq}{{\tt compleq}}. 
This command is useful to output the value of $\bar A$ to a file once and for all, and use this file to run commands without recomputing $\bar A$.\par
\underline{Disabled parameters}: \refname{param:anyeq_rho}{{\tt rho}}, \refname{param:anyeq_tolerance}{{\tt tolerance}}, \refname{param:anyeq_maxiter}{{\tt maxiter}}, \refname{param:anyeq_minlrho_init}{{\tt minlrho\_init}}, \refname{param:anyeq_nlrho_init}{{\tt nlrho\_init}}.\par
\underline{Output}: [$\bar A$] (the output is not designed to be human-readable; it is obtained through nested {\tt for} loops; for details, see the code).\par
\underline{Multithread support}: yes, the first indices are split up among workers, which produces $NJ$ jobs.
\end{itemize}
\subsubsection{Description}
\subsubsubsection{Fourier space formulation}
The computation is carried out in Fourier space.
We take the convention that the Fourier transform of a function $f(|x|)$ is
\begin{equation}
\hat f(|k|)=\int_{\mathbb R^3} dx\ e^{ikx}f(|x|)
=\frac{4\pi}{|k|}\int_0^\infty dr\ r\sin(|k|r)f(r)
.
\label{anyeq_fourier}
\end{equation}
We define a Fourier-space convolution:
\begin{equation}
\hat f\hat\ast\hat g(|k|):=\int_{\mathbb R^3}\frac{dp}{(2\pi)^3}\ \hat f(|k-p|)\hat g(|p|)
.
\label{anyeq_hatast}
\end{equation}
In Fourier space, (\ref{anyeq}) becomes
\begin{equation}
k^2\hat u(|k|)=\hat S(|k|)-2\rho\hat K(|k|)+\rho^2\hat L(|k|)
\end{equation}
with
\begin{equation}
\hat S(|k|)=\int_{\mathbb R^3} dx\ e^{ikx}(1-u(|x|))v(|x|)
=\hat v(k)-\hat u\hat\ast\hat v(k)
\end{equation}
\begin{equation}
\rho\hat K=
\gamma_K(\beta_K\rho\hat S+(1-\beta_K)2e)\hat u
-
\gamma_K\alpha_K\hat u\hat\ast((\beta_K\rho\hat S+(1-\beta_K)2e)\hat u)
\end{equation}
\begin{equation}
\rho\hat L_1=
(\beta_{L,1}\rho\hat S+(1-\beta_{L,1})2e)\hat u^2
-
\alpha_{L,1}\hat u\hat\ast((\beta_{L,1}\rho\hat S+(1-\beta_{L,1})2e)\hat u^2)
\end{equation}
\begin{equation}
\rho\hat L_2=
-\gamma_{L,2}(\beta_{L,2}2\rho(\hat u\hat\ast(\hat u\hat S))+(1-\beta_{L,2})4e\hat u\hat\ast\hat u)\hat u
+
\gamma_{L,2}\alpha_{L,2}\hat u\hat\ast((\beta_{L,2}2\rho(\hat u\hat\ast(\hat u\hat S))+(1-\beta_{L,2})4e\hat u\hat\ast\hat u)\hat u)
\end{equation}
\begin{equation}
\rho\hat L_3=
\gamma_{L,3}(1-\beta_{L,3})e(\hat u\hat\ast\hat u)^2
-
\gamma_{L,3}\alpha_{L,3}(1-\beta_{L,3})\hat u\hat\ast(e(\hat u\hat\ast\hat u)^2)
+
\gamma_{L,3}\beta_{L,3}\hat l_3
-
\frac1{2\rho}\gamma_{L,3}\alpha_{L,3}\beta_{L,3}\hat u\hat\ast\hat l_3
\end{equation}
with
\begin{equation}
\hat l_3(|k|):=
\frac1{\rho^2}\int\frac{dq}{(2\pi)^3}\frac{dq'}{(2\pi)^3}\ \rho\hat u(|k-q'|)\rho\hat u(|q|)\rho\hat u(|q'|)\rho\hat u(|k-q|)\hat S(|q'-q|)
.
\label{l3}
\end{equation}
Therefore,
\begin{equation}
\Omega(u)=0
\end{equation}
with
\begin{equation}
\Omega(u):=
\rho^2\hat u(|k|)^2\sigma_{L,1}(|k|)
-\left(\frac{k^2}\rho+2\sigma_K(|k|)+2f_1(|k|)\right)\rho\hat u(|k|)
+\left(\hat S(|k|)+\frac12f_2(|k|)+G(|k|)\right)
\label{Omega}
\end{equation}
with
\begin{equation}
\sigma_K:=\frac1{\rho}\gamma_K(\beta_K\rho\hat S+(1-\beta_K)2e)
,\quad
\sigma_{L,1}:=\frac1{\rho}(\beta_{L,1}\rho\hat S+(1-\beta_{L,1})2e)
\end{equation}
\begin{equation}
f_1:=\frac1{\rho^2}\gamma_{L,2}(\beta_{L,2}\rho(\rho\hat u\hat\ast(\rho\hat u\hat S))+(1-\beta_{L,2})2e\rho\hat u\hat\ast\rho\hat u)
\end{equation}
\begin{equation}
f_2:=\gamma_{L,3}(1-\beta_{L,3})\frac{2e}{\rho^3}(\rho\hat u\hat\ast\rho\hat u)^2
+\gamma_{L,3}\beta_{L,3}\hat l_3
\end{equation}
\begin{equation}
\begin{largearray}
G:=
\frac2{\rho^2}\gamma_K\alpha_K\rho\hat u\hat\ast((\beta_K\rho\hat S+(1-\beta_K)2e)\rho\hat u)
-
\frac1{\rho^2}\alpha_{L,1}\rho\hat u\hat\ast((\beta_{L,1}\rho\hat S+(1-\beta_{L,1})2e)\rho\hat u^2) \\[0.5cm]\hfill + \frac2{\rho}\alpha_{L,2}\rho\hat u\hat\ast(f_1\rho\hat u) - \frac1{2\rho}\alpha_{L,3}\rho\hat u\hat\ast f_2 . \end{largearray} \end{equation} Therefore, \begin{equation} \rho\hat u=1+\xi -\sqrt{(1+\xi)^2-(1+\zeta)} \label{u} \end{equation} with \begin{equation} \xi:=\frac1{\sigma_{L,1}}\left(\sigma_K-\sigma_{L,1}+\frac{k^2}{2\rho}+f_1\right) ,\quad \zeta:=\frac1{\sigma_{L,1}}\left(\hat S-\sigma_{L,1}+\frac12f_2+G\right) \end{equation} We rewrite\-~(\ref{u}) as \begin{equation} U= \frac{1+\zeta}{2(\xi+1)}\ \Phi\left({\textstyle\frac{1+\zeta}{(\xi+1)^2}}\right) \label{anyeq_hatu} \end{equation} with \begin{equation} U:=\rho\hat u \end{equation} \begin{equation} \Phi(x):=\frac{2(1-\sqrt{1-x})}x . \end{equation} \bigskip \subsubsubsection{Evaluating integrals} To evaluate integrals numerically, we split the integration interval into splines and use Gauss-Legendre quadratures. More specifically, to compute integrals of the form \begin{equation} \int \frac{dk}{(2\pi)^3}\ f(U(k),k) = \frac1{2\pi^2}\int_0^\infty dt\ t^2f(U(t),t) \end{equation} we first compactify space to $(-1,1]$ by changing variables to $\tau=\frac{1-t}{1+t}$: \begin{equation} \int \frac{dk}{(2\pi)^3}\ f(U(k),k) = \frac1{\pi^2}\int_{-1}^1 d\tau\ \frac{(1-\tau)^2}{(1+\tau)^4}f({\textstyle U(\frac{1-\tau}{1+\tau}),\frac{1-\tau}{1+\tau}}) . \end{equation} We then split $(-1,1]$ into $J$ sub-intervals (given by the parameter \refname{param:anyeq_J}{{\tt J}}), called {\it splines}: \begin{equation} (-1,1]=\bigcup_{j=0}^{J-1}(\tau_j,\tau_{j+1}] . \end{equation} The $\tau$ are taken to be equally spaced, but the code is designed in such a way that this could be changed easily in the future: \begin{equation} \tau_j=-1+\frac{2j}J . \end{equation} In these terms, \begin{equation} \int \frac{dk}{(2\pi)^3}\ f(U(k),k) = \sum_{l=0}^{J-1} \int_{\tau_l}^{\tau_{l+1}} d\tau\ \frac{(1-\tau)^2}{(1+\tau)^4}f({\textstyle U(\frac{1-\tau}{1+\tau}),\frac{1-\tau}{1+\tau}}) . \end{equation} We then change variables to $r$: \begin{equation} \tau= -\frac{\tau_{l+1}-\tau_l}2\sin({\textstyle\frac{\pi r}2})+\frac{\tau_{l+1}+\tau_l}2 =:\frac{1-\vartheta_l(r)}{1+\vartheta_l(r)} \label{chebyshevvars} \end{equation} (the reason for this specific change of variables will become clear at the end of this paragraph and in the next one) and find \begin{equation} \int \frac{dk}{(2\pi)^3}\ f(U(k),k) = \sum_{l=0}^{J-1} \frac{\tau_{l+1}-\tau_l}{16\pi}\int_{-1}^1 dr\ \cos({\textstyle\frac{\pi r}2})(1+\vartheta_l(r))^2\vartheta_l^2(r)f(U(\vartheta_l(r)),\vartheta_l(r)) . \label{anyeq_preintegration} \end{equation} We then approximate the integral using a Gauss-Legendre quadrature of order $N$ (see appendix\-~\ref{appGL}): \begin{equation} \int \frac{dk}{(2\pi)^3}\ f(k) \approx \sum_{l=0}^{J-1} \frac{\tau_{l+1}-\tau_l}{16\pi} \sum_{j=1}^Nw_j \cos({\textstyle\frac{\pi x_j}2})(1+\vartheta_l(x_j))^2\vartheta_l^2(x_j)f(\mathbb U_{l,j},\vartheta_l(x_j)) \label{anyeq_integration} \end{equation} with \begin{equation} \mathbb U_{l,j}:=U(k_{l,j}) ,\quad k_{l,j}:=\frac{2+(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)-(\tau_{l+1}+\tau_l)}{2-(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)+(\tau_{l+1}+\tau_l)} . \end{equation} The choice of the change of variables\-~(\ref{chebyshevvars}) is made so that $U$ is evaluated at $k_{l,j}$, which are the momenta that appear naturally in the Chebyshev polynomial expansion below\-~(\ref{klj}).
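\bigskip
\indent To illustrate\-~(\ref{anyeq_integration}), the following is a minimal {\tt julia} sketch of the quadrature scheme. It is not extracted from the {\tt simplesolv} source: it uses the {\tt FastGaussQuadrature} package for the abscissas and weights, and 1-based indices for the splines and nodes.
\begin{code}
using FastGaussQuadrature

\# schematic version of the quadrature (anyeq\_integration), approximating the
\# integral of f(|k|) with J splines and a Gauss-Legendre quadrature of order N
function integrate\_k(f, J, N)
    x, w = gausslegendre(N)             \# abscissas and weights on [-1,1]
    taus = [-1 + 2*l/J for l in 0:J]    \# equally spaced spline boundaries
    s = 0.
    for l in 1:J
        dtau = taus[l+1] - taus[l]
        mtau = taus[l+1] + taus[l]
        for j in 1:N
            \# the momentum k(l,j), that is, vartheta\_l(x\_j)
            k = (2 + dtau*sin(pi*x[j]/2) - mtau)/(2 - dtau*sin(pi*x[j]/2) + mtau)
            jac = abs2(k*(1 + k))       \# k squared times (1+k) squared
            s += dtau/(16*pi)*w[j]*cos(pi*x[j]/2)*jac*f(k)
        end
    end
    return s
end
\end{code}
For instance, with the Gaussian $f(|k|)=e^{-k^2}$, this sketch should reproduce $\int\frac{dk}{(2\pi)^3}e^{-k^2}=\frac1{8\pi^{3/2}}$ to high accuracy already at moderate values of $J$ and $N$.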
In this way, we can compute arbitrary integrals of functions of $U$ by just computing the values of $\mathbb U_{l,j}$. \bigskip \subsubsubsection{Chebyshev polynomial expansion} In the case of \refname{sec:easyeq}{{\tt easyeq}}, we saw that using Gauss quadratures reduced the computation to evaluating $U$ at a fixed set of discrete momenta. This is not the case here, due to the presence of more complicated convolutions of $U$. Instead, we will approximate $U$ using polynomial approximations. We will now discuss this approximation for an arbitrary function $a(|k|)$, as we will need it for functions other than just $U$. \bigskip \point As in the previous paragraph, we compactify $[0,\infty)$ to $(-1,1]$ via the change of variables $r\mapsto\frac{1-r}{1+r}$, and split the interval into splines. Inside each spline, we use a Chebyshev polynomial expansion. However, we need to be careful in the first spline, which encapsulates the behavior at $|k|\to\infty$: $U$ decays to 0 as $|k|\to\infty$, and this behavior cannot be reproduced by a polynomial expansion. To avoid this, if $a\sim|k|^{-\nu_a}$, we expand $|k|^{\nu_a}a$ instead of $a$. Actually, to simplify expressions in the presence of the compactification, we will expand $2^{-\nu_a}(1+|k|)^{\nu_a}a$, which makes no conceptual difference. In the case of $a=U$, we will use $\nu=2$, which we know holds for the Simple equation\-~\cite{CJL20}. Putting all this together, we write, for $\tau\in(-1,1]$, if $|k|\equiv\frac{1-\tau}{1+\tau}$, \begin{equation} 2^{-\nu_a}(1+|k|)^{\nu_a}a(|k|)\equiv \frac1{(1+\tau)^{\nu_a}}a({\textstyle\frac{1-\tau}{1+\tau}}) =\sum_{l=0}^{J-1}\mathds 1_{\tau_l<\tau\leqslant\tau_{l+1}}\sum_{n=0}^\infty \mathbf F_{l,n}^{(\nu_a)}(a) T_n({\textstyle\frac{2\tau-(\tau_l+\tau_{l+1})}{\tau_{l+1}-\tau_l}}) \label{a_chebyshev_spline} \end{equation} in which $\mathds 1_{\tau_l<\tau\leqslant\tau_{l+1}}\in\{0,1\}$ is equal to 1 if and only if $\tau_l<\tau\leqslant\tau_{l+1}$, and \begin{equation} \mathbf F_{l,n}^{(\nu_a)}(a):= \frac{2-\delta_{n,0}}{\pi} \int_0^{\pi} d\theta\ \frac{\cos(n\theta)}{(1+\frac{\tau_{l+1}-\tau_l}2\cos(\theta)+\frac{\tau_{l+1}+\tau_l}2)^{\nu_a}} a({\textstyle\frac{2-(\tau_{l+1}-\tau_l)\cos(\theta)-(\tau_{l+1}+\tau_l)}{2+(\tau_{l+1}-\tau_l)\cos(\theta)+(\tau_{l+1}+\tau_l)}}) \label{bfF} \end{equation} (see appendix\-~\ref{appChebyshev} for a discussion of the Chebyshev polynomial expansion and the error of its truncations). \bigskip \point In order to compute an approximation for $a$ using\-~(\ref{a_chebyshev_spline}), we will truncate the sum over $n$ to a finite value $P$ (given by the parameter \refname{param:anyeq_P}{{\tt P}}). In addition, to compute the integral in\-~(\ref{bfF}), we will use a Gauss-Legendre quadrature of order $N$ (given by the parameter \refname{param:anyeq_N}{{\tt N}}), see appendix\-~\ref{appGL}: \begin{equation} \mathbf F_{l,n}^{(\nu_a)}(a)\approx\mathfrak F_{l,n}^{(\nu_a)}(\mathfrak a):= \frac{2-\delta_{n,0}}2\sum_{j=1}^N w_j \cos({\textstyle\frac{n\pi(1+x_j)}2})\frac{\mathfrak a_{l,j}}{(1-\frac{\tau_{l+1}-\tau_l}2\sin(\frac{\pi x_j}2)+\frac{\tau_{l+1}+\tau_l}2)^{\nu_a}} \label{frakF} \end{equation} with \begin{equation} \mathfrak a_{l,j}:=a(k_{l,j}) ,\quad k_{l,j}:=\frac{2+(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)-(\tau_{l+1}+\tau_l)}{2-(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)+(\tau_{l+1}+\tau_l)} \label{klj} \end{equation} and $(x_j,w_j)$ are the abscissas and weights for Gauss-Legendre quadratures.
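\bigskip
\indent In the same spirit, here is a minimal {\tt julia} sketch of\-~(\ref{frakF}). Again, this is illustrative and not the {\tt simplesolv} implementation; {\tt x}, {\tt w} and {\tt taus} are as in the previous sketch, and the factor $(1+\tau)^{-\nu_a}$ is written as {\tt exp(-nu*log(...))}.
\begin{code}
\# schematic version of (frakF): truncated Chebyshev coefficients of a function a
\# on the (1-based) spline l, with decay exponent nu
function chebyshev\_coefficients(a, l, nu, taus, x, w, P)
    dtau = taus[l+1] - taus[l]
    mtau = taus[l+1] + taus[l]
    F = zeros(P + 1)
    for n in 0:P, j in 1:length(x)
        k = (2 + dtau*sin(pi*x[j]/2) - mtau)/(2 - dtau*sin(pi*x[j]/2) + mtau)
        onetau = 1 - dtau*sin(pi*x[j]/2)/2 + mtau/2    \# this is 1+tau(x[j])
        F[n+1] += (2 - (n == 0))/2*w[j]*cos(n*pi*(1 + x[j])/2)*a(k)*exp(-nu*log(onetau))
    end
    return F
end
\end{code}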
\bigskip \point All in all, we approximate \begin{equation} a({\textstyle\frac{1-\tau}{1+\tau}}) \approx(1+\tau)^{\nu_a}\sum_{l=0}^{J-1}\mathds 1_{\tau_l<\tau\leqslant\tau_{l+1}}\sum_{n=0}^P \mathfrak F_{l,n}^{(\nu_a)}(\mathfrak a) T_n({\textstyle\frac{2\tau-(\tau_l+\tau_{l+1})}{\tau_{l+1}-\tau_l}}) \label{approxchebyshev} \end{equation} with $\mathfrak F$ defined in\-~(\ref{frakF}). Furthermore, using the Chebyshev polynomial expansion and Gauss-Legendre quadratures, we can compute all the observables we are interested in by computing $\mathbb U_{l,j}\equiv U(k_{l,j})$. With this in mind, we will represent the function $U$ as a vector of dimension $NJ$ whose components are $\mathbb U_{l,j}$. \bigskip \subsubsubsection{Convolutions} Using the Chebyshev polynomial approximation, we can compute convolutions as follows. \medskip \point First, we rewrite the convolution as a two-dimensional integral, using bipolar coordinates (see lemma\-~\ref{lemma:bipolar}): \begin{equation} (a\hat\ast b)(|k|)=\frac1{4\pi^2|k|}\int_0^\infty dt\ ta(t)\int_{||k|-t|}^{|k|+t}ds\ sb(s) \label{conv_approx1} \end{equation} We change variables to compactify the integrals $\tau=\frac{1-t}{1+t}$, $\sigma=\frac{1-s}{1+s}$: \begin{equation} (a\hat\ast b)(|k|)=\frac1{4\pi^2|k|}\int_{-1}^1 d\tau\ \frac{2(1-\tau)}{(1+\tau)^3}a({\textstyle\frac{1-\tau}{1+\tau}})\int_{\alpha_-(|k|,\tau)}^{\alpha_+(|k|,\tau)}d\sigma\ \frac{2(1-\sigma)}{(1+\sigma)^3}b({\textstyle\frac{1-\sigma}{1+\sigma}}) \label{convvar} \end{equation} with \begin{equation} \alpha_-(|k|,\tau):=\frac{1-|k|-\frac{1-\tau}{1+\tau}}{1+|k|+\frac{1-\tau}{1+\tau}} ,\quad \alpha_+(|k|,\tau):=\frac{1-||k|-\frac{1-\tau}{1+\tau}|}{1+||k|-\frac{1-\tau}{1+\tau}|} . \label{anyeq_alpha} \end{equation} Therefore, using the approximation\-~(\ref{approxchebyshev}), if $\mathfrak a_{l,j}:=a(k_{l,j})$ and $\mathfrak b_{l,j}:=b(k_{l,j})$ (\ref{klj}), \begin{equation} (a\hat\ast b)(|k_{l,j}|)\approx (\mathfrak a\odot \mathfrak b)_{l,j}:= \sum_{n,m=0}^P \sum_{l',l''=0}^{J-1} \mathfrak F_{l',n}^{(\nu_a)}(\mathfrak a)\mathfrak F_{l'',m}^{(\nu_b)}(\mathfrak b) A_{l',n;l'',m}^{(\nu_a,\nu_b)}(|k_{l,j}|) \label{odot} \end{equation} with \begin{equation} \begin{largearray} A_{l',n;l'',m}^{(\nu_a,\nu_b)}(|k|):= \frac1{4\pi^2|k|} \int_{\tau_{l'}}^{\tau_{l'+1}} d\tau\ \frac{2(1-\tau)}{(1+\tau)^{3-\nu_a}}T_n({\textstyle\frac{2\tau-(\tau_{l'}+\tau_{l'+1})}{\tau_{l'+1}-\tau_{l'}}}) \cdot\\[0.5cm]\hfill\cdot \mathds 1_{\alpha_-(|k|,\tau)<\tau_{l''+1}} \mathds 1_{\alpha_+(|k|,\tau)>\tau_{l''}} \int_{\max(\tau_{l''},\alpha_-(|k|,\tau))}^{\min(\tau_{l''+1},\alpha_+(|k|,\tau))}d\sigma\ \frac{2(1-\sigma)}{(1+\sigma)^{3-\nu_b}}T_m({\textstyle\frac{2\sigma-(\tau_{l''}+\tau_{l''+1})}{\tau_{l''+1}-\tau_{l''}}}) \end{largearray} \label{conv_approx2} \end{equation} (we need the indicator functions to ensure that the bounds of the integral are correct). Note that $A$ is independent of $a$ and $b$, and can be computed once and for all at the beginning of execution for all values of $k_{l,j}$ (\ref{klj}). We then compute the integrals using Gauss-Legendre quadratures (the integrand is not singular provided $\nu_a,\nu_b\geqslant 2$, as is proved below).
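\bigskip
\indent Once the coefficients\-~(\ref{frakF}) and the values of $A$ have been precomputed, evaluating\-~(\ref{odot}) at a given momentum is a plain contraction, as in the following schematic {\tt julia} snippet. The storage layout here is hypothetical and chosen for readability: {\tt Fa[lp][n+1]} holds $\mathfrak F_{l',n}^{(\nu_a)}(\mathfrak a)$, and {\tt A[lp,n+1,lpp,m+1]} the corresponding value of $A$ at that momentum.
\begin{code}
\# schematic contraction (odot) at a single momentum k(l,j)
function hat\_convolution(Fa, Fb, A)
    s = 0.
    for lp in 1:size(A, 1), n in 1:size(A, 2), lpp in 1:size(A, 3), m in 1:size(A, 4)
        s += Fa[lp][n]*Fb[lpp][m]*A[lp, n, lpp, m]
    end
    return s
end
\end{code}
Each such evaluation costs of the order of $(PJ)^2$ operations, which is one of the reasons $A$ is precomputed once and for all.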
\bigskip \point Note that these integrals are not singular as long as $\nu_a,\nu_b\geqslant 2$: indeed (since the only possible problems occur at $-1$, it suffices to consider the case with only one spline), $\alpha_-,\alpha_+>-1$ for $\tau>-1$, and \begin{equation} \alpha_\pm(k,\tau)=-1+(1+\tau)\pm\frac k2(1+\tau)^2+O(1+\tau)^3 \end{equation} and \begin{equation} \int_{\alpha_-(|k|,\tau)}^{\alpha_+(|k|,\tau)}d\sigma\ \left|\frac{2(1-\sigma)}{(1+\sigma)^{3-\nu_b}}T_m(\sigma)\right| \leqslant 4\int_{\alpha_-(|k|,\tau)}^{\alpha_+(|k|,\tau)}d\sigma\ \frac1{(1+\sigma)^{3-\nu_b}} \end{equation} which, if $\nu_b=2$, yields \begin{equation} 4\log\left(\frac{1+\alpha_+(|k|,\tau)}{1+\alpha_-(|k|,\tau)}\right) =4|k|(1+\tau)+O(1+\tau)^2 \end{equation} and if $\nu_b>2$, yields \begin{equation} \frac4{\nu_b-2}\left((1+\alpha_+(|k|,\tau))^{\nu_b-2}-(1+\alpha_-(|k|,\tau))^{\nu_b-2}\right) = \frac4{\nu_b-2}|k|(1+\tau)^{\nu_b-1}(1+O(1+\tau)) . \end{equation} Therefore, \begin{equation} \left|\frac{2(1-\tau)}{(1+\tau)^{3-\nu_a}}T_n(\tau)\int_{\alpha_-(|k|,\tau)}^{\alpha_+(|k|,\tau)}d\sigma\ \frac{2(1-\sigma)}{(1+\sigma)^{3-\nu_b}}T_m(\sigma)\right| \leqslant \frac8{c_{\nu_b}} |k|(1-\tau)(1+\tau)^{\nu_a+\nu_b-4}(1+O(1+\tau)) \end{equation} where $c_{2}:=1$ and $c_{\nu_b}:=\nu_b-2$ if $\nu_b>2$. The integrand is, therefore, not singular as long as $\nu_a,\nu_b\geqslant 2$. \bigskip \point Evaluating convolutions at $k=0$ is not immediate, as the formula for $A(0)$ involves a bit of a computation. To compute $A(0)$, we expand \begin{equation} \alpha_-(|k|,\tau)=\tau-\frac{|k|(1+\tau)^2}2+O(k^2) ,\quad \alpha_+(|k|,\tau)=\tau+\frac{|k|(1+\tau)^2}2+O(k^2) \end{equation} so \begin{equation} A_{\zeta,n;\zeta',m}^{(\nu_a,\nu_b)}(0)=\mathds 1_{\zeta=\zeta'} \frac1{\pi^2} \int_{\tau_\zeta}^{\tau_{\zeta+1}} d\tau\ \frac{(1-\tau)^2}{(1+\tau)^{4-\nu_a-\nu_b}}T_n({\textstyle\frac{2\tau-(\tau_\zeta+\tau_{\zeta+1})}{\tau_{\zeta+1}-\tau_\zeta}}) T_m({\textstyle\frac{2\tau-(\tau_{\zeta}+\tau_{\zeta+1})}{\tau_{\zeta+1}-\tau_{\zeta}}}) \end{equation} which we then compute using a Gauss-Legendre quadrature. We can then evaluate convolutions at $k=0$: \begin{equation} (a\hat\ast b)(0)\approx\left<\mathfrak a\mathfrak b\right> := \sum_{n,m=0}^P \sum_{l,l'=0}^{J-1} \mathfrak F_{l,n}^{(\nu_a)}(\mathfrak a)\mathfrak F_{l',m}^{(\nu_b)}(\mathfrak b) A_{l,n;l',m}^{(\nu_a,\nu_b)}(0) \label{avg} \end{equation} \bigskip \point Let us now compute some choices of $\mathfrak a,\mathfrak b$ more explicitly. \medskip \subpoint Let us start with $\mathds 1_{l,n}\hat\ast a$ where $\mathds 1_{l,n}$ is the vector which has 0's everywhere except at position $(l,n)$. We have \begin{equation} \mathfrak F_{l',m}^{(\nu_1)}(\mathds 1_{l,n})=\delta_{l',l}\frac{2-\delta_{m,0}}2w_n\frac{\cos({\textstyle\frac{m\pi(1+x_n)}2})}{(1-\frac{\tau_{l+1}-\tau_l}2\sin(\frac{\pi x_n}2)+\frac{\tau_{l+1}+\tau_l}2)^{\nu_1}} \end{equation} so \begin{equation} (\mathds 1_{l'',n}\odot\mathfrak a)_{l,j}= \sum_{q,p=0}^P\sum_{l'=0}^{J-1}\frac{2-\delta_{q,0}}2w_n\frac{\cos({\textstyle\frac{q\pi(1+x_n)}2})}{(1-\frac{\tau_{l''+1}-\tau_{l''}}2\sin(\frac{\pi x_n}2)+\frac{\tau_{l''+1}+\tau_{l''}}2)^{\nu_1}}A_{l'',q;l',p}^{(\nu_1,\nu_a)}(k_{l,j})\mathfrak F_{l',p}^{(\nu_a)}(\mathfrak a) . \end{equation} \bigskip \subpoint We now turn to $a\hat\ast\hat v$. Let \begin{equation} \Upsilon(t,|k|):= \int_{||k|-t|}^{|k|+t}ds\ \frac{s\hat v(s)}{|k|} \label{Upsilon} \end{equation} We have \begin{equation} a\hat\ast\hat v=\frac 1{4\pi^2}\int_0^\infty dt\ ta(t)\Upsilon(t,|k|) .
\label{avUpsilon} \end{equation} Following the integration scheme\-~(\ref{anyeq_integration}), \begin{equation} (\mathfrak a\odot\mathfrak v)_{l,i}= \frac1{32\pi}\sum_{l'=0}^{J-1}(\tau_{l'+1}-\tau_{l'})\sum_{j=1}^Nw_j \cos({\textstyle\frac{\pi x_j}2})(1+\vartheta_{l'}(x_j))^2\vartheta_{l'}(x_j)\mathfrak a_{l',j}\Upsilon(\vartheta(x_j),k_{l,i}) . \label{adotv} \end{equation} At $k=0$, \begin{equation} \left<\mathfrak a\mathfrak v\right>= \frac1{32\pi}\sum_{l'=0}^{J-1}(\tau_{l'+1}-\tau_{l'})\sum_{j=1}^Nw_j \cos({\textstyle\frac{\pi x_j}2})(1+\vartheta_{l'}(x_j))^2\vartheta_{l'}(x_j)\mathfrak a_{l',j}\Upsilon(\vartheta(x_j),0) \label{} \end{equation} with \begin{equation} \Upsilon(t,0)=2tv(t) . \end{equation} The quantities $\Upsilon(\vartheta(x_n),k_{l,i})$ and $\Upsilon(\vartheta(x_n),0)$ are independent of $a$ and can be computed once and for all at execution. The integral in\-~(\ref{Upsilon}) is computed using Gauss-Legendre quadratures, but without splitting into splines. To maintain a high precision, we set the order of the integration to \refname{param:anyeq_J}{{\tt J}}$\times$\refname{param:anyeq_N}{{\tt N}}. \bigskip \subpoint Finally, \begin{equation} (\mathds 1_{l',n}\odot\mathfrak v)_{l,i}= \frac{\tau_{l'+1}-\tau_{l'}}{32\pi}w_n\cos({\textstyle\frac{\pi x_n}2})(1+\vartheta_{l'}(x_n))^2\vartheta_{l'}(x_n)\Upsilon(\vartheta(x_n),k_{l,i}) \end{equation} and \begin{equation} \left<\mathds 1_{l',n}\mathfrak v\right>= \frac{\tau_{l'+1}-\tau_{l'}}{32\pi}w_n\cos({\textstyle\frac{\pi x_n}2})(1+\vartheta_{l'}(x_n))^2\vartheta_{l'}(x_n)\Upsilon(\vartheta(x_n),0) . \end{equation} \bigskip \subsubsubsection{Evaluating $\hat l_3$} The only term in\-~(\ref{anyeq}) that does not involve just convolutions (whose computation was described above) is $\hat l_3$ (\ref{l3}). To evaluate it, we first change variables, using a generalization of bipolar coordinates (see lemma\-~\ref{lemma:bipolar2}): \begin{equation} \begin{largearray} \hat l_3(|k|) = \frac1{(2\pi)^5\rho^2| k|^2}\int_0^\infty dt\int_{|| k|-t|}^{| k|+t}ds\int_0^\infty dt'\int_{|| k|-t'|}^{| k|+t'}ds'\int_0^{2\pi}d\theta\ sts't'U(s)U(t)U(s')U(t') \cdot\\\hfill\cdot S(\mathfrak R(s,t,s',t',\theta,| k|)) \end{largearray} \end{equation} with \begin{equation} \begin{largearray} \mathfrak R(s,t,s',t',\theta,| k|) := \frac1{\sqrt 2| k|} \Big( | k|^2(s^2+t^2+(s')^2+(t')^2)-| k|^4-(s^2-t^2)((s')^2-(t')^2) \\[0.5cm]\hfill - \sqrt{(4| k|^2s^2-(| k|^2+s^2-t^2)^2)(4| k|^2(s')^2-(| k|^2+(s')^2-(t')^2)^2)}\cos\theta \Big)^{\frac12} . \end{largearray} \end{equation} We change variables as in\-~(\ref{convvar}): \begin{equation} \begin{largearray} \hat l_3(|k|) = \frac1{(2\pi)^5\rho^2| k|^2} \int_{-1}^1 d\tau\ \frac{2(1-\tau)}{(1+\tau)^3}U({\textstyle\frac{1-\tau}{1+\tau}}) \int_{\alpha_-(| k|,\tau)}^{\alpha_+(| k|,\tau)} d\sigma\ \frac{2(1-\sigma)}{(1+\sigma)^3}U({\textstyle\frac{1-\sigma}{1+\sigma}}) \int_{-1}^1 d\tau'\ \frac{2(1-\tau')}{(1+\tau')^3} \cdot\\[0.5cm]\hfill\cdot U({\textstyle\frac{1-\tau'}{1+\tau'}}) \int_{\alpha_-(| k|,\tau')}^{\alpha_+(| k|,\tau')} d\sigma'\ \frac{2(1-\sigma')}{(1+\sigma')^3}U({\textstyle\frac{1-\sigma'}{1+\sigma'}}) \int_0^{2\pi}d\theta\ S(\mathfrak R({\textstyle \frac{1-\sigma}{1+\sigma},\frac{1-\tau}{1+\tau},\frac{1-\sigma'}{1+\sigma'},\frac{1-\tau'}{1+\tau'},\theta,| k|})) . 
\end{largearray} \end{equation} We expand $U$ and $ S$ into Chebyshev polynomials as in\-~(\ref{a_chebyshev_spline}), and split the integrals into splines: \begin{equation} \hat l_3(|k_{l,j}|) \approx (\mathbb U^{\otimes4}\otimes\mathbb S)_{l,j}:= \sum_{\lambda_1,\cdots,\lambda_5=0}^{J-1} \sum_{n_1,\cdots,n_5=1}^\infty \bar A_{\lambda_1,n_1;\cdots;\lambda_5,n_5}^{(\nu)}(|k_{l,j}|) \prod_{i=1}^4 \left(\mathfrak F_{\lambda_i,n_i}^{(\nu)}(\mathbb U)\right) \mathfrak F_{\lambda_5,n_5}^{(\nu)}(\mathbb S) \label{otimes} \end{equation} with $\mathbb U_{l,j}:=U(k_{l,j})$, $\mathbb S_{l,j}:=S(k_{l,j})$ and \begin{equation} \begin{largearray} \bar A_{\lambda_1,n_1;\cdots;\lambda_5,n_5}^{(\nu)}(| k|) = \frac1{(2\pi)^5\rho^2| k|^2} \int_{\tau_{\lambda_1}}^{\tau_{\lambda_1+1}} d\tau\ \frac{2(1-\tau)}{(1+\tau)^{3-\nu}}T_{n_1}({\textstyle\frac{2\tau-(\tau_{\lambda_1}+\tau_{\lambda_1+1})}{\tau_{\lambda_1+1}-\tau_{\lambda_1}}}) \cdot\\[0.5cm]\hskip1cm\cdot \mathds 1_{\alpha_-(| k|,\tau)<\tau_{\lambda_2+1}}\mathds 1_{\alpha_+(| k|,\tau)>\tau_{\lambda_2}} \int_{\max(\tau_{\lambda_2},\alpha_-(| k|,\tau))}^{\min(\tau_{\lambda_2+1},\alpha_+(| k|,\tau))} d\sigma\ \frac{2(1-\sigma)}{(1+\sigma)^{3-\nu}}T_{n_2}({\textstyle\frac{2\sigma-(\tau_{\lambda_2}+\tau_{\lambda_2+1})}{\tau_{\lambda_2+1}-\tau_{\lambda_2}}}) \cdot\\[0.5cm]\hskip1cm\cdot \int_{\tau_{\lambda_3}}^{\tau_{\lambda_3+1}} d\tau'\ \frac{2(1-\tau')}{(1+\tau')^{3-\nu}}T_{n_3}({\textstyle\frac{2\tau'-(\tau_{\lambda_3}+\tau_{\lambda_3+1})}{\tau_{\lambda_3+1}-\tau_{\lambda_3}}}) \cdot\\[0.5cm]\hskip1cm\cdot \mathds 1_{\alpha_-(| k|,\tau')<\tau_{\lambda_4+1}}\mathds 1_{\alpha_+(| k|,\tau')>\tau_{\lambda_4}} \int_{\max(\tau_{\lambda_4},\alpha_-(| k|,\tau'))}^{\min(\tau_{\lambda_4+1},\alpha_+(| k|,\tau'))} d\sigma'\ \frac{2(1-\sigma')}{(1+\sigma')^{3-\nu}}T_{n_4}({\textstyle\frac{2\sigma'-(\tau_{\lambda_4}+\tau_{\lambda_4+1})}{\tau_{\lambda_4+1}-\tau_{\lambda_4}}}) \cdot\\[0.5cm]\hskip1cm\cdot \int_0^{2\pi}d\theta\ \frac{2^\nu}{(2+\mathfrak R)^\nu} T_{n_5}({\textstyle\frac{2\frac{1-\mathfrak R}{1+\mathfrak R}-(\tau_{\lambda_5}+\tau_{\lambda_5+1})}{\tau_{\lambda_5+1}-\tau_{\lambda_5}}}) \mathds 1_{\tau_{\lambda_5}<\frac{1-\mathfrak R}{1+\mathfrak R}<\tau_{\lambda_5+1}} \end{largearray} \end{equation} in which $\mathfrak R$ is a shorthand for $\mathfrak R({\textstyle \frac{1-\sigma}{1+\sigma},\frac{1-\tau}{1+\tau},\frac{1-\sigma'}{1+\sigma'},\frac{1-\tau'}{1+\tau'},\theta,| k|})$. Note that $\bar A$ is independent of $U$, and can be computed once and for all at the beginning of execution. Since the tensor $\bar A$ is quite large (it contains $(NJ)^5$ entries), and its computation can be rather long it can be inconvenient to compute $\bar A$ at every execution. Instead, one can use the \refname{command:anyeq_save_Abar}{{\tt save\_Abar}} method to compute $\bar A$ and save it to a file, which can then be recalled via the {\tt -s} option on the command line. \bigskip \subsubsubsection{Main algorithm to compute $U$} We are now ready to detail how $U\equiv\rho\hat u$ is computed. All of the observables we are interested in are approximated from the values $\mathbb U_{l,j}:=U(k_{l,j})$ (\ref{klj}). 
\bigskip \point The equation for $(\mathbb U_{l,j})_{l\in\{0,\cdots,J-1\},j\in\{1,\cdots,N\}}$ is obtained by approximating\-~(\ref{anyeq_hatu}) according to the prescriptions detailed above: \begin{equation} \mathbb U_{l,j}:= \frac{1+\mathbb Y_{l,j}}{2(\mathbb X_{l,j}+1)}\ \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \label{bbU} \end{equation} \begin{equation} \mathbb X_{l,j}:=\frac1{\mathbb L_{l,j}}\left(\mathbb K_{l,j}-\mathbb L_{l,j}+\frac{k_{l,j}^2}{2\rho}+\mathbb I_{l,j}\right) ,\quad \mathbb Y_{l,j}:=\frac1{\mathbb L_{l,j}}\left(\mathbb S_{l,j}-\mathbb L_{l,j}+\frac12\mathbb J_{l,j}+\mathbb G_{l,j}\right) \end{equation} \begin{equation} \mathbb S_{l,j}:= \mathfrak v_{l,j}-\frac1{\rho}(\mathbb U\odot\mathfrak v)_{l,j} ,\quad \mathfrak v_{l,j}:=\hat v(k_{l,j}) ,\quad \mathbb E:=\hat v(0)-\frac1{\rho}\left<\mathbb U\mathfrak v\right> \end{equation} \begin{equation} \mathbb I_{l,j}:=\frac1{\rho}\gamma_{L,2}(\beta_{L,2}(\mathbb U\odot(\mathbb U\mathbb S))_{l,j}+(1-\beta_{L,2})\mathbb E(\mathbb U\odot\mathbb U)_{l,j}) \end{equation} \begin{equation} \mathbb K_{l,j}:=\gamma_K(\beta_K\mathbb S_{l,j}+(1-\beta_K)\mathbb E) ,\quad \mathbb L_{l,j}:=\gamma_{L,1}(\beta_{L,1}\mathbb S_{l,j}+(1-\beta_{L,1})\mathbb E) \end{equation} \begin{equation} \mathbb J_{l,j}:=\gamma_{L,3}(1-\beta_{L,3})\frac{\mathbb E}{\rho^2}(\mathbb U\odot\mathbb U)^2_{l,j} +\gamma_{L,3}\beta_{L,3}(\mathbb U^{\otimes4}\otimes\mathbb S)_{l,j} \end{equation} \begin{equation} \begin{largearray} \mathbb G_{l,j}:= \frac2{\rho}\gamma_K\alpha_K(\mathbb U\odot((\beta_K\mathbb S+(1-\beta_K)\mathbb E)\mathbb U))_{l,j} \\[0.5cm]\hfill - \frac1{\rho}\alpha_{L,1}(\mathbb U\odot((\beta_{L,1}\mathbb S+(1-\beta_{L,1})\mathbb E)\mathbb U^2))_{l,j} + \frac2{\rho}\alpha_{L,2}(\mathbb U\odot(\mathbb I\mathbb U))_{l,j} - \frac1{2\rho}\alpha_{L,3}(\mathbb U\odot \mathbb J)_{l,j} \end{largearray} \end{equation} (see\-~(\ref{odot}), (\ref{otimes}) and\-~(\ref{avg}) for the definitions of $\odot$, $\otimes$ and $\left<\cdot\right>$). \bigskip \point We rewrite\-~(\ref{bbU}) as a root finding problem: \begin{equation} \Xi_{l,j}(\mathbb U):= \mathbb U_{l,j}- \frac{1+\mathbb Y_{l,j}}{2(\mathbb X_{l,j}+1)}\ \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) =0 \label{root_anyeq} \end{equation} which we solve using the Newton algorithm, that is, we define a sequence of $\mathbb U$'s: \begin{equation} \mathbb U^{(n+1)}=\mathbb U^{(n)}-(D\Xi(\mathbb U^{(n)}))^{-1}\Xi(\mathbb U^{(n)}) \end{equation} where $D\Xi$ is the Jacobian of $\Xi$: \begin{equation} (D\Xi)_{l,j;l',i}:=\frac{\partial\Xi_{l,j}}{\partial\mathbb U_{l',i}} . \end{equation} \bigskip \point We initialize the algorithm with the solution of the \refname{eq:easyeq_medeq}{Medium equation}, which is computed using the \refname{sec:easyeq}{{\tt easyeq}} method. However, \refname{sec:easyeq}{{\tt easyeq}} only computes $\hat u$ at the momenta given by the Gauss-Legendre quadratures\-~(\ref{easyeq_ki}). To obtain a value for $\hat u$ at $k_{l,j}$, we use a linear interpolation (code is provided for a polynomial interpolation, which has not performed as well). The parameters \refname{param:anyeq_tolerance}{{\tt tolerance}}, \refname{param:anyeq_maxiter}{{\tt maxiter}} are passed on to \refname{sec:easyeq}{{\tt easyeq}} as is, and the order of the Gauss-Legendre quadratures is set to \refname{param:anyeq_J}{{\tt J}}$\times$\refname{param:anyeq_N}{{\tt N}}. 
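\bigskip
\indent Schematically, the Newton loop has the following structure (a minimal {\tt julia} sketch, not taken from the {\tt simplesolv} source; {\tt Xi} and {\tt DXi} stand for the maps $\Xi$ and $D\Xi$ discussed in this section, {\tt U0} for the initial guess obtained from \refname{sec:easyeq}{{\tt easyeq}}, and the default values match the {\tt tolerance} and {\tt maxiter} parameters):
\begin{code}
using LinearAlgebra

\# schematic Newton iteration for the root finding problem (root\_anyeq)
function newton\_solve(Xi, DXi, U0; tolerance = 1e-11, maxiter = 21)
    U = copy(U0)
    for n in 1:maxiter
        step = inv(DXi(U))*Xi(U)     \# Newton step, solution of the linear system
        err = norm(step)/norm(U)     \# relative Newton error
        U = U - step
        if isless(err, tolerance)
            return U
        end
    end
    error("Newton algorithm did not converge")
end
\end{code}
(In practice one would solve the linear system directly rather than form the inverse of $D\Xi$; the sketch is written this way only to keep the notation close to the formulas of this section.)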
\bigskip \point We are left with computing the Jacobian of $\Xi$: \begin{equation} \begin{largearray} \partial_{l',i}\Xi_{l,j}\equiv \frac{\partial\Xi_{l,j}}{\partial\mathbb U_{l',i}}= \delta_{l,l'}\delta_{i,j} +\frac1{2(\mathbb X_{l,j}+1)}\left( \frac{(1+\mathbb Y_{l,j})\partial_{l',i}\mathbb X_{l,j}}{\mathbb X_{l,j}+1} - \partial_{l',i}\mathbb Y_{l,j} \right) \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \\[0.5cm]\hfill +\frac{(1+\mathbb Y_{l,j})}{2(\mathbb X_{l,j}+1)^3}\left( \frac{2(1+\mathbb Y_{l,j})}{\mathbb X_{l,j}+1}\partial_{l',i}\mathbb X_{l,j} -\partial_{l',i}\mathbb Y_{l,j} \right)\ \partial\Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \end{largearray} \end{equation} with \begin{equation} \partial_{l',i}\mathbb X_{l,j}= \frac1{\mathbb L_{l,j}}\left(\partial_{l',i}\mathbb K_{l,j}-\partial_{l',i}\mathbb L_{l,j}+\partial_{l',i}\mathbb I_{l,j}\right) - \frac{\partial_{l',i}\mathbb L_{l,j}}{\mathbb L_{l,j}}\mathbb X_{l,j} \end{equation} \begin{equation} \partial_{l',i}\mathbb Y_{l,j}= \frac1{\mathbb L_{l,j}}\left(\partial_{l',i}\mathbb S_{l,j}-\partial_{l',i}\mathbb L_{l,j}+\frac12\partial_{l',i}\mathbb J_{l,j}+\partial_{l',i}\mathbb G_{l,j}\right) - \frac{\partial_{l',i}\mathbb L_{l,j}}{\mathbb L_{l,j}}\mathbb Y_{l,j} \end{equation} \begin{equation} \partial_{l',i}\mathbb S_{l,j}= -\frac1{\rho}(\mathds 1_{l',i}\odot\mathfrak v)_{l,j} ,\quad \partial_{l',i}\mathbb E=-\frac1{\rho}\left<\mathds 1_{l',i}\mathfrak v\right> \end{equation} \begin{equation} \partial_{l',i}\mathbb K_{l,j}:=\gamma_K(\beta_K\partial_{l',i}\mathbb S_{l,j}+(1-\beta_K)\partial_{l',i}\mathbb E) ,\quad \partial_{l',i}\mathbb L_{l,j}:=\gamma_{L,1}(\beta_{L,1}\partial_{l',i}\mathbb S_{l,j}+(1-\beta_{L,1})\partial_{l',i}\mathbb E) \end{equation} \begin{equation} \begin{largearray} \partial_{l',i}\mathbb I_{l,j}= \frac1{\rho}\gamma_{L,2}\left(\beta_{L,2}( \mathds 1_{l',i}\odot(\mathbb U\mathbb S) + \mathbb U\odot(\mathds 1_{l',i}\mathbb S+\mathbb U\partial_{l',i}\mathbb S) )_{l,j} \right.\\[0.5cm]\hfill\left. 
+ (1-\beta_{L,2})( \partial_{l',i}\mathbb E(\mathbb U\odot\mathbb U)_{l,j} + 2\mathbb E(\mathbb U\odot\mathds 1_{l',i})_{l,j} )\right) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \partial_{l',i}\mathbb J_{l,j}= \frac1{\rho^2}\gamma_{L,3}(1-\beta_{L,3})\left( \partial_{l',i}\mathbb E(\mathbb U\odot\mathbb U)^2_{l,j} + 4\mathbb E(\mathbb U\odot\mathbb U)_{l,j}(\mathbb U\odot\mathds 1_{l',i})_{l,j} \right) \\[0.5cm]\hfill +\gamma_{L,3}\beta_{L,3}( 4\mathds 1_{l',i}\otimes\mathbb U^{\otimes3}\otimes\mathbb S + \mathbb U^{\otimes4}\otimes\partial_{l',i}\mathbb S )_{l,j} \end{largearray} \end{equation} \begin{equation} \begin{largearray} \partial_{l',i}\mathbb G_{l,j}= \frac2{\rho}\gamma_K\alpha_K\beta_K\left( \mathds 1_{l',i}\odot(\mathbb S\mathbb U) + \mathbb U\odot(\mathbb S\mathds 1_{l',i} +\partial_{l',i}\mathbb S\mathbb U) \right)_{l,j} \\[0.5cm]\hfill +\frac2{\rho}\gamma_K\alpha_K(1-\beta_K)\left( \partial_{l',i}\mathbb E(\mathbb U\odot\mathbb U)_{l,j} +2\mathbb E(\mathds 1_{l',i}\odot\mathbb U)_{l,j} \right) \\[0.5cm]\indent - \frac1{\rho}\alpha_{L,1}\beta_{L,1}\left( \mathds 1_{l',i}\odot(\mathbb S\mathbb U^2) + \mathbb U\odot( \partial_{l',i}\mathbb S\mathbb U^2 + 2\mathbb S\mathbb U\mathds 1_{l',i} ) \right)_{l,j} \\[0.5cm]\hfill - \frac1{\rho}\alpha_{L,1}(1-\beta_{L,1})\left( \mathbb E\mathds 1_{l',i}\odot\mathbb U^2 + \mathbb U\odot( \partial_{l',i}\mathbb E\mathbb U^2 + 2\mathbb E\mathbb U\mathds 1_{l',i} ) \right)_{l,j} \\[0.5cm]\hfill + \frac2{\rho}\alpha_{L,2}( (\mathds 1_{l',i}\odot(\mathbb I\mathbb U))_{l,j} + \mathbb U\odot( \partial_{l',i}\mathbb I\mathbb U + \mathbb I\mathds 1_{l',i}) )_{l,j} - \frac1{2\rho}\alpha_{L,3}( \mathds 1_{l',i}\odot \mathbb J + \mathbb U\odot\partial_{l',i}\mathbb J )_{l,j} . \end{largearray} \end{equation} \bigskip \point We iterate the Newton algorithm until the Newton relative error $\epsilon$ becomes smaller than the \refname{param:anyeq_tolerance}{{\tt tolerance}} parameter. The Newton error is defined as \begin{equation} \epsilon=\frac{\|\mathbb U^{(n+1)}-\mathbb U^{(n)}\|_2}{\|\mathbb U^{(n)}\|_2} \end{equation} where $\|\cdot\|_2$ is the $l_2$ norm. The energy thus obtained is \begin{equation} e=\frac\rho2\mathbb E \end{equation} the Fourier transform $\hat u$ of the solution is \begin{equation} \hat u(k_{l,j})\approx\frac{\mathbb U_{l,j}}\rho \end{equation} where $k_{l,j}$ was defined in\-~(\ref{klj}), and the solution $u$ in real space is obtained by inverting the Fourier transform, following the prescription of\-~(\ref{anyeq_integration}): \begin{equation} u(|x|)=\int \frac{dk}{(2\pi)^3}\ e^{-ikx}\hat u(|k|) \approx \frac1{\rho|x|}\sum_{l=0}^{J-1} \frac{\tau_{l+1}-\tau_l}{16\pi} \sum_{j=1}^Nw_j \cos({\textstyle\frac{\pi x_j}2})(1+\vartheta_l(x_j))^2\vartheta_l(x_j)\mathbb U_{l,j}\sin(\vartheta_l(x_j)|x|) . \end{equation} \bigskip \subsubsubsection{Condensate fraction} To compute the condensate fraction, we solve the modified {\tt anyeq} (see\-~\cite{CJL21}): \begin{equation} (-\Delta+2\mu)u_\mu = (1-u_\mu)v-2\rho K+\rho^2 L , \end{equation} where $K$ and $L$ are defined as in\-~(\ref{anyeq_K})-(\ref{anyeq_e}) in which $u$ is replaced with $u_\mu$. The uncondensed fraction is then \begin{equation} \eta=\partial_\mu e|_{\mu=0} =-\frac\rho2\int dx\ v(x)\partial_\mu u_\mu(x)|_{\mu=0} . \end{equation} To compute the energy in the presence of the parameter $\mu$, we proceed in the same way as for $\mu=0$, the only difference being that $k^2$ should formally be replaced by $k^2+2\mu$.
In other words, we consider $\mathbb U_{j,l}=u_\mu(|k_{j,l}|)$ and define $\Xi(\mathbb U,\mu)$ in the same way as in\-~(\ref{root_anyeq}), except that $\mathbb X_i$ should be replaced by \begin{equation} \mathbb X_{l,j}=\frac1{\mathbb L_{l,j}}\left(\mathbb K_{l,j}-\mathbb L_{l,j}+\frac{k_{l,j}^2+2\mu}{2\rho}+\mathbb I_{l,j}\right) . \end{equation} We then solve \begin{equation} \Xi(\mathbb U_\mu,\mu)=0 \end{equation} By differentiating this identity with respect to $\mu$, we find $\partial_\mu u_\mu$: \begin{equation} \partial_\mu \mathbb U|_{\mu=0}=-(D\Xi)^{-1}\partial_\mu\Xi|_{\mu=0} \end{equation} and \begin{equation} \partial_\mu\Xi|_{\mu=0}= \frac{(1+\mathbb Y_{l,j})\partial_\mu\mathbb X_{l,j}}{2(\mathbb X_{l,j}+1)^2} \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) +\frac{(1+\mathbb Y_{l,j})^2}{(\mathbb X_{l,j}+1)^4}\partial_\mu\mathbb X_{l,j} \partial\Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) ,\quad \partial_\mu\mathbb X_{l,j}=\frac1{\rho\mathbb L_{l,j}} . \end{equation} We then approximate \begin{equation} \eta\approx -\frac12\left<\mathfrak v\partial_\mu\mathbb U\right> \end{equation} (see\-~(\ref{avg})). \bigskip \subsubsubsection{Correlation function (spherical average)} The two-point correlation function is \begin{equation} c_2(x):= 2\rho\frac{\delta e}{\delta v(x)} \end{equation} and its spherical average is \begin{equation} C_2(|x|):=\frac1{4\pi|x|^2}\int dy\ \delta(|x|-|y|)c_2(y) . \end{equation} In Fourier space, \begin{equation} c_2(x)= 2\rho \int dk\ e^{ikx}\frac{\delta e}{\delta\hat v(k)} \end{equation} so \begin{equation} C_2(|x|)= 2\rho\int dk\ \left(\frac1{4\pi|x|^2}\int dy\ \delta(|x|-|y|)e^{iky}\right)\frac{\delta e}{\delta\hat v(k)} =2\rho\int dk\ \frac{\sin(|k||x|)}{|k||x|}\frac{\delta e}{\delta\hat v(k)} . \label{C2fourier} \end{equation} \bigskip \point We can compute this quantity by considering a modified {\tt anyeq} in Fourier space, by formally replacing $\hat v$ with \begin{equation} \hat v+\lambda g(|k|) ,\quad g(|k|):=\frac{\sin(|k||x|)}{|k||x|} . \label{2pt_addv} \end{equation} Indeed, if $e_\lambda$ denotes the energy of this modified equation, \begin{equation} \partial_\lambda e_\lambda|_{\lambda=0} =\int dk\ \frac{\delta e}{\delta\hat v(k)}\partial_\lambda({\textstyle \hat v(k)+\lambda g(|k|)}) =\int dk\ g(|k|)\frac{\delta e}{\delta\hat v(k)} . \end{equation} So, denoting the solution of the modified equation by $u_\lambda$, \begin{equation} C_2(x)=2\rho\partial_\lambda e_\lambda|_{\lambda=0} =\rho^2g(0)-\rho^2\int\frac{dk}{(2\pi)^3}\ (g(k)\hat u(k)+\hat v(k)\partial_\lambda\hat u_\lambda(k)|_{\lambda=0}) . \end{equation} We compute $\partial_\lambda u_\lambda|_{\lambda=0}$ in the same way as the uncondensed fraction: we define $\Xi(\mathbb U,\lambda)$ by formally adding $\lambda g(|k|)$ to $\hat v$, solve $\Xi(\mathbb U,\lambda)=0$, and differentiate: \begin{equation} \partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0} . 
\end{equation} \bigskip \point We compute $\partial_\lambda\Xi|_{\lambda=0}$: \begin{equation} \begin{largearray} \partial_\lambda\Xi_{l,j}= \frac1{2(\mathbb X_{l,j}+1)}\left( \frac{(1+\mathbb Y_{l,j})\partial_\lambda\mathbb X_{l,j}}{\mathbb X_{l,j}+1} - \partial_\lambda\mathbb Y_{l,j} \right) \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \\[0.5cm]\hfill +\frac{(1+\mathbb Y_{l,j})}{2(\mathbb X_{l,j}+1)^3}\left( \frac{2(1+\mathbb Y_{l,j})}{\mathbb X_{l,j}+1}\partial_\lambda\mathbb X_{l,j} -\partial_\lambda\mathbb Y_{l,j} \right)\ \partial\Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \end{largearray} \label{2pt_dXi} \end{equation} with \begin{equation} \partial_\lambda\mathbb S_{l,j}= \mathfrak g_{l,j} -\frac1{\rho}(\mathbb U\odot\mathfrak g)_{l,j} ,\quad \partial_\lambda\mathbb E=g(0)-\frac1{\rho}\left<\mathbb U\mathfrak g\right> \end{equation} \begin{equation} \partial_\lambda\mathbb X_{l,j}= \frac1{\mathbb L_{l,j}}\left(\partial_\lambda\mathbb K_{l,j}-\partial_\lambda\mathbb L_{l,j}+\partial_\lambda\mathbb I_{l,j}\right) - \frac{\partial_\lambda\mathbb L_{l,j}}{\mathbb L_{l,j}}\mathbb X_{l,j} \end{equation} \begin{equation} \partial_\lambda\mathbb Y_{l,j}= \frac1{\mathbb L_{l,j}}\left(\partial_\lambda\mathbb S_{l,j}-\partial_\lambda\mathbb L_{l,j}+\frac12\partial_\lambda\mathbb J_{l,j}+\partial_\lambda\mathbb G_{l,j}\right) - \frac{\partial_\lambda\mathbb L_{l,j}}{\mathbb L_{l,j}}\mathbb Y_{l,j} \end{equation} \begin{equation} \partial_\lambda\mathbb K_{l,j}:=\gamma_K(\beta_K\partial_\lambda\mathbb S_{l,j}+(1-\beta_K)\partial_\lambda\mathbb E) ,\quad \partial_\lambda\mathbb L_{l,j}:=\gamma_{L,1}(\beta_{L,1}\partial_\lambda\mathbb S_{l,j}+(1-\beta_{L,1})\partial_\lambda\mathbb E) \end{equation} \begin{equation} \partial_\lambda\mathbb I_{l,j}= \frac1{\rho}\gamma_{L,2}\left( \beta_{L,2}(\mathbb U\odot(\mathbb U\partial_\lambda\mathbb S))_{l,j} + (1-\beta_{L,2})\partial_\lambda\mathbb E(\mathbb U\odot\mathbb U)_{l,j} \right) \end{equation} \begin{equation} \partial_\lambda\mathbb J_{l,j}= \frac1{\rho^2}\gamma_{L,3}(1-\beta_{L,3})\partial_\lambda\mathbb E(\mathbb U\odot\mathbb U)^2_{l,j} +\gamma_{L,3}\beta_{L,3}(\mathbb U^{\otimes4}\otimes\partial_\lambda\mathbb S)_{l,j} \end{equation} \begin{equation} \begin{largearray} \partial_\lambda\mathbb G_{l,j}= \frac2{\rho}\gamma_K\alpha_K\beta_K\left(\mathbb U\odot\left(\partial_\lambda\mathbb S\mathbb U\right)\right)_{l,j} +\frac2{\rho}\gamma_K\alpha_K(1-\beta_K)\partial_\lambda\mathbb E(\mathbb U\odot\mathbb U)_{l,j} - \frac1{\rho}\alpha_{L,1}\beta_{L,1}\left(\mathbb U\odot\partial_\lambda\mathbb S\mathbb U^2\right)_{l,j} \\[0.5cm]\hfill - \frac1{\rho}\alpha_{L,1}(1-\beta_{L,1})\partial_\lambda\mathbb E\left(\mathbb U\odot(\mathbb U^2)\right)_{l,j} + \frac2{\rho}\alpha_{L,2}\mathbb U\odot(\partial_\lambda\mathbb I\mathbb U)_{l,j} - \frac1{2\rho}\alpha_{L,3}(\mathbb U\odot\partial_\lambda\mathbb J)_{l,j} . \end{largearray} \end{equation} To evaluate $(\mathbb U\odot\mathfrak g)$ and $\left<\mathbb U\mathfrak g\right>$, we proceed as in\-~(\ref{adotv}) and its analog at $k=0$. To do so, we replace $\hat v$ with $g$ in the computation of $\Upsilon$. \bigskip \point In order to invert the Fourier transform in\-~(\ref{C2fourier}) numerically, we will use a Hann window (see appendix\-~\ref{appendix:hann}) \begin{equation} H_L(k):=\mathds 1_{|k|<\frac L2}\cos^2({\textstyle\frac{\pi|k|}{L}}) .
\label{hann} \end{equation} The parameter $L$ is set using \refname{param:anyeq_window_L}{{\tt window\_L}}. The computation is changed only in that $g$ is changed to $H_L(k)\frac{\sin(|k||x|)}{|k||x|}$. \bigskip \point To compute the maximum of $C_2$, we use a modified Newton algorithm. The initial guess for the maximum is $|x_0|=\rho^{-\frac13}$\refname{param:anyeq_x0}{{\tt x0}}. The modified Newton algorithm is an iteration: \begin{equation} x_{n+1}=x_n+\frac{\partial C_2(|x_n|)}{|\partial^2C_2(|x_n|)|} \label{anyeq_newton_2pt} \end{equation} in which the derivatives are approximated using finite differences: \begin{equation} \partial C_2(x)\approx \frac{C_2(|x|+dx)-C_2(|x|)}{dx} ,\quad \partial^2 C_2(x)\approx \frac{C_2(|x|+dx)+C_2(|x|-dx)-2C_2(|x|)}{dx^2} . \label{anyeq_dx_2pt} \end{equation} This is a modification of the usual Newton iteration $x_n+\partial C_2/\partial^2C_2$ which is designed to follow the direction of the gradient, and thus to move toward a local maximum. In addition, if $|\partial C_2|/|\partial^2 C_2|$ is larger than \refname{param:anyeq_maxstep}{{\tt maxstep}}, then the step is replaced with $\pm$\refname{param:anyeq_maxstep}{{\tt maxstep}}. This prevents the algorithm from stepping over a maximum and land on another, further away. This is useful if one has a good idea of where the global maximum is, and does not want to get trapped in a smaller local maximum. \bigskip \indent The algorithm is run for a maximum of \refname{param:anyeq_maxiter}{{\tt maxiter}} iterations, or until $|x_{n+1}-x_n|$ is smaller than \refname{param:anyeq_tolerance}{{\tt tolerance}}. If the maximal number of iterations is reached, or if the solution found is not a local maximum, then the algorithm fails, and returns $+\infty$. The point thus computed is therefore a local maximum, but it is not guaranteed to be the global maximum. \subsubsubsection{Fourier transform of two-point correlation (spherical average)} The Fourier transform of the two-point correlation function is \begin{equation} \hat c_2(q):= 2\rho\frac{\delta e}{\delta v(q)} \end{equation} and its spherical average is \begin{equation} \hat C_2(|q|):=\frac1{4\pi|q|^2}\int dk\ \delta(|q|-|k|)c_2(k) = \frac\rho{2\pi|q|^2}\int dk\ \delta(|q|-|k|)\frac{\delta e}{\delta\hat v(k)} . \end{equation} \bigskip \point To compute $\frac{\delta e}{\delta\hat v(q)}$, one idea would be to proceed in the same way as for the two-point correlation function, by replacing $\hat v$ with \begin{equation} \hat v+\lambda g(|k|) ,\quad g(|k|):=\frac1{4\pi|q|^2}\delta(|q|-|k|) \end{equation} where $\delta$ is the Dirac-delta function distribution (compare this with\-~(\ref{2pt_addv})). However, the $\delta$ function causes all sorts of problems with the Chebyshev polynomial exansion and the quadratures. 
\bigskip \point Instead, we approximate $\hat C_2$ by convolving it with a normalized Gaussian: let \begin{equation} \Gamma_L(|k|):=\left(\frac{L}{2\pi}\right)^{\frac32}e^{-\frac L2k^2} \label{anyeq_gaussian} \end{equation} \begin{equation} \hat{\mathfrak C}_2(|q|) := \int dp\ \hat C_2(|q-p|)\Gamma_L(|p|) =\int dk\ \int dp\ \frac\rho{2\pi|q-p|^2}\delta(|q-p|-|k|)\frac{\delta e}{\delta\hat v(k)}\Gamma_L(|p|) \end{equation} which by lemma\-~\ref{lemma:bipolar} is \begin{equation} \hat{\mathfrak C}_2(|q|) =\int dk\ \frac\rho{|q|} \frac{\delta e}{\delta\hat v(k)} \int_0^\infty dt\ \int_{||q|-t|}^{|q|+t}ds\ s\frac{\delta(t-|k|)}t\Gamma_L(s) \end{equation} that is \begin{equation} \hat{\mathfrak C}_2(|q|) =\int dk\ \frac{\delta e}{\delta\hat v(k)} \frac{\rho}{|q||k|} \int_{||q|-|k||}^{|q|+|k|}ds\ s\Gamma_L(s) \end{equation} which is the directional derivative of $e$ with respect to $\hat v$ in the direction of $2\rho g$ with \begin{equation} g(|k|):=\frac1{2|q||k|}\int_{||q|-|k||}^{|q|+|k|}ds\ s\Gamma_L(s) = \frac1{2|k|rL}(\Gamma_L(|k|-r)-\Gamma_L(|k|+r)) . \label{anyeq_2pt_fourier_g} \end{equation} Note that \begin{equation} g(0):=\Gamma_L(|q|) . \end{equation} To compute this derivative, we replace $\hat v$ with \begin{equation} \hat v+\lambda g(|k|) \end{equation} so, denoting the solution of the modified equation by $u_\lambda$, for $q\neq 0$, \begin{equation} \hat{\mathfrak C}_2(|q|)=2\rho\partial_\lambda e_\lambda|_{\lambda=0} =\rho^2\left( -\int\frac{dk}{(2\pi)^3}\ g(|k|)\hat u(|k|) -\int\frac{dk}{(2\pi)^3}\ \hat v(|k|)\partial_\lambda\hat u_\lambda(|k|)|_{\lambda=0} \right) . \end{equation} To compute $\partial_\lambda\hat u_\lambda|_{\lambda=0}$, we differentiate $\Xi(\mathbb U,\lambda)=0$: \begin{equation} \partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0} . \end{equation} The computation of $\partial_\lambda\Xi|_{\lambda=0}$ is identical to\-~(\ref{2pt_dXi}), but taking \begin{equation} \mathfrak g_{l,j}=g(|k_{l,j}|) . \end{equation} with $g$ defined in\-~(\ref{anyeq_2pt_fourier_g}). \bigskip \point To compute the maximum of $\hat C_2$, we proceed as for $C_2$, see\-~(\ref{anyeq_newton_2pt})-(\ref{anyeq_dx_2pt}). The only difference is that the algorithm is initialized with $|k_0|=\rho^{\frac13}$\refname{param:anyeq_k0}{{\tt k0}}. \subsubsubsection{Correlation function of uncondensed particles (spherical average)} To compute the correlation function among uncondensed particles, denoted by $\gamma_2(|\xi|)$, we solve the modified {\tt anyeq} (see\-~\cite{Ja23}): \begin{equation} -\Delta u_\mu = (1-u_\mu)v-2\rho K+\rho^2 L -\frac{\rho\mu}{2\pi|\xi|^2} u(|\xi|)\delta(|\xi|-|x|) \end{equation} where $K$ and $L$ are defined as in\-~(\ref{anyeq_K})-(\ref{anyeq_e}) in which $u$ is replaced with $u_\mu$. In Fourier space, \begin{equation} k^2 \hat u_\mu = \hat S-2\rho\hat K+\rho^2\hat L -2\rho\mu u(|\xi|)\frac{\sin(|k||\xi|)}{|k||\xi|} . \end{equation} The uncondensed correlation function is then \begin{equation} \gamma_2(|\xi|)=\partial_\mu e|_{\mu=0} =-\frac\rho2\int dx\ v(x)\partial_\mu u_\mu(x)|_{\mu=0} . \end{equation} To compute the energy in the presence of the parameter $\mu$, we proceed in the same way as for $\mu=0$, the only difference being that the term \begin{equation} \mu g(|k|):= -\mu 2\rho u(|\xi|)\frac{\sin(|k||\xi|)}{|k||\xi|} \end{equation} should formally be added to the right side of\-~(\ref{Omega}). 
In other words, we consider $\mathbb U_{j,l}=u_\mu(|k_{j,l}|)$ and define $\Xi(\mathbb U,\mu)$ in the same way as in\-~(\ref{root_anyeq}), except that $\mathbb Y_{l,j}$ should be replaced by \begin{equation} \mathbb Y_{l,j}= \frac1{\mathbb L_{l,j}}\left(\mathbb S_{l,j}-\mathbb L_{l,j}+\frac12\mathbb J_{l,j}+\mathbb G_{l,j}+\mu g(k_{l,j})\right) . \end{equation} We then solve \begin{equation} \Xi(\mathbb U_\mu,\mu)=0 \end{equation} By differentiating this identity with respect to $\mu$, we find $\partial_\mu u_\mu$: \begin{equation} \partial_\mu \mathbb U|_{\mu=0}=-(D\Xi)^{-1}\partial_\mu\Xi|_{\mu=0} \end{equation} and \begin{equation} \partial_\mu\Xi|_{\mu=0}= -\frac{\partial_\mu\mathbb Y_{l,j}}{2(\mathbb X_{l,j}+1)} \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) -\frac{(1+\mathbb Y_{l,j})\partial_\mu\mathbb Y_{l,j}}{2(\mathbb X_{l,j}+1)^3} \partial\Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) ,\quad \partial_\mu\mathbb Y_{l,j}=\frac{g(k_{l,j})}{\mathbb L_{l,j}} . \end{equation} We then approximate \begin{equation} \gamma_2(\xi)\approx -\frac12\left<\mathfrak v\partial_\mu\mathbb U\right> \end{equation} (see\-~(\ref{avg})). \bigskip \indent In order to avoid numerical oscillations due to the $\sin$ function, we will use a Hann window (see appendix\-~\ref{appendix:hann}) \begin{equation} H_L(k):=\mathds 1_{|k|<\frac L2}\cos^2({\textstyle\frac{\pi|k|}{L}}) . \label{hann} \end{equation} The parameter $L$ is set using \refname{param:anyeq_window_L}{{\tt window\_L}}. The computation is changed only in that $g$ is changed to $H_L(k)g(k)$. \subsubsubsection{Momentum distribution} To compute the momentum distribution (see\-~\cite{CHe21}), we add a parameter $\lambda$ to {\tt anyeq}: \begin{equation} -\Delta u_\lambda(|x|) = (1-u_\lambda(|x|))v(|x|)-2\rho K(|x|)+\rho^2 L(|x|) -2\lambda \hat u_0(q)\cos(q\cdot x) \end{equation} ($\hat u_0\equiv\hat u_\lambda|_{\lambda=0}$). The momentum distribution is then \begin{equation} \mathcal M(q)=\partial_\lambda e|_{\lambda=0} =-\frac\rho2\int\frac{dk}{(2\pi)^3}\ \hat v(k)\partial_\lambda\hat u_\lambda(k)|_{\lambda=0} . \end{equation} Note that the Fourier transform of $2\lambda\hat u_0(q)\cos(q\cdot x)$ is \begin{equation} -(2\pi)^3\lambda\hat u_0(q)(\delta(q+k)+\delta(q-k)) . \label{fouriermomentum} \end{equation} The presence of delta functions does not play well with the Chebyshev polynomial expansion and the quadratures. \bigskip \point We will consider two different ways of getting around this. \bigskip \subpoint One idea is to compute a regularization of $\mathcal M(q)$ by convolving it with a peaked spherically symmetric function. Let $\Gamma_L$ denote the Gaussian with variance $1/\sqrt L$: \begin{equation} \Gamma_L(|k|):=\left(\frac{L}{2\pi}\right)^{\frac32}e^{-\frac L2k^2} . \label{anyeq_gaussian_momt} \end{equation} In fact, we will scale $L$ with $k$, and set $L$ to \begin{equation} L=\sqrt{\mathrm{\refname{param:anyeq_momentum_window_L}{{\tt window\_L}}}}/k^2 . \end{equation} To compute \begin{equation} \mathfrak M(q):=\mathcal M\ast\Gamma_L(q) \end{equation} we solve the equation \begin{equation} -\Delta u_\lambda(|x|) = (1-u_\lambda(|x|))v(|x|)-2\rho K(|x|)+\rho^2 L(|x|) -2\lambda\int dk\ \hat u_0(k)\cos(k\cdot x)\Gamma_L(q-k) . \end{equation} Note that the Fourier transform of \begin{equation} -2\lambda\int dk\ \hat u_0(k)\cos(k\cdot x)\Gamma_L(q-k) \end{equation} is \begin{equation} -(2\pi)^3\lambda\hat u_0(q)(\Gamma_L(k+q)+\Gamma_L(k-q)) . 
\end{equation} Since the ground state is unique, $\mathcal M$ is spherically symmetric. The term $\Gamma_L(k\pm q)$ is not, so we take its spherical average (which will not change the final result): by lemma\-~\ref{lemma:bipolar}, \begin{equation} -\frac1{4\pi r^2}\int dq\ \delta(|q|-r)(2\pi)^3\lambda\hat u_0(q)(\Gamma_L(k+q)+\Gamma_L(k-q)) = -\frac{(2\pi)^3}{|k|r}\lambda\hat u_0(r)\int_{||k|-r|}^{|k|+r} ds\ s\Gamma_L(s) . \end{equation} In this setup, the approximation of the delta function is thus \begin{equation} \tilde\delta(|k|,r):= \frac1{2|k|r}\int_{||k|-r|}^{|k|+r} ds\ s\Gamma_L(s) = \frac{1}{2|k|rL}(\Gamma_L(|k|-r)-\Gamma_L(|k|+r)) . \end{equation} To choose this method, set \refname{param:anyeq_window_L}{{\tt window\_L}} to a finite value. \bigskip \subpoint Another approach is to construct a discrete analog of the delta-functions in\-~(\ref{fouriermomentum}). The starting point we take is, for $q\neq0$, \begin{equation} \int dk\ f(|k|)\delta(k-q) \equiv\frac1{4\pi|q|^2}\int dk\ f(|k|)\delta(|k|-|q|) =f(|q|) \end{equation} so, when approximating the integral according to\-~(\ref{anyeq_integration}), we find \begin{equation} \frac{\pi}{8|q|^2}\sum_{l=0}^{J-1} (\tau_{l+1}-\tau_l) \sum_{j=1}^Nw_j \cos({\textstyle\frac{\pi x_j}2})(1+\vartheta_l(x_j))^2\vartheta_l^2(x_j)f(\vartheta_l(x_j))\tilde\delta(\vartheta_l(x_j),|q|) = f(|q|) \end{equation} where $\tilde\delta$ is the approximation of the delta-function. Since \begin{equation} \vartheta_l(x_j)\equiv k_{l,j} \end{equation} (see\-~(\ref{chebyshevvars})), we define the approximation of the delta function as \begin{equation} \tilde\delta(k_{l,j},k_{l',i}) := \delta_{l,l'}\delta_{j,i} \frac{8}{\pi} \left( (\tau_{l+1}-\tau_l) w_j \cos({\textstyle\frac{\pi x_j}2}) (1+k_{l,j})^2 \right)^{-1} . \end{equation} To choose this method, set \refname{param:anyeq_window_L}{{\tt window\_L}} to {\tt Inf}. {\color{red}This method seems to yield some fairly poor results!} \bigskip \point To compute the momentum distribution at $q$, we define $\Xi(\mathbb U,\lambda)$ by formally adding \begin{equation} -2(2\pi)^3\lambda \hat u_0(|q|)\tilde\delta(k_{l,j},|q|) \end{equation} to $\mathbb G_{l,j}$, which corresponds to replacing $\mathbb Y$ with \begin{equation} \mathbb Y_{l,j}= \frac1{\mathbb L_{l,j}}\left(\mathbb S_{l,j}-\mathbb L_{l,j}+\frac12\mathbb J_{l,j}+\mathbb G_{l,j} -2(2\pi)^3\lambda \hat u(|q|)\tilde\delta(k_{l,j},|q|) \right) . \end{equation} Then we solve $\Xi(\mathbb U,\lambda)=0$, and differentiate: \begin{equation} \partial_\lambda\mathbb U|_{\lambda=0}=-(D\Xi)^{-1}\partial_\lambda\Xi|_{\lambda=0} . \end{equation} Finally, \begin{equation} \partial_\lambda\Xi_{l,j}|_{\lambda=0} = -\partial_\lambda\mathbb Y_{l,j}|_{\lambda=0}\left( \frac1{2(\mathbb X_{l,j}+1)} \Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) +\frac{(1+\mathbb Y_{l,j})}{2(\mathbb X_{l,j}+1)^3} \partial\Phi\left({\textstyle\frac{1+\mathbb Y_{l,j}}{(\mathbb X_{l,j}+1)^2}}\right) \right) \end{equation} with \begin{equation} \partial_\lambda\mathbb Y_{l,j}|_{\lambda=0}= -\frac{2(2\pi)^3}{\mathbb L_{l,j}} \hat u(|q|)\tilde\delta(k_{l,j},|q|) . \end{equation} \bigskip \subsubsubsection{Compressibility} The compressibility is defined as \begin{equation} \chi:=\frac1{\rho^2\partial_\rho^2(\rho e)} =\frac1{\partial_{\log\rho}^2(\rho e)-\partial_{\log\rho}(\rho e)} .
\end{equation} We approximate these derivatives by finite differences: \begin{equation} \partial_{\log\rho}^2 f(\rho) \approx \frac{f(\rho_{j+1})+f(\rho_{j-1})-2f(\rho_j)}{(\log\rho_{j+2}-\log\rho_{j+1})(\log\rho_{j+1}-\log\rho_j)} \end{equation} and \begin{equation} \partial_{\log\rho} f(\rho) \approx \frac12\left( \frac{f(\rho_{j+1})-f(\rho_j)}{\log\rho_{j+1}-\log\rho_{j}} + \frac{f(\rho_{j})-f(\rho_{j-1})}{\log\rho_{j}-\log\rho_{j-1}} \right) . \end{equation} \subsection{{\tt simpleq-Kv}}\label{sec:simpleq-Kv} \indent The method is used to compute observables for the simple equation \begin{equation} -\Delta u = v(1-u)-4eu+2e\rho u\ast u ,\quad e=\frac\rho2\int dx\ (1-u(|x|))v(|x|) . \label{simpleq} \end{equation} One can show\-~\cite[Theorem\-~1.6]{CJL21} that the condensate fraction is \begin{equation} \eta=\frac{\rho\int v(x)\mathfrak Ku(x)\ dx}{1-\rho\int v(x)\mathfrak K(2u(x)-\rho u\ast u(x))\ dx} \label{simpleq-Kv_eta} \end{equation} with \begin{equation}\label{bg6} \mathfrak K = (-\Delta + v + 4e(1 - \rho u\ast))^{-1} . \label{Kv} \end{equation} Similarly, the two-point correlation function is\-~\cite[(45)]{CHe21} \begin{equation} C_2= \rho^2(1-u)+ \rho^2\frac{\mathfrak Kv(1-u)-2\rho u\ast \mathfrak Kv+\rho^2u\ast u\ast \mathfrak Kv}{1-\rho\int dx\ v(x)\mathfrak K(2u(x)-\rho u\ast u(x))} . \label{simpleq-Kv_C2} \end{equation} Thus, using the fact that $\mathfrak K$ is self-adjoint, we can compute these observables of the simple equation directly from the knowledge of $\mathfrak Kv$. \bigskip \subsubsection{Usage} \indent The computation uses the same approximation scheme as \refname{sec:anyeq}{{\tt anyeq}}, as well as using the solution of \refname{sec:anyeq}{{\tt anyeq}}. As such, it takes a similar list of parameters: \refname{param:anyeq_rho}{{\tt rho}}, \refname{param:anyeq_tolerance}{{\tt tolerance}}, \refname{param:anyeq_maxiter}{{\tt maxiter}}, \refname{param:anyeq_P}{{\tt P}}, \refname{param:anyeq_N}{{\tt N}}, \refname{param:anyeq_J}{{\tt J}}, \refname{param:anyeq_minlrho_init}{{\tt minlrho\_init}}, \refname{param:anyeq_nlrho_init}{{\tt nlrho\_init}}. \bigskip The available {\tt commands} are the following. \bigskip \begin{itemize} \item\makelink{command:simpleq-Kv_Kv}{} {\tt Kv}: compute $\mathfrak Kv$ as a function of $|x|$.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:simpleq-Kv_xmin}{} {\tt xmin} ({\tt Float64}, default: 0): minimum of the range of $|x|$ to be printed. \item\makelink{param:simpleq-Kv_xmax}{} {\tt xmax} ({\tt Float64}, default: 100): maximum of the range of $|x|$ to be printed. \item\makelink{param:simpleq-Kv_nx}{} {\tt nx} ({\tt Int64}, default: 100): number of points to print (linearly spaced). \end{itemize} \underline{Output} (one line for each value of $|x|$): [$|x|$] [$\mathfrak K v$] \item\makelink{command:simpleq-Kv_condensate-fraction}{} {\tt condensate-fraction}: compute the uncondensed fraction as a function of $\rho$\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:simpleq-Kv_minlrho}{} {\tt minlrho} ({\tt Float64}, default: $10^{-6}$): minimal value for $\log_{10}\rho$. \item\makelink{param:simpleq-Kv_maxlrho}{} {\tt maxlrho} ({\tt Float64}, default: $10^{2}$): maximal value for $\log_{10}\rho$. \item\makelink{param:simpleq-Kv_nlrho}{} {\tt nlrho} ({\tt Int64}, default: $100$): number of values for $\rho$ (spaced logarithmically). \item\makelink{param:simpleq-Kv_minrho}{} {\tt minrho} ({\tt Float64}, default: $10^{-6}$): minimal value for $\rho$. 
\item\makelink{param:simpleq-Kv_maxrho}{} {\tt maxrho} ({\tt Float64}, default: $10^{2}$): maximal value for $\rho$. \item\makelink{param:simpleq-Kv_nrho}{} {\tt nrho} ({\tt Int64}, default: $0$): number of values for $\rho$ (spaced linearly). If {\tt nrho} is $\neq0$, then the linear spacing will be used, and \refname{param:simpleq-Kv_minlrho}{{\tt minlrho}}, \refname{param:simpleq-Kv_maxlrho}{{\tt maxlrho}}, \refname{param:simpleq-Kv_nlrho}{{\tt nlrho}} will be ignored. Otherwise, the logarithmic spacing will be used and \refname{param:simpleq-Kv_minrho}{{\tt minrho}}, \refname{param:simpleq-Kv_maxrho}{{\tt maxrho}} will be ignored. \item\makelink{param:simpleq-Kv_rhos}{} {\tt rhos} ({\tt Array\{Float64\}}): list of values for $\rho$, specified as a `{\tt,}' separated list. This parameter takes precedence over \refname{param:simpleq-Kv_minlrho}{{\tt minlrho}}, \refname{param:simpleq-Kv_maxlrho}{{\tt maxlrho}}, \refname{param:simpleq-Kv_nlrho}{{\tt nlrho}}, \refname{param:simpleq-Kv_minrho}{{\tt minrho}}, \refname{param:simpleq-Kv_maxrho}{{\tt maxrho}}, \refname{param:simpleq-Kv_nrho}{{\tt nrho}}. \end{itemize} \underline{Output} (one line for each value of $\rho$): [$\rho$] [$\eta$] \item\makelink{command:simpleq-Kv_2pt}{} {\tt 2pt}: compute the two-point correlation as a function of $|x|$.\par \underline{Extra parameters}: same as \refname{command:simpleq-Kv_Kv}{{\tt Kv}}.\par \underline{Output} (one line for each value of $|x|$): [$|x|$] [$C_2$] \end{itemize} \subsubsection{Description} \indent In Fourier space\-~(\ref{anyeq_fourier}), \begin{equation} \hat{\mathfrak K}^{-1}f := \int dx\ e^{ikx} (-\Delta+4e(1-\rho u\ast)+v)f =(k^2+4e(1-\rho\hat u(|k|))+\hat v\hat\ast)\hat f \end{equation} where $\hat\ast$ is defined in\-~(\ref{anyeq_hatast}). We follow the same approximation scheme as \refname{sec:anyeq}{{\tt anyeq}}: \begin{equation} \hat{\mathfrak K}^{-1}f(k_{l,j}) \approx(k_{l,j}^2+4e(1-\mathbb U_{l,j}))\mathfrak f_{l,j} + (\mathfrak v\odot\mathfrak f)_{l,j} \end{equation} with $\mathfrak f_{l,j}:=f(k_{l,j})$, $\mathbb U_{l,j}:=\rho u(|k_{l,j}|)$, $\odot$ is defined in\-~(\ref{odot}), and $k_{l,j}$ is defined in\-~(\ref{klj}). Therefore, we approximate the operator $\hat{\mathfrak K}^{-1}$ by a matrix: \begin{equation} \hat{\mathfrak K}^{-1}f(k_{l,j})\approx \sum_{l'=0}^{J-1}\sum_{i=1}^N M_{l,j;l',i}\mathfrak f_{l',i} \end{equation} with, by\-~(\ref{odot}) and\-~(\ref{frakF}), \begin{equation} \begin{largearray} M_{l,j;l',i}:= \delta_{l',l}\delta_{j,i}(k_{l,j}^2+4e(1-\mathbb U_{l,j})) \\\hfill + \frac1{4\pi^2|k_{l,j}|}\sum_{n,m=0}^P\sum_{l''=0}^{J-1}\mathfrak F_{l'',n}^{(\nu_v)}(\mathfrak v) A_{l'',n;l',m}^{(\nu_v,\nu_f)}(|k_{l,j}|) \frac{\frac{2-\delta_{m,0}}2w_i\cos(\frac{m\pi(1+x_i)}2)}{(1-\frac{\tau_{l'+1}-\tau_{l'}}2\sin(\frac{\pi x_i}2)+\frac{\tau_{l'+1}+\tau_{l'}}2)^{\nu_f}} . \end{largearray} \end{equation} Defining $\mathds 1_{l',i}$ as the vector whose only non-vanishing component is that indexed by $l',i$ which is equal to 1, we can rewrite \begin{equation} M_{l,j;l',i}:= \delta_{l',l}\delta_{j,i}(k_{l,j}^2+4e(1-\mathbb U_{l,j})) + (\mathfrak v\odot\mathds 1_{l',i})_{l,j} . \end{equation} Thus \begin{equation} \mathfrak Kv\approx M^{-1}\mathfrak v . 
\end{equation} \bigskip \indent To compute\-~(\ref{simpleq-Kv_eta}), we write \begin{equation} \eta=\frac{\rho\int\frac{dk}{(2\pi)^3} \hat u(k)\hat{\mathfrak K}\hat v(k)}{1-\rho\int \frac{dk}{(2\pi)^3}(2\hat u(k)-\rho\hat u^2(k))\hat{\mathfrak K}\hat v(k)} \end{equation} which we approximate as \begin{equation} \eta\approx\frac{\left<\mathbb UM^{-1}\mathfrak v\right>}{1-\left<(2\mathbb U-\mathbb U^2)M^{-1}\mathfrak v\right>} \end{equation} where $\left<\cdot\right>$ is defined in\-~(\ref{avg}). We can thus compute $\eta$ using the solution $\mathbb U$ computed by \refname{sec:anyeq}{{\tt anyeq}}. \bigskip \indent To compute\-~(\ref{simpleq-Kv_C2}), we write \begin{equation} C_2= \rho^2(1-u(x))+ \rho^2\frac{\mathfrak Kv(x)(1-u(x))+\int\frac{dk}{(2\pi)^3}(-2\rho\hat u\hat{\mathfrak K}\hat v+\rho^2\hat u^2\hat{\mathfrak K}\hat v)}{1-\rho\int\frac{dk}{(2\pi)^3}\ (2\hat u(k)-\rho \hat u^2(k))\hat{\mathfrak K}\hat v(k)} \end{equation} which we approximate as \begin{equation} C_2\approx \rho^2(1-u(|x|))+ \rho^2\frac{\mathfrak Kv(|x|)(1-u(|x|))+\left<(-2\mathbb U M^{-1}\mathfrak v+\mathbb U^2M^{-1}\mathfrak v)\right>}{1-\left<(2\mathbb U-\mathbb U^2)M^{-1}\mathfrak v\right>} , \end{equation} in which $u(|x|)$ and $\mathfrak Kv(|x|)$ are evaluated by inverting the Fourier transforms of $\mathbb U/\rho$ and $M^{-1}\mathfrak v$, as in \refname{sec:anyeq}{{\tt anyeq}}. We can thus compute $C_2$ using the solution $\mathbb U$ computed by \refname{sec:anyeq}{{\tt anyeq}}. \subsection{\tt simpleq-hardcore}\label{sec:simpleq-hardcore} \indent This method is used to solve the Simple equation with a hardcore potential: \begin{equation} \left\{\begin{array}{>{\displaystyle}ll} (-\Delta+4e) u(x)=2e\rho u\ast u(x) &\mathrm{for\ }|x|>1\\ u(x)=1 &\mathrm{for\ }|x|\leqslant1 \end{array}\right. \label{linearhc} \end{equation} with \begin{equation} e=-\frac{4\pi\rho\partial u|_{|x|\searrow 1}}{2(1-\frac83\pi\rho+\rho^2\int_{|x|<1}dx\ u\ast u(x))} . \label{hardcore_energy} \end{equation} \bigskip This equation is solved in $x$-space, and as such is very different from \refname{sec:easyeq}{{\tt easyeq}}, and takes significantly longer to run. \bigskip \subsubsection{Usage} \indent Unless otherwise noted, this method takes the following parameters (specified via the {\tt [-p params]} flag, as a `{\tt;}' separated list of entries). \begin{itemize} \item\makelink{param:simpleq-hardcore_rho}{} {\tt rho} ({\tt Float64}, default: $10^{-6}$): density $\rho$. \item\makelink{param:simpleq-hardcore_tolerance}{} {\tt tolerance} ({\tt Float64}, default: $10^{-11}$): maximal size of final step in Newton iteration. \item\makelink{param:simpleq-hardcore_maxiter}{} {\tt maxiter} ({\tt Int64}, default: 21): maximal number of iterations of the Newton algorithm before giving up. \item\makelink{param:simpleq-hardcore_P}{} {\tt P} ({\tt Int64}, default: 11): order of all Chebyshev polynomial expansions (denoted by $P$ below). \item\makelink{param:simpleq-hardcore_N}{} {\tt N} ({\tt Int64}, default: 12): order of all Gauss quadratures (denoted by $N$ below). \item\makelink{param:simpleq-hardcore_J}{} {\tt J} ({\tt Int64}, default: 10): number of splines (denoted by $J$ below). \end{itemize} \bigskip The available {\tt commands} are the following. \begin{itemize} \item\makelink{command:simpleq-hardcore_energy_rho}{} {\tt energy\_rho}: compute the energy $e$ as a function of $\rho$.\par \underline{Disabled parameters}: \refname{param:simpleq-hardcore_rho}{{\tt rho}}.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:simpleq-hardcore_minlrho}{} {\tt minlrho} ({\tt Float64}, default: $10^{-6}$): minimal value for $\log_{10}\rho$.
\subsubsection{Description} \indent In order to carry out the computation of the solution of\-~(\ref{linearhc}) and compute the condensate fraction at the same time, we will consider the equation with an added parameter $\mu>0$: \begin{equation} (-\Delta+4\epsilon)u=2e\rho u\ast u ,\quad \epsilon:=e+\frac\mu2 \label{hardcore_simple} \end{equation} for $|x|>1$.
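Note that, since $\epsilon=e+\frac\mu2$, at fixed $e$ we have $\partial_\mu=\frac12\partial_\epsilon=\frac1{4\sqrt\epsilon}\partial_{\sqrt\epsilon}$; this is the form in which the $\mu$-derivatives needed for the condensate fraction will be computed below.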
\bigskip \subsubsubsection{Energy} To compute the energy $e$ of this equation, with the extra parameter $\mu$, we consider the limit of the soft sphere potential $\lambda\mathds 1_{|x|<1}$ (see\-~(\ref{easyeq}) with $\beta_K=\beta_L=0$): \begin{equation} (-\Delta+2\mu+4e)u(x)=s_\lambda(x)+2e\rho u\ast u ,\quad s_\lambda(x):=\lambda(1-u(x))\mathds 1_{|x|\leqslant 1} ,\quad \frac{2e}\rho=\int dx\ s_\lambda(x) . \end{equation} Furthermore, since $\partial u$ need not be continuous at $|x|=1$, by integrating $-\Delta u$ over a thin spherical shell of radius 1, we find that, for $|x|\leqslant 1$, \begin{equation} -\Delta u(x)=-\delta(|x|-1)\partial u|_{|x|\searrow1} \end{equation} so, formally, \begin{equation} s_\infty(x)=\mathds 1_{|x|\leqslant 1}(2e(2-\rho u\ast u(x))+2\mu)-\delta(|x|-1)\partial u|_{|x|\searrow1} \end{equation} and \begin{equation} \frac{2e}\rho=\int dx\ s_\infty(x) = 2e\left(\frac{8\pi}3-\rho\int_{|x|<1}u\ast u(x)\right)+\frac{8\pi}3\mu-4\pi\partial u|_{|x|\searrow1} . \end{equation} Therefore, \begin{equation} e=\frac{2\pi\rho(\frac23\mu-\partial u|_{|x|\searrow1})}{1-\frac{8\pi}3\rho+\rho^2\int_{|x|<1}dx\ u\ast u(x)} . \label{emu} \end{equation} \bigskip \subsubsubsection{Integral equation} We turn the differential equation in\-~(\ref{hardcore_simple}) into an integral equation. Let \begin{equation} w(|x|):=|x|u(x) \end{equation} we have, for $r>1$, \begin{equation} (-\partial_r^2+4\epsilon)w(r)=2e\rho ru\ast u(r) . \end{equation} Furthermore, the bounded solution of \begin{equation} (-\partial_r^2+\alpha^2)w(r)=F(r) ,\quad w(1)=1 \end{equation} is \begin{equation} w(r)=e^{-\alpha(r-1)}+\int_0^\infty ds\ \frac{F(s+1)}{2\alpha}\left(e^{-\alpha|(r-1)-s|}-e^{-\alpha((r-1)+s)}\right) \label{solw} \end{equation} so, for $r>1$, \begin{equation} u(r)= \frac 1re^{-2\sqrt\epsilon(r-1)} + \frac{\rho e}{2r\sqrt\epsilon}\int_0^\infty ds\ (s+1)(u\ast u(s+1))\left(e^{-2\sqrt\epsilon|(r-1)-s|}-e^{-2\sqrt\epsilon((r-1)+s)}\right) . \end{equation} In order to compute the integral more easily, we split it: \begin{equation} \begin{largearray} u(r)= \frac 1re^{-2\sqrt\epsilon(r-1)} + \frac{\rho e}{r\sqrt\epsilon}\int_0^{r-1} ds\ (s+1)(u\ast u(s+1))\sinh(2\sqrt\epsilon s)e^{-2\sqrt\epsilon(r-1)} \\[0.5cm]\hfill + \frac{\rho e}{r\sqrt\epsilon}\sinh(2\sqrt\epsilon(r-1))\int_{r-1}^\infty ds\ (s+1)(u\ast u(s+1))e^{-2\sqrt\epsilon s} . \end{largearray} \end{equation} We change variables in the last integral: \begin{equation} \begin{largearray} u(r)= \frac 1re^{-2\sqrt\epsilon(r-1)} + \frac{\rho e}{r\sqrt\epsilon}\int_0^{r-1} ds\ (s+1)(u\ast u(s+1))\sinh(2\sqrt\epsilon s)e^{-2\sqrt\epsilon(r-1)} \\[0.5cm]\hfill + \frac{\rho e}{2r\sqrt\epsilon}\left(1-e^{-4\sqrt\epsilon(r-1)}\right)\int_0^\infty d\sigma\ (\sigma+r)(u\ast u(\sigma+r))e^{-2\sqrt\epsilon\sigma} . \end{largearray} \label{uinteq} \end{equation} \bigskip \subsubsubsection{The auto-convolution term} We split \begin{equation} u(r)=\mathds 1_{r>1}u_+(r-1)+\mathds 1_{r\leqslant 1} . 
\end{equation} in terms of which \begin{equation} u\ast u= \mathds 1_{r\leqslant 1}\ast\mathds 1_{r\leqslant 1} +2\mathds 1_{r\leqslant 1}\ast(u_+(r-1)\mathds 1_{r>1}) +(\mathds 1_{r>1}u_+(r-1))\ast(u_+(r-1)\mathds 1_{r>1}) \label{u*u_dcmp} \end{equation} In bipolar coordinates (see lemma\-~\ref{lemma:bipolar}), \begin{equation} \mathds 1_{r\leqslant 1}\ast\mathds 1_{r\leqslant 1}(r) = \frac{2\pi}{r}\int_0^1 dt\ t\int_{|r-t|}^{r+t}ds\ s\mathds 1_{s\leqslant 1} \label{1*1} \end{equation} and, if $r\geqslant 0$ and $t\geqslant 0$, then \begin{equation} \int_{|r-t|}^{r+t}ds\ s\mathds 1_{s\leqslant 1} = \mathds 1_{r-1\leqslant t\leqslant r+1}\int_{|r-t|}^{\min(1,r+t)}ds\ s = \mathds 1_{r-1\leqslant t\leqslant r+1} \left( \mathds 1_{t\leqslant 1-r} 2rt + \mathds 1_{t>1-r} \frac{1-(r-t)^2}2 \right) . \label{int1} \end{equation} Therefore, if $r\geqslant 1$, \begin{equation} \mathds 1_{r\leqslant 1}\ast\mathds 1_{r\leqslant 1}(r) = \mathds 1_{r\leqslant 2} \frac{2\pi}{r}\int_{r-1}^1 dt\ t \frac{1-(r-t)^2}2 = \mathds 1_{r\leqslant 2}\frac\pi{12}(r-2)^2(r+4) \label{u*u_1} \end{equation} and if $r<1$, \begin{equation} \mathds 1_{r\leqslant 1}\ast\mathds 1_{r\leqslant 1}(r) = \frac{2\pi}r\int_0^1dt\ t \left( \mathds 1_{t\leqslant 1-r} 2rt + \mathds 1_{t>1-r} \frac{1-(r-t)^2}2 \right) \end{equation} so \begin{equation} \mathds 1_{r\leqslant 1}\ast\mathds 1_{r\leqslant 1}(r) =\frac43\pi(1-r)^3 +\frac\pi{12}r(36-48r+17r^2) = \frac\pi{12}(r-2)^2(r+4) . \end{equation} Thus, (\ref{u*u_1}) holds for all $r$. Furthermore, \begin{equation} 2\mathds 1_{r\leqslant 1}\ast(u_+(r-1)\mathds 1_{r>1}) = \frac{4\pi}{r}\int_1^\infty dt\ tu_+(t-1)\int_{|r-t|}^{r+t}ds\ s\mathds 1_{s\leqslant 1} \end{equation} so, if $r\geqslant 0$ then, by\-~(\ref{int1}), \begin{equation} 2\mathds 1_{r\leqslant 1}\ast(u_+(r-1)\mathds 1_{r>1})(r) = \frac{2\pi}{r}\int_{\max(1,r-1)}^{r+1} dt\ tu_+(t-1)(1-(r-t)^2) . \label{u*u_cross} \end{equation} Finally, if $r\geqslant 0$ then \begin{equation} (\mathds 1_{r>1}u_+(r-1))\ast(u_+(r-1)\mathds 1_{r>1})(r) = \frac{2\pi}{r}\int_1^\infty dt\ tu_+(t-1)\int_{\max{(1,|r-t|)}}^{r+t}ds\ su_+(s-1) . \label{u*u_u} \end{equation} Thus, by\-~(\ref{u*u_1}), (\ref{u*u_cross}) and\-~(\ref{u*u_u}), for $r\geqslant -1$, \begin{equation} \begin{largearray} u\ast u(1+r)= \mathds 1_{r\leqslant 1}\frac\pi{12}(r-1)^2(r+5) + \frac{2\pi}{r+1}\int_{\max(0,r-1)}^{r+1} dt\ (t+1)u_+(t)(1-(r-t)^2) \\[0.5cm]\hfill + \frac{2\pi}{r+1}\int_0^\infty dt\ (t+1)u_+(t)\int_{\max{(0,|r-t|-1)}}^{r+t+1}ds\ (s+1)u_+(s) . \end{largearray} \end{equation} We then compactify the integrals by changing variables to $\tau=\frac{1-t}{1+t}$ and $\sigma=\frac{1-s}{1+s}$: \begin{equation} \begin{largearray} u\ast u(1+r)= \mathds 1_{r\leqslant 1}\frac\pi{12}(r-1)^2(r+5) + \frac{8\pi}{r+1}\int_{-\frac r{2+r}}^{\min(1,\frac{2-r}r)} d\tau\ \frac1{(1+\tau)^3}u_+({\textstyle\frac{1-\tau}{1+\tau}})(1-(r-{\textstyle\frac{1-\tau}{1+\tau}})^2) \\[0.5cm]\hfill + \frac{32\pi}{r+1}\int_{-1}^1 d\tau\ \frac1{(1+\tau)^3}u_+({\textstyle\frac{1-\tau}{1+\tau}})\int_{\alpha_-(1+r,\tau)}^{\min(1,\beta_+(r,\tau))}d\sigma\ \frac1{(1+\sigma)^3}u_+({\textstyle\frac{1-\sigma}{1+\sigma}}) . \end{largearray} \label{u*u_tau} \end{equation} with \begin{equation} \alpha_-(1+r,\tau):=\frac{-r-\frac{1-\tau}{1+\tau}}{2+r+\frac{1-\tau}{1+\tau}} ,\quad \beta_+(r,\tau):=\frac{2-|r-\frac{1-\tau}{1+\tau}|}{|r-\frac{1-\tau}{1+\tau}|} \end{equation} (note that $\alpha_-$ is the same as the one defined for \refname{sec:anyeq}{{\tt anyeq}}).
Finally, note that, if $\beta_+<1$, that is, if $|r-\frac{1-\tau}{1+\tau}|>1$, then \begin{equation} \beta_+(r,\tau)=\alpha_+({\textstyle |r-\frac{1-\tau}{1+\tau}|-1+\frac{1-\tau}{1+\tau}},\tau) \end{equation} where \begin{equation} \alpha_+(r,\tau):=\frac{1-|r-\frac{1-\tau}{1+\tau}|}{1+|r-\frac{1-\tau}{1+\tau}|} \end{equation} is the same as is defined for \refname{sec:anyeq}{{\tt anyeq}} (\ref{anyeq_alpha}). \bigskip \subsubsubsection{Chebyshev polynomial expansion} We use the same interpolation as we used in \refname{sec:anyeq}{{\tt anyeq}}: (\ref{a_chebyshev_spline}) \begin{equation} \frac1{(1+\tau)^{\nu_u}}u_+({\textstyle\frac{1-\tau}{1+\tau}})=\sum_{l=0}^{J-1}\mathds 1_{\tau_l\leqslant\tau<\tau_{l+1}}\sum_{n=0}^\infty \mathbf F_{l,n}^{(\nu_u)}(u_+) T_n({\textstyle\frac{2\tau-(\tau_l+\tau_{l+1})}{\tau_{l+1}-\tau_l}}) \label{a_chebyshev_spline} \end{equation} ($J$ is set by the parameter \refname{param:simpleq-hardcore_J}{{\tt J}}) with \begin{equation} \mathbf F_{l,n}^{(\nu_u)}(u_+):= \frac{2-\delta_{n,0}}{\pi} \int_0^{\pi} d\theta\ \frac{\cos(n\theta)}{(1+\frac{\tau_{l+1}-\tau_l}2\cos(\theta)+\frac{\tau_{l+1}+\tau_l}2)^{\nu_u}} u_+({\textstyle\frac{2-(\tau_{l+1}-\tau_l)\cos(\theta)-(\tau_{l+1}+\tau_l)}{2+(\tau_{l+1}-\tau_l)\cos(\theta)+(\tau_{l+1}+\tau_l)}}) \label{bfF} \end{equation} and we take $\nu_u$ to be the decay exponent of $u$, which we will assume is $\nu_u=4$. In particular, by\-~(\ref{u*u_tau}), \begin{equation} u\ast u(1+r)=\mathds 1_{r\leqslant 1}B^{(0)}(r)+\sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)B^{(1)}_{l,n}(r)+\sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)B^{(2)}_{l,n;l',m}(r) \label{u*u} \end{equation} with \begin{equation} B^{(0)}(r):= \frac\pi{12}(r-1)^2(r+5) \end{equation} \begin{equation} B^{(1)}_{l,n}(r):= \mathds 1_{\tau_l<\frac{2-r}r}\mathds 1_{\tau_{l+1}>-\frac r{2+r}} \frac{8\pi}{r+1}\int_{\max(\tau_l,-\frac r{2+r})}^{\min(\tau_{l+1},\frac{2-r}r)} d\tau\ \frac1{(1+\tau)^{3-\nu_u}}(1-(r-{\textstyle\frac{1-\tau}{1+\tau}})^2)T_n({\textstyle\frac{2\tau-(\tau_l+\tau_{l+1})}{\tau_{l+1}-\tau_l}}) \end{equation} \begin{equation} \begin{largearray} B^{(2)}_{l,n;l',m}(r):= \frac{32\pi}{r+1}\int_{\tau_l}^{\tau_{l+1}} d\tau\ \frac1{(1+\tau)^{3-\nu_u}}T_n({\textstyle\frac{2\tau-(\tau_l+\tau_{l+1})}{\tau_{l+1}-\tau_l}}) \mathds 1_{\tau_{l'}<\alpha_+(|r-\frac{1-\tau}{1+\tau}|-\frac{2\tau}{1+\tau},\tau)}\mathds 1_{\tau_{l'+1}>\alpha_-(1+r,\tau)} \cdot\\[0.5cm]\hfill\cdot \int_{\max(\tau_{l'},\alpha_-(1+r,\tau))}^{\min(\tau_{l'+1},\alpha_+(|r-\frac{1-\tau}{1+\tau}|-\frac{2\tau}{1+\tau},\tau))}d\sigma\ \frac1{(1+\sigma)^{3-\nu_u}}T_m({\textstyle\frac{2\sigma-(\tau_{l'}+\tau_{l'+1})}{\tau_{l'+1}-\tau_{l'}}}) .
\end{largearray} \end{equation} Thus, by\-~(\ref{uinteq}), for $r>0$, \begin{equation} u_+(r,e,\epsilon)= D^{(0)}(r,e,\epsilon) + \sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)D^{(1)}_{l,n}(r,e,\epsilon) + \sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)D^{(2)}_{l,n;l',m}(r,e,\epsilon) \label{hardcore_u+} \end{equation} with \begin{equation} \begin{largearray} D^{(0)}(r,e,\epsilon):= \frac 1{r+1}e^{-2\sqrt\epsilon r} + \frac{\rho e}{(r+1)\sqrt\epsilon}\int_0^{\min(1,r)} ds\ (s+1)B^{(0)}(s)\sinh(2\sqrt\epsilon s)e^{-2\sqrt\epsilon r} \\[0.5cm]\hfill + \mathds 1_{r\leqslant 1} \frac{\rho e}{2(r+1)\sqrt\epsilon}\left(1-e^{-4\sqrt\epsilon r}\right)\int_0^{1-r} d\sigma\ (\sigma+r+1)B^{(0)}(\sigma+r)e^{-2\sqrt\epsilon\sigma} \end{largearray} \label{D0} \end{equation} \begin{equation} \begin{largearray} D^{(1)}_{l,n}(r,e,\epsilon):= \frac{\rho e}{(r+1)\sqrt\epsilon}\int_0^r ds\ (s+1)B^{(1)}_{l,n}(s)\sinh(2\sqrt\epsilon s)e^{-2\sqrt\epsilon r} \\[0.5cm]\hfill + \frac{\rho e}{2(r+1)\sqrt\epsilon}\left(1-e^{-4\sqrt\epsilon r}\right)\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n}^{(1)}(\sigma+r)e^{-2\sqrt\epsilon\sigma} . \end{largearray} \label{D1} \end{equation} and \begin{equation} \begin{largearray} D^{(2)}_{l,n;l',m}(r,e,\epsilon):= \frac{\rho e}{(r+1)\sqrt\epsilon}\int_0^r ds\ (s+1)B^{(2)}_{l,n;l',m}(s)\sinh(2\sqrt\epsilon s)e^{-2\sqrt\epsilon r} \\[0.5cm]\hfill + \frac{\rho e}{2(r+1)\sqrt\epsilon}\left(1-e^{-4\sqrt\epsilon r}\right)\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n;l',m}^{(2)}(\sigma+r)e^{-2\sqrt\epsilon\sigma} . \end{largearray} \label{D2} \end{equation} \bigskip \subsubsubsection{Energy} We now compute the approximation for the energy, using\-~(\ref{hardcore_energy}). \bigskip \point We start with $\partial u|_{|x|\searrow 1}$. By\-~(\ref{uinteq}), \begin{equation} \partial u|_{|x|\searrow 1} = -(1+2\sqrt\epsilon) + 2\rho e\int_0^\infty d\sigma\ (\sigma+1)(u\ast u(\sigma+1))e^{-2\sqrt\epsilon\sigma} \end{equation} \bigskip which, by\-~(\ref{u*u}), becomes \begin{equation} \begin{largearray} \partial u|_{|x|\searrow 1} = -(1+2\sqrt\epsilon) + \gamma^{(0)}(e,\epsilon) + \sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)\gamma^{(1)}_{l,n}(e,\epsilon) +\\\hfill+ \sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)\gamma^{(2)}_{l,n;l',m}(e,\epsilon) \end{largearray} \end{equation} with \begin{equation} \gamma^{(0)}(e,\epsilon):= 2\rho e\int_0^1 d\sigma\ (\sigma+1)B^{(0)}(\sigma)e^{-2\sqrt\epsilon\sigma} \end{equation} \begin{equation} \gamma^{(1)}_{l,n}(e,\epsilon):= 2\rho e\int_0^\infty d\sigma\ (\sigma+1)B_{l,n}^{(1)}(\sigma)e^{-2\sqrt\epsilon\sigma} \end{equation} and \begin{equation} \gamma^{(2)}_{l,n;l',m}(e,\epsilon):= 2\rho e\int_0^\infty d\sigma\ (\sigma+1)B_{l,n;l',m}^{(2)}(\sigma)e^{-2\sqrt\epsilon\sigma} . \end{equation} \point Let us now turn to $\int_{|x|<1}dx\ u\ast u(x)$. 
We have \begin{equation} \int_{|x|<1}dx\ u\ast u(x) = 4\pi\int_0^1 dr\ r^2 u\ast u(r) \end{equation} so, by\-~(\ref{u*u}), \begin{equation} \int_{|x|<1}dx\ u\ast u(x) = \bar\gamma^{(0)} + \sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)\bar\gamma^{(1)}_{l,n} + \sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)\bar\gamma^{(2)}_{l,n;l',m} \label{intusu} \end{equation} with \begin{equation} \bar\gamma^{(0)}:= 4\pi\int_0^1 d\sigma\ \sigma^2B^{(0)}(\sigma-1) \end{equation} \begin{equation} \bar\gamma^{(1)}_{l,n}:= 4\pi\int_0^1 d\sigma\ \sigma^2B_{l,n}^{(1)}(\sigma-1) \end{equation} and \begin{equation} \bar\gamma^{(2)}_{l,n;l',m}:= 4\pi\int_0^1 d\sigma\ \sigma^2B_{l,n;l',m}^{(2)}(\sigma-1) . \end{equation} \bigskip \point Thus, \begin{equation} \begin{largearray} e=2\pi\rho \cdot\\\hfill\cdot \frac { C(\epsilon) - \gamma^{(0)}(e,\epsilon) - \sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)\gamma^{(1)}_{l,n}(e,\epsilon) - \sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)\gamma^{(2)}_{l,n;l',m}(e,\epsilon) } {1-\frac83\pi\rho+\rho^2\left( \bar\gamma^{(0)} + \sum_l\sum_n\mathbf F_{l,n}^{(\nu_u)}(u_+)\bar\gamma^{(1)}_{l,n} + \sum_{l,l'}\sum_{n,m}\mathbf F_{l,n}^{(\nu_u)}(u_+)\mathbf F_{l',m}^{(\nu_u)}(u_+)\bar\gamma^{(2)}_{l,n;l',m} \right)} . \end{largearray} \end{equation} with \begin{equation} C(\epsilon):=\frac23\mu+(1+2\sqrt\epsilon) =\frac43(\epsilon-e)+1+2\sqrt\epsilon . \end{equation} \bigskip \subsubsubsection{Newton algorithm} In this paragraph, we set $\epsilon=e$, that is, $\mu=0$. \bigskip \point As we did for \refname{sec:anyeq}{{\tt anyeq}}, we discretize the integral in\-~(\ref{bfF}) by using a Gauss-Legendre quadrature, and truncate the sum over Chebyshev polynomials to order \refname{param:simpleq-hardcore_P}{{\tt P}}. We then reduce the computation to a finite system of equations, whose variables are \begin{equation} e ,\quad \mathfrak u_{l,j}:=u_+(r_{l,j}) ,\quad r_{l,j}:= \frac{2+(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)-(\tau_{l+1}+\tau_l)}{2-(\tau_{l+1}-\tau_l)\sin(\frac{\pi x_j}2)+(\tau_{l+1}+\tau_l)} \end{equation} where $x_j$ are the abscissa for Gauss-Legendre quadratures (see\-~(\ref{klj})). In other words, we define a vector $\mathbb U$ of dimension \refname{param:simpleq-hardcore_N}{{\tt N}}$\times$\refname{param:simpleq-hardcore_J}{{\tt J}}$+1$, where the first \refname{param:simpleq-hardcore_N}{{\tt N}}$\times$\refname{param:simpleq-hardcore_J}{{\tt J}} terms are $\mathfrak u_{l,j}$ and the last component is $e$.
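\bigskip \indent In {\tt julia} notation, this layout can be sketched as follows; this is only an illustration of the indexing convention and of the grid $r_{l,j}$ (the helper names, the uniform choice of spline knots $\tau_l$, and the use of the {\tt FastGaussQuadrature} package to generate the abscissa are assumptions of this sketch, not the actual {\tt simplesolv} implementation), initialized as in the Newton algorithm described below:
\begin{code}
using FastGaussQuadrature\par
N=12; J=10; tau=collect(range(-1,1,length=J+1))  \# spline knots, uniform here for illustration\par
x,w=gausslegendre(N)\par
U=zeros(J*N+1)  \# entries 1 to J*N store u on the grid, entry J*N+1 stores e\par
for l in 0:J-1, j in 1:N\par
\indent t=-(tau[l+2]-tau[l+1])/2*sin(pi*x[j]/2)+(tau[l+2]+tau[l+1])/2\par
\indent r=(1-t)/(1+t)  \# this is r(l,j)\par
\indent U[l*N+j]=1/((1+r*r)*(1+r*r))\par
end\par
U[J*N+1]=pi*1e-6  \# pi*rho with the default density rho=1e-6
\end{code}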
We then write\-~(\ref{hardcore_u+}) as, for $l\in\{0,\cdots,J-1\}$, $j\in\{1,\cdots,N\}$, \begin{equation} \Xi_{lN+j}(\mathbb U)=0 ,\quad \Xi_{NJ+1}(\mathbb U)=0 \end{equation} (note that $\Xi_{lN+j}$ corresponds to the pair $(l,j)$) with \begin{equation} \begin{largearray} \Xi_{lN+j}(\mathbb U):= -\mathfrak u_{l,j} + \mathbb D^{(0)}(r_{l,j},e) + \sum_{l'}\sum_n\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\mathbb D^{(1)}_{l',n}(r_{l,j},e) +\\\hfill+ \sum_{l',l''}\sum_{n,m}\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathfrak u)\mathbb D^{(2)}_{l',n;l'',m}(r_{l,j},e) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \Xi_{NJ+1}(\mathbb U):=-e +\\\hfill+ 2\pi\rho \frac { C(e) - \mathfrak g^{(0)}(e) - \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak g^{(1)}_{l,n}(e) - \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\mathfrak g^{(2)}_{l,n;l',m}(e) } {1-\frac83\pi\rho+\rho^2\left( \bar{\mathfrak g}^{(0)} + \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(1)}_{l,n} + \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(2)}_{l,n;l',m} \right)} . \end{largearray} \end{equation} where $\mathfrak F$ is defined in\-~(\ref{frakF}), and $\mathbb D^{(i)}$, $\mathfrak g^{(i)}$, $\bar{\mathfrak g}^{(i)}$ are defined like $D^{(i)}$, $\gamma^{(i)}$ and $\bar\gamma^{(i)}$ except that the integrals over bounded intervals are approximated using Gauss-Legendre quadratures, and the integrals from $0$ to $\infty$ are approximated using Gauss-Laguerre quadratures. Gauss-Legendre and Gauss-Laguerre quadratures and their errors are discussed in appendix\-~\ref{appGL}. The orders of the quadratures are given by the variable \refname{param:simpleq-hardcore_N}{{\tt N}}. \bigskip \point We then solve $\Xi=0$ using the Newton algorithm, that is, we define a sequence of $\mathbb U$'s: \begin{equation} \mathbb U^{(n+1)}=\mathbb U^{(n)}-(D\Xi(\mathbb U^{(n)}))^{-1}\Xi(\mathbb U^{(n)}) \end{equation} where $D\Xi$ is the Jacobian of $\Xi$: \begin{equation} (D\Xi)_{\alpha,\beta}:=\frac{\partial\Xi_\alpha}{\partial\mathbb U_\beta} . \end{equation} We initialize the algorithm with \begin{equation} \mathbb U^{(0)}_{lN+j}=\frac1{(1+r_{l,j}^2)^2} ,\quad \mathbb U^{(0)}_{JN+1}=\pi\rho . 
\end{equation} \bigskip \point We are left with computing the Jacobian of $\Xi$: \begin{equation} \begin{largearray} \frac{\partial\Xi_{lN+j}(\mathbb U)}{\partial\mathfrak u_{l',i}}= -\delta_{l,l'}\delta_{j,i} + \sum_{l''}\sum_n\mathfrak F_{l'',n}^{(\nu_u)}(\mathds 1_{l',i})\mathbb D^{(1)}_{l'',n}(r_{l,j},e) +\\\hfill+ 2\sum_{l'',l'''}\sum_{n,m}\mathfrak F_{l'',n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l''',m}^{(\nu_u)}(\mathds 1_{l',i})\mathbb D^{(2)}_{l'',n;l''',m}(r_{l,j},e) \end{largearray} \end{equation} where $\mathds 1_{l',i}$ is a vector whose only non-vanishing entry is the $(l',i)$-th, which is 1, \begin{equation} \begin{largearray} \frac{\partial\Xi_{JN+1}(\mathbb U)}{\partial\mathfrak u_{l',i}} =\\\hfill= 2\pi\rho \frac { - \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathds 1_{l',i})\mathfrak g^{(1)}_{l,n}(e) - 2\sum_{l,l''}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathds 1_{l',i})\mathfrak g^{(2)}_{l,n;l'',m}(e) } {1-\frac83\pi\rho+\rho^2\left( \bar{\mathfrak g}^{(0)} + \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(1)}_{l,n} + \sum_{l,l''}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(2)}_{l,n;l'',m} \right)} \\[1cm]\hfill -(\Xi_{NJ+1}(\mathbb U)+e) \frac {\rho^2\left( \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathds 1_{l',i})\bar{\mathfrak g}^{(1)}_{l,n} + 2\sum_{l,l''}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathds 1_{l',i})\bar{\mathfrak g}^{(2)}_{l,n;l'',m} \right)}{1-\frac83\pi\rho+\rho^2\left( \bar{\mathfrak g}^{(0)} + \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(1)}_{l,n} + \sum_{l,l''}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(2)}_{l,n;l'',m} \right)} . \end{largearray} \end{equation} \begin{equation} \begin{largearray} \frac{\partial\Xi_{lN+j}(\mathbb U)}{\partial e}= \partial_e\mathbb D^{(0)}(r_{l,j},e) + \sum_{l'}\sum_n\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\partial_e\mathbb D^{(1)}_{l',n}(r_{l,j},e) +\\\hfill+ \sum_{l',l''}\sum_{n,m}\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathfrak u)\partial_e\mathbb D^{(2)}_{l',n;l'',m}(r_{l,j},e) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \frac{\partial\Xi_{NJ+1}(\mathbb U)}{\partial e} = -1 +\\\hfill+ 2\pi\rho \frac { \partial_e C(e) - \partial_e\mathfrak g^{(0)}(e) - \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\partial_e\mathfrak g^{(1)}_{l,n}(e) - \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\partial_e\mathfrak g^{(2)}_{l,n;l',m}(e) } {1-\frac83\pi\rho+\rho^2\left( \bar{\mathfrak g}^{(0)} + \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(1)}_{l,n} + \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(2)}_{l,n;l',m} \right)} .
\end{largearray} \end{equation} To compute $\partial_e\mathbb D^{(i)}$ and $\partial_e\mathfrak g^{(i)}$, we use $\partial_e=\frac1{2\sqrt e}\frac\partial{\partial\sqrt e}$ and \begin{equation} \frac{\partial C(e)}{\partial\sqrt e}=2 \end{equation} \begin{equation} \begin{largearray} \frac{\partial D^{(0)}(r,e)}{\partial\sqrt e} = -\frac{2r}{r+1}e^{-2\sqrt er} \\[0.5cm]\hfill + \frac{\rho}{r+1}\int_0^{\min(1,r)} ds\ (s+1)B^{(0)}(s)e^{-2\sqrt er}\left( (1-2\sqrt er)\sinh(2\sqrt e s) +2\sqrt es\cosh(2\sqrt e s) \right) \\[0.5cm] + \mathds 1_{r\leqslant 1} \frac{\rho}{2(r+1)}\int_0^{1-r} d\sigma\ (\sigma+r+1)B^{(0)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left( (1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt e\sigma} + 4\sqrt e re^{-2\sqrt e(2r+\sigma)} \right) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \frac{\partial D_{l,n}^{(1)}(r,e)}{\partial\sqrt e} = \frac{\rho}{r+1}\int_0^{r} ds\ (s+1)B_{l,n}^{(1)}(s)e^{-2\sqrt er}\left( (1-2\sqrt er)\sinh(2\sqrt e s) +2\sqrt es\cosh(2\sqrt e s) \right) \\[0.5cm] + \frac{\rho}{2(r+1)}\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n}^{(1)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left( (1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt e\sigma} + 4\sqrt e re^{-2\sqrt e(2r+\sigma)} \right) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \frac{\partial D_{l,n;l',m}^{(2)}(r,e)}{\partial\sqrt e} \\[0.5cm] = \frac{\rho}{r+1}\int_0^{r} ds\ (s+1)B_{l,n;l',m}^{(2)}(s)e^{-2\sqrt er}\left( (1-2\sqrt er)\sinh(2\sqrt e s) +2\sqrt es\cosh(2\sqrt e s) \right) \\[0.5cm] + \frac{\rho}{2(r+1)}\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n;l',m}^{(2)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left( (1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt e\sigma} + 4\sqrt e re^{-2\sqrt e(2r+\sigma)} \right) . \end{largearray} \end{equation} Furthermore, \begin{equation} \frac{\partial\gamma^{(0)}(e)}{\partial\sqrt e}= 4\rho\sqrt e\int_0^1 d\sigma\ (\sigma+1)B^{(0)}(\sigma)(1-\sqrt e\sigma)e^{-2\sqrt e\sigma} \end{equation} \begin{equation} \frac{\partial\gamma^{(1)}_{l,n}(e)}{\partial\sqrt e}= 4\rho\sqrt e\int_0^\infty d\sigma\ (\sigma+1)B_{l,n}^{(1)}(\sigma)(1-\sqrt e\sigma)e^{-2\sqrt e\sigma} \end{equation} and \begin{equation} \frac{\partial\gamma^{(2)}_{l,n;l',m}(e)}{\partial\sqrt e}= 4\rho\sqrt e\int_0^\infty d\sigma\ (\sigma+1)B_{l,n;l',m}^{(2)}(\sigma)(1-\sqrt e\sigma)e^{-2\sqrt e\sigma} . \end{equation} Finally, to get from $D$ to $\mathbb D$ and $\gamma$ to $\mathfrak g$, we approximate the integrals using Gauss-Legendre and Gauss-Laguerre quadratures (see appendix\-~\ref{appGL}), as described above. \bigskip \point We iterate the Newton algorithm until the Newton relative error $\epsilon$ becomes smaller than the \refname{param:simpleq-hardcore_tolerance}{{\tt tolerance}} parameter. The Newton error is defined as \begin{equation} \epsilon=\frac{\|\mathbb U^{(n+1)}-\mathbb U^{(n)}\|_2}{\|\mathbb U^{(n)}\|_2} \end{equation} where $\|\cdot\|_2$ is the $l_2$ norm. The energy thus obtained is \begin{equation} e=\mathbb U_{JN+1} . \end{equation} \bigskip \subsubsubsection{Condensate fraction} To compute the condensate fraction, we use the parameter $\mu$ in\-~(\ref{hardcore_simple}). The uncondensed fraction is \begin{equation} \eta=\partial_\mu e|_{\mu=0} . \end{equation} To compute $\partial_\mu e$, we use \begin{equation} \Xi(\mathbb U)=0 \end{equation} which we differentiate with respect to $\mu$: \begin{equation} \partial_\mu\mathbb U=-(D\Xi)^{-1}\partial_\mu\Xi .
\end{equation} We are left with computing $\partial_\mu\Xi$: \begin{equation} \begin{largearray} \frac{\partial\Xi_{lN+j}(\mathbb U)}{\partial\mu}= \partial_\mu\mathbb D^{(0)}(r_{l,j},e,\epsilon) + \sum_{l'}\sum_n\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\partial_\mu\mathbb D^{(1)}_{l',n}(r_{l,j},e,\epsilon) +\\\hfill+ \sum_{l',l''}\sum_{n,m}\mathfrak F_{l',n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l'',m}^{(\nu_u)}(\mathfrak u)\partial_\mu\mathbb D^{(2)}_{l',n;l'',m}(r_{l,j},e,\epsilon) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \frac{\partial\Xi_{NJ+1}(\mathbb U)}{\partial \mu} =\\\hfill= 2\pi\rho \frac { \partial_\mu C(\epsilon) - \partial_\mu\mathfrak g^{(0)}(e,\epsilon) - \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\partial_\mu\mathfrak g^{(1)}_{l,n}(e,\epsilon) - \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\partial_\mu\mathfrak g^{(2)}_{l,n;l',m}(e,\epsilon) } {1-\frac83\pi\rho+\rho^2\left( \bar{\mathfrak g}^{(0)} + \sum_l\sum_n\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(1)}_{l,n} + \sum_{l,l'}\sum_{n,m}\mathfrak F_{l,n}^{(\nu_u)}(\mathfrak u)\mathfrak F_{l',m}^{(\nu_u)}(\mathfrak u)\bar{\mathfrak g}^{(2)}_{l,n;l',m} \right)} . \end{largearray} \end{equation} We then use $\partial_\mu=\frac1{4\sqrt\epsilon}\partial_{\sqrt\epsilon}$ and \begin{equation} \left.\frac{\partial C(\epsilon)}{\partial\sqrt\epsilon}\right|_{\mu=0} = \frac83\sqrt\epsilon+2 \end{equation} \begin{equation} \begin{largearray} \left.\frac{\partial D^{(0)}(r)}{\partial\sqrt\epsilon}\right|_{\mu=0}= -\frac{2r}{r+1}e^{-2\sqrt{e}r} \\[0.5cm]\hfill + \frac{\rho}{(r+1)}\int_0^{\min(1,r)} ds\ (s+1)B^{(0)}(s)e^{-2\sqrt{e}r}((-1-2\sqrt er)\sinh(2\sqrt{e} s)+2\sqrt es\cosh(2\sqrt{e} s)) \\[0.5cm] + \mathds 1_{r\leqslant 1} \frac{\rho}{2(r+1)}\int_0^{1-r} d\sigma\ (\sigma+r+1)B^{(0)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left((-1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt{e}\sigma}+4\sqrt ere^{-2\sqrt{e}(2r+\sigma)}\right) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \left.\frac{\partial D_{l,n}^{(1)}(r)}{\partial\sqrt\epsilon}\right|_{\mu=0} = \frac{\rho}{r+1}\int_0^{r} ds\ (s+1)B_{l,n}^{(1)}(s)e^{-2\sqrt er}\left( (-1-2\sqrt er)\sinh(2\sqrt e s) +2\sqrt es\cosh(2\sqrt e s) \right) \\[0.5cm] + \frac{\rho}{2(r+1)}\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n}^{(1)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left( (-1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt e\sigma} + 4\sqrt e re^{-2\sqrt e(2r+\sigma)} \right) \end{largearray} \end{equation} \begin{equation} \begin{largearray} \left.\frac{\partial D_{l,n;l',m}^{(2)}(r)}{\partial\sqrt\epsilon}\right|_{\mu=0} \\[0.5cm] = \frac{\rho}{r+1}\int_0^{r} ds\ (s+1)B_{l,n;l',m}^{(2)}(s)e^{-2\sqrt er}\left( (-1-2\sqrt er)\sinh(2\sqrt e s) +2\sqrt es\cosh(2\sqrt e s) \right) \\[0.5cm] + \frac{\rho}{2(r+1)}\int_0^\infty d\sigma\ (\sigma+r+1)B_{l,n;l',m}^{(2)}(\sigma+r) \cdot\\[0.5cm]\hfill\cdot \left( (-1-2\sqrt e\sigma)\left(1-e^{-4\sqrt er}\right)e^{-2\sqrt e\sigma} + 4\sqrt e re^{-2\sqrt e(2r+\sigma)} \right) . 
\end{largearray} \end{equation} \begin{equation} \left.\frac{\partial \gamma^{(0)}}{\partial\sqrt\epsilon}\right|_{\mu=0} = -4\rho e\int_0^1 d\sigma\ \sigma(\sigma+1)B^{(0)}(\sigma)e^{-2\sqrt e\sigma} \end{equation} \begin{equation} \left.\frac{\partial\gamma^{(1)}_{l,n}}{\partial\sqrt\epsilon}\right|_{\mu=0} = -4\rho e\int_0^\infty d\sigma\ (\sigma+1)\sigma B_{l,n}^{(1)}(\sigma)e^{-2\sqrt e\sigma} \end{equation} and \begin{equation} \left.\frac{\partial\gamma^{(2)}_{l,n;l',m}}{\partial\sqrt\epsilon}\right|_{\mu=0} = -4\rho e\int_0^\infty d\sigma\ (\sigma+1)\sigma B_{l,n;l',m}^{(2)}(\sigma)e^{-2\sqrt e\sigma} . \end{equation} \subsection{\tt simpleq-iteration}\label{sec:simpleq-iteration} \indent This method is used to solve the Simple equation using the iteration described in\-~\cite{CJL20}. The Simple equation is \begin{equation} -\Delta u = S-4e u+2e\rho u\ast u \label{simpleq_iteration} \end{equation} with \begin{equation} S:=(1-u)v ,\quad \rho:=\frac{2e}{\int dx\ (1-u(|x|))v(|x|)} \label{simpleq_Se} \end{equation} for a soft potential $v$ at fixed energy $e>0$. The iteration is defined as \begin{equation} -\Delta u_n=S_n-4eu_n+2e\rho_{n-1}u_{n-1}\ast u_{n-1} ,\quad u_0=0 ,\quad \rho_{n-1}= \frac{2e}{\int dx\ (1-u_{n-1}(|x|))v(|x|)} . \label{iteration} \end{equation} \bigskip \subsubsection{Usage} \indent Unless otherwise noted, this method takes the following parameters (specified via the {\tt [-p params]} flag, as a `{\tt;}' separated list of entries). \begin{itemize} \item\makelink{param:simpleq-iteration_e}{} {\tt e} ({\tt Float64}, default: $10^{-4}$): energy $e$. \item\makelink{param:simpleq-iteration_maxiter}{} {\tt maxiter} ({\tt Int64}, default: 21): maximal number of iterations. \item\makelink{param:simpleq-iteration_order}{} {\tt order} ({\tt Int64}, default: 100): order used for all Gauss quadratures (denoted by $N$ below). \end{itemize} \bigskip The available {\tt commands} are the following. \begin{itemize} \item {\tt rho\_e}\makelink{command:simpleq-iteration_rho_e}{}: compute the density $\rho$ as a function of $e$.\par \underline{Disabled parameters}: \refname{param:simpleq-iteration_e}{{\tt e}}.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:simpleq-iteration_minle}{} {\tt minle} ({\tt Float64}, default: $-6$): minimal value for $\log_{10}e$. \item\makelink{param:simpleq-iteration_maxle}{} {\tt maxle} ({\tt Float64}, default: $2$): maximal value for $\log_{10}e$. \item\makelink{param:simpleq-iteration_nle}{} {\tt nle} ({\tt Int64}, default: $100$): number of values for $e$ (spaced logarithmically). \item\makelink{param:simpleq-iteration_es}{} {\tt es} ({\tt Array\{Float64\}}, default: $(10^{{\tt minle}+\frac{{\tt maxle}-{\tt minle}}{{\tt nle}}n})_n$): list of values for $e$, specified as a `{\tt,}' separated list. \end{itemize} \underline{Output} (one line for each value of $e$): [$e$] [$\rho$]. \item\makelink{command:simpleq-iteration_ux}{} {\tt ux}: compute $u$ as a function of $|x|$.\par \underline{Extra parameters}: \begin{itemize} \item\makelink{param:simpleq-iteration_xmin}{} {\tt xmin} ({\tt Float64}, default: 0): minimum of the range of $|x|$ to be printed. \item\makelink{param:simpleq-iteration_xmax}{} {\tt xmax} ({\tt Float64}, default: 100): maximum of the range of $|x|$ to be printed. \item\makelink{param:simpleq-iteration_nx}{} {\tt nx} ({\tt Int64}, default: 100): number of points to print (linearly spaced).
\end{itemize} \underline{Output} (one line for each value of $x$): [$|x|$] [$u_1(|x|)$] [$u_2(|x|)$] [$u_3(|x|)$] $\cdots$ \end{itemize} \subsubsection{Description} \indent In Fourier space \begin{equation} \hat u_n(|k|):=\int dx\ e^{ikx}u_n(|x|) \end{equation} (\ref{iteration}) becomes \begin{equation} (k^2+4e)\hat u_n(|k|)=\hat S_n(|k|)+2e\rho_{n-1}\hat u_{n-1}(k)^2 ,\quad \rho_n:=\frac{2e}{\hat S_n(0)} \label{iteration_uk} \end{equation} with \begin{equation} \hat S_n(|k|) =\hat v(|k|)-\frac1{(2\pi)^3}\int dp\ \hat u_n(|p|)\hat v(|k-p|) . \end{equation} We write $\hat S_n$ in bipolar coordinates (see lemma\-~\ref{lemma:bipolar}): \begin{equation} \hat S_n(|k|)=\hat v(|k|)-\frac1{(2\pi)^3}\int_0^\infty dt\ t\hat u_n(t)\Eta(|k|,t) \end{equation} with \begin{equation} \Eta(y,t):=\frac{2\pi}y\int_{|y-t|}^{y+t}ds\ s\hat v(s) \end{equation} (note that this agrees with the function\-~(\ref{easyeq_Eta}) defined for \refname{sec:easyeq}{{\tt easyeq}}). We also change variables to \begin{equation} y=\frac1{t+1} ,\quad t=\frac{1-y}y \end{equation} so that \begin{equation} \hat S_n(|k|)=\hat v(|k|)-\frac1{(2\pi)^3}\int_0^1 dy\ \frac{(1-y)\hat u_n(\frac{1-y}y)\Eta(|k|,\frac{1-y}y)}{y^3} . \end{equation} We approximate this integral using a Gauss-Legendre quadrature (see appendix\-~\ref{appGL}) and discretize Fourier space: \begin{equation} k_i:=\frac{1-x_i}{1+x_i} ,\quad y_i:=\frac{x_i+1}2 \end{equation} where $x_i$ are the Gauss-Legendre abscissa, and \begin{equation} \hat S_n(k_i)\approx\hat v(k_i)-\frac1{2(2\pi)^3}\sum_{j=1}^Nw_j\frac{(1-y_j)\hat u_n(\frac{1-y_j}{y_j})\Eta(k_i,\frac{1-y_j}{y_j})}{y_j^3} \end{equation} so if \begin{equation} \mathbb U_i(n):=\hat u_n(k_i) ,\quad \mathbf v_i:=\hat v(k_i) \end{equation} we have \begin{equation} \sum_{j=1}^NA_{i,j}\mathbb U_j(n)=b_i^{(n)} \end{equation} with \begin{equation} A_{i,j}:=(k_i^2+4e)\delta_{i,j}+\frac{w_j(1-y_j)\Eta(k_i,\frac{1-y_j}{y_j})}{2(2\pi)^3y_j^3} \end{equation} and \begin{equation} b_i^{(n)}:=\mathbf v_i+2e\rho_{n-1}\mathbb U_i(n-1)^2 \end{equation} in terms of which \begin{equation} \mathbb U(n)=A^{-1}b^{(n)} . \end{equation} Finally, we compute $\rho_n$ using the second of\-~(\ref{iteration_uk}): \begin{equation} \rho_n=\frac{2e}{\hat S_n(0)} ,\quad \hat S_n(0)\approx\hat v(0) -\frac1{2(2\pi)^3}\sum_{j=1}^Nw_j\frac{(1-y_j)\hat u_n(\frac{1-y_j}{y_j})\Eta(0,\frac{1-y_j}{y_j})}{y_j^3} . \end{equation}
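\bigskip \indent As an illustration of this scheme (and not of the actual {\tt simplesolv} implementation, whose quadratures and data structures differ), the following sketch runs the iteration for the \refname{sec:exp}{{\tt exp}} potential with $a=1$, for which $\Eta(y,t)=\frac{8\pi^2}y\left(\frac1{1+(y-t)^2}-\frac1{1+(y+t)^2}\right)$ can be computed in closed form; it assumes the {\tt FastGaussQuadrature} package for the Gauss-Legendre abscissa and weights:
\begin{code}
using FastGaussQuadrature\par
vhat(q)=8*pi/((1+q*q)*(1+q*q))\par
H(y,t)=8*pi*pi*(1/(1+(y-t)*(y-t))-1/(1+(y+t)*(y+t)))/y\par
H0(t)=4*pi*t*vhat(t)  \# limit of H(y,t) as y goes to 0\par
function rhoe(e,N,niter)\par
\indent x,w=gausslegendre(N)\par
\indent k=(1 .- x)./(1 .+ x); y=(x .+ 1)./2; t=(1 .- y)./y\par
\indent c=w.*(1 .- y)./(16*pi*pi*pi*y.*y.*y)  \# quadrature prefactors\par
\indent A=[(i==j ? k[i]*k[i]+4*e : 0.0)+c[j]*H(k[i],t[j]) for i in 1:N, j in 1:N]\par
\indent U=zeros(N); rho=2*e/vhat(0)  \# u0=0, so S0=v\par
\indent for n in 1:niter\par
\indent\indent U=inv(A)*(vhat.(k) .+ 2*e*rho .* U .* U)  \# in practice one would factorize A once\par
\indent\indent rho=2*e/(vhat(0)-sum(c.*U.*H0.(t)))\par
\indent end\par
\indent return rho\par
end\par
println(rhoe(1e-4,100,21))
\end{code}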
\section{Potentials}\label{sec:potentials} \indent In this section, we describe the potentials available in {\tt simplesolv}, and provide documentation to add custom potentials to {\tt simplesolv}. \bigskip \subsection{Built-in potentials} \subsubsection{{\tt exp}}\label{sec:exp} \indent In $x$-space, \begin{equation} v(|x|)=ae^{-|x|} . \end{equation} The constant $a$ is specified through the {\tt v\_a} parameter, and can be any real number. Note that $v\geqslant 0$ if and only if $a\geqslant 0$. This is the potential that is selected by default. \bigskip \point In Fourier space, \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|) =\frac{8\pi a}{(1+k^2)^2} . \end{equation} In particular, $v$ is of positive type (that is, $\hat v\geqslant 0$) if and only if $a\geqslant 0$. \bigskip \point The zero energy scattering solution, that is, the solution of \begin{equation} (-\Delta+v)\psi=0 ,\quad \lim_{|x|\to\infty}\psi=1 \end{equation} is \begin{equation} \psi(r)=\frac{cI_0(2\sqrt a e^{-\frac r2})+2K_0(2\sqrt a e^{-\frac r2})}r ,\quad c=-\frac{2K_0(2\sqrt a)}{I_0(2\sqrt a)} \end{equation} where $I_0$ and $K_0$ are the modified Bessel functions, and the square root is taken with a branch cut on the negative imaginary axis. In other words, if $a<0$, $\sqrt a=i\sqrt{|a|}$ and\-~\dlmfcite{(10.27.6),(10.27.9),(10.27.10)}{1.1.3} \begin{equation} I_0(ix)=J_0(x) ,\quad K_0(ix)=-\frac\pi2(Y_0(x)+iJ_0(x)) \end{equation} where $J_0$ and $Y_0$ are the Bessel functions, so \begin{equation} \psi(r)=\pi\frac{c'J_0(2\sqrt{|a|} e^{-\frac r2})-Y_0(2\sqrt{|a|} e^{-\frac r2})}r ,\quad c'=\frac{Y_0(2\sqrt{|a|})}{J_0(2\sqrt{|a|})} . \end{equation} The scattering length is\-~\dlmfcite{(10.25.2),(10.31.2)}{1.1.3} \begin{equation} \mathfrak a_0=\lim_{r\to\infty}r(1-\psi(r)) =\log(a)+2\gamma+\frac{2K_0(2\sqrt a)}{I_0(2\sqrt a)} \end{equation} where $\gamma$ is the Euler constant, which, for $a<0$, is \begin{equation} \mathfrak a_0=\log|a|+2\gamma-\frac{\pi Y_0(2\sqrt{|a|})}{J_0(2\sqrt{|a|})} . \end{equation} \subsubsection{{\tt tent}}\label{sec:tent} \indent In $x$-space, \begin{equation} v(|x|)=\mathds 1_{|x|\leqslant b}\frac{\pi a}{12b^3}(|x|-b)^2(|x|+2b) . \end{equation} The constants $a$ and $b$ are specified through the {\tt v\_a} and {\tt v\_b} parameters, and $a\in\mathbb R$, $b>0$. Note that $v\geqslant 0$ if and only if $a\geqslant 0$. Note that \begin{equation} v(|x|)=\frac a{b^3}\mathds 1_{|x|<\frac b2}\ast\mathds 1_{|x|<\frac b2} \end{equation} (this is more easily checked in Fourier space than explicitly computing the convolution). \bigskip \indent In Fourier space, \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|)=a\frac{b^3}8\left(4\pi\frac{\sin(\frac{|k|b}2)-\frac{|k|b}2\cos(\frac{|k|b}2)}{\frac{(|k|b)^3}8}\right)^2 . \end{equation} Note that this is $\frac a{b^3}(\hat{\mathds 1}_{|x|<\frac b2})^2$. In particular, $v$ is of positive type (that is, $\hat v\geqslant 0$) if and only if $a\geqslant 0$. \subsubsection{{\tt expcry}}\label{sec:expcry} \indent In $x$-space \begin{equation} v(|x|)=e^{-|x|}-ae^{-b|x|} . \end{equation} The constants $a$ and $b$ are specified through the {\tt v\_a} and {\tt v\_b} parameters, and $a\in\mathbb R$, $b>0$. Note that $v\geqslant 0$ if and only if $a\leqslant 1$ and $b\geqslant 1$. \bigskip \indent In Fourier space \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|) =8\pi\left(\frac1{(1+k^2)^2}-\frac{ab}{(b^2+k^2)^2}\right) . \end{equation} In particular, $v$ is of positive type (that is, $\hat v\geqslant 0$) if and only if $ab\leqslant 1$, $a\leqslant b$ and $a\leqslant b^3$. If $a\leqslant 1$, $b\geqslant 1$ and $ab>1$, then $\hat v$ has a unique minimum at \begin{equation} |k_*|=\sqrt{\frac{b^2-(ab)^{\frac13}}{(ab)^{\frac13}-1}} ,\quad \hat v(|k_*|)= -8\pi\frac{((ab)^{\frac13}-1)^3}{(b^2-1)^2} . \end{equation} \subsubsection{{\tt npt}}\label{sec:npt} \indent In $x$-space \begin{equation} v(|x|)=|x|^2e^{-|x|} . \end{equation} Note that $v\geqslant 0$. \bigskip \indent In Fourier space \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|) =96\pi\frac{1-k^2}{(1+k^2)^4} . \end{equation} In particular, $v$ is not of positive type (that is, $\hat v$ is not $\geqslant 0$). \subsubsection{{\tt alg}}\label{sec:alg} \indent In $x$-space \begin{equation} v(|x|)=\frac1{1+\frac14|x|^4} . \end{equation} Note that $v\geqslant 0$. \bigskip \indent In Fourier space \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|) =4\pi^2 e^{-|k|}\frac{\sin(|k|)}{|k|} .
\end{equation} In particular, $v$ is not of positive type (that is, $\hat v$ is not $\geqslant 0$). \subsubsection{{\tt algwell}}\label{sec:algwell} \indent In $x$-space \begin{equation} v(|x|)=\frac{1+a|x|^4}{(1+|x|^2)^4} . \end{equation} The constant $a$ can be set using the parameter {\tt v\_a} and can be any real number. Note that $v\geqslant 0$ if and only if $a\geqslant 0$. If $a>8$, this potential has a local minimum at $|x_-|$ and a local maximum at $|x_+|$: \begin{equation} |x_\pm|=\sqrt{\frac12(1\pm\sqrt{1-8a^{-1}})} . \end{equation} \bigskip \indent In Fourier space \begin{equation} \hat v(|k|)=\int dx\ e^{ikx}v(|x|) =\frac{\pi^2}{24}e^{-|k|}(a(k^2-9|k|+15)+k^2+3|k|+3) . \end{equation} In particular, $v$ is of positive type (that is, $\hat v\geqslant 0$) if and only if \begin{equation} -\frac15\leqslant a\leqslant 3+\frac87\sqrt7\approx6.02 . \end{equation} \subsubsection{{\tt exact}}\label{sec:exact} \indent In $x$-space \begin{equation} v(|x|)= \frac{12c( x^6b^6(2e-b^2) +b^4x^4(9e-7b^2) +4b^2x^2(3e-2b^2) +(5e+16b^2))}{(1+b^2x^2)^2(4+b^2x^2)^2((1+b^2x^2)^2-c)} . \end{equation} The constants $a,b,c,e$ can be set using the parameters {\tt v\_a}, {\tt v\_b}, {\tt v\_c}, {\tt v\_e}, and $a,e\in\mathbb R$, $b\neq0$, $c>0$, $c\neq9$. The conditions on the parameters under which $v\geqslant 0$ are given in\-~\cite{CJL21}. \bigskip \subsection{Custom potentials} \indent To add a custom potential to {\tt simplesolv}, the potential (in Fourier space) and an approximation {\tt a0} of its scattering length are specified, in the source code, in the form \begin{code} v=k->{\rm \{potential function\}}(k,v\_param\_a,v\_param\_b,{\rm...})\par \indent a0={\rm \{scattering length function\}}(v\_param\_a,v\_param\_b,{\rm...})\par \end{code} where the number of {\tt v\_param} entries should be the number of parameters of the potential. The parameters that are currently read from the parameters list are {\tt a}, {\tt b}, {\tt c} and {\tt e}. To add a parameter, it must first be declared and initialized after line 35, and code to read it should be added after line 172: \begin{code} elseif lhs=="v\_{\rm\{name of parameter\}}"\par \indent v\_param\_{\rm\{name of parameter\}}=parse(Float64,rhs) \end{code} If the new parameter has a type other than {\tt Float64}, this should be changed in the {\tt parse} function, and in the initialization. \bigskip \indent The approximation of the scattering length is only used to initialize the Newton algorithm for \refname{sec:easyeq}{{\tt easyeq}}, so it is not important that it be exact. In fact, some of the built-in potentials set the scattering length to 1, when it has proved too difficult to compute it exactly. \appendix \section{Chebyshev polynomial expansion}\label{appChebyshev} \indent In this appendix, we compute the error of Chebyshev polynomial expansions of regular functions. Specifically, we will consider class-$s$ Gevrey functions (a generalization of the notion of analyticity: analytic functions are Gevrey functions with $s=1$). A class-$s$ Gevrey function on $[-1,1]$ is a $\mathcal C^\infty$ function that satisfies, $\forall n\in\mathbb N$, \begin{equation} \mathop{\mathrm{sup}}_{x\in[-1,1]}\left|\frac{d^nf(x)}{dx^n}\right|\leqslant C_0C^n(n!)^s. \end{equation} Formally, the Chebyshev polynomial expansion of $f$ is \begin{equation} f(x)=\frac{c_0}2+\sum_{j=1}^\infty c_j T_j(x) \label{eqcheby} \end{equation} where $T_j$ is the $j$-th Chebyshev polynomial: \begin{equation} T_j(x):=\cos(j\arccos(x)) \end{equation} and \begin{equation} c_j:=\frac2\pi\int_0^\pi d\theta\ f(\cos\theta)\cos(j\theta) . \end{equation}
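\bigskip \indent For instance, the coefficients $c_j$ can be approximated by discretizing the integral over $\theta$; the following is a minimal {\tt julia} illustration, unrelated to the {\tt simplesolv} implementation (the function $f$ and the midpoint discretization are chosen arbitrarily):
\begin{code}
f(x)=1/(2+x)\par
c(j;M=2048)=2/M*sum(f(cos(pi*(m-1/2)/M))*cos(j*pi*(m-1/2)/M) for m in 1:M)\par
S(x,N)=c(0)/2+sum(c(j)*cos(j*acos(x)) for j in 1:N)  \# truncated expansion\par
println(abs(S(0.3,10)-f(0.3)))  \# small: f is analytic, so the error decays exponentially in N
\end{code}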
\bigskip \theo{Lemma}\label{lemmaChebyshev} Let $f$ be a class-$s$ Gevrey function on $[-1,1]$ with $s\in\mathbb N\setminus\{0\}$. There exist $b_0,b>0$, which are independent of $s$, such that the coefficients $c_j$ of the Chebyshev polynomial expansion are bounded as \begin{equation} |c_j|\leqslant b_0e^{-bj^{\frac1s}} . \label{boundchebyshevcoef} \end{equation} In particular, the Chebyshev polynomial expansion is absolutely convergent pointwise (and, therefore, in any $L_p$ norm), and, for every $N\geqslant 1$, \begin{equation} \left|f(x)-\frac{c_0}2-\sum_{j=1}^N c_j T_j(x)\right| \leqslant \frac{b_0}{1-e^{-b}}(s-1)!(N^{\frac1s}+b^{-1})^{s-1}e^{-bN^{\frac1s}} . \label{boundchebyshev} \end{equation} \endtheo \bigskip \indent\underline{Proof}: Note that\-~(\ref{eqcheby}) is nothing other than the Fourier cosine series expansion of $F(\theta):=f(\cos(\theta))$, which is an even, periodic, class-$s$ Gevrey function on $[-\pi,\pi]$, whose $j$-th Fourier coefficient for $j\in\mathbb Z$ is equal to $\frac12c_{|j|}$. The bound\-~(\ref{boundchebyshevcoef}) follows from a well-known estimate of the decay of Fourier coefficients of class-$s$ Gevrey functions (see e.g.~\-\cite[Theorem~\-3.3]{Ta87}). The bound\-~(\ref{boundchebyshev}) then follows from $|T_j(x)|\leqslant 1$ and lemma\-~\ref{lemma:ineqgevrey} below. \qed \bigskip \theo{Lemma}\label{lemma:ineqgevrey} Given $b>0$ and two integers $N,s>0$, \nopagebreakaftereq \begin{equation} \sum_{j=N}^\infty e^{-bj^{\frac1s}} \leqslant (s-1)!\left(N^{\frac1s}+\frac1{1-e^{-b}}\right)^{s-1}\frac{e^{-bN^{\frac1s}}}{1-e^{-b}} \end{equation} \endtheo \restorepagebreakaftereq \bigskip \indent\underline{Proof}: If $\nu_{N,s}^s:=\lfloor N^{\frac1s}\rfloor^s$ denotes the largest integer that is $\leqslant N$ and has an integer $s$-th root, then \begin{equation} \sum_{j=N}^\infty e^{-bj^{\frac1s}}\leqslant \sum_{j=\nu_{N,s}^s}^\infty e^{-bj^{\frac1s}}\leqslant \sum_{k=\nu_{N,s}}^\infty(k^s-(k-1)^s)e^{-bk}\leqslant s\sum_{k=\nu_{N,s}}^\infty k^{s-1}e^{-bk} . \end{equation} We then estimate \begin{equation} \sum_{k=\nu_{N,s}}^\infty k^{s-1}e^{-bk}=\frac{d^{s-1}}{d(-b)^{s-1}}\sum_{k=\nu_{N,s}}^\infty e^{-bk} \leqslant (s-1)!\left(\nu_{N,s}+\frac1{1-e^{-b}}\right)^{s-1}\frac{e^{-b\nu_{N,s}}}{1-e^{-b}} \end{equation} which concludes the proof of the lemma. \qed \section{Gauss quadratures}\label{appGL} \indent Gauss quadratures are approximation schemes to compute integrals of the form \begin{equation} \int_a^bdt\ \omega(t)f(t) \end{equation} where $\omega(t)\geqslant 0$ is one of several functions that Gauss quadratures can treat. The possible choices of $\omega$ and $(a,b)$ are \begin{itemize} \item $(a,b)=(-1,1)$, $\omega(t)=1$: Gauss-Legendre quadratures. \item $(a,b)=(-1,1)$, $\omega(t)=(1-t)^\alpha(1+t)^\beta$, $\alpha,\beta>-1$: Gauss-Jacobi quadratures. \item $(a,b)=(-1,1)$, $\omega(t)=(1-t^2)^{-\frac12}$: Gauss-Chebyshev quadratures. \item $(a,b)=(0,\infty)$, $\omega(t)=e^{-t}$: Gauss-Laguerre quadratures. \item $(a,b)=(0,\infty)$, $\omega(t)=t^\alpha e^{-t}$, $\alpha>-1$: generalized Gauss-Laguerre quadratures. \item $(a,b)=(-\infty,\infty)$, $\omega(t)=e^{-t^2}$: Gauss-Hermite quadratures. \end{itemize} It is not our goal here to discuss Gauss quadratures in detail, or their relation to orthogonal polynomials. Instead, we will compute the error made when approximating such an integral by a Gauss quadrature. \bigskip \indent For each Gauss quadrature, the integral is approximated in the form \begin{equation} \int_a^b dt\ \omega(t) f(t) \approx \sum_{i=1}^Nw_if(r_i) \end{equation} where $w_i$ are called the {\it weights}, $r_i$ are the {\it abscissa}, and $N$ is the {\it order}. The weights and abscissa depend on both $\omega$ and the order $N$. The crucial property of Gauss quadratures is that they are {\it exact} when $f$ is a polynomial of order $\leqslant 2N-1$.
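\bigskip \indent For example (a minimal illustration, independent of {\tt simplesolv}, in which the weights and abscissa are generated with the {\tt FastGaussQuadrature} package):
\begin{code}
using FastGaussQuadrature\par
r,w=gausslegendre(10)  \# 10-point rule on (-1,1) with weight 1\par
println(abs(sum(w.*exp.(r))-(exp(1)-exp(-1))))  \# error for the integral of exp, about 1e-15\par
r,w=gausslaguerre(10)  \# 10-point rule on (0,infinity) with weight exp(-t)\par
println(abs(sum(w.*sin.(r))-1/2))  \# error for the integral of exp(-t)sin(t), whose exact value is 1/2
\end{code}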
\bigskip \indent In this appendix, we compute the error of Gauss quadratures when used to integrate regular functions. Specifically, we will consider class-$s$ Gevrey functions (a generalization of the notion of analyticity: analytic functions are Gevrey functions with $s=1$). A class-$s$ Gevrey function is a $\mathcal C^\infty$ function that satisfies, $\forall n\in\mathbb N$, \begin{equation} \mathop{\mathrm{sup}}_{x\in[-1,1]}\left|\frac{d^nf(x)}{dx^n}\right|\leqslant C_0C^n(n!)^s. \end{equation} \bigskip \theo{Lemma}\label{lemmaGL} Let $f$ be a class-$s$ Gevrey function with $s\in\mathbb N\setminus\{0\}$. There exist $b_0,b>0$, which are independent of $s$, and $N_0>0$, which is independent of $s$ and $f$, such that, if $N\geqslant N_0$, then, denoting the Gauss weights and abscissa by $w_i$ and $r_i$, \begin{equation} \left|\int_a^bdt\ \omega(t)f(t)-\sum_{i=1}^Nw_if(r_i)\right| \leqslant b_0(s-1)!((2N-1)^{\frac1s}+b^{-1})^{s-1}e^{-b(2N-1)^{\frac1s}}\left(\int_a^bdt\ \omega(t)+\sum_{i=1}^Nw_i\right) . \end{equation} In particular, if $f$ is analytic (i.e. $s=1$), then \begin{equation} \left|\int_a^bdt\ \omega(t)f(t)-\sum_{i=1}^Nw_if(r_i)\right| \leqslant b_0e^{-b(2N-1)}\left(\int_a^bdt\ \omega(t)+\sum_{i=1}^Nw_i\right) . \end{equation} \endtheo \bigskip \indent\underline{Proof}: We expand $f$ into Chebyshev polynomials as in\-~(\ref{eqcheby}): \begin{equation} f(x)=\frac{c_0}2+\sum_{j=1}^{\infty}c_jT_j(x) . \end{equation} Let \begin{equation} g(x):=f(x)-\frac{c_0}2-\sum_{j=1}^{2N-1} c_j T_j(x) . \end{equation} Since order-$N$ Gauss quadratures are exact on polynomials of order $\leqslant 2N-1$, we have \begin{equation} \int_a^bdt\ \omega(t)f(t)-\sum_{i=1}^Nw_if(r_i) = \int_a^bdt\ \omega(t)g(t)-\sum_{i=1}^Nw_ig(r_i) \end{equation} and, by lemma\-~\ref{lemmaChebyshev}, \begin{equation} |g(x)|\leqslant (\mathrm{const}.)(s-1)!((2N-1)^{\frac1s}+b^{-1})^{s-1}e^{-b(2N-1)^{\frac1s}} . \end{equation} The claim then follows by bounding $\left|\int_a^bdt\ \omega(t)g(t)-\sum_{i=1}^Nw_ig(r_i)\right|\leqslant\mathop{\mathrm{sup}}_x|g(x)|\left(\int_a^bdt\ \omega(t)+\sum_{i=1}^Nw_i\right)$, since $\omega\geqslant 0$ and the weights $w_i$ are positive. \qed \section{Bipolar coordinates}\label{appendix:bipolar} \indent Bipolar coordinates are very useful for computing convolutions of radial functions in three dimensions. \bigskip \theo{Lemma}\label{lemma:bipolar} For $y\in\mathbb R^3$, \nopagebreakaftereq \begin{equation} \int_{\mathbb R^3}dx\ f(|x|,|x-y|) = \frac{2\pi}{|y|}\int_0^\infty dt\int_{||y|-t|}^{|y|+t}ds\ stf(s,t) \end{equation} \endtheo \restorepagebreakaftereq \indent\underline{Proof}: Without loss of generality, we assume that $y=(0,0,a)$ with $a>0$. We first change to cylindrical coordinates: $(\rho,\theta,x_3)$: \begin{equation} \int_{\mathbb R^3}dx\ f(|x|,|x-y|) = 2\pi\int_0^\infty d\rho\int_{-\infty}^\infty dx_3\ \rho f(|(\rho,0,x_3)|,|(\rho,0,x_3-a)|) . \end{equation} Next, we change variables to \begin{equation} s=|(\rho,0,x_3)| ,\quad t=|(\rho,0,x_3-a)| . \end{equation} The inverse of this transformation is \begin{equation} x_3=\frac{a^2+s^2-t^2}{2a} ,\quad \rho=\frac1{2a}\sqrt{4a^2s^2-(a^2+s^2-t^2)^2} \label{inverse} \end{equation} and its Jacobian is \begin{equation} \frac{2ts}{\sqrt{-a^4+2a^2(t^2+s^2)-(t^2-s^2)^2}} =\frac{ts}{\rho a} . \label{jacobian} \end{equation} \qed \bigskip \indent The following is a generalization of the previous lemma to functions of five variables.
\bigskip \theo{Lemma}\label{lemma:bipolar2} For $y\in\mathbb R^3$, \begin{equation} \begin{largearray} \int_{\mathbb R^6}dxdx'\ f(|x|,|x-y|,|x'|,|x'-y|,|x-x'|) \\[0.5cm]\hfill = \frac{2\pi}{|y|^2}\int_0^\infty dt\int_{||y|-t|}^{|y|+t}ds\int_0^\infty dt'\int_{||y|-t'|}^{|y|+t'}ds'\int_0^{2\pi}d\theta\ sts't'f(s,t,s',t',\xi(s,t,s',t',\theta,|y|)) \end{largearray} \end{equation} with \nopagebreakaftereq \begin{equation} \begin{largearray} \xi(s,t,s',t',\theta,|y|) := \frac1{\sqrt 2|y|} \Big( |y|^2(s^2+t^2+(s')^2+(t')^2)-|y|^4-(s^2-t^2)((s')^2-(t')^2) \\[0.5cm]\hfill - \sqrt{(4|y|^2s^2-(|y|^2+s^2-t^2)^2)(4|y|^2(s')^2-(|y|^2+(s')^2-(t')^2)^2)}\cos\theta \Big)^{\frac12} . \end{largearray} \end{equation} \endtheo \restorepagebreakaftereq \indent\underline{Proof}: Without loss of generality, we assume that $y=(0,0,a)$ with $a>0$. We first change to cylindrical coordinates: $(\rho,\theta,x_3;\rho',\theta',x_3')$: \begin{equation} \begin{largearray} \int_{\mathbb R^6}dxdx'\ f(|x|,|x-y|,|x'|,|x'-y|,|x-x'|) \\[0.5cm]\hfill = 2\pi\int_0^\infty d\rho\int_{-\infty}^\infty dx_3 \int_0^\infty d\rho'\int_{-\infty}^\infty dx_3'\int_0^{2\pi}d\theta' \ \rho\rho' f( s,t,s',t', |(\rho-\rho'\cos\theta',\rho'\sin\theta',x_3-x_3')| ) . \end{largearray} \end{equation} where \begin{equation} s:=|(\rho,0,x_3)| ,\quad t:=|(\rho,0,x_3-a)| ,\quad s':=|(\rho'\cos\theta',\rho'\sin\theta',x_3')|, t':=|(\rho'\cos\theta',\rho'\sin\theta',x_3'-a)| \end{equation} Next, we change variables to $(s,t,s',t',\theta')$. The Jacobian of this transformation is, by\-~(\ref{jacobian}), \begin{equation} \frac{tst's'}{\rho\rho' a^2} . \end{equation} Furthermore, by\-~(\ref{inverse}), \begin{equation} x_3-x_3'=\frac{s^2-t^2-(s')^2+(t')^2}{2a} \end{equation} and \begin{equation} \rho=\frac{\sqrt{4a^2s^2-(a^2+s^2-t^2)^2}}{2a} ,\quad \rho'=\frac{\sqrt{4a^2(s')^2-(a^2+(s')^2-(t')^2)^2}}{2a} \end{equation} so \begin{equation} \begin{largearray} |(\rho-\rho'\cos\theta',\rho'\sin\theta',x_3-x_3')| = \frac1{2a} \left( 4a^2s^2-(a^2+s^2-t^2)^2 + 4a^2(s')^2-(a^2+(s')^2-(t')^2)^2 \right.\\[0.5cm]\hfill\left. - 2\sqrt{(4a^2s^2-(a^2+s^2-t^2)^2)(4a^2(s')^2-(a^2+(s')^2-(t')^2)^2)}\cos\theta' + (s^2-t^2-(s')^2+(t')^2)^2 \right)^{\frac12} \end{largearray} \end{equation} and \begin{equation} \begin{largearray} |(\rho-\rho'\cos\theta',\rho'\sin\theta',x_3-x_3')| = \frac1{\sqrt 2a} \Big( a^2(s^2+t^2+(s')^2+(t')^2)-a^4-(s^2-t^2)((s')^2-(t')^2) \\[0.5cm]\hfill - \sqrt{(4a^2s^2-(a^2+s^2-t^2)^2)(4a^2(s')^2-(a^2+(s')^2-(t')^2)^2)}\cos\theta' \Big)^{\frac12} . \end{largearray} \end{equation} \qed \section{Hann windowing}\label{appendix:hann} \indent In this appendix, we discuss the use of Hann windows to compute Fourier transforms. Consider the Fourier transform \begin{equation} \hat f(k)=\int dx\ e^{ikx}f(x) . \end{equation} Evaluating this integral numerically can be tricky, especially at high $|k|$, because of the rapid oscillations at large $|x|$. A trick to palliate such a problem is to multiply $f$ by a {\it window function} $h_L$, which cuts off distances of order $L$. We then compute, instead of $\hat f$, \begin{equation} \tilde f(k)=\int dx\ e^{ikx}h_L(x)f(x) . \end{equation} We can then evaluate $\tilde f$ using standard numerical techniques, such as Gauss quadratures (see appendix\-~\ref{appGL}), without issues at large $|x|$. However, in doing so, we will make an error in the Fourier transform. 
To quantify this error, note that \begin{equation} \tilde f(k) =\hat h_L\hat\ast\hat f(k) \equiv\int\frac{dq}{(2\pi)^d}\ \hat h_L(q)\hat f(k-q) \end{equation} so if we choose $h_L$ in such a way that $\hat h_L$ is peaked around the origin, then $\tilde f$ will not differ too much from $\hat f$: \begin{equation} \hat f(k)-\tilde f(k) =((2\pi)^d\delta(k)-\hat h_L)\hat\ast\hat f(k) . \end{equation} \bigskip \indent The {\it Hann} window is defined as \begin{equation} h_L(x)=\cos^2({\textstyle\frac{\pi|x|}L})\mathds 1_{|x|<\frac L2} \end{equation} whose Fourier transform is, in $d=3$, \begin{equation} \hat h_L(k)= \frac{4\pi^3 L^2}{|k|}\frac{((|k|L)^3-4|k|L\pi^2)\cos(\frac{|k|L}2)-2(3(|k|L)^2-4\pi^2)\sin(\frac{|k|L}2)}{((|k|L)^3-4|k|L\pi^2)^2} \end{equation} which decays at large $|k|L$ as \begin{equation} \hat h_L(k)\sim\frac{4\pi^3}{|k|^4L}\cos({\textstyle\frac{|k|L}2}) . \end{equation} \vfill \eject \begin{thebibliography}{WWW99} \small \IfFileExists{bibliography/bibliography.tex}{\input bibliography/bibliography.tex}{} \end{thebibliography} \end{document}