author    Ian Jauslin <ian.jauslin@roma1.infn.it>  2016-05-24 13:39:23 +0000
committer Ian Jauslin <ian.jauslin@roma1.infn.it>  2016-05-24 13:39:23 +0000
commit    fa9b6f2b9bcb80778e63ef2aa4e17c7573de0015 (patch)
tree      92b740d0736c9ed6f5bda051c224c8bb7196bb03
Initial commit (HEAD, v1.0, master)
-rw-r--r--  INSTALL  36
-rw-r--r--  LGPL3  165
-rw-r--r--  LICENSE  202
-rw-r--r--  Makefile  113
-rw-r--r--  NOTICE  15
l---------  doc/hhtop-doc/hhtop-doc.pdf  1
l---------  doc/hhtop-doc/hhtop-doc.tex  1
-rw-r--r--  doc/hhtop-doc/src/BBlog.sty  46
-rw-r--r--  doc/hhtop-doc/src/bibliography.BBlog.tex  2
-rw-r--r--  doc/hhtop-doc/src/header.sty  13
-rw-r--r--  doc/hhtop-doc/src/hhtop-doc.pdf  bin 0 -> 286238 bytes
-rw-r--r--  doc/hhtop-doc/src/hhtop-doc.tex  990
-rw-r--r--  doc/hhtop-doc/src/iansecs.sty  614
-rw-r--r--  doc/hhtop-doc/src/kiss.cls  51
-rw-r--r--  doc/hhtop-doc/src/toolbox.sty  50
-rw-r--r--  libinum-1.0.1.tar.gz  bin 0 -> 100827 bytes
-rw-r--r--  man/hhtop.1  161
-rw-r--r--  src/definitions.h  23
-rw-r--r--  src/double_util.c  30
-rw-r--r--  src/double_util.h  43
-rw-r--r--  src/hh_integral.c  276
-rw-r--r--  src/hh_integral.h  63
-rw-r--r--  src/hh_integral_double.c  107
-rw-r--r--  src/hh_integral_double.h  54
-rw-r--r--  src/hh_root.c  144
-rw-r--r--  src/hh_root.h  39
-rw-r--r--  src/hh_root_double.c  75
-rw-r--r--  src/hh_root_double.h  35
-rw-r--r--  src/hhtop.c  597
-rw-r--r--  src/parser.c  274
-rw-r--r--  src/parser.h  37
-rw-r--r--  src/ss_integral.c  958
-rw-r--r--  src/ss_integral.h  200
-rw-r--r--  src/ss_integral_double.c  437
-rw-r--r--  src/ss_integral_double.h  170
-rw-r--r--  src/types.h  35
-rw-r--r--  src/zz_integral.c  237
-rw-r--r--  src/zz_integral.h  53
-rw-r--r--  src/zz_integral_double.c  89
-rw-r--r--  src/zz_integral_double.h  35
40 files changed, 6471 insertions, 0 deletions
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..a1e1d38
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,36 @@
+#######################################################################
+## ##
+## Compiling and installing hhtop. ##
+## ##
+#######################################################################
+
+* hhtop should work on any POSIX compliant system, such as GNU/Linux or OSX.
+
+* hhtop is linked against the GNU MPFR, GNU GMP, pthread and libinum libraries.
+
+* Compiling:
+ Run
+ make
+
+ This will compile hhtop as well as libinum.
+
+ The default paths can be modified by passing the appropriate arguments to
+ make, as specified in the following table
+ compiler : CC=/usr/bin/gcc
+ linker : LD=/usr/bin/gcc
+ archive : AR=/usr/bin/ar
+ include : INCLUDE=
+ lib paths : LIB=
+ optimize : OPT=-O3
+
+ For example,
+    make CC=/usr/local/bin/gcc INCLUDE=-I/usr/local/include LIB=-L/usr/local/lib
+
+ In addition, libinum can be linked dynamically or statically, which is
+ controlled by the STATIC option of the makefile.
+ If STATIC=0 then link dynamically
+ If STATIC=2 then link statically
+ If STATIC=1 (default) then link libinum statically but other libraries dynamically.
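+
+  For example, to link everything statically, the STATIC variable can be set
+  on the command line:
+    make STATIC=2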
+
+ The compiled binary is placed in the ./build directory.
+
diff --git a/LGPL3 b/LGPL3
new file mode 100644
index 0000000..65c5ca8
--- /dev/null
+++ b/LGPL3
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..9fe6bd8
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,113 @@
+## Copyright 2016 Ian Jauslin
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+
+# whether to link dynamically
+# if static=0 then link dynamically
+# if static=2 then link statically
+# if static=1 then link libinum statically but other libraries dynamically
+STATIC=1
+
+# whether to compile libinum
+LIBINUM_COMPILE=1
+
+VERSION=1.0
+LIBINUM_VERSION=1.0.1
+
+# products of the compilation
+PROJECT_BINS= hhtop
+
+# debug and optimization flags
+#DB= -ggdb
+OPT= -O3
+
+# warning flags
+WARNINGS= -Wall -Wextra -Wno-strict-overflow -std=c99 -pedantic
+
+# compiler
+CC=/usr/bin/gcc
+LD=$(CC)
+AR=/usr/bin/ar
+
+# directories
+INCLUDE =
+LIB =
+
+# flags
+# do not override CLI flags (so they can be pushed to libinum)
+LDFLAGS_HH =$(LDFLAGS) $(LIB)
+CFLAGS_HH =$(CFLAGS) $(INCLUDE) $(DB) $(OPT) $(WARNINGS)
+
+# build directories
+BUILDDIR=./build
+SRCDIR=./src
+OBJDIR=./objs
+
+# objects
+OBJS = $(addprefix $(OBJDIR)/, hh_integral.o hh_root.o hhtop.o parser.o ss_integral.o zz_integral.o hh_integral_double.o hh_root_double.o ss_integral_double.o zz_integral_double.o double_util.o)
+
+# flags which depend on whether to link statically or dynamically
+# lib flag for libinum
+LIBINUM_FLAG=
+# additional library required for static linking
+XTRA_LIBS=
+
+ifeq ($(STATIC),0)
+ XTRA_LIBS=-lm -lmpfr -lgmp -lpthread
+ LIBINUM_FLAG=-linum
+else ifeq ($(STATIC),1)
+ # libinum is linked against libm, libmpfr, libgmp and libpthread
+ XTRA_LIBS=-lm -lmpfr -lgmp -lpthread
+ # link binaries using the static library
+ LIBINUM_FLAG=-l:libinum.a
+else ifeq ($(STATIC),2)
+ # libinum is linked against libm, libmpfr, libgmp and libpthread
+ XTRA_LIBS=-lm -lmpfr -lgmp -lpthread
+ LIBINUM_FLAG=-linum
+ # link binaries statically
+ LDFLAGS_HH += -static
+endif
+
+
+LIBINUM_COMPILE_COMMAND=
+ifeq ($(LIBINUM_COMPILE),1)
+ LIBINUM_COMPILE_COMMAND=libinum
+ LDFLAGS_HH += -L./libinum-$(LIBINUM_VERSION)/build
+ CFLAGS_HH += -I./libinum-$(LIBINUM_VERSION)/include
+endif
+
+
+
+all: $(LIBINUM_COMPILE_COMMAND) init hhtop
+
+# compile libinum
+libinum:
+ tar xzf ./libinum-$(LIBINUM_VERSION).tar.gz
+ make STATIC=0 CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" CC="$(CC)" LD="$(LD)" AR="$(AR)" OPT="$(OPT)" DB="$(DB)" INCLUDE="$(INCLUDE)" LIB="$(LIB)" -C ./libinum-$(LIBINUM_VERSION)
+
+# create dirs
+init:
+ @[ -d $(OBJDIR) ] || /bin/mkdir $(OBJDIR)
+ @[ -d $(BUILDDIR) ] || /bin/mkdir $(BUILDDIR)
+
+
+hhtop: $(OBJS)
+ $(LD) $(LDFLAGS_HH) -o $(BUILDDIR)/$@ $^ $(LIBINUM_FLAG) $(XTRA_LIBS)
+
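+# note: for a target like $(OBJDIR)/foo.o, make strips the directory when
+# matching '%.o' and prepends it to the prerequisite, so the source is found
+# at $(OBJDIR)/../$(SRCDIR)/foo.c, i.e. $(SRCDIR)/foo.c once $(OBJDIR) exists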
+%.o : ../$(SRCDIR)/%.c
+ $(CC) -c $(CFLAGS_HH) $< -o $@
+
+clean:
+ @rm -rf $(OBJDIR)
+ @rm -rf $(BUILDDIR)
+ @rm -rf libinum-$(LIBINUM_VERSION)
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..f17f6f5
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,15 @@
+hhtop
+Copyright 2016 Ian Jauslin
+
+Numerical values may be represented as multi-precision floats using
+the GNU MPFR library, which is licensed under the GNU Lesser General
+Public License (LGPL) version 3 (see LGPL3 for a copy of the license).
+See
+ http://www.mpfr.org/
+for details.
+
+The GNU MPFR library is based on the GNU GMP library, which is licensed
+under the GNU Lesser General Public License (LGPL) version 3 (see LGPL3
+for a copy of the license). See
+ http://www.gmplib.org/
+for details.
diff --git a/doc/hhtop-doc/hhtop-doc.pdf b/doc/hhtop-doc/hhtop-doc.pdf
new file mode 120000
index 0000000..128f309
--- /dev/null
+++ b/doc/hhtop-doc/hhtop-doc.pdf
@@ -0,0 +1 @@
+./src/hhtop-doc.pdf \ No newline at end of file
diff --git a/doc/hhtop-doc/hhtop-doc.tex b/doc/hhtop-doc/hhtop-doc.tex
new file mode 120000
index 0000000..e22abda
--- /dev/null
+++ b/doc/hhtop-doc/hhtop-doc.tex
@@ -0,0 +1 @@
+./src/hhtop-doc.tex \ No newline at end of file
diff --git a/doc/hhtop-doc/src/BBlog.sty b/doc/hhtop-doc/src/BBlog.sty
new file mode 100644
index 0000000..e5e6703
--- /dev/null
+++ b/doc/hhtop-doc/src/BBlog.sty
@@ -0,0 +1,46 @@
+%%
+%% BBlog bibliography related commands
+%%
+
+%% length used to display the bibliography
+\newlength{\rw}
+\setlength{\rw}{1.75cm}
+
+%% read header
+\IfFileExists{header.BBlog.tex}{\input{header.BBlog}}{}
+
+%% cite a reference
+\def\cite#1{%
+\ref{cite#1}%
+%% add entry to citelist after checking it has not already been added
+\ifcsname if#1cited\endcsname%
+\expandafter\if\csname if#1cited\endcsname%
+\else%
+\csname if#1citedtrue\endcsname%
+\immediate\write\@auxout{\noexpand\BBlogcite{#1}}%
+\fi%
+\else%
+\expandafter\newif\csname if#1cited\endcsname%
+\csname if#1citedtrue\endcsname%
+\immediate\write\@auxout{\noexpand\BBlogcite{#1}}%
+\fi%
+}
+%% an empty definition for the aux file
+\def\BBlogcite#1{}
+
+%% an entry
+\long\def\BBlogentry#1#2#3{
+ \hrefanchor
+ \outdef{label@cite#1}{#2}
+ \parbox[t]{\rw}{[\cite{#1}]}\parbox[t]{\colw}{#3}\par
+ \bigskip
+}
+
+%% display the bibliography
+\long\def\BBlography{
+ \newlength{\colw}
+ \setlength{\colw}{\textwidth}
+ \addtolength{\colw}{-\rw}
+ \IfFileExists{bibliography.BBlog.tex}{
+ \input{bibliography.BBlog}}{{\tt error: missing BBlog bibliography file}}
+}
diff --git a/doc/hhtop-doc/src/bibliography.BBlog.tex b/doc/hhtop-doc/src/bibliography.BBlog.tex
new file mode 100644
index 0000000..1291d7c
--- /dev/null
+++ b/doc/hhtop-doc/src/bibliography.BBlog.tex
@@ -0,0 +1,2 @@
+\BBlogentry{CR72}{CP72}{A.R. Curtis, P. Rabinowitz - {\it On the Gaussian integration of Chebyshev polynomials}, Mathematics of Computation, Vol.~26, n.~117, p.~207-211, 1972, doi:{\tt\color{blue}\href{http://dx.doi.org/10.1090/S0025-5718-1972-0298934-5}{10.1090/S0025-5718-1972-0298934-5}}.}
+\BBlogentry{Ta87}{Ta87}{Y. Taguchi - {\it Fourier coefficients of periodic functions of Gevrey classes and ultradistributions}, Yokohama Mathematical Journal, Vol.~35, p.~51-60, 1987.}
diff --git a/doc/hhtop-doc/src/header.sty b/doc/hhtop-doc/src/header.sty
new file mode 100644
index 0000000..4ebdea7
--- /dev/null
+++ b/doc/hhtop-doc/src/header.sty
@@ -0,0 +1,13 @@
+%%
+%% Load packages
+%%
+
+\usepackage{color}
+\usepackage[hidelinks]{hyperref}
+\usepackage{graphicx}
+\usepackage{amsfonts}
+\usepackage{amssymb}
+\usepackage{array}
+\usepackage{etoolbox}
+\usepackage{dsfont}
+
diff --git a/doc/hhtop-doc/src/hhtop-doc.pdf b/doc/hhtop-doc/src/hhtop-doc.pdf
new file mode 100644
index 0000000..fadb364
--- /dev/null
+++ b/doc/hhtop-doc/src/hhtop-doc.pdf
Binary files differ
diff --git a/doc/hhtop-doc/src/hhtop-doc.tex b/doc/hhtop-doc/src/hhtop-doc.tex
new file mode 100644
index 0000000..0b423fd
--- /dev/null
+++ b/doc/hhtop-doc/src/hhtop-doc.tex
@@ -0,0 +1,990 @@
+\documentclass{kiss}
+% load packages
+\usepackage{header}
+% BBlog bibliography commands
+\usepackage{BBlog}
+% miscellaneous commands
+\usepackage{toolbox}
+% main style file
+\usepackage{iansecs}
+
+\begin{document}
+\hfil{\bf\Large hhtop}\par
+\bigskip
+\hfil{\bf v1.0}\par
+\hugeskip
+
+\indent {\tt hhtop} is a tool to compute, numerically, the following quantities for the Haldane-Hubbard model:
+\begin{itemize}
+\item the one-loop renormalization of the topological phase diagram,
+\item the difference of the $(a,a)$ and $(b,b)$ wave-function renormalizations, at second order,
+\end{itemize}
+\hugeskip
+
+\tableofcontents
+\vfill\eject
+
+\setcounter{page}1
+\pagestyle{plain}
+
+\section{Phase diagram}
+\indent In this section we discuss the computation of the renormalization of the phase diagram.
+\subseqskip
+
+\subsection{Description of the computation}
+\subsubsection{Definition of the problem}
+\indent We wish to solve the following equation:
+\begin{equation}
+\tilde M_{\omega,t_1,t_2,\lambda}(W,\phi):=W+3\sqrt3\omega t_2\sin\phi+\frac{3\sqrt3}{16\pi^3}\lambda\int_{\mathcal B}dk\int_{-\infty}^\infty dk_0\ \frac{m_{t_2,W,\phi}(k)}{D_{t_1,t_2,W,\phi}(k_0,k)}=0
+\label{eqrenmass}\end{equation}
+for $W\in\mathbb{R}$, $\phi\in(-\pi,\pi]$, where the parameters $\omega=\pm1$, $t_2\geqslant0$, $t_1\geqslant3t_2$ and $\lambda\in\mathbb{R}$ are fixed. We now define the quantities appearing in~\-(\ref{eqrenmass}):
+\begin{equation}
+\mathcal B:=\left\{\left(\frac{2\pi}3+k'_1,k_2\right)\in\mathbb{R}^2\quad|\quad |k_2|<\frac{2\pi}{\sqrt3}-\sqrt3|k'_1|\right\},
+\label{eqbrillouin}\end{equation}
+\begin{equation}
+\alpha_1(k_1,k_2):=\frac32+
+\cos(\sqrt3k_2)+2\cos\left(\frac32k_1\right)\cos\left(\frac{\sqrt3}2k_2\right),
+\label{eqalpha1}\end{equation}
+
+\begin{equation}
+\alpha_2(k_1,k_2):=
+-\sin(\sqrt3k_2)+2\cos\left(\frac32k_1\right)\sin\left(\frac{\sqrt3}2k_2\right),
+\label{eqalpha2}\end{equation}
+
+\begin{equation}
+\Omega(k_1,k_2):=1+2e^{-\frac32ik_1}\cos\left(\frac{\sqrt3}2k_2\right),
+\label{eqOmega}\end{equation}
+
+\begin{equation}
+m_{t_2,W,\phi}(k):=W-2t_2\sin\phi\alpha_2(k)
+\label{eqm}\end{equation}
+
+\begin{equation}
+\zeta_{t_2,\phi}(k):=2t_2\cos\phi\alpha_1(k),\quad
+\xi_{t_1,t_2,W,\phi}(k):=\sqrt{m_{t_2,W,\phi}^2(k)+t_1^2|\Omega(k)|^2}
+\label{eqOmega}\end{equation}
+
+\begin{equation}
+D_{t_1,t_2,W,\phi}(k_0,k):=(ik_0+\zeta_{t_2,\phi}(k))^2-\xi_{t_1,t_2,W,\phi}^2(k).
+\label{eqDdef}\end{equation}
+\bigskip
+
+\subsubsection{Integration of the Matsubara momentum}
+\indent We first integrate out $k_0$ analytically. We use the following identity: for $x\in\mathbb{R}$ and $y>0$,
+\begin{equation}
+\int_{-\infty}^\infty dk_0\ \frac1{(ik_0+x)^2-y^2}=-\chi(x^2<y^2)\frac\pi y
+\label{eqintk0}\end{equation}
+in which $\chi(x^2<y^2)\in\{1,0\}$ is equal to 1 if and only if $x^2<y^2$. Furthermore (see appendix~\-(\ref{appintk0})), if
+\begin{equation}
+t_1\geqslant3t_2
+\label{eqcondt}\end{equation}
+then
+\begin{equation}
+\zeta_{t_2,\phi}^2(k)\leqslant\xi_{t_1,t_2,W,\phi}^2(k)
+\label{eqineqab}\end{equation}
+for all $k\in\mathcal B$, $\phi\in(-\pi,\pi]$ and $W\in\mathbb{R}$, which implies that
+\begin{equation}
+\tilde M_{\omega,t_1,t_2,\lambda}(W,\phi)=W+3\sqrt3\omega t_2\sin\phi-\frac{3\sqrt3}{16\pi^2}\lambda\int_{\mathcal B}dk\ \frac{m_{t_2,W,\phi}(k)}{\xi_{t_1,t_2,W,\phi}(k)}.
+\label{eqrenmassintk0}\end{equation}
+\bigskip
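+\indent For completeness, a quick way to check~\-(\ref{eqintk0}) is via the residue theorem: the integrand has simple poles at $k_0=i(x-y)$ and $k_0=i(x+y)$, with residues $\frac1{2iy}$ and $-\frac1{2iy}$. If $x^2<y^2$, closing the integration path around the upper half-plane encloses only the pole at $i(x+y)$, so the integral is $2\pi i\cdot(-\frac1{2iy})=-\frac\pi y$; if $x^2>y^2$, both poles lie in the same half-plane and, when that is the upper one, their residues cancel, so the integral vanishes.
+\bigskip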
+
+\subsubsection{Reduction by symmetries}
+\indent By using some symmetries of the integrand of~\-(\ref{eqrenmassintk0}), we can reduce the integration region. Indeed, $m_{t_2,W,\phi}(k)$ and $\xi_{t_1,t_2,W,\phi}(k)$ are symmetric under $k_1\mapsto-k_1$ and under rotations of angle $\frac{2\pi}3$. In addition, $m_{t_2,W,\phi}(k_1,k_2)=m_{t_2,W,-\phi}(k_1,-k_2)$ and $\xi_{t_1,t_2,W,\phi}(k_1,k_2)=\xi_{t_1,t_2,W,-\phi}(k_1,-k_2)$. We can therefore rewrite
+\begin{equation}
+\tilde M_{\omega, t_1, t_2,\lambda}(W,\phi)=W+3\sqrt3\omega t_2\sin\phi-\lambda(I_{t_1,t_2}(W,\phi)+I_{t_1,t_2}(W,-\phi))
+\label{eqrenmassintk0half}\end{equation}
+where
+\begin{equation}
+I_{t_1,t_2}(W,\phi):=
+\frac{9\sqrt3}{8\pi^2}\int_{\mathcal B_+}dk\ \frac{m_{t_2,W,\phi}(k)}{\xi_{t_1,t_2,W,\phi}(k)}.
+\label{eqI}\end{equation}
+with
+\begin{equation}
+\mathcal B_+:=\left\{(k_1,k_2)\in\mathcal B\ |\ k_2>0,\ k_1<\frac23\pi,\ k_2<\frac1{\sqrt3}k_1\right\}.
+\label{eqB0}\end{equation}
+\bigskip
+
+\subsubsection{Polar coordinates}
+\indent Let
+\begin{equation}
+p_F^\pm:=\left(\frac{2\pi}3,\ \pm\frac{2\pi}{3\sqrt3}\right).
+\label{eqfermi}\end{equation}
+We note that $\xi_{t_1,t_2,W,\phi}$ has roots if and only if $m_{t_2,W,\phi}(p_F^+)=0$ or $m_{t_2,W,\phi}(p_F^-)=0$, located at $p_F^+$ in the former case and at $p_F^-$ in the latter. If $m_{t_2,W,\phi}$ vanishes at both $p_F^\pm$, which can only occur if $W=0$ and $\phi=0,\pi$, then $\xi_{t_1,t_2,W,\phi}$ vanishes at both $p_F^\pm$. Nevertheless, the integrand in~\-(\ref{eqI}) is not singular, since $\xi_{t_1,t_2,W,\phi}(k'+p_F^+)\sim t_1|k'|$, and the integration over $k$ is 2-dimensional. In order to make this lack of singularity apparent, it is convenient to switch to polar coordinates around $p_F^+$: $(k_1,k_2)=p_F^++\frac{2\pi}{3\sqrt3}\rho(\cos\theta,\sin\theta)$:
+\begin{equation}
+I_{t_1,t_2}(W,\phi)=\frac{\sqrt3}{6}\int_{-\frac\pi6}^{\frac\pi6} d\theta\int_0^{R(\theta)} d\rho\ \rho\frac{\bar m_{t_2,W,\phi}(\rho,\theta)}{\bar\xi_{t_1,t_2,W,\phi}(\rho,\theta)},
+\label{eqI}\end{equation}
+in which
+\begin{equation}\begin{array}c
+\bar m_{t_2,W,\phi}(\rho,\theta):=W-2t_2\sin\phi\bar\alpha_2(\rho,\theta),\quad
+\bar\xi_{t_1,t_2,W,\phi}(\rho,\theta):=\sqrt{\bar m_{t_2,W,\phi}^2(\rho,\theta)+t_1^2|\bar\Omega(\rho,\theta)|^2}\\[0.5cm]
+\bar\alpha_2(\rho,\theta):=-2\sin\left(\frac\pi3(1+\rho\sin\theta)\right)\left(\cos\left(\frac\pi3(1+\rho\sin\theta)\right)+\cos\left(\frac\pi{\sqrt3}\rho\cos\theta\right)\right)\\[0.5cm]
+|\bar\Omega(\rho,\theta)|^2=1+4\cos\left(\frac\pi3(1+\rho\sin\theta)\right)\left(\cos\left(\frac\pi3(1+\rho\sin\theta)\right)-\cos\left(\frac\pi{\sqrt3}\rho\cos\theta\right)\right)
+\end{array}\label{eqxipolar}\end{equation}
+and
+\begin{equation}
+R(\theta):=\frac1{\cos(\theta-\frac\pi6)}.
+\label{eqpolarbound}\end{equation}
+
+\vskip10pt
+\subsection{Strategy of the numerical computation}
+\subsubsection{Newton scheme}\label{secnewtonphase}
+\indent In order to solve~\-(\ref{eqrenmass}), we will use a Newton scheme (see section~\-\ref{secnewton}). More precisely, we fix $\phi$ and compute $W(\phi)$ as the limit of
+\begin{equation}\begin{array}{>\displaystyle c}
+W_0(\phi):=-\omega 3\sqrt3t_2\sin\phi\\[0.3cm]
+W_{n+1}(\phi):=W_n(\phi)-\frac{\tilde M_{\omega,t_1,t_2,\lambda}(W_n(\phi),\phi)}{\partial_W\tilde M_{\omega,t_1,t_2,\lambda}(W_n(\phi),\phi)}.
+\end{array}\label{eqnewton}\end{equation}
+The first two derivatives of $\tilde M$ are
+\begin{equation}
+\partial_W\tilde M_{\omega,t_1,t_2,\lambda}(W,\phi)=1-\lambda(\partial_WI_{t_1,t_2}(W,\phi)+\partial_WI_{t_1,t_2}(W,-\phi))
+\label{eqdM}\end{equation}
+with
+\begin{equation}
+\partial_WI_{t_1,t_2}(W,\phi)=\frac{\sqrt3}{6}\int_{-\frac\pi6}^{\frac\pi6}d\theta\int_0^{R(\theta)}d\rho\ \frac{\rho}{\bar\xi_{t_1,t_2,W,\phi}(\rho,\theta)}\left(1-\frac{\bar m^2_{t_2,W,\phi}(\rho,\theta)}{\bar\xi_{t_1,t_2,W,\phi}^2(\rho,\theta)}\right)
+\label{eqdI}\end{equation}
+and
+\begin{equation}
+\partial_W^2\tilde M_{\omega,t_1,t_2,\lambda}(W,\phi)=-\lambda(\partial_W^2I_{t_1,t_2}(W,\phi)+\partial_W^2I_{t_1,t_2}(W,-\phi))
+\label{eqddM}\end{equation}
+with
+\begin{equation}
+\partial_W^2I_{t_1,t_2}(W,\phi)=-\frac{\sqrt3}{2}\int_{-\frac\pi6}^{\frac\pi6}d\theta\int_0^{R(\theta)}d\rho\ \frac{\rho\bar m_{t_2,W,\phi}(\rho,\theta)}{\bar\xi^3_{t_1,t_2,W,\phi}(\rho,\theta)}\left(1-\frac{\bar m^2_{t_2,W,\phi}(\rho,\theta)}{\bar\xi_{t_1,t_2,W,\phi}^2(\rho,\theta)}\right).
+\label{eqddI}\end{equation}
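+\medskip
+\indent For illustration, here is a minimal sketch of the iteration~\-(\ref{eqnewton}) in plain C, using {\tt long double}s; the names {\tt M}, {\tt dM} and {\tt newton\_W} are illustrative stand-ins for numerical evaluations of $\tilde M_{\omega,t_1,t_2,\lambda}$ and $\partial_W\tilde M_{\omega,t_1,t_2,\lambda}$, and do not refer to the {\tt hhtop} sources (which carry out the same iteration with multi-precision floats):\par
+\begin{verbatim}
+#include <math.h>
+
+/* Newton iteration for W at fixed phi: iterate W <- W - M(W,phi)/dM(W,phi)
+   until the step is smaller than tol or maxiter steps have been taken */
+long double newton_W(long double W0, long double phi,
+                     long double tol, unsigned long maxiter,
+                     long double (*M)(long double, long double),
+                     long double (*dM)(long double, long double))
+{
+  long double W = W0;
+  for (unsigned long n = 0; n < maxiter; n++) {
+    long double step = M(W, phi) / dM(W, phi);
+    W -= step;
+    if (fabsl(step) < tol) break;
+  }
+  return W;
+}
+\end{verbatim}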
+
+\subsubsection{Integration}\label{secintegrationphase}
+\indent In order to compute $W_n(\phi)$, we have to evaluate $I_{t_1,t_2}(W,\phi)$ and $\partial_W I_{t_1,t_2}(W,\phi)$. The integrations are carried out using Gauss-Legendre quadratures (see section~\-\ref{secintegrationgl}). In order to use this method to compute the double integral over $\theta$ and $\rho$, we rewrite
+\begin{equation}
+\int d\theta\int d\rho\ F(\theta,\rho)=\int d\theta\ G(\theta),\quad
+G(\theta):=\int d\rho\ F(\theta,\rho)
+\label{eqmultidimint}\end{equation}
+for the appropriate $F$.
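+\medskip
+\indent Schematically, in plain C with {\tt long double}s (illustrative names only, not the {\tt hhtop} sources; the nodes $x_i$ and weights $w_i$ on $[-1,1]$ are assumed to have been precomputed from the roots of the $N$-th Legendre polynomial as in section~\-\ref{secintegrationgl}):\par
+\begin{verbatim}
+#include <stddef.h>
+
+typedef long double (*integrand2d)(long double theta, long double rho);
+
+/* inner quadrature: G(theta) = int_0^R drho F(theta,rho), mapped to [-1,1] */
+static long double gl_inner(integrand2d F, long double theta, long double R,
+                            const long double *x, const long double *w,
+                            size_t N)
+{
+  long double s = 0.0L;
+  for (size_t i = 0; i < N; i++)
+    s += w[i] * F(theta, 0.5L * R * (x[i] + 1.0L));
+  return 0.5L * R * s;
+}
+
+/* outer quadrature: int_{ta}^{tb} dtheta G(theta), as in (eqmultidimint) */
+long double gl_2d(integrand2d F, long double (*R)(long double),
+                  long double ta, long double tb,
+                  const long double *x, const long double *w, size_t N)
+{
+  long double s = 0.0L;
+  for (size_t i = 0; i < N; i++) {
+    long double theta = 0.5L * (ta + tb) + 0.5L * (tb - ta) * x[i];
+    s += w[i] * gl_inner(F, theta, R(theta), x, w, N);
+  }
+  return 0.5L * (tb - ta) * s;
+}
+\end{verbatim}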
+
+\subsection{Usage and examples}
+\indent We will now describe some basic usage cases of {\tt hhtop phase}. For a full description of the options of {\tt hhtop}, see the {\tt man} page.
+\subseqskip
+
+\subsubsection{Basic usage}
+\indent The values of the parameters can be set via the {\tt -p} flag. Here is an example:\par
+\medskip
+\indent{\tt hhtop phase -p "omega=1;t1=1.;t2=.1;lambda=.01;sinphi=1;"}\par
+\medskip
+Note that $\phi$ can be set instead of $\sin\phi$, though the result of the computation only depends on $\sin\phi$. The parameters that are not specified by the {\tt-p} flag are set to their default value: $\omega=1$, $t_1=1$, $t_2=0.1$, $\lambda=0.01$, $\sin\phi=1$.
+
+\subsubsection{Precision of the computation}
+\indent The precision of the computation can be controlled by three parameters: the precision of the numbers manipulated by {\tt hhtop} (set via the {\tt -P} flag, see section~\-\ref{secprecision}), the order of the integration, and the tolerance of the Newton scheme.
+\bigskip
+
+\point{\bf Order of the integration.} The order of the integration, that is, the value of the number $N$ introduced in section~\-\ref{secintegrationgl}, can be specified via the {\tt -O} flag. Its default value is 10. The difference between the values of the integral computed at different orders is a good measure of the numerical error. Example:\par
+\medskip
+\indent{\tt hhtop phase -O 30}
+\bigskip
+
+\point{\bf Tolerance of the Newton scheme.} The Newton iteration halts when the difference $|x_{n+1}-x_n|$ (see section~\-\ref{secnewton}) is smaller than a number, called the {\it tolerance} of the algorithm, or when the total number of steps exceeds a given threshold. The tolerance can be set via the {\tt -t} flag, and the maximal number of steps via the {\tt -N} flag. Their default values are $10^{-11}$ and $1000000$. The tolerance and maximal number of steps are also used in the computation of the roots $\{x_1,\cdots,x_N\}$ of the $N$-th Legendre polynomial which are used for the numerical integration (see section~\ref{secintegrationgl}). If the tolerance is too small for the precision of the multi-precision floats, or if the maximal number of steps is too small, then the iteration may not converge. Example:\par
+\medskip
+\indent{\tt hhtop phase -t 1e-30 -N 2000000 -O 100 -P 256}
+
+\subsubsection{Using double precision floats instead of multi-precision floats}
+\indent Using the {\tt -D} command-line flag, {\tt hhtop} can be instructed to use {\tt long double}s instead of MPFR floats. While one then loses the ability to adjust the precision, the computation time can be drastically reduced. Example:\par
+\medskip
+\indent{\tt hhtop -D phase -p "sinphi=1.;"}\par
+\bigskip
+The precision of {\tt long double}s is compiler-dependent; see section~\-\ref{secprecision}.
+
+\vfill\eject
+
+\section{Wave function renormalization}\label{secz1z2}
+\indent In this section we discuss the computation of the difference and the sum of the $(a,a)$ and $(b,b)$ wave-function renormalizations.
+\bigskip
+
+{\bf Warning: This computation is only accurate if $\phi$ is not too close to $0$.}
+\subseqskip
+
+\subsection{Description of the computation}
+\subsubsection{Definition of the problem}
+\indent We wish to compute the following quantities:
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ z_1-z_2=i\frac{27}{128\pi^4}(\partial_{k_0}S_+|_{k_0=0}-\partial_{k_0}S_-|_{k_0=0})
+ \\[0.5cm]
+ z_1+z_2=i\frac{27}{128\pi^4}(\partial_{k_0}S_+|_{k_0=0}+\partial_{k_0}S_-|_{k_0=0})
+ \end{array}
+ \label{eqz1z2}
+\end{equation}
+where
+\begin{equation}
+S_\pm(k_0)=\int_{\mathcal B} dpdq\ \int_{-\infty}^\infty \frac{dp_0 dq_0}{2\pi^2}\ \frac{(-ip_0-\zeta_p\mp m_p)(-iq_0-\zeta_q\mp m_q)(-i(p_0+q_0-k_0)-\zeta_F\mp m_F)}{((ip_0+\zeta_p)^2-\xi_p^2)((iq_0+\zeta_q)^2-\xi_q^2)((i(p_0+q_0-k_0)+\zeta_F)^2-\xi_F^2)}
+\label{eqS}\end{equation}
+in which
+\begin{equation}
+\mathcal B:=\left\{\left(\frac{2\pi}3+k'_1,k_2\right)\in\mathbb{R}^2\quad|\quad |k_2|<\frac{2\pi}{\sqrt3}-\sqrt3|k'_1|\right\},
+\label{eqbrillouin}\end{equation}
+\begin{equation}
+\alpha_1(k_1,k_2):=
+2\cos\left(\frac{\sqrt3}2k_2\right)\left(\cos\left(\frac32k_1\right)+\cos\left(\frac{\sqrt3}2k_2\right)\right)+\frac12,
+\label{eqalpha1}\end{equation}
+\begin{equation}
+\alpha_2(k_1,k_2):=
+2\sin\left(\frac{\sqrt3}2k_2\right)\left(\cos\left(\frac32k_1\right)-\cos\left(\frac{\sqrt3}2k_2\right)\right),
+\label{eqalpha2}\end{equation}
+\begin{equation}
+m(k):=W-2t_2\sin\phi\alpha_2(k)
+\label{eqm}\end{equation}
+\begin{equation}
+\zeta(k):=2t_2\cos\phi\alpha_1(k),\quad
+\xi(k):=\sqrt{m^2(k)+2t_1^2\alpha_1(k)}
+\label{eqOmega}\end{equation}
+and
+\begin{equation}\begin{array}c
+\zeta_p\equiv\zeta(p),\quad
+\zeta_q\equiv\zeta(q),\quad
+\zeta_F\equiv\zeta(p+q-p_F^\omega),\quad
+\xi_p\equiv\xi(p),\quad
+\xi_q\equiv\xi(q),\quad
+\xi_F\equiv\xi(p+q-p_F^\omega),\\[0.3cm]
+m_p\equiv m(p),\quad
+m_q\equiv m(q),\quad
+m_F\equiv m(p+q-p_F^\omega)
+\end{array}\label{eqxip}\end{equation}
+with $\omega\in\{-1,+1\}$ and
+\begin{equation}
+ p_F^\pm:=\left(\frac{2\pi}3,\pm\frac{2\pi}{3\sqrt3}\right).
+\end{equation}
+
+\subsubsection{Integration of the Matsubara momentum}
+\indent We first integrate out $p_0$ and $q_0$ analytically.
+We recall (see appendix~\-(\ref{appintk0})) that, provided $t_1\geqslant3t_2$,
+\begin{equation}
+\zeta^2(k)\leqslant\xi^2(k).
+\label{eqineqab2}\end{equation}
+By closing the integration path over $p_0$ around the positive-imaginary half-plane (which, by~\-(\ref{eqineqab2}), contains two poles), and using the residue theorem, we find that
+\begin{equation}\begin{largearray}
+S_\pm(k_0)=-\int_{\mathcal B} dpdq\int_{-\infty}^\infty \frac{dq_0}{2\pi}\ \left(\frac{(\xi_p\mp m_p)(-iq_0-\zeta_q\mp m_q)(-i(q_0-k_0)+\zeta_p-\zeta_F+\xi_p\mp m_F)}{\xi_p((iq_0+\zeta_q)^2-\xi_q^2)((i(q_0-k_0)-\zeta_p+\zeta_F-\xi_p)^2-\xi_F^2)}\right.\\[0.5cm]
+\hfill+\left.\frac{(i(q_0-k_0)-\zeta_p+\zeta_F+\xi_F\mp m_p)(-iq_0-\zeta_q\mp m_q)(\xi_F\mp m_F)}{((-i(q_0-k_0)+\zeta_p-\zeta_F-\xi_F)^2-\xi_p^2)((iq_0+\zeta_q)^2-\xi_q^2)\xi_F}\right).
+\end{largearray}\label{eqintmatsubara1}\end{equation}
+We then close the integration path over $q_0$ around the positive-imaginary half-plane for the first term, and the negative imaginary half-plane for the second, and find
+\begin{equation}\begin{array}{>\displaystyle r@{\ }>\displaystyle l}
+S_\pm(k_0)=\frac12\int_{\mathcal B} dpdq&\left(
+\frac{(\xi_p\mp m_p)(\xi_q\mp m_q)(ik_0+Z+\xi_p+\xi_q\mp m_F)}{\xi_p\xi_q((ik_0+Z+\xi_p+\xi_q)^2-\xi_F^2)}
+\right.\\[0.5cm]&+\left.
+\frac{(\xi_q\pm m_q)(\xi_F\mp m_F)(ik_0+Z-\xi_q-\xi_F\pm m_p)}{\xi_q\xi_F((ik_0+Z-\xi_q-\xi_F)^2-\xi_p^2)}
+\right.\\[0.5cm]&-\left.
+\frac{(\xi_p\mp m_p)(\xi_F\mp m_F)(ik_0+Z+\xi_p-\xi_F\pm m_q)}{\xi_p\xi_F((ik_0+Z+\xi_p-\xi_F)^2-\xi_q^2)}
+\right)
+\end{array}\label{eqSnok0}\end{equation}
+with
+\begin{equation}
+Z:=\zeta_p+\zeta_q-\zeta_F.
+\label{eqZ}\end{equation}
+The sum of the three terms on the right side of~\-(\ref{eqSnok0}) yields
+\begin{equation}\begin{largearray}
+S_\pm(k_0)=\frac12\int_{\mathcal B} dpdq\ \left(\frac{(\xi_p\xi_q\xi_F-(\xi_pm_q+\xi_qm_p)m_F+m_pm_q\xi_F)(ik_0+Z)}{\xi_p\xi_q\xi_F((ik_0+Z)^2-(\xi_p+\xi_q+\xi_F)^2)}\right.\\[0.5cm]
+\hfill\left.\pm\frac{(\xi_p+\xi_q+\xi_F)(m_p\xi_q\xi_F+\xi_pm_q\xi_F-\xi_p\xi_qm_F-m_pm_qm_F)}{\xi_p\xi_q\xi_F((ik_0+Z)^2-(\xi_p+\xi_q+\xi_F)^2)}\right).
+\end{largearray}\label{eqSnok0simp}\end{equation}
+Therefore,
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ z_1-z_2
+ =\frac{27}{64\pi^4}\int_{\mathcal B} dpdq\ \left(\frac{(\xi_p+\xi_q+\xi_F)(\frac{m_p}{\xi_p}+\frac{m_q}{\xi_q}-\frac{m_F}{\xi_F}-\frac{m_pm_qm_F}{\xi_p\xi_q\xi_F})Z}{(Z^2-(\xi_p+\xi_q+\xi_F)^2)^2}\right).
+ \\[0.5cm]
+ z_1+z_2
+ =\frac{27}{128\pi^4}\int_{\mathcal B} dpdq\ \left(\frac{\left(1-\frac{m_pm_F}{\xi_p\xi_F}-\frac{m_qm_F}{\xi_q\xi_F}+\frac{m_pm_q}{\xi_p\xi_q}\right)(Z^2+(\xi_p+\xi_q+\xi_F)^2)}{(Z^2-(\xi_p+\xi_q+\xi_F)^2)^2}\right).
+ \end{array}
+ \label{eqz1z2}
+\end{equation}
+
+\subsubsection{Singularities of the integrand}
+\indent In order to compute the integrals in~\-(\ref{eqz1z2}) numerically, we will use Gauss quadratures, which are only accurate if the integrands are smooth (i.e. if high-order derivatives of the integrand are bounded). In this case, the integrand has singularities; indeed,
+\begin{itemize}
+ \item $\alpha_1$ and $\zeta$ vanish at $p_F^+$ and $p_F^-$, and if $W=-\omega3\sqrt3t_2\sin\phi$, then $m$ vanishes at $p_F^\omega$,
+ \item if $W=-\omega3\sqrt3t_2\sin\phi$, then the second derivative of $\xi$ diverges at $p_F^\omega$.
+\end{itemize}
+The asymptotics near the singularities are
+\begin{equation}
+ \begin{array}{r@{\ }>\displaystyle l}
+ \sqrt{2t_1^2\alpha_1(p_F^\omega+k')}=&\frac32t_1|k'|+t_1\ O(|k'|^2)\\[0.3cm]
+ \zeta(p_F^\omega+k')=&t_2\cos\phi\ O(|k'|^2)\\[0.3cm]
+    m(p_F^\omega+k')-(W+\omega3\sqrt3t_2\sin\phi)=&t_2\sin\phi\ O(|k'|^2)
+ \end{array}
+\end{equation}
+which implies that, if $W=-\omega3\sqrt3t_2\sin\phi$, $p=p_F^\omega+p'$, $q=p_F^\omega+q'$ and $k=p_F^\omega$, then
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ \xi_p+\xi_q+\xi_F=\frac32t_1(|p'|+|q'|+|p'+q'|)(1+O(|p'|)+O(|q'|)),\\[0.3cm]
+ Z=O(|p'|^2)+O(|q'|^2)+O(|p'+q'|^2),\quad
+ \frac{m_p}{\xi_p}=O(|p'|),\quad
+ \frac{m_q}{\xi_q}=O(|q'|),\quad
+ \frac{m_F}{\xi_F}=O(|p'+q'|).
+ \end{array}
+ \label{eqasymp}
+\end{equation}
+In addition, the $O(\cdot)$ factors in~\-(\ref{eqasymp}) are analytic functions of $|p'|$, $|q'|$ and $|p'+q'|$. Note that, since $|\cdot|$ is not an analytic function (its second derivative diverges at $0$), the $O(\cdot)$ factors are {\it not} analytic functions of $p'$, $q'$ or $p'+q'$. Therefore, if $W=-\omega 3\sqrt3 t_2\sin\phi$, then
+\begin{equation}
+ \mathcal I_-(p,q):=\frac{27}{64\pi^4}\frac{(\xi_p+\xi_q+\xi_F)(\frac{m_p}{\xi_p}+\frac{m_q}{\xi_q}-\frac{m_F}{\xi_F}-\frac{m_pm_qm_F}{\xi_p\xi_q\xi_F})Z}{(Z^2-(\xi_p+\xi_q+\xi_F)^2)^2}
+ \label{eqintegrand-}
+\end{equation}
+\begin{itemize}
+ \item is smooth as long as $p\neq p_F^\omega$ and $q\neq p_F^\omega$ and $p+q\neq 2p_F^\omega$,
+ \item is bounded for all $p,q$,
+ \item its derivatives diverge if $p=p_F^\omega$ or $q=p_F^\omega$ or $p+q=2p_F^\omega$.
+\end{itemize}
+Similarly, if $W=-\omega 3\sqrt3 t_2\sin\phi$, then
+\begin{equation}
+ \mathcal I_+(p,q):=\frac{27}{128\pi^4}\frac{\left(1-\frac{m_pm_F}{\xi_p\xi_F}-\frac{m_qm_F}{\xi_q\xi_F}+\frac{m_pm_q}{\xi_p\xi_q}\right)(Z^2+(\xi_p+\xi_q+\xi_F)^2)}{(Z^2-(\xi_p+\xi_q+\xi_F)^2)^2}
+ \label{eqintegrand+}
+\end{equation}
+\begin{itemize}
+ \item is smooth as long as $p\neq p_F^\omega$ and $q\neq p_F^\omega$ and $p+q\neq 2p_F^\omega$,
+ \item diverges if $p=p_F^\omega$ and $q=p_F^\omega$ (it would remain bounded if it were multiplied by $|p-p_F^\omega|\cdot|q-p_F^\omega|$),
+ \item is bounded for all $(p,q)\neq(p_F^\omega,p_F^\omega)$,
+ \item its derivatives diverge if $p=p_F^\omega$ or $q=p_F^\omega$ or $p+q=2p_F^\omega$.
+\end{itemize}
+In the next section, we will regularize these singularities by performing an appropriate change of variables.
+
+\subsubsection{Sunrise coordinates}
+\indent In this section, we will show how to regularize the singularities mentioned in the previous section. We assume throughout this section that $W=-\omega 3\sqrt3t_2\sin\phi$ (if this is not the case, then there are no singularities).
+\bigskip
+
+{\bf Warning}: As it is set up here, {\bf this computation is only accurate if $\phi$ is not too close to 0} (see the remark on p.~\-\ref{rkphi}).
+\bigskip
+
+\indent While $\mathcal I_-$ and $|p-p_F^\omega||q-p_F^\omega|\mathcal I_+$ are singular functions of $p$ and $q$ (because of the divergence of the second derivative of $|p-p_F^\omega|$), they can be re-expressed as smooth functions of $p$, $q$, $\rho:=|p-p_F^\omega|$, $r:=|q-p_F^\omega|$ and $\gamma:=|p+q-2p_F^\omega|$. We will, therefore, change to the {\it sunrise} coordinates, described in appendix~\-\ref{appsunrise}, which, by lemma~\-\ref{lemmasunrise}, regularize the singularities of $\mathcal I_-$ and $\mathcal I_+$. However, the sunrise coordinates are only defined for rotationally symmetric integration regions, so we will have to split the integration regions, and note that in the regions where we cannot change to sunrise coordinates, it suffices to use polar coordinates.
+\bigskip
+
+\indent Let
+\begin{equation}
+ \mathcal B_\pm:=\mathcal B\cap\{(k_1,k_2)\in\mathcal B\ |\ \pm k_2>0\}
+\end{equation}
+and
+\begin{equation}
+ \mathcal B^{(F)}_\pm:=\left\{p_F^\pm+k',\ |k'|<R\right\},\quad
+ R:=\frac{2\pi}{3\sqrt3},\quad
+ \mathcal B^{(R)}_\pm:=\mathcal B_\pm\setminus\mathcal B^{(F)}_\pm
+\end{equation}
+($\mathcal B^{(F)}_\pm$ is the largest disk that is included in $\mathcal B_\pm$). As is discussed below (see the remark on p.~\-\ref{rkcutoff}), it is inconvenient to sharply split the integral, so we will use a smooth cut-off function instead: we define, for $\tau\in(0,1)$, $\chi_\tau:[0,\infty)\to[0,1]$:
+\begin{equation}
+ \chi_\tau(x):=
+  \left\{\begin{array}{>\displaystyle l@{\quad}l}
+    \frac{e^{-\frac{1-\tau}{1-x}}}{e^{-\frac{1-\tau}{x-\tau}}+e^{-\frac{1-\tau}{1-x}}(1-e^{-\frac{1-\tau}{x-\tau}})}&\mathrm{if\ }x\in(\tau,1)\\[0.5cm]
+    1&\mathrm{if\ }x\in[0,\tau]\\[0.3cm]
+    0&\mathrm{if\ }x\in[1,\infty)
+ \end{array}\right.
+\end{equation}
+which is equal to $1$ if $x\leqslant\tau$, and to $0$ if $x\geqslant1$, and is $\mathcal C^\infty$. In addition, one can prove that $\chi_\tau$ is a class-2 Gevrey function, that is, $\exists C_0,C>0$ such that for all $x\in[0,\infty)$ and $n\in\mathbb N$,
+\begin{equation}
+ \mathrm{sup}\left|\frac{d^n\chi_\tau}{dx^n}\right|\leqslant C_0C^n(n!)^2.
+\end{equation}
+Note that $C_0$ and $C$ depend on $\tau$, and diverge as $\tau\to1$. We will fix $\tau=\frac12$ in the following.
+\bigskip
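+\indent For illustration, the cutoff can be evaluated as follows (a plain C sketch with {\tt long double}s and an illustrative function name, not taken from the {\tt hhtop} sources):\par
+\begin{verbatim}
+#include <math.h>
+
+/* smooth cutoff chi_tau: equal to 1 on [0,tau], 0 on [1,infinity) */
+long double chi(long double tau, long double x)
+{
+  if (x <= tau) return 1.0L;
+  if (x >= 1.0L) return 0.0L;
+  long double a = expl(-(1.0L - tau) / (x - tau));  /* -> 0 as x -> tau+ */
+  long double b = expl(-(1.0L - tau) / (1.0L - x)); /* -> 0 as x -> 1-   */
+  return b / (a + b * (1.0L - a));
+}
+\end{verbatim}
+\bigskip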
+
+{\bf Remark}: By introducing such a cutoff function, the integrands will no longer be analytic, but class-2 Gevrey functions. By lemma~\-\ref{lemmaGL} (see appendix~\-\ref{appGL}), the error of the numerical integration scheme nevertheless decays exponentially in $\sqrt N$, where $N$ denotes the order of the quadrature.
+\bigskip
+
+Let, for $p\in\mathcal B$
+\begin{equation}
+ f_{\omega}^{(F)}(p):=\chi_{\frac12}\left(\frac{|p-p_F^\omega|_{\mathcal B}}R\right)\quad
+ f_{\omega}^{(R)}(p):=1-f_{\omega}^{(F)}(p)
+\end{equation}
+where the choice $\tau=\frac12$ is arbitrary (any other value would do, as long as it is not too close to $0$ or $1$), we recall that $R:=\frac{2\pi}{3\sqrt3}$, and $|\cdot|_{\mathcal B}$ denotes the periodic Euclidean norm on $\mathcal B$:
+\begin{equation}
+ |k|_{\mathcal B}:=\min\{|k+n_1G_++n_2G_-|,\ (n_1,n_2)\in\mathbb Z^2\},\quad
+ \textstyle G_\pm:=(\frac{2\pi}3,\pm\frac{2\pi}{\sqrt3}).
+\end{equation}
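+\indent In practice, this norm can be evaluated by minimizing over a few neighboring lattice translates; for instance (a plain C sketch with {\tt long double}s and illustrative names, not taken from the {\tt hhtop} sources; restricting to $|n_1|,|n_2|\leqslant1$ is enough when $k\in\mathcal B$):\par
+\begin{verbatim}
+#include <math.h>
+
+/* periodic norm |k|_B: minimize |k + n1*Gp + n2*Gm| over n1,n2 in {-1,0,1},
+   with Gp = (2pi/3, 2pi/sqrt(3)) and Gm = (2pi/3, -2pi/sqrt(3)) */
+long double norm_B(long double k1, long double k2)
+{
+  const long double pi = 3.141592653589793238L;
+  const long double g1 = 2.0L * pi / 3.0L;
+  const long double g2 = 2.0L * pi / sqrtl(3.0L);
+  long double min = -1.0L;
+  for (int n1 = -1; n1 <= 1; n1++)
+    for (int n2 = -1; n2 <= 1; n2++) {
+      long double x = k1 + (n1 + n2) * g1;
+      long double y = k2 + (n1 - n2) * g2;
+      long double d = sqrtl(x * x + y * y);
+      if (min < 0.0L || d < min) min = d;
+    }
+  return min;
+}
+\end{verbatim}
+\medskip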
+We then split
+\begin{equation}
+ z_1\mp z_2=A_{F,F}^{(\mp)}+2A_{R,F}^{(\mp)}+A_{R,R}^{(\mp)}
+\end{equation}
+where
+\begin{equation}
+ \begin{array}{r@{\ }>\displaystyle l}
+ A_{F,F}^{(\mp)}:=&\int_{\mathcal B_\omega^{(F)}}dp\int_{\mathcal B_\omega^{(F)}}dq\ f_{\omega}^{(F)}(p)f_{\omega}^{(F)}(q)\ \mathcal I_\mp(p,q)\\[0.5cm]
+ A_{R,F}^{(\mp)}:=&\int_{\mathcal B}dp\int_{\mathcal B^{(F)}_\omega}dq\ f_{\omega}^{(R)}(p)f_{\omega}^{(F)}(q)\ \mathcal I_\mp(p,q)\\[0.5cm]
+ A_{R,R}^{(\mp)}:=&\int_{\mathcal B}dp\int_{\mathcal B}dq\ f_{\omega}^{(R)}(p)f_{\omega}^{(R)}(q)\ \mathcal I_\mp(p,q)
+ \end{array}
+\end{equation}
+in which we used the symmetry $\mathcal I_\mp(p,q)=\mathcal I_\mp(q,p)$. We will change to sunrise coordinates in $A_{F,F}$ and to polar coordinates in $A_{R,F}$ and $A_{R,R}$.
+\bigskip
+
+\point The integrand of $A_{F,F}^{(\mp)}$ has the same singularities as $\mathcal I_{\mp}$, which we regularize by changing to {\it sunrise coordinates}. Since $\mathcal B_\omega^{(F)}$ is a disk, these coordinates are well defined (see lemma~\-\ref{lemmasunrise}). In order to get rid of factors of $\pi$, we first rescale $p$ and $q$ by $R=\frac{2\pi}{3\sqrt3}$, and find
+\begin{equation}
+ A_{F,F}^{(\mp)}=2\int_0^1d\rho\int_0^{2\pi}d\theta\int_{-\frac\pi2}^{\frac\pi2}d\psi\int_0^1dz\ \Sigma f_{\omega,1}^{(F)}(\sigma)\Sigma f_{\omega,2}^{(F)}(\sigma)\Sigma J(\sigma)\Sigma\mathcal I_\mp(\sigma)
+\end{equation}
+with $\sigma\equiv(\rho,\theta,\psi,z)$,
+\begin{equation}
+ \Sigma J(\sigma)=
+ 4\rho^3\frac{\bar r(1+\bar r\cos(2\psi))}{(1+\cos\psi)\sqrt{1+\bar r\cos^2\psi}},
+\end{equation}
+where $\bar r$ is defined in~\-(\ref{eqr}),
+\begin{equation}
+ \Sigma f_{\omega,1}^{(F)}(\sigma):=\chi_{\frac12}\left(\rho\right),\quad
+ \Sigma f_{\omega,2}^{(F)}(\sigma):=\chi_{\frac12}\left(\rho\bar r\right),
+\end{equation}
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ \Sigma\mathcal I_-(\sigma):=\frac{1}{108}\frac{(\Sigma\xi_p+\Sigma\xi_q+\Sigma\xi_F)(\frac{\Sigma m_p}{\Sigma\xi_p}+\frac{\Sigma m_q}{\Sigma\xi_q}-\frac{\Sigma m_F}{\Sigma\xi_F}-\frac{\Sigma m_p\Sigma m_q\Sigma m_F}{\Sigma\xi_p\Sigma\xi_q\Sigma\xi_F})\Sigma Z}{(\Sigma Z^2-(\Sigma\xi_p+\Sigma\xi_q+\Sigma\xi_F)^2)^2}\\[0.5cm]
+ \Sigma\mathcal I_+(\sigma):=\frac{1}{216}\frac{\left(1-\frac{\Sigma m_p\Sigma m_F}{\Sigma\xi_p\Sigma\xi_F}-\frac{\Sigma m_q\Sigma m_F}{\Sigma\xi_q\Sigma\xi_F}+\frac{\Sigma m_p\Sigma m_q}{\Sigma\xi_p\Sigma\xi_q}\right)(\Sigma Z^2+(\Sigma\xi_p+\Sigma\xi_q+\Sigma\xi_F)^2)}{(\Sigma Z^2-(\Sigma\xi_p+\Sigma\xi_q+\Sigma\xi_F)^2)^2}
+ \end{array}
+\end{equation}
+in which
+\begin{equation}
+ \begin{array}{c}
+ \Sigma\xi_p:=\bar\xi\left(\sqrt3+\rho\cos\theta,\omega+\omega\rho\sin\theta\right),\quad
+ \Sigma\xi_q:=\bar\xi\left(\sqrt3+\rho\bar r\cos(\theta+\varphi),\omega+\omega\rho\bar r\sin(\theta+\varphi)\right),
+ \\[0.3cm]
+ \Sigma\xi_F:=\bar\xi\left(\sqrt3+\rho(\cos\theta+\bar r\cos(\theta+\varphi)),\omega+\omega\rho(\sin(\theta)+\bar r\sin(\theta+\varphi))\right),
+ \end{array}
+\end{equation}
+where $\varphi$ is defined in~\-(\ref{eqphi}), and
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ \bar\xi(\bar k):=\sqrt{\bar m^2(\bar k)+2t_1^2\bar\alpha_1(\bar k)},\quad
+ \bar\zeta(\bar k):=2t_2\cos\phi\bar\alpha_1(\bar k),\quad
+ \bar m(\bar k):=W-2t_2\sin\phi\bar\alpha_2(\bar k),\\[0.3cm]
+ \bar\alpha_1(\bar k_1,\bar k_2):=2\cos\left(\frac\pi3 \bar k_2\right)\left(\cos\left(\frac\pi{\sqrt3}\bar k_1\right)+\cos\left(\frac\pi3 \bar k_2\right)\right)+\frac12,\\[0.5cm]
+ \bar\alpha_2(\bar k_1,\bar k_2):=2\sin\left(\frac\pi3 \bar k_2\right)\left(\cos\left(\frac\pi{\sqrt3}\bar k_1\right)-\cos\left(\frac\pi3 \bar k_2\right)\right),
+ \end{array}
+ \label{eqbarxi}
+\end{equation}
+\begin{equation}
+ \begin{array}{c}
+ \Sigma m_p:=\bar m\left(\sqrt3+\rho\cos\theta,\omega+\omega\rho\sin\theta\right),\quad
+ \Sigma m_q:=\bar m\left(\sqrt3+\rho\bar r\cos(\theta+\varphi),\omega+\omega\rho\bar r\sin(\theta+\varphi)\right),
+ \\[0.3cm]
+ \Sigma m_F:=\bar m\left(\sqrt3+\rho(\cos\theta+\bar r\cos(\theta+\varphi)),\omega+\omega\rho(\sin(\theta)+\bar r\sin(\theta+\varphi))\right),
+ \end{array}
+\end{equation}
+and
+\begin{equation}
+ \Sigma Z:=\Sigma\zeta_p+\Sigma\zeta_q-\Sigma\zeta_F
+\end{equation}
+with
+\begin{equation}
+ \begin{array}{c}
+ \Sigma\zeta_p:=\bar \zeta\left(\sqrt3+\rho\cos\theta,\omega+\omega\rho\sin\theta\right),\quad
+ \Sigma\zeta_q:=\bar \zeta\left(\sqrt3+\rho\bar r\cos(\theta+\varphi),\omega+\omega\rho\bar r\sin(\theta+\varphi)\right),
+ \\[0.3cm]
+ \Sigma\zeta_F:=\bar \zeta\left(\sqrt3+\rho(\cos\theta+\bar r\cos(\theta+\varphi)),\omega+\omega\rho(\sin(\theta)+\bar r\sin(\theta+\varphi))\right).
+ \end{array}
+\end{equation}
+\bigskip
+
+\indent Let us now check that the functions $\Sigma J\Sigma\mathcal I_\mp$ and $\Sigma f_{\omega,i}$ are smooth. This is almost a direct consequence of lemma~\-\ref{lemmasunrise} and~\-(\ref{eqasymp}), if not for the fact that the sunrise coordinates ignore the periodic nature of the Brillouin zone $\mathcal B$. If $p+q-p_F^\omega$ were equal to $p_F^\omega+(n_1G_++n_2G_-)$ with $G_\pm=(\frac{2\pi}3,\pm\frac{2\pi}{\sqrt3})$ and $(n_1,n_2)\in\mathbb Z^2\setminus\{(0,0)\}$, then $\mathcal I_\mp(p,q)$ would have a singularity that is not regularized by the sunrise coordinates. However, one readily checks that this cannot happen when $p$ and $q$ are in $\mathcal B_\omega^{(F)}$. All in all, $\Sigma J\Sigma\mathcal I_\mp$ is an analytic function on the closure of the integration domain, and $\Sigma f_{\omega,i}^{(F)}$ is a class-2 Gevrey function.
+\bigskip
+
+\makelink{rkphi}{\thepage}
+{\bf Remark}: In the discussion above, we assumed that $\mathcal I(p,q)$ is not singular at $p_F^{-\omega}$, which is only true if $\phi\neq0$. If $\phi$ is small, then the derivatives of $\mathcal I(p,q)$ may be very large if one of $p$, $q$ or $p+q-p_F^\omega$ is close to $p_F^{-\omega}$. When $p$ and $q$ are in $\mathcal B_{\omega}^{(F)}$, $p+q-p_F^\omega$ may be arbitrarily close to $p_F^{-\omega}$, which means that $\phi$ must be sufficiently far from $0$ for the accuracy of the computation described above to be good.
+\bigskip
+
+\indent Finally, using the $\frac{2\pi}3$ rotation symmetry, we rewrite
+\begin{equation}
+ A_{F,F}^{(\mp)}=6\int_0^1d\rho\int_{-\frac\pi6}^{\frac\pi2}d\theta\int_{-\frac\pi2}^{\frac\pi2}d\psi\int_0^1dz\ \Sigma f_{\omega,1}^{(F)}(\sigma)\Sigma f_{\omega,2}^{(F)}(\sigma)\Sigma J(\sigma)\Sigma\mathcal I_\mp(\sigma).
+\end{equation}
+\bigskip
+
+\point The integrand of $A_{R,F}^{(\mp)}$ is only singular if $q=p_F^\omega$ or $p+q-p_F^\omega=p_F^\omega$, because $|p-p_F^\omega|_{\mathcal B}>\frac R2$. We regularize these singularities by switching to polar coordinates corresponding to $q$ and $p+q-p_F^\omega$, which we denote by $(r,\theta,\rho,\varphi)$: if $p+q-p_F^\omega\in\mathcal B_\nu$,
+\begin{equation}
+ q=p_F^\omega+\omega\frac{2\pi}{3\sqrt3}r(\cos\theta,\sin\theta),\quad
+ p+q-p_F^\omega=p_F^\nu+\nu\frac{2\pi}{3\sqrt3}\rho(\cos\varphi,\sin\varphi)
+\end{equation}
+in terms of which
+\begin{equation}
+ A_{R,F}^{(\mp)}=\sum_{\nu=\pm}\int_0^{2\pi}d\theta\int_{0}^{2\pi}d\varphi\int_0^{1}dr\int_0^{R(\varphi)}d\rho\ \rho r\Pi f_{\omega}^{(R)}(\varpi)\Pi f_{\omega}^{(F)}(\varpi)\Pi\mathcal I_\mp(\varpi)
+\end{equation}
+with $\varpi\equiv(r,\theta,\rho,\varphi)$,
+\begin{equation}
+ \Pi f_{\omega}^{(R)}(\varpi):=\chi_{\frac12}\left(|(\rho\cos\varphi-r\cos\theta,\ \nu-\omega+\nu \rho\sin\varphi-\omega r\sin\theta)|_{\mathbb T}\right),\quad
+ \Pi f_{\omega}^{(F)}(\varpi):=\chi_{\frac12}(r)
+\end{equation}
+where
+\begin{equation}
+ |\bar k|_{\mathbb T}:=\min\left\{|\bar k+n_1(\sqrt3,1)+n_2(\sqrt3,-1)|,\ (n_1,n_2)\in\mathbb Z^2\right\},
+\end{equation}
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ \Pi\mathcal I_-(\varpi):=\frac{1}{108}\frac{(\Pi\xi_p+\Pi\xi_q+\Pi\xi_F)(\frac{\Pi m_p}{\Pi\xi_p}+\frac{\Pi m_q}{\Pi\xi_q}-\frac{\Pi m_F}{\Pi\xi_F}-\frac{\Pi m_p\Pi m_q\Pi m_F}{\Pi\xi_p\Pi\xi_q\Pi\xi_F})\Pi Z}{(\Pi Z^2-(\Pi\xi_p+\Pi\xi_q+\Pi\xi_F)^2)^2}\\[0.5cm]
+ \Pi\mathcal I_+(\varpi):=\frac{1}{216}\frac{\left(1-\frac{\Pi m_p\Pi m_F}{\Pi\xi_p\Pi\xi_F}-\frac{\Pi m_q\Pi m_F}{\Pi\xi_q\Pi\xi_F}+\frac{\Pi m_p\Pi m_q}{\Pi\xi_p\Pi\xi_q}\right)(\Pi Z^2+(\Pi\xi_p+\Pi\xi_q+\Pi\xi_F)^2)}{(\Pi Z^2-(\Pi\xi_p+\Pi\xi_q+\Pi\xi_F)^2)^2}
+ \end{array}
+\end{equation}
+in which
+\begin{equation}
+ \begin{array}{c}
+ \Pi\xi_q:=\bar\xi\left(\sqrt3+r\cos\theta,\omega+\omega r\sin\theta\right),\quad
+ \Pi\xi_F:=\bar\xi\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Pi\xi_p:=\bar\xi\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu-\omega r\sin\theta+\nu \rho\sin\varphi\right),
+ \end{array}
+\end{equation}
+where $\bar\xi$ is defined in~\-(\ref{eqbarxi}),
+\begin{equation}
+ \begin{array}{c}
+ \Pi m_q:=\bar m\left(\sqrt3+r\cos\theta,\omega+\omega r\sin\theta\right),\quad
+ \Pi m_F:=\bar m\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Pi m_p:=\bar m\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu-\omega r\sin\theta+\nu \rho\sin\varphi\right),
+ \end{array}
+\end{equation}
+where $\bar m$ is defined in~\-(\ref{eqbarxi}),
+\begin{equation}
+ \Pi Z:=\Pi\zeta_p+\Pi\zeta_q-\Pi\zeta_F
+\end{equation}
+with
+\begin{equation}
+ \begin{array}{c}
+ \Pi\zeta_q:=\bar\zeta\left(\sqrt3+r\cos\theta,\omega+\omega r\sin\theta\right),\quad
+ \Pi\zeta_F:=\bar\zeta\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Pi\zeta_p:=\bar\zeta\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu-\omega r\sin\theta+\nu \rho\sin\varphi\right)
+ \end{array}
+\end{equation}
+where $\bar\zeta$ is defined in~\-(\ref{eqbarxi}), and
+\begin{equation}
+ R(\theta)=
+ \left\{\begin{array}{>\displaystyle l}
+ \frac1{\cos(\theta-\frac\pi6)}\quad\mathrm{if\ }\theta\in\left[-\frac\pi6,\frac\pi2\right]\\[0.5cm]
+ \frac1{\cos(\theta-\frac{5\pi}6)}\quad\mathrm{if\ }\theta\in\left[\frac\pi2,\frac{7\pi}6\right]\\[0.5cm]
+ \frac1{\cos(\theta+\frac\pi2)}\quad\mathrm{if\ }\theta\in\left[\frac{7\pi}6,\frac{11\pi}6\right].
+ \end{array}\right.
+ \label{eqR}
+\end{equation}
+Note that $R$ is only piecewise smooth, so, in order to keep the accuracy of the computation high, we must split the integral over $\varphi$:
+\begin{equation}
+ A_{R,F}^{(\mp)}=3\sum_{\nu=\pm}\int_{0}^{2\pi}d\theta\int_{-\frac\pi6}^{\frac\pi2}d\varphi\int_0^{1}dr\int_0^{R(\varphi)}d\rho\ \rho r\Pi f_{\omega}^{(R)}(\varpi)\Pi f_{\omega}^{(F)}(\varpi)\Pi\mathcal I_\mp(\varpi)
+\end{equation}
+where we used the symmetry under $\frac{2\pi}3$ rotations of $p$ and $q$.
+\bigskip
+
+\indent By~\-(\ref{eqasymp}) and the fact that $|p-p_F^\omega|_{\mathcal B}>\frac R2$ on the support of $f_\omega^{(R)}$, $\rho r\Pi\mathcal I_\mp$ is an analytic function on the closure of the integration domain, and $\Pi f_{\omega}^{(F)}$ and $\Pi f_{\omega}^{(R)}$ are class-2 Gevrey functions.
+\bigskip
+
+\makelink{rkcutoff}{\thepage}
+{\bf Remark}: In order to regularize the singularity at $p+q-p_F^\omega=p_F^\omega$, we had to change variables to $(q,p+q-p_F^\omega)$. If, instead of the smooth cutoff function $f_\omega$, we had used a step function, the integration region for $p+q-p_F^\omega$ would have been $\mathcal B$ minus a disk centered around $q$ of radius $R$. This creates trouble, since the parametrization of this disk is singular when $p_F^\omega$ tends to the boundary of the disk. The reason for which we have used a smooth cutoff function is to avoid this problem.
+\bigskip
+
+\point The integrand of $A_{R,R}^{(\mp)}$ is only singular if $p+q-p_F^\omega=p_F^\omega$, because $|p-p_F^\omega|_{\mathcal B}>\frac R2$ and $|q-p_F^\omega|_{\mathcal B}>\frac R2$. We regularize this singularity by switching to polar coordinates corresponding to $q$ and $p+q-p_F^\omega$, which we denote by $(r,\theta,\rho,\varphi)$: if $q\in\mathcal B_\eta$ and $p+q-p_F^\omega\in\mathcal B_\nu$, then we define
+\begin{equation}
+ q=p_F^\eta+\eta\frac{2\pi}{3\sqrt3} r(\cos\theta,\sin\theta),\quad
+ p+q-p_F^\omega=p_F^\nu+\nu\frac{2\pi}{3\sqrt3}\rho(\cos\varphi,\sin\varphi)
+\end{equation}
+in terms of which
+\begin{equation}
+ A_{R,R}^{(\mp)}=\sum_{\eta,\nu=\pm}\int_0^{2\pi}d\theta\int_{0}^{2\pi}d\varphi\int_{0}^{R(\theta)}dr\int_0^{R(\varphi)}d\rho\ \rho r\Xi f_{\omega,1}^{(R)}(\varpi)\Xi f_{\omega,2}^{(R)}(\varpi)\Xi\mathcal I_\mp(\varpi)
+\end{equation}
+with $\varpi\equiv(r,\theta,\rho,\varphi)$,
+\begin{equation}
+ \begin{array}c
+ \Xi f_{\omega,1}^{(R)}(\varpi):=\chi_{\frac12}\left(|(\rho\cos\varphi-r\cos\theta,\ \nu-\eta+\nu \rho\sin\varphi-\eta r\sin\theta)|_{\mathbb T}\right),\\[0.3cm]
+ \Xi f_{\omega,2}^{(R)}(\varpi):=\chi_{\frac12}\left(|(r\cos\theta,\ \eta-\omega+\eta r\sin\theta)|_{\mathbb T}\right)
+ \end{array}
+\end{equation}
+where
+\begin{equation}
+ |\bar k|_{\mathbb T}:=\min\left\{|\bar k+n_1(\sqrt3,1)+n_2(\sqrt3,-1)|,\ (n_1,n_2)\in\mathbb Z^2\right\},
+\end{equation}
+\begin{equation}
+ \begin{array}{>\displaystyle c}
+ \Xi\mathcal I_-(\varpi):=\frac{1}{108}\frac{(\Xi\xi_p+\Xi\xi_q+\Xi\xi_F)(\frac{\Xi m_p}{\Xi\xi_p}+\frac{\Xi m_q}{\Xi\xi_q}-\frac{\Xi m_F}{\Xi\xi_F}-\frac{\Xi m_p\Xi m_q\Xi m_F}{\Xi\xi_p\Xi\xi_q\Xi\xi_F})\Xi Z}{(\Xi Z^2-(\Xi\xi_p+\Xi\xi_q+\Xi\xi_F)^2)^2}\\[0.5cm]
+ \Xi\mathcal I_+(\varpi):=\frac{1}{216}\frac{\left(1-\frac{\Xi m_p\Xi m_F}{\Xi\xi_p\Xi\xi_F}-\frac{\Xi m_q\Xi m_F}{\Xi\xi_q\Xi\xi_F}+\frac{\Xi m_p\Xi m_q}{\Xi\xi_p\Xi\xi_q}\right)(\Xi Z^2+(\Xi\xi_p+\Xi\xi_q+\Xi\xi_F)^2)}{(\Xi Z^2-(\Xi\xi_p+\Xi\xi_q+\Xi\xi_F)^2)^2}
+ \end{array}
+\end{equation}
+in which
+\begin{equation}
+ \begin{array}{c}
+ \Xi\xi_q:=\bar\xi\left(\sqrt3+r\cos\theta,\eta+\eta r\sin\theta\right),\quad
+ \Xi\xi_F:=\bar\xi\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Xi\xi_p:=\bar\xi\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu+\omega-\eta-\eta r\sin\theta+\nu \rho\sin\varphi\right),
+ \end{array}
+\end{equation}
+where $\bar\xi$ is defined in~\-(\ref{eqbarxi}),
+\begin{equation}
+ \begin{array}{c}
+ \Xi m_q:=\bar m\left(\sqrt3+r\cos\theta,\eta+\eta r\sin\theta\right),\quad
+ \Xi m_F:=\bar m\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Xi m_p:=\bar m\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu+\omega-\eta-\eta r\sin\theta+\nu \rho\sin\varphi\right),
+ \end{array}
+\end{equation}
+where $\bar m$ is defined in~\-(\ref{eqbarxi}),
+\begin{equation}
+ \Xi Z:=\Xi\zeta_p+\Xi\zeta_q-\Xi\zeta_F
+\end{equation}
+with
+\begin{equation}
+ \begin{array}{c}
+ \Xi\zeta_q:=\bar\zeta\left(\sqrt3+r\cos\theta,\eta+\eta r\sin\theta\right),\quad
+ \Xi\zeta_F:=\bar\zeta\left(\sqrt3+\rho\cos\varphi,\nu+\nu \rho\sin\varphi\right),
+ \\[0.3cm]
+ \Xi\zeta_p:=\bar\zeta\left(\sqrt3-r\cos\theta+\rho\cos\varphi,\nu+\omega-\eta-\eta r\sin\theta+\nu \rho\sin\varphi\right)
+ \end{array}
+\end{equation}
+where $\bar\zeta$ is defined in~\-(\ref{eqbarxi}), and $R$ is defined in~\-(\ref{eqR}). Here, again, since $R$ is only piecewise smooth, we must split the integrals over $\theta$ and $\varphi$:
+\begin{equation}
+ A_{R,R}^{(\mp)}=3\sum_{\eta,\nu=\pm}\sum_{a=0,1,2}\int_{(4a-1)\frac\pi6}^{(4a+3)\frac\pi6}d\theta\int_{-\frac\pi6}^{\frac\pi2}d\varphi\int_{0}^{R(\theta)}dr\int_0^{R(\varphi)}d\rho\ \rho r\Xi f_{\omega,1}^{(R)}(\varpi)\Xi f_{\omega,2}^{(R)}(\varpi)\Xi\mathcal I_\mp(\varpi)
+\end{equation}
+where we used the symmetry under $\frac{2\pi}3$ rotations of $p$ and $q$.
+\bigskip
+
+\indent By~\-(\ref{eqasymp}) and the fact that $|p-p_F^\omega|_{\mathcal B}>\frac R2$ and $|q-p_F^\omega|_{\mathcal B}>\frac R2$ on the support of $f_\omega^{(R)}$, $\rho\,\Xi\mathcal I_\mp$ is an analytic function on the closure of the integration domain, and $\Xi f_{\omega,1}^{(R)}$ and $\Xi f_{\omega,2}^{(R)}$ are class-2 Gevrey functions.
+\bigskip
+
+\subsection{Strategy of the numerical computation}
+\indent The integrations are carried out using Gauss-Legendre quadratures (see section~\-\ref{secintegrationgl}).
+
+\subsection{Usage and examples}
+\indent We will now describe some basic use cases of {\tt hhtop z1-z2} and {\tt hhtop z1+z2}. For a full description of the options of {\tt hhtop}, see the {\tt man} page.
+\subseqskip
+
+\subsubsection{Basic usage}
+\indent The values of the parameters can be set via the {\tt -p} flag. Here is an example:\par
+\medskip
+\indent{\tt hhtop z1-z2 -p "omega=1;t1=1.;t2=.1;phi=1;"}\par
+\indent{\tt hhtop z1+z2 -p "omega=1;t1=1.;t2=.1;phi=1;"}\par
+\medskip
+The parameters that are not specified by the {\tt -p} flag are set to their default values: $\omega=1$, $t_1=1$, $t_2=0.1$, $\phi=\frac\pi2$, $W=3\sqrt{3}\,\omega t_2\sin\phi$.
+
+\subsubsection{Precision of the computation}
+\indent The precision of the computation can be controlled by three parameters: the precision of the numbers manipulated by {\tt hhtop} (set via the {\tt -P} flag, see section~\-\ref{secprecision}), the order of the integration, and the tolerance of the computation of the abscissas and weights.
+\bigskip
+
+\point{\bf Order of the integration.} The order of the integration, that is, the value of the number $N$ introduced in section~\-\ref{secintegrationgl}, can be specified via the {\tt -O} flag. Its default value is 10. The difference between the values of the integral computed at different orders is a good measure of the numerical error. Example:\par
+\medskip
+\indent{\tt hhtop z1-z2 -O 30}\par
+\indent{\tt hhtop z1+z2 -O 30}
+\bigskip
+
+\point{\bf Tolerance of the abscissas and weights.} A Newton scheme is used to compute the abscissas and weights of the Gauss-Legendre integration. The scheme halts when the difference $|x_{n+1}-x_n|$ (see section~\-\ref{secnewton}) is smaller than a number, called the {\it tolerance} of the algorithm, or when the total number of steps exceeds a given threshold. The tolerance can be set via the {\tt -t} flag, and the maximal number of steps via the {\tt -N} flag. Their default values are $10^{-11}$ and 1000000. If the tolerance is too small compared to the precision of the multi-precision floats, or if the maximal number of steps is too small, then the iteration may not converge. Example:\par
+\medskip
+\indent{\tt hhtop z1-z2 -t 1e-30 -N 2000000 -O 100 -P 256}\par
+\indent{\tt hhtop z1+z2 -t 1e-30 -N 2000000 -O 100 -P 256}
+
+\subsubsection{Using double precision floats instead of multi-precision floats}
+\indent Using the {\tt -D} command-line flag, {\tt hhtop} can be instructed to use {\tt long double}'s instead of MPFR floats. While one then loses the ability to adjust the precision, the computation time can be drastically reduced. Example:\par
+\medskip
+\indent{\tt hhtop -D z1-z2 -p "sinphi=1.;"}\par
+\indent{\tt hhtop -D z1+z2 -p "sinphi=1.;"}\par
+\bigskip
+The precision of {\tt long double}s is compiler dependent, see section~\-\ref{secprecision}.
+
+\vfill\eject
+
+
+\section{Algorithms}
+\indent In this section, we describe the algorithms used by {\tt hhtop}. Their implementation is provided by the {\tt libinum} library.
+\subseqskip
+
+\subsection{Newton scheme}\label{secnewton}
+\indent The Newton algorithm is used to compute roots: given a real function $f$ and an initial guess $x_0$ for the root, the Newton scheme produces a sequence $(x_n)$:
+\begin{equation}\begin{array}{>\displaystyle c}
+x_{n+1}:=x_n-\frac{f(x_n)}{\partial_x f(x_n)}
+\end{array}\label{eqnewton}\end{equation}
+which, provided the sequence converges, tends to a solution of $f(x)=0$, with a quadratic rate of convergence
+\begin{equation}
+|x_{n+1}-x_{n}|\leqslant c_n|x_{n}-x_{n-1}|^2
+\label{eqconvnewton}\end{equation}
+where
+\begin{equation}
+c_n:=\frac12\frac{\mAthop{\displaystyle\mathrm{sup}}_{x\in[x_{n+1},x_{n}]}|\partial^2_x f(x)|}{\mAthop{\displaystyle\mathrm{inf}}_{x\in[x_{n+1},x_n]}|\partial_xf(x)|}.
+\label{eqboundNewton}\end{equation}
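+\bigskip
+
+\indent As a simple illustration of~\-(\ref{eqnewton}) and~\-(\ref{eqconvnewton}) (this worked example is not part of the {\tt hhtop} computation itself), consider $f=P_2$, the second Legendre polynomial, $P_2(x)=\frac12(3x^2-1)$, whose positive root is $\frac1{\sqrt3}\simeq0.5773503$ (the roots of Legendre polynomials are the abscissas of the quadratures of section~\-\ref{secintegrationgl}). Starting from $x_0=0.5$, (\ref{eqnewton}) yields
+\begin{equation}
+ x_1=0.5-\frac{-0.125}{1.5}\simeq0.583333,\quad
+ x_2\simeq0.583333-\frac{0.010417}{1.75}\simeq0.577381
+\end{equation}
+so that two steps already approximate the root to within an error of order $10^{-5}$, in line with the quadratic convergence~\-(\ref{eqconvnewton}).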
+
+\subsection{Gauss-Legendre integration}\label{secintegrationgl}
+\indent The Gauss-Legendre method allows us to compute
+\begin{equation}
+\int_{-1}^1dx\ f(x)
+\label{eqgenericint}\end{equation}
+for $f:[-1,1]\to\mathbb{R}$. Having fixed an {\it order} $N\in\mathbb N\setminus\{0\}$, let $\{x_1,\cdots,x_N\}$ denote the set of roots of the $N$-th Legendre polynomial $P_N$, and let
+\begin{equation}
+w_i=\frac2{(1-x_i^2)(P_N'(x_i))^2}
+\label{eqweights}\end{equation}
+for $i\in\{1,\cdots,N\}$. One can show that, if $f$ is a polynomial of order $\leqslant2N-1$, then
+\begin{equation}
+\int_{-1}^1dx\ f(x)=\sum_{i=1}^Nw_if(x_i).
+\label{eqgenericint}\end{equation}
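+For instance (as an elementary check, not needed for the rest of the discussion), at order $N=2$ the abscissas are $x_{1,2}=\mp\frac1{\sqrt3}$ and, by~\-(\ref{eqweights}), $w_1=w_2=1$, so that
+\begin{equation}
+ \int_{-1}^1dx\ x^2=\frac23=w_1x_1^2+w_2x_2^2.
+\end{equation}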
+\bigskip
+
+\indent If $f$ is an analytic function, then one can show that the error decays exponentially as $N\to\infty$. However, in the computation of $z_1-z_2$ (see section~\-\ref{secz1z2}), we use Gauss-Legendre quadratures to integrate a class-2 Gevrey function, so we will need to generalize this result. Let us first define class-$s$ Gevrey functions on $[-1,1]$ as $\mathcal C^\infty$ functions for which there exist $C_0,C>0$ such that, $\forall n\in\mathbb N$,
+\begin{equation}
+ \mathop{\mathrm{sup}}_{x\in[-1,1]}\left|\frac{d^nf(x)}{dx^n}\right|\leqslant C_0C^n(n!)^s.
+\end{equation}
+Note that the set of analytic functions on $[-1,1]$ is equal to the set of class-1 Gevrey functions on $[-1,1]$; a standard example of a class-2 Gevrey function that is not analytic is the function equal to $e^{-\frac1x}$ for $x>0$ and to $0$ for $x\leqslant0$. We assume that $s\in\mathbb N\setminus\{0\}$.
+\bigskip
+
+\indent The basic strategy to estimate the error
+\begin{equation}
+ E_N(f):=\left|\int_{-1}^1dx\ f(x)-\sum_{i=1}^Nw_if(x_i)\right|
+\end{equation}
+is to approximate $f$ using Chebyshev polynomials, bound the error of this approximation for Gevrey functions, and use an estimate of the error when $f$ is a Chebyshev polynomial. This is done in detail in appendix~\-\ref{appGL}, and we find~\-(see lemma~\-\ref{lemmaGL})
+\begin{equation}
+ E_N(f)\leqslant c_0c_1^{s-1}(2N)^{1-\frac1s}e^{-b(2N)^{\frac1s}}s!.
+\end{equation}
+
+
+\subsection{Precision}\label{secprecision}
+\indent The numerical values manipulated by {\tt hhtop} are represented as multi-precision floats (using the GNU MPFR library). The number of bits allocated to each number, that is, the number of digits used in the computation, can be specified using the {\tt -P} flag. The default precision is 53 bits. Example:\par
+\medskip
+\indent{\tt hhtop phase -P 128}\par
+\bigskip
+
+\indent This behavior can be changed using the {\tt -D} flag, in which case the numerical values are represented as {\tt long double}, which have a fixed precision, but yield faster computation times. Example:\par
+\medskip
+\indent{\tt hhtop -D phase}\par
+\bigskip
+The precision of {\tt long double}'s is compiler-dependent, and can be checked using the {\tt -Vv} flag:
+\medskip
+\indent{\tt hhtop -Vv}\par
+\bigskip
+Using the GNU GCC compiler, version 5.3.0, on the {\tt x86-64} architecture, the precision of {\tt long double}'s is 64 bits (that is, roughly 19 significant decimal digits).
+
+
+\vfill\eject
+
+\appendix
+\section{Proof of (\expandonce{\ref{eqineqab}})}\label{appintk0}
+\indent In this appendix, we show that if~\-(\ref{eqcondt}) holds, then~\-(\ref{eqineqab}) does as well. To alleviate the notation, we will drop the $_{t_1,t_2,W,\phi}$ indices as well as the `$(k)$'. We have
+\begin{equation}
+\xi^2-\zeta^2=m^2+t_1^2|\Omega|^2-4t_2^2\cos^2\phi\alpha_1^2
+\label{eqximinuszeta1}\end{equation}
+which, using $|\Omega|^2=2\alpha_1$ and $\cos^2\phi\leqslant1$, implies
+\begin{equation}
+\xi^2-\zeta^2\geqslant m^2+2\alpha_1(t_1^2-2t_2^2\alpha_1).
+\label{eqximinuszeta2}\end{equation}
+Furthermore, $0\leqslant\alpha_1\leqslant\frac92$ (both bounds are attained: $\frac92$ at $k=0$ and $0$ at $p_F:=(\frac{2\pi}3,\frac{2\pi}{3\sqrt3})$), which, together with~\-(\ref{eqcondt}), implies that $\xi^2>\zeta^2$.
+
+\section{Sunrise coordinates}\label{appsunrise}
+\indent In this appendix, we discuss the {\it sunrise} coordinates, which are used to compute sunrise Feynman diagrams. Such diagrams give rise to an integral of the form
+\begin{equation}
+ \int dpdq\ F(p,q,|p|,|q|,|p+q|)
+\end{equation}
+where $\rho r F(p,q,\rho,r,\gamma)$ is an analytic function of $p$, $q$, $\rho$, $r$ and $\gamma$, and
+\begin{equation}
+ |(p_1,p_2)|:=\sqrt{p_1^2+p_2^2}.
+\end{equation}
+However, since $|p|$, $|q|$ and $|p+q|$ are not analytic, the derivatives of $F(p,q,|p|,|q|,|p+q|)$ are, typically, unbounded, which can cause the error in the numerical evaluation of the integral to become uncontrollably large. In order to avoid this problem, we introduce coordinates, $(\rho,\theta,\psi,z)$, called {\it sunrise coordinates}, which are such that $p$, $q$, $|p|$, $|q|$, $|p+q|$, as well as the Jacobian of the change of variables, are analytic functions of $(\rho,\theta,\psi,z)$. Expressed in the sunrise coordinates, the integral of $F$ can be computed with good numerical accuracy.
+\bigskip
+
+{\bf Remark}: Note that if, instead of the sunrise coordinates, one used the (simpler) polar coordinates $p=\rho(\cos\theta,\sin\theta)$ and $q=r(\cos\varphi,\sin\varphi)$, then $|p+q|=\sqrt{\rho^2+r^2+2\rho r\cos(\theta-\varphi)}$, which has a divergent second derivative at $(\rho,\theta)=(r,\varphi+\pi)$. Polar coordinates, therefore, do not do the trick.
+\bigskip
+
+{\bf Remark}: The sunrise coordinates are introduced in the following lemma, which is only stated for the case $|p|>|q|$. The integration over the regime $|p|<|q|$ can be performed by exchanging $p$ and $q$.
+\bigskip
+
+\theo{Lemma}\label{lemmasunrise}
+ Let $\mathcal B_R:=\{p\in\mathbb R^2,\ |p|<R\}$. We define the map $\mathcal S$
+ \begin{equation}
+ \begin{array}{rrcl}
+ \mathcal S:&
+ \{(p,q)\in\mathcal B_R^2,\ |p|>|q|\}
+ &\longrightarrow&
+ (0,R)\times[0,2\pi)\times[-\frac\pi2,\frac\pi2]\times(0,1)\\[0.3cm]
+ &(p,q)&\longmapsto&(\rho,\theta,\psi,z)
+ \end{array}
+ \end{equation}
+ with
+ \begin{equation}
+ \rho:=|p|\in(0,R),
+ \label{eqrho}
+ \end{equation}
+ $\theta\in[0,2\pi)$ is the unique solution of
+ \begin{equation}
+ p=\rho(\cos\theta,\sin\theta),
+ \label{eqtheta}
+ \end{equation}
+ if $\varphi$ denotes the angle between $p$ and $q$, then $\psi\in[-\frac\pi2,\frac\pi2]$ is the unique solution of
+ \begin{equation}
+ \cos\psi=\sqrt{\frac{|p+q|-|p|+|q|}{2|q|}},\quad
+ \mathrm{sign}(\psi)=\mathrm{sign}(\sin\varphi),
+ \label{eqpsi}
+ \end{equation}
+ and
+ \begin{equation}
+ z:=1-\frac{1-\sqrt{1-\frac{|q|}\rho\sin^2\psi}}{1-\cos\psi}\in(0,1).
+ \label{eqz}
+ \end{equation}
+ The map $\mathcal S$ is invertible, its inverse is analytic, and is such that, if $(p,q)=\mathcal S^{-1}(\rho,\theta,\psi,z)$, then $|p|$, $|q|$ and $|p+q|$ are analytic functions of $(\rho,\theta,\psi,z)$. Furthermore, the Jacobian
+ \begin{equation}
+ J:=\left|\det\left(\frac{\partial(p_1,p_2,q_1,q_2)}{\partial(\rho,\theta,\psi,z)}\right)\right|
+ \end{equation}
+ is an analytic function of $(\rho,\theta,\psi,z)$. In addition, $\mathcal S^{-1}$, $|p|$, $|q|$, $|p+q|$ and $J$, as functions of $(\rho,\theta,\psi,z)$, can be continued analytically to $[0,R]\times[0,2\pi)\times[-\frac\pi2,\frac\pi2]\times[0,1]$. Explicitly,
+ \begin{equation}
+ p_1=\rho\cos\theta,\quad
+ p_2=\rho\sin\theta,\quad
+ q_1=\rho\bar r\cos(\theta+\varphi),\quad
+ q_2=\rho\bar r\sin(\theta+\varphi),
+ \label{eqpq}
+ \end{equation}
+ \begin{equation}
+ |p|=\rho,\quad
+ |q|=\rho\bar r,\quad
+ |p+q|=\rho(1+\bar r\cos(2\psi))
+ \label{eqnorms}
+ \end{equation}
+ and
+ \begin{equation}
+ J=4\rho^3\frac{\bar r(1+\bar r\cos(2\psi))}{(1+\cos\psi)\sqrt{1+\bar r\cos^2\psi}}
+ \label{eqjacobian}
+ \end{equation}
+ with
+ \begin{equation}
+ \bar r:=(1-z)(1+zh(\psi)),\quad
+ h(\psi):=\frac{1-\cos\psi}{1+\cos\psi},\quad
+ t:=1-(1-z)(1-\cos\psi),
+ \label{eqr}
+ \end{equation}
+ and
+ \begin{equation}
+ \cos\varphi:=\cos(2\psi)-\frac{\bar r}2\sin^2(2\psi),\quad
+ \sin\varphi:=t\sin(2\psi)\sqrt{1+\bar r\cos^2\psi}.
+ \label{eqphi}
+ \end{equation}
+\endtheo
+\bigskip
+
+\indent\underline{Proof}: In order to prove the lemma, we will compose several changes of coordinates. The {\it sunrise} coordinates described above are obtained by combining these intermediate changes of variables.
+\bigskip
+
+\point The first consists in changing $p$ to polar coordinates, which yields~\-(\ref{eqrho}), (\ref{eqtheta}) and the first two equations of~\-(\ref{eqpq}), and contributes a factor $\rho$ to the Jacobian:
+\begin{equation}
+ \int_{\mathcal B_R}dp\int_{\mathcal B_{|p|}}dq\ F=\int_0^R d\rho\int_0^{2\pi} d\theta\int_{\mathcal B_\rho} dq\ \rho\ F.
+\end{equation}
+\bigskip
+
+\point We then change variables to
+\begin{equation}
+ (\rho,\theta,q_1,q_2)\longmapsto(\rho,\theta,r,\gamma)
+\end{equation}
+with
+\begin{equation}
+ r:=|q|,\quad
+ \gamma:=|p+q|=\sqrt{\rho^2+r^2+2\rho(q_1\cos\theta+q_2\sin\theta)}
+\end{equation}
+so that
+\begin{equation}
+ \int_{\mathcal B_R}dp\int_{\mathcal B_{|p|}}dq\ F= \int_0^R d\rho \int_0^{2\pi}d\theta \int_0^\rho dr \int_{\rho-r}^{\rho+r}d\gamma\ \frac \gamma{|\sin\varphi|}\ F
+\end{equation}
+where $\varphi$ is the angle between $p$ and $q$:
+\begin{equation}
+ \cos\varphi=\frac{\gamma^2-\rho^2-r^2}{2r\rho},\quad
+ |\sin\varphi|=\sqrt{1-\cos^2\varphi}=\frac1{2r\rho}\sqrt{4r^2\rho^2-(\gamma^2-r^2-\rho^2)^2}
+\end{equation}
+which we rewrite as
+\begin{equation}
+ |\sin\varphi|=\frac1{2r\rho}
+ \sqrt{(\rho+r+\gamma)(\rho-r+\gamma)(-\rho+r+\gamma)(\rho+r-\gamma)}.
+\end{equation}
+\bigskip
+
+\point We then adimensionalize $r$ and $\gamma$, that is, we change to $\bar r,\bar \gamma$ in such a way that $\bar r,\bar \gamma\in(0,1)$:
+\begin{equation}
+ \bar r:=\frac r\rho,\quad
+ \bar \gamma:=\frac{\gamma-\rho+r}{2r}
+\end{equation}
+in terms of which
+\begin{equation}
+ \int_{\mathcal B_R}dp\int_{\mathcal B_{|p|}}dq\ F= \int_0^R d\rho \int_0^{2\pi}d\theta \int_0^1 d\bar r \int_0^1d\bar \gamma\ \frac{2\rho^3\bar r(1-\bar r+2\bar r\bar \gamma)}{|\sin\varphi|}\ F
+\end{equation}
+and
+\begin{equation}
+ |\sin\varphi|=2\sqrt{\bar \gamma(1-\bar \gamma)(1-\bar r+\bar r\bar \gamma)(1+\bar r\bar \gamma)}.
+ \label{eqphi2}
+\end{equation}
+\bigskip
+
+\point At this point, the singularities have all been shifted to $|\sin\varphi|$: $p$, $q$, $|p|$, $|q|$ and $|p+q|$ are analytic functions of $\rho$, $\bar r$, $\bar \gamma$, $\cos\theta$, $\sin\theta$, $\cos\varphi$ and $\sin\varphi$, and the only one of these that is singular is $\sin\varphi$, because of the square root in~\-(\ref{eqphi2}). We first note that $\sqrt{1+\bar r\bar \gamma}>1$, so that factor is not singular. In order to regularize the divergence in the other terms, we change variables to
+\begin{equation}
+ \cos\psi:=\sqrt{\bar \gamma},\quad
+ \sin\psi:=\mathrm{sign}(\sin\varphi)\sqrt{1-\bar \gamma},\quad
+ t:=\sqrt{1-\bar r(1-\bar \gamma)}
+\end{equation}
+after which
+\begin{equation}
+ \int_{\mathcal B_R}dp\int_{\mathcal B_{|p|}}dq\ F= \int_0^R d\rho \int_0^{2\pi}d\theta \int_{-\frac\pi2}^{\frac\pi2} d\psi \int_{\cos\psi}^1dt\ 4\rho^3\frac{(1-t^2)}{\sin^4\psi}\frac{\left(1+\frac{(1-t^2)}{\sin^2\psi}(2\cos^2\psi-1)\right)}{\sqrt{1+\frac{(1-t^2)}{\sin^2\psi}\cos^2\psi}}\ F.
+\end{equation}
+\bigskip
+
+\point Finally, we adimensionalize $t$:
+\begin{equation}
+ z:=1-\frac{1-t}{1-\cos\psi}
+\end{equation}
+so that
+\begin{equation}
+ \begin{largearray}
+ \int_{\mathcal B_R}dp\int_{\mathcal B_{|p|}}dq\ F= \int_0^R d\rho \int_0^{2\pi}d\theta \int_{-\frac\pi2}^{\frac\pi2} d\psi \int_0^1dz
+ \\[0.3cm]\hfill
+ 4\rho^3\frac{(1-z)(1+zh(\psi))}{1+\cos\psi}\frac{(1+(1-z)(1+zh(\psi))(2\cos^2\psi-1))}{\sqrt{1+(1-z)(1+zh(\psi))\cos^2\psi}}\ F.
+ \end{largearray}
+ \label{eqintfinal}
+\end{equation}
+\bigskip
+
+\point Equations~\-(\ref{eqpq}) through~\-(\ref{eqphi}) follow from~\-(\ref{eqintfinal}). The analyticity of $p$, $q$, $|p|$, $|q|$, $|p+q|$ and $J$ is a simple consequence of~\-(\ref{eqpq}), (\ref{eqnorms}) and~\-(\ref{eqjacobian}).\qed
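+\bigskip
+
+{\bf Remark}: As an elementary consistency check of~\-(\ref{eqnorms}) through~\-(\ref{eqphi}) (the numerical values below are only meant as an illustration), take $\psi=\frac\pi3$ and $z=\frac12$: then $h(\psi)=\frac13$, $\bar r=\frac7{12}$, $t=\frac34$ and $|p+q|=\rho(1+\bar r\cos\frac{2\pi}3)=\frac{17}{24}\rho$; inserting $|p|=\rho$, $|q|=\frac7{12}\rho$ and $|p+q|=\frac{17}{24}\rho$ into~\-(\ref{eqpsi}) gives $\cos^2\psi=\frac14$, as it should.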
+
+\vfill\eject
+
+
+\section{Estimate of the error of Gauss-Legendre quadratures for Gevrey functions}\label{appGL}
+\indent In this appendix, we compute the error of Gauss-Legendre quadratures when used to integrate class-$s$ Gevrey functions. A class-$s$ Gevrey function on $[-1,1]$ is a $\mathcal C^\infty$ function for which there exist $C_0,C>0$ such that, $\forall n\in\mathbb N$,
+\begin{equation}
+ \mathop{\mathrm{sup}}_{x\in[-1,1]}\left|\frac{d^nf(x)}{dx^n}\right|\leqslant C_0C^n(n!)^s.
+\end{equation}
+
+\bigskip
+
+\theo{Lemma}\label{lemmaGL}
+ Let $f$ be a class-$s$ Gevrey function with $s\in\mathbb N\setminus\{0\}$. There exist $c_0,c_1,b>0$, which are independent of $s$, and $N_0>0$, which is independent of $s$ and $f$, such that, if $N\geqslant N_0$, then
+ \begin{equation}
+ E_N(f)\leqslant c_0c_1^{s-1}(2N)^{1-\frac1s}e^{-b(2N)^{\frac1s}}s!.
+ \end{equation}
+ In particular, if $f$ is analytic (i.e. $s=1$), then
+ \begin{equation}
+ E_N(f)\leqslant c_0e^{-2bN}.
+ \end{equation}
+\endtheo
+\bigskip
+
+\indent\underline{Proof}:\par\penalty10000
+\medskip\penalty10000
+\point We approximate $f$ by Chebyshev polynomials:
+\begin{equation}
+ f(x)=\frac{c_0}2+\sum_{j=1}^{\infty}c_jT_j(x)
+ \label{eqcheby}
+\end{equation}
+where $T_j$ is the $j$-th Chebyshev polynomial:
+\begin{equation}
+ T_j(x):=\cos(j\arccos(x)),\quad
+ c_j:=\frac2\pi\int_0^\pi d\theta\ f(\cos\theta)\cos(j\theta).
+\end{equation}
+Note that~\-(\ref{eqcheby}) is nothing other than the Fourier cosine series expansion of $F(\theta):=f(\cos(\theta))$, which is an even, periodic, class-$s$ Gevrey function on $[-\pi,\pi]$, whose $j$-th Fourier coefficient for $j\in\mathbb Z$ is equal to $\frac12c_{|j|}$. Furthermore, using a well-known estimate of the decay of Fourier coefficients of class-$s$ Gevrey functions (see e.g.~\-[\cite{Ta87}, Theorem~\-3.3]), there exist $b_0,b>0$ such that
+\begin{equation}
+ |c_j|\leqslant b_0e^{-bj^{\frac1s}}.
+ \label{eqcjbound}
+\end{equation}
+\bigskip
+
+\point Furthermore, since order-$N$ Gauss-Legendre quadratures are exact on polynomials of order $\leqslant 2N-1$, we have, formally,
+\begin{equation}
+ E_N(f)\leqslant\sum_{j=2N}^\infty |c_j|E_N(T_j).
+\end{equation}
+As was proved by A.R.~\-Curtis and P.~Rabinowitz~\-[\cite{CR72}], if $N$ is large enough, then
+\begin{equation}
+ E_N(T_j)\leqslant\pi
+\end{equation}
+which, by~\-(\ref{eqcjbound}), implies that
+\begin{equation}
+ E_N(f)\leqslant\pi\sum_{j=2N}^\infty |c_j|\leqslant\pi b_0\sum_{j=2N}^\infty e^{-bj^{\frac1s}}.
+\end{equation}
+Furthermore, if $\nu_{N,s}^s:=\lfloor(2N)^{\frac1s}\rfloor^s$ denotes the largest integer that is $\leqslant 2N$ and has an integer $s$-th root, then
+\begin{equation}
+ \sum_{j=2N}^\infty e^{-bj^{\frac1s}}\leqslant
+ \sum_{j=\nu_{N,s}^s}^\infty e^{-bj^{\frac1s}}\leqslant
+ \sum_{k=\nu_{N,s}}^\infty(k^s-(k-1)^s)e^{-bk}\leqslant
+ s\sum_{k=\nu_{N,s}}^\infty k^{s-1}e^{-bk}.
+\end{equation}
+We then estimate
+\begin{equation}
+ \sum_{k=\nu_{N,s}}^\infty k^{s-1}e^{-bk}=\frac{d^{s-1}}{d(-b)^{s-1}}\sum_{k=\nu_{N,s}}^\infty e^{-bk}
+ \leqslant (s-1)!\left(\nu_{N,s}+\frac1{1-e^{-b}}\right)^{s-1}\frac{e^{-b\nu_{N,s}}}{1-e^{-b}}
+\end{equation}
+which concludes the proof of the lemma.\qed
+
+\vfill\eject
+
+\references
+\BBlography
+
+\end{document}
diff --git a/doc/hhtop-doc/src/iansecs.sty b/doc/hhtop-doc/src/iansecs.sty
new file mode 100644
index 0000000..5e9acb1
--- /dev/null
+++ b/doc/hhtop-doc/src/iansecs.sty
@@ -0,0 +1,614 @@
+%%
+%% This file contains the main style commands
+%%
+%% Some options can be set by changing the \loaddefaults command
+%%
+
+\usepackage{color}
+\usepackage{marginnote}
+
+\def\loaddefaults{
+ \sectionstrue
+ \subseqcountfalse
+ \def\seqskip{\vskip1.5cm}
+ \def\subseqskip{\vskip1cm}
+ \def\subsubseqskip{\vskip0.5cm}
+ \resetpointattheofalse
+ \parindent=0pt
+ \def\indent{\hskip20pt}
+}
+
+% false if there are no sections
+\newif\ifsections
+% true if equation numbers should include the subsection number
+\newif\ifsubseqcount
+% true if there is a table of contents
+\newif\iftoc
+% true if point counting should reset at each theorem
+\newif\ifresetpointattheo
+
+% a prefix to put before the section number, e.g. A for appendices
+\def\sectionprefix{}
+
+\loaddefaults
+
+%% style for the equation number
+\def\eqnumstyle{}
+
+%% correct vertical alignment at the end of a document
+\AtEndDocument{
+ \vfill
+ \eject
+}
+
+%% hyperlinks
+% hyperlinkcounter
+\newcounter{lncount}
+% hyperref anchor
+\def\hrefanchor{%
+\stepcounter{lncount}%
+\hypertarget{ln.\thelncount}{}%
+}
+
+%% define a command and write it to aux file
+\def\outdef#1#2{
+ % define command
+ \expandafter\xdef\csname #1\endcsname{#2}
+ % hyperlink number
+ \expandafter\xdef\csname #1@hl\endcsname{\thelncount}
+ % write command to aux
+ \immediate\write\@auxout{\noexpand\expandafter\noexpand\gdef\noexpand\csname #1\endcsname{\csname #1\endcsname}}
+ \immediate\write\@auxout{\noexpand\expandafter\noexpand\gdef\noexpand\csname #1@hl\endcsname{\thelncount}}
+}
+
+%% can call commands even when they are not defined
+\def\safe#1{%
+ \ifdefined#1%
+ #1%
+ \else%
+ {\color{red}\bf?}%
+ \fi%
+}
+
+%% define a label for the latest tag
+%% label defines a command containing the string stored in \tag
+\AtBeginDocument{
+ \def\label#1{\expandafter\outdef{label@#1}{\safe\tag}
+}
+
+%% make a custom link at any given location in the document
+\def\makelink#1#2{
+ \hrefanchor
+ \outdef{label@#1}{#2}
+}
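+% (for instance, the documentation uses \makelink{rkphi}{\thepage} to mark a
+% remark, which can then be referenced with \ref{rkphi})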
+
+\def\ref#1{%
+ % check whether the label is defined (hyperlink runs into errors if this check is omitted)
+ \ifcsname label@#1@hl\endcsname%
+ \hyperlink{ln.\csname label@#1@hl\endcsname}{{\color{blue}\safe\csname label@#1\endcsname}}%
+ \else%
+ \ifcsname label@#1\endcsname%
+ {\color{blue}\csname label@#1\endcsname}%
+ \else%
+ {\bf ??}%
+ \fi%
+ \fi%
+ }%
+}
+
+%% counters
+\newcounter{sectioncount}
+\newcounter{subsectioncount}
+\newcounter{subsubsectioncount}
+\newcounter{pointcount}
+\newcounter{subpointcount}
+\newcounter{subsubpointcount}
+\newcounter{seqcount}
+\newcounter{figcount}
+\newcounter{Theocount}
+\newcounter{tocsectioncount}
+\newcounter{tocsubsectioncount}
+\newcounter{tocsubsubsectioncount}
+
+%% section command
+\newlength\secnumwidth
+\newlength\sectitlewidth
+\def\section#1{
+ % reset counters
+ \stepcounter{sectioncount}
+ \setcounter{subsectioncount}{0}
+ \setcounter{subsubsectioncount}{0}
+ \setcounter{pointcount}{0}
+ \setcounter{subpointcount}{0}
+ \setcounter{subsubpointcount}{0}
+ \setcounter{figcount}{0}
+ \setcounter{Theocount}{0}
+ \setcounter{seqcount}{0}
+
+ % space before section (if not first)
+ \ifnum\thesectioncount>1
+ \seqskip
+ \penalty-1000
+ \fi
+
+ % hyperref anchor
+ \hrefanchor
+ % define tag (for \label)
+ \xdef\tag{\sectionprefix\thesectioncount}
+
+ % get widths
+ \def\@secnum{{\bf\Large\sectionprefix\thesectioncount.\hskip10pt}}
+ \settowidth\secnumwidth{\@secnum}
+ \setlength\sectitlewidth\textwidth
+ \addtolength\sectitlewidth{-\secnumwidth}
+ % print name
+ \parbox{\textwidth}{
+ \@secnum
+ \parbox[t]{\sectitlewidth}{\Large\bf #1}}
+
+ % write to table of contents
+ \iftoc
+ % save lncount in aux variable which is written to toc
+ \immediate\write\tocoutput{\noexpand\expandafter\noexpand\edef\noexpand\csname toc@sec.\thesectioncount\endcsname{\thelncount}}
+ \write\tocoutput{\noexpand\tocsection{#1}{\thepage}}
+ \fi
+
+ %space
+ \par\penalty10000
+ \bigskip\penalty10000
+}
+
+%% subsection
+\def\subsection#1{
+ % counters
+ \stepcounter{subsectioncount}
+ \setcounter{subsubsectioncount}{0}
+ \setcounter{pointcount}{0}
+ \setcounter{subpointcount}{0}
+ \setcounter{subsubpointcount}{0}
+ \ifsubseqcount
+ \setcounter{seqcount}0
+ \fi
+
+ % space before subsection (if not first)
+ \ifnum\thesubsectioncount>1
+ \subseqskip
+ \penalty-500
+ \fi
+
+ % hyperref anchor
+ \hrefanchor
+ % define tag (for \label)
+ \xdef\tag{\sectionprefix\thesectioncount.\thesubsectioncount}
+
+ % get widths
+ \def\@secnum{{\bf\large\hskip.5cm\sectionprefix\thesectioncount.\thesubsectioncount.\hskip5pt}}
+ \settowidth\secnumwidth{\@secnum}
+ \setlength\sectitlewidth\textwidth
+ \addtolength\sectitlewidth{-\secnumwidth}
+ % print name
+ \parbox{\textwidth}{
+ \@secnum
+ \parbox[t]{\sectitlewidth}{\large\bf #1}}
+
+ % write to table of contents
+ \iftoc
+ % save lncount in aux variable which is written to toc
+ \immediate\write\tocoutput{\noexpand\expandafter\noexpand\edef\noexpand\csname toc@subsec.\thesectioncount.\thesubsectioncount\endcsname{\thelncount}}
+ \write\tocoutput{\noexpand\tocsubsection{#1}{\thepage}}
+ \fi
+
+ % space
+ \par\penalty10000
+ \medskip\penalty10000
+}
+
+%% subsubsection
+\def\subsubsection#1{
+ % counters
+ \stepcounter{subsubsectioncount}
+ \setcounter{pointcount}{0}
+ \setcounter{subpointcount}{0}
+ \setcounter{subsubpointcount}{0}
+
+ % space before subsubsection (if not first)
+ \ifnum\thesubsubsectioncount>1
+ \subsubseqskip
+ \penalty-500
+ \fi
+
+ % hyperref anchor
+ \hrefanchor
+ % define tag (for \label)
+ \xdef\tag{\sectionprefix\thesectioncount.\thesubsectioncount.\thesubsubsectioncount}
+
+ % get widths
+ \def\@secnum{{\bf\hskip1.cm\sectionprefix\thesectioncount.\thesubsectioncount.\thesubsubsectioncount.\hskip5pt}}
+ \settowidth\secnumwidth{\@secnum}
+ \setlength\sectitlewidth\textwidth
+ \addtolength\sectitlewidth{-\secnumwidth}
+ % print name
+ \parbox{\textwidth}{
+ \@secnum
+ \parbox[t]{\sectitlewidth}{\large\bf #1}}
+
+ % write to table of contents
+ \iftoc
+ % save lncount in aux variable which is written to toc
+ \immediate\write\tocoutput{\noexpand\expandafter\noexpand\edef\noexpand\csname toc@subsubsec.\thesectioncount.\thesubsectioncount.\thesubsubsectioncount\endcsname{\thelncount}}
+ \write\tocoutput{\noexpand\tocsubsubsection{#1}{\thepage}}
+ \fi
+
+ % space
+ \par\penalty10000
+ \medskip\penalty10000
+}
+
+%% itemize
+\newlength\itemizeskip
+% left margin for items
+\setlength\itemizeskip{20pt}
+% item symbol
+\def\itemizept{\textbullet}
+\newlength\itemizeseparator
+% space between the item symbol and the text
+\setlength\itemizeseparator{5pt}
+% penalty preceding an itemize
+\def\itemizepenalty{0}
+
+\newlength\current@itemizeskip
+\setlength\current@itemizeskip{0pt}
+\def\itemize{
+ \par\penalty\itemizepenalty\medskip\penalty\itemizepenalty
+ \addtolength\current@itemizeskip{\itemizeskip}
+ \leftskip\current@itemizeskip
+}
+\def\enditemize{
+ \addtolength\current@itemizeskip{-\itemizeskip}
+ \par\leftskip\current@itemizeskip
+ \medskip
+}
+\newlength\itempt@total
+\def\item{
+ \settowidth\itempt@total{\itemizept}
+ \addtolength\itempt@total{\itemizeseparator}
+ \par
+ \medskip
+ \hskip-\itempt@total\itemizept\hskip\itemizeseparator
+}
+
+%% enumerate
+\newcounter{enumerate@count}
+\def\enumerate{
+ \setcounter{enumerate@count}0
+ \let\olditem\item
+ \let\olditemizept\itemizept
+ \def\item{
+ % counter
+ \stepcounter{enumerate@count}
+ % set header
+ \def\itemizept{\theenumerate@count.}
+ % hyperref anchor
+ \hrefanchor
+ % define tag (for \label)
+ \xdef\tag{\theenumerate@count}
+ \olditem
+ }
+ \itemize
+}
+\def\endenumerate{
+ \enditemize
+ \let\item\olditem
+ \let\itemizept\olditemizept
+}
+
+%% points
+\def\point{
+ \stepcounter{pointcount}
+ \setcounter{subpointcount}{0}
+ % hyperref anchor
+ \hrefanchor
+ \indent{\bf \thepointcount\ - }
+ % define tag (for \label)
+ \xdef\tag{\thepointcount}
+}
+\def\subpoint{
+ \stepcounter{subpointcount}
+ \setcounter{subsubpointcount}0
+ % hyperref anchor
+ \hrefanchor
+ \indent\hskip.5cm{\bf \thepointcount-\thesubpointcount\ - }
+ % define tag (for \label)
+ \xdef\tag{\thepointcount-\thesubpointcount}
+}
+\def\subsubpoint{
+ \stepcounter{subsubpointcount}
+ % hyperref anchor
+ \hrefanchor
+ \indent\hskip1cm{\bf \thepointcount-\thesubpointcount-\thesubsubpointcount\ - }
+ % define tag (for \label)
+ \xdef\tag{\thepointcount-\thesubpointcount-\thesubsubpointcount}
+}
+\def\pspoint{
+ \stepcounter{pointcount}
+ \stepcounter{subpointcount}
+ \setcounter{subsubpointcount}0
+ % hyperref anchor
+ \hrefanchor
+ \indent\hskip.5cm{\bf \thepointcount-\thesubpointcount\ - }
+ % define tag (for \label)
+ \xdef\tag{\thepointcount-\thesubpointcount}
+}
+
+% reset points
+\def\resetpointcounter{
+ \setcounter{pointcount}{0}
+ \setcounter{subpointcount}{0}
+ \setcounter{subsubpointcount}{0}
+}
+
+%% equation numbering
+\def\seqcount{
+ \stepcounter{seqcount}
+ % the output
+ \edef\seqformat{\theseqcount}
+ % add subsection number
+ \ifsubseqcount
+ \let\tmp\seqformat
+ \edef\seqformat{\thesubsectioncount.\tmp}
+ \fi
+ % add section number
+ \ifsections
+ \let\tmp\seqformat
+ \edef\seqformat{\sectionprefix\thesectioncount.\tmp}
+ \fi
+ % define tag (for \label)
+ \xdef\tag{\seqformat}
+ % write number
+ \marginnote{\eqnumstyle\hfill(\seqformat)}
+}
+%% equation environment compatibility
+\def\equation{\hrefanchor$$\seqcount}
+\def\endequation{$$\@ignoretrue}
+
+%% figures
+\newlength\figwidth
+\setlength\figwidth\textwidth
+\addtolength\figwidth{-2.5cm}
+
+\def\caption#1{
+ \stepcounter{figcount}
+
+ % hyperref anchor
+ \hrefanchor
+
+ % the number of the figure
+ \edef\figformat{\thefigcount}
+ % add section number
+ \ifsections
+ \let\tmp\figformat
+ \edef\figformat{\sectionprefix\thesectioncount.\tmp}
+ \fi
+
+ % define tag (for \label)
+ \xdef\tag{\figformat}
+
+ % write
+ \hfil fig \figformat: \parbox[t]{\figwidth}{\leavevmode\small#1}
+
+ % space
+ \par\bigskip
+}
+%% short caption: centered
+\def\captionshort#1{
+ \stepcounter{figcount}
+
+ % hyperref anchor
+ \hrefanchor
+
+ % the number of the figure
+ \edef\figformat{\thefigcount}
+ % add section number
+ \ifsections
+ \let\tmp\figformat
+ \edef\figformat{\sectionprefix\thesectioncount.\tmp}
+ \fi
+
+ % define tag (for \label)
+ \xdef\tag{\figformat}
+
+ % write
+ \hfil fig \figformat: {\small#1}
+
+ %space
+ \par\bigskip
+}
+
+%% environment
+\def\figure{
+ \par\penalty-500
+}
+\def\endfigure{
+ \par\penalty-1000
+}
+
+%% delimiters
+\def\delimtitle#1{
+ \par%
+ \leavevmode%
+ \raise.3em\hbox to\hsize{%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ \hrulefill%
+ \ \lower.3em\hbox{#1}\ %
+ \hrulefill%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ }%
+ \par\penalty10000%
+}
+
+%% callable by ref
+\def\delimtitleref#1{
+ \par%
+%
+ % hyperref anchor%
+ \hrefanchor%
+%
+ % define tag (for \label)%
+ \xdef\tag{#1}%
+%
+ \leavevmode%
+ \raise.3em\hbox to\hsize{%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ \hrulefill%
+ \ \lower.3em\hbox{\bf #1}\ %
+ \hrulefill%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ }%
+ \par\penalty10000%
+}
+
+%% no title
+\def\delim{
+ \par%
+ \leavevmode\raise.3em\hbox to\hsize{%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ \hrulefill%
+ \lower0.3em\hbox{\vrule height0.3em}%
+ }%
+ \par\penalty10000%
+}
+
+%% end delim
+\def\enddelim{
+ \par\penalty10000%
+ \leavevmode%
+ \raise.3em\hbox to\hsize{%
+ \vrule height0.3em\hrulefill\vrule height0.3em%
+ }%
+ \par%
+}
+
+%% theorem headers
+\def\theo#1{
+ \stepcounter{Theocount}
+ % reset points
+ \ifresetpointattheo\resetpointcounter\fi
+ % hyperref anchor
+ \hrefanchor
+ % the number
+ \def\formattheo{\theTheocount}
+ % add section number
+ \ifsections
+ \let\tmp\formattheo
+ \edef\formattheo{\sectionprefix\thesectioncount.\tmp}
+ \fi
+ % define tag (for \label)
+ \xdef\tag{\formattheo}
+ % write
+ \delimtitle{\bf #1 \formattheo}
+}
+\let\endtheo\enddelim
+%% theorem headers with name
+\def\theoname#1#2{
+ \theo{#1}\hfil({\it #2})\par\penalty10000\medskip%
+}
+
+%% start appendices
+\def\appendix{
+ \vfill
+ \pagebreak
+
+ % counter
+ \setcounter{sectioncount}0
+
+ % prefix
+ \def\sectionprefix{A}
+
+ % write
+ {\bf \LARGE Appendices}\par\penalty10000\bigskip\penalty10000
+
+ % add a mention in the table of contents
+ \iftoc
+ \immediate\write\tocoutput{\noexpand\tocappendices}\penalty10000
+ \fi
+
+ %% uncomment for new page for each appendix
+ %\def\seqskip{\vfill\pagebreak}
+}
+
+%% start references
+\def\references{
+ \hrefanchor
+
+ % write
+ {\bf \LARGE References}\par\penalty10000\bigskip\penalty10000
+
+ % add a mention in the table of contents
+ \iftoc
+ % save lncount in aux variable which is written to toc
+ \immediate\write\tocoutput{\noexpand\expandafter\noexpand\edef\noexpand\csname toc@references\endcsname{\thelncount}}
+ \write\tocoutput{\noexpand\tocreferences{\thepage}}\penalty10000
+ \fi
+}
+
+
+%% table of contents
+\newif\iftocopen
+\def\tableofcontents{
+ {\bf \large Table of contents:}\par\penalty10000\bigskip\penalty10000
+
+ % copy content from file
+ \IfFileExists{\jobname.toc}{\input{\jobname.toc}}{{\tt error: table of contents missing}}
+
+ % open new toc
+ \newwrite\tocoutput
+ \immediate\openout\tocoutput=\jobname.toc
+
+ \toctrue
+}
+%% close file
+\AtEndDocument{
+ % close toc
+ \iftoc
+ \immediate\closeout\tocoutput
+ \fi
+}
+
+
+%% fill line with dots
+\def\leaderfill{\leaders\hbox to 1em {\hss. \hss}\hfill}
+
+%% same as sectionprefix
+\def\tocsectionprefix{}
+
+%% toc formats
+\def\tocsection #1#2{
+ \stepcounter{tocsectioncount}
+ \setcounter{tocsubsectioncount}{0}
+ \setcounter{tocsubsubsectioncount}{0}
+ % write
+ \smallskip\hyperlink{ln.\csname toc@sec.\thetocsectioncount\endcsname}{{\bf \tocsectionprefix\thetocsectioncount}.\hskip5pt {\color{blue}#1}\leaderfill#2}\par
+}
+\def\tocsubsection #1#2{
+ \stepcounter{tocsubsectioncount}
+ \setcounter{tocsubsubsectioncount}{0}
+ % write
+ {\hskip10pt\hyperlink{ln.\csname toc@subsec.\thetocsectioncount.\thetocsubsectioncount\endcsname}{{\bf \thetocsubsectioncount}.\hskip5pt {\color{blue}\small #1}\leaderfill#2}}\par
+}
+\def\tocsubsubsection #1#2{
+ \stepcounter{tocsubsubsectioncount}
+ % write
+ {\hskip20pt\hyperlink{ln.\csname toc@subsubsec.\thetocsectioncount.\thetocsubsectioncount.\thetocsubsubsectioncount\endcsname}{{\bf \thetocsubsubsectioncount}.\hskip5pt {\color{blue}\small #1}\leaderfill#2}}\par
+}
+\def\tocappendices{
+ \medskip
+ \setcounter{tocsectioncount}0
+ {\bf Appendices}\par
+ \smallskip
+ \def\tocsectionprefix{A}
+}
+\def\tocreferences#1{
+ \medskip
+ {\hyperlink{ln.\csname toc@references\endcsname}{{\color{blue}\bf References}\leaderfill#1}}\par
+ \smallskip
+}
diff --git a/doc/hhtop-doc/src/kiss.cls b/doc/hhtop-doc/src/kiss.cls
new file mode 100644
index 0000000..7f0029f
--- /dev/null
+++ b/doc/hhtop-doc/src/kiss.cls
@@ -0,0 +1,51 @@
+%%
+%% Barebones class declaration
+%%
+
+\NeedsTeXFormat{LaTeX2e}[1995/12/01]
+\ProvidesClass{kiss}
+
+\setlength\paperheight {297mm}
+\setlength\paperwidth {210mm}
+
+%% fonts
+\input{size11.clo}
+\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm}
+\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf}
+\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt}
+\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf}
+\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit}
+\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl}
+\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc}
+
+%% something is wrong with \thepage, redefine it
+\gdef\thepage{\the\c@page}
+
+%% array lines
+\setlength\arraycolsep{5\p@}
+\setlength\arrayrulewidth{.4\p@}
+
+
+%% default offsets: 1in, correct with \hoffset and \voffset
+%\hoffset=0pt
+\hoffset=-50pt
+%\voffset=0pt
+\voffset=-72pt
+%% horizontal margins
+%\oddsidemargin=31pt
+%\evensidemargin=31pt
+%% vertical margin
+%\topmargin=20pt
+%% body size
+%\textwidth=390pt
+\textwidth=460pt
+%\textheight=592pt
+\textheight=704pt
+%% header size and margin
+%\headheight=12pt
+%\headsep=25pt
+%% footer size
+%\footskip=30pt
+%% margin size and margin
+%\marginparwidth=35pt
+%\marginparsep=10pt
diff --git a/doc/hhtop-doc/src/toolbox.sty b/doc/hhtop-doc/src/toolbox.sty
new file mode 100644
index 0000000..c606711
--- /dev/null
+++ b/doc/hhtop-doc/src/toolbox.sty
@@ -0,0 +1,50 @@
+%%
+%% A collection of useful commands
+%%
+
+
+%% larger skip
+\newskip\hugeskipamount
+ \hugeskipamount=24pt plus8pt minus8pt
+\def\hugeskip{\vskip\hugeskipamount}
+
+
+%% penalty before large blocks
+\def\preblock{
+ \penalty-500
+}
+
+%% listparpenalty prevents page breaks before lists
+\newcount\prevparpenalty
+\def\listparpenalty{
+ \prevparpenalty=\@beginparpenalty
+ \@beginparpenalty=10000
+}
+%% back to previous value
+\def\unlistparpenalty{
+ \@beginparpenalty=\prevparpenalty
+}
+
+%% prevent page breaks after displayed equations
+\newcount\prevpostdisplaypenalty
+\def\nopagebreakaftereq{
+ \prevpostdisplaypenalty=\postdisplaypenalty
+ \postdisplaypenalty=10000
+}
+%% back to previous value
+\def\restorepagebreakaftereq{
+ \postdisplaypenalty=\prevpostdisplaypenalty
+}
+
+%% stack relations in subscript or superscript
+\def\mAthop#1{\displaystyle\mathop{\scriptstyle #1}}
+
+%% array spanning the entire line
+\newlength\largearray@width
+\setlength\largearray@width\textwidth
+\addtolength\largearray@width{-10pt}
+\def\largearray{\begin{array}{@{}>{\displaystyle}l@{}}\hphantom{\hspace{\largearray@width}}\\[-.5cm]}
+\def\endlargearray{\end{array}}
+
+%% qedsquare
+\def\qed{\penalty10000\hfill\penalty10000$\square$}
diff --git a/libinum-1.0.1.tar.gz b/libinum-1.0.1.tar.gz
new file mode 100644
index 0000000..84e7ee5
--- /dev/null
+++ b/libinum-1.0.1.tar.gz
Binary files differ
diff --git a/man/hhtop.1 b/man/hhtop.1
new file mode 100644
index 0000000..40e1ef7
--- /dev/null
+++ b/man/hhtop.1
@@ -0,0 +1,161 @@
+.Dd $Mdocdate: May 20 2016 $
+.Dt hhtop 1.0
+.Os
+.Sh NAME
+.Nm hhtop
+.Nd A tool to compute the renormalization of various quantities in the Haldane-Hubbard model
+.Sh SYNOPSIS
+.Nm
+.Sy phase
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Op Fl P Ar precision
+.Op Fl E Ar emax
+.Pp
+.Nm
+.Sy z1-z2
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Op Fl P Ar precision
+.Op Fl E Ar emax
+.Op Fl C Ar threads
+.Pp
+.Nm
+.Sy z1+z2
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Op Fl P Ar precision
+.Op Fl E Ar emax
+.Op Fl C Ar threads
+.Pp
+.Pp
+.Nm
+.Fl D
+.Sy phase
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Pp
+.Nm
+.Fl D
+.Sy z1-z2
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Op Fl C Ar threads
+.Pp
+.Nm
+.Fl D
+.Sy z1+z2
+.Op Fl p Ar params
+.Op Fl v
+.Op Fl O Ar order
+.Op Fl t Ar tolerance
+.Op Fl N Ar maxiter
+.Op Fl C Ar threads
+.Pp
+.Pp
+.Nm
+.Fl V
+.Op Fl v
+.Pp
+.Sh DESCRIPTION
+.Nm
+can carry out several computations, depending on the arguments provided on the command-line.
+.Bl -tag -width Ds
+.It Sy phase
+When called with this argument,
+.Nm
+computes the one-loop renormalization of the curve at which the effective mass of the Haldane-Hubbard model vanishes. Given the values of the parameters omega, t1, t2, lambda and phi,
+.Nm
+computes the W that is such that M(W,phi)=0.
+.It Sy z1-z2
+When called with this argument,
+.Nm
+computes the difference of the (a,a) and (b,b) components of the wave-function renormalization at second order.
+.It Sy z1+z2
+When called with this argument,
+.Nm
+computes the sum of the (a,a) and (b,b) components of the wave-function renormalization at second order.
+.El
+.Pp
+The numerical values are stored, by default, as multi-precision floats (using the GNU MPFR library), which allows the computation to be carried out at arbitrary precision. This behavior can be changed: when the
+.Fl D
+flag is set, numerical values are represented as 'long double's. The precision is then fixed to 64 bits, but the computation is usually a lot faster.
+.Pp
+.Sh COMMAND-LINE ARGUMENTS
+.Bl -tag -width Ds
+.It Fl D
+Represent numerical values using 'long double's instead of 'mpfr's. The precision is then fixed to 64 bits. This can drastically speed up the computation.
+.It Fl p Ar params
+The value of the parameters of the model.
+.Ar params
+should be a sequence of statements of the form "variable=value;" where 'variable' is one of omega, t1, t2, lambda, sinphi, W.
+.Bl -tag -width Ds
+.It Sy omega
++1 or -1 (default: +1)
+.It Sy t1
+real number (default: 1.)
+.It Sy t2
+real number, 3|t2|<|t1| (default: 0.1)
+.It Sy lambda
+real number (default: 0.01)
+.It Sy phi
+real number (default: pi/2)
+.It Sy sinphi
+[-1,1] (default: 1.)
+.It Sy W
+real number (default: 3*sqrt(3)*t2*sin(phi) or 3*sqrt(3)*t2*sinphi)
+.El
+.Pp
+Note that
+.Sy phi
+and
+.Sy sinphi
+cannot be set simultaneously.
+.It Fl v
+Print the values of the parameters to
+.Sx stderr .
+.Pp
+If used in conjunction with the
+.Fl V
+flag, then
+.Nm
+will print information about the size of integers, the precision of doubles and long doubles, and the data type used to store the precision and maximal exponent of MPFR floats (all of which are system-dependent).
+.It Fl O Ar order
+The order of the Gauss-Legendre quadrature integration scheme (default: 10).
+.It Fl t Ar tolerance
+Allowed error when computing roots, i.e. computing the roots of Legendre polynomials to setup the Gauss-Legendre quadratures, or the solution of M(W,phi)=0 (default: 1.e-11).
+.It Fl N Ar maxiter
+Maximal number of Newton steps to perform before giving up (default: 1000000).
+.It Fl P Ar precision
+Precision of multi-precision floats in bits (default: 53).
+.It Fl E Ar emax
+Largest allowed exponent for multi-precision floats (default: 1073741823)
+.It Fl C Ar threads
+Number of threads used for the computation (only for the
+.Sy z1-z2
+and
+.Sy z1+z2
+computations).
+.It Fl V
+Print version and exit.
+.El
+.Pp
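+.Sh EXAMPLES
+The following commands are taken from the documentation.
+.Pp
+Compute the difference of the (a,a) and (b,b) components of the wave-function renormalization, with omega=1, t1=1, t2=0.1 and phi=1:
+.Bd -literal -offset indent
+hhtop z1-z2 -p "omega=1;t1=1.;t2=.1;phi=1;"
+.Ed
+.Pp
+Same computation, with the order of the Gauss-Legendre quadratures set to 30:
+.Bd -literal -offset indent
+hhtop z1-z2 -O 30
+.Ed
+.Pp
+Compute the value of W at which the effective mass vanishes, using 128-bit multi-precision floats:
+.Bd -literal -offset indent
+hhtop phase -P 128
+.Ed
+.Pp
+Use long doubles instead of multi-precision floats:
+.Bd -literal -offset indent
+hhtop -D z1-z2 -p "sinphi=1.;"
+.Ed
+.Pp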
+.Sh RETURN CODE
+.Nm
+returns 0 on success and -1 on error.
+.Pp
diff --git a/src/definitions.h b/src/definitions.h
new file mode 100644
index 0000000..16c640f
--- /dev/null
+++ b/src/definitions.h
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef DEFINITIONS_S
+#define DEFINITIONS_S
+
+#define VERSION "1.0"
+
+#endif
+
diff --git a/src/double_util.c b/src/double_util.c
new file mode 100644
index 0000000..a4b5949
--- /dev/null
+++ b/src/double_util.c
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "double_util.h"
+#include <mpfr.h>
+
+// convert hh_params
+int hh_params_todouble(hh_params_double* params_double, hh_params params){
+ params_double->omega=params.omega;
+ params_double->t1=mpfr_get_ld(params.t1, MPFR_RNDN);
+ params_double->t2=mpfr_get_ld(params.t2, MPFR_RNDN);
+ params_double->lambda=mpfr_get_ld(params.lambda, MPFR_RNDN);
+ params_double->sinphi=mpfr_get_ld(params.sinphi, MPFR_RNDN);
+ params_double->phi=mpfr_get_ld(params.phi, MPFR_RNDN);
+ params_double->W=mpfr_get_ld(params.W, MPFR_RNDN);
+ return(0);
+}
diff --git a/src/double_util.h b/src/double_util.h
new file mode 100644
index 0000000..f903809
--- /dev/null
+++ b/src/double_util.h
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ convert hh_params to double
+*/
+
+#ifndef DOUBLE_UTIL_H
+#define DOUBLE_UTIL_H
+
+#include "types.h"
+
+// params
+typedef struct hh_params_double {
+ int omega;
+ long double t1;
+ long double t2;
+ long double lambda;
+ long double sinphi;
+ long double phi;
+ long double W;
+} hh_params_double;
+
+// format for I function (used to compute sunrise diagrams)
+#define TYPE_I_DOUBLE long double (*I)(long double, long double, long double, long double, long double, long double, long double, long double, long double, long double)
+
+// convert hh_params
+int hh_params_todouble(hh_params_double* params_double, hh_params params);
+
+#endif
diff --git a/src/hh_integral.c b/src/hh_integral.c
new file mode 100644
index 0000000..f8eda10
--- /dev/null
+++ b/src/hh_integral.c
@@ -0,0 +1,276 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "hh_integral.h"
+
+#include <stdarg.h>
+// define MPFR_USE_VA_LIST to enable the use of mpfr_inits and mpfr_clears
+#define MPFR_USE_VA_LIST
+#include <mpfr.h>
+#include <libinum.h>
+
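+// hh_integrate evaluates the nested Gauss-Legendre quadrature
+//   int_{-pi/6}^{pi/6} dtheta  int_0^{1/cos(theta-pi/6)} drho   rho * m / xi
+// (see the documentation for the definitions of m and xi): the outer integral
+// over theta is computed here, and every evaluation of its integrand triggers
+// the inner integral over rho (hh_integrand1 -> hh_integrand2 below).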
+int hh_integrate(mpfr_t* out, hh_params params, array_mpfr abcissa, array_mpfr weights){
+ mpfr_t lower, upper;
+ // arguments for first integral
+ hh_argsint1 args1;
+ int ret;
+
+ args1.params=params;
+ args1.abcissa=abcissa;
+ args1.weights=weights;
+
+ mpfr_inits(lower, upper, NULL);
+
+ // compute the boundaries of the integral over theta
+ // pi/6
+ mpfr_const_pi(upper, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ // -pi/6
+ mpfr_neg(lower, upper, MPFR_RNDN);
+
+ // integrate
+ ret=integrate_gauss_mpfr(out, &hh_integrand1, lower, upper, abcissa, weights, &args1);
+
+ mpfr_clears(lower, upper, NULL);
+
+ return(ret);
+}
+
+// integrand of the integral over theta
+int hh_integrand1(mpfr_t* out, mpfr_t theta, void* args){
+ hh_argsint2 args2;
+ mpfr_t lower, upper;
+ int ret;
+
+ mpfr_inits(upper, lower, NULL);
+
+ // recover parameters
+ args2.params=((hh_argsint1*)args)->params;
+ args2.theta=theta;
+
+ // boundaries of the integral over rho
+ // 1/cos(theta-pi/6)
+ mpfr_const_pi(upper, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ mpfr_sub(upper, theta, upper, MPFR_RNDN);
+ mpfr_cos(upper, upper, MPFR_RNDN);
+ mpfr_ui_div(upper, 1, upper, MPFR_RNDN);
+
+ // 0
+ mpfr_set_ui(lower, 0, MPFR_RNDN);
+
+ ret=integrate_gauss_mpfr(out, &hh_integrand2, lower, upper, ((hh_argsint1*)args)->abcissa, ((hh_argsint1*)args)->weights, &args2);
+
+ mpfr_clears(upper, lower, NULL);
+
+ return(ret);
+}
+
+// integrand of the integral over rho
+int hh_integrand2(mpfr_t* out, mpfr_t rho, void* args){
+ mpfr_t tmp1, tmp2, tmp3;
+
+ mpfr_inits(tmp1, tmp2, tmp3, NULL);
+
+ // out = Omega^2
+ // tmp1 = alpha2
+ hh_Omega2_alpha2(*out, tmp1, rho, ((hh_argsint2*)args)->theta, tmp2, tmp3);
+ // tmp1 = m
+ hh_m(tmp1, ((hh_argsint2*)args)->params.W, ((hh_argsint2*)args)->params.t2, ((hh_argsint2*)args)->params.sinphi, tmp1);
+ // out = xi^2
+ hh_xi2(*out, *out, tmp1, ((hh_argsint2*)args)->params.t1, tmp2);
+
+ // out = rho/sqrt(xi^2)*m
+ mpfr_sqrt(*out, *out, MPFR_RNDN);
+ mpfr_div(*out, rho, *out, MPFR_RNDN);
+ mpfr_mul(*out, *out, tmp1, MPFR_RNDN);
+
+ mpfr_clears(tmp1, tmp2, tmp3, NULL);
+
+ return(0);
+}
+
+// derivative
+int hh_d_integrate(mpfr_t* out, hh_params params, array_mpfr abcissa, array_mpfr weights){
+ mpfr_t lower, upper;
+ // arguments for first integral
+ hh_argsint1 args1;
+ int ret;
+
+ args1.params=params;
+ args1.abcissa=abcissa;
+ args1.weights=weights;
+
+ mpfr_inits(lower, upper, NULL);
+
+ // compute the boundaries of the integral over theta
+ // pi/6
+ mpfr_const_pi(upper, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ // -pi/6
+ mpfr_neg(lower, upper, MPFR_RNDN);
+
+ // integrate
+ ret=integrate_gauss_mpfr(out, &hh_d_integrand1, lower, upper, abcissa, weights, &args1);
+
+ mpfr_clears(lower, upper, NULL);
+
+ return(ret);
+}
+
+// derivative of the integrand of the integral over theta
+int hh_d_integrand1(mpfr_t* out, mpfr_t theta, void* args){
+ hh_argsint2 args2;
+ mpfr_t lower, upper;
+ int ret;
+
+ mpfr_inits(lower, upper, NULL);
+
+ // recover parameters
+ args2.params=((hh_argsint1*)args)->params;
+ args2.theta=theta;
+
+ // boundaries of the integral over rho
+ // 1/cos(theta-pi/6)
+ mpfr_const_pi(upper, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ mpfr_sub(upper, theta, upper, MPFR_RNDN);
+ mpfr_cos(upper, upper, MPFR_RNDN);
+ mpfr_ui_div(upper, 1, upper, MPFR_RNDN);
+
+ // 0
+ mpfr_set_ui(lower, 0, MPFR_RNDN);
+
+ ret=integrate_gauss_mpfr(out, &hh_d_integrand2, lower, upper, ((hh_argsint1*)args)->abcissa, ((hh_argsint1*)args)->weights, &args2);
+
+ mpfr_clears(lower, upper, NULL);
+
+ return(ret);
+}
+
+// derivative of the integrand of the integral over rho
+int hh_d_integrand2(mpfr_t* out, mpfr_t rho, void* args){
+ mpfr_t tmp1, tmp2, tmp3;
+
+ mpfr_inits(tmp1, tmp2, tmp3, NULL);
+
+ // out = Omega^2
+ // tmp1 = alpha2
+ hh_Omega2_alpha2(*out, tmp1, rho, ((hh_argsint2*)args)->theta, tmp2, tmp3);
+ // tmp1 = m
+ hh_m(tmp1, ((hh_argsint2*)args)->params.W, ((hh_argsint2*)args)->params.t2, ((hh_argsint2*)args)->params.sinphi, tmp1);
+ // out = xi^2
+ hh_xi2(*out, *out, tmp1, ((hh_argsint2*)args)->params.t1, tmp2);
+
+ // tmp2 = 1-m^2/xi^2
+ mpfr_pow_ui(tmp2, tmp1, 2,MPFR_RNDN);
+ mpfr_div(tmp2, tmp2, *out, MPFR_RNDN);
+ mpfr_ui_sub(tmp2, 1, tmp2, MPFR_RNDN);
+
+ // out = rho/sqrt(xi^2)*(1-m^2/xi^2)
+ mpfr_sqrt(*out, *out, MPFR_RNDN);
+ mpfr_div(*out, rho, *out, MPFR_RNDN);
+ mpfr_mul(*out, *out, tmp2, MPFR_RNDN);
+
+ mpfr_clears(tmp1, tmp2, tmp3, NULL);
+
+ return(0);
+}
+
+// Omega^2 and alpha_2
+// provide two initialized tmp mpfr_t's
+// Omega2 and alpha2 must be initialized
+int hh_Omega2_alpha2(mpfr_t Omega2, mpfr_t alpha2, mpfr_t rho, mpfr_t theta, mpfr_t tmp1, mpfr_t tmp2){
+ // Omega2 and alpha2 will be used as tmp variables whenever possible
+
+ // Omega2 = pi
+ mpfr_const_pi(Omega2, MPFR_RNDN);
+
+ // tmp1 = pi/sqrt(3)*rho
+ mpfr_sqrt_ui(tmp1, 3, MPFR_RNDN);
+ mpfr_div(tmp1, Omega2, tmp1, MPFR_RNDN);
+ mpfr_mul(tmp1, tmp1, rho, MPFR_RNDN);
+
+ // alpha2 = cos(theta)
+ mpfr_cos(alpha2, theta, MPFR_RNDN);
+
+ // tmp1 = cos(pi/sqrt(3)*rho*cos(theta))
+ mpfr_mul(tmp1, tmp1, alpha2, MPFR_RNDN);
+ //// alpha2 free
+ mpfr_cos(tmp1, tmp1, MPFR_RNDN);
+
+ // alpha2 = pi/3*(1+rho*sin(theta))
+ mpfr_sin(alpha2, theta, MPFR_RNDN);
+ mpfr_mul(alpha2, alpha2, rho, MPFR_RNDN);
+ mpfr_add_ui(alpha2, alpha2, 1, MPFR_RNDN);
+ mpfr_div_ui(alpha2, alpha2, 3, MPFR_RNDN);
+ mpfr_mul(alpha2, alpha2, Omega2, MPFR_RNDN);
+ //// Omega2 free
+
+ // Omega2 = cos(pi/3*(1+rho*sin(theta)))
+ mpfr_cos(Omega2, alpha2, MPFR_RNDN);
+
+ // tmp2 = sin(pi/3*(1+rho*sin(theta)))
+ mpfr_sin(tmp2, alpha2, MPFR_RNDN);
+ //// alpha2 free
+
+ // alpha2 = -2*sin(pi/3*(1+rho*sin(theta)))*(cos(pi/3*(1+rho*sin(theta)))+cos(pi/sqrt(3)*rho*cos(theta)))
+ mpfr_add(alpha2, Omega2, tmp1, MPFR_RNDN);
+ mpfr_mul(alpha2, alpha2, tmp2, MPFR_RNDN);
+ //// tmp2 free
+ mpfr_mul_si(alpha2, alpha2, -2, MPFR_RNDN);
+
+ // tmp1 = cos(pi/3*(1+rho*sin(theta)))-cos(pi/sqrt(3)*rho*cos(theta))
+ mpfr_sub(tmp1, Omega2, tmp1, MPFR_RNDN);
+ // Omega2 = 1+4*cos(pi/3*(1+rho*sin(theta)))*(cos(pi/3*(1+rho*sin(theta)))-cos(pi/sqrt(3)*rho*cos(theta)))
+ mpfr_mul(Omega2, Omega2, tmp1, MPFR_RNDN);
+ //// tmp1 free
+ mpfr_mul_ui(Omega2, Omega2, 4, MPFR_RNDN);
+ mpfr_add_ui(Omega2, Omega2, 1, MPFR_RNDN);
+
+ return(0);
+}
+
+// m
+// out must be initialized
+// out and alpha2 can point to the same number
+int hh_m(mpfr_t out, mpfr_t W, mpfr_t t2, mpfr_t sinphi, mpfr_t alpha2){
+ // out = W-2*t2*sinphi*alpha2
+ mpfr_mul(out, alpha2, sinphi, MPFR_RNDN);
+ mpfr_mul(out, out, t2, MPFR_RNDN);
+ mpfr_mul_ui(out, out, 2, MPFR_RNDN);
+ mpfr_sub(out, W, out, MPFR_RNDN);
+ return(0);
+}
+
+// xi^2
+// provide one initialized tmp mpfr_t
+// out must be initialized
+// out and Omega2 can point to the same number
+// tmp and m can point to the same number
+int hh_xi2(mpfr_t out, mpfr_t Omega2, mpfr_t m, mpfr_t t1, mpfr_t tmp){
+ // out = t1^2*Omega^2
+ mpfr_mul(out, Omega2, t1, MPFR_RNDN);
+ mpfr_mul(out, out, t1, MPFR_RNDN);
+
+ // tmp = m^2
+ mpfr_pow_ui(tmp, m, 2, MPFR_RNDN);
+
+ // out = m^2+t1^2*Omega^2
+ mpfr_add(out, out, tmp, MPFR_RNDN);
+
+ return(0);
+}
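+
+/*
+ Summary of the quantities computed above (transcribed from the in-line
+ comments, for reference):
+
+   Omega^2(rho,theta) = 1 + 4*cos(pi/3*(1+rho*sin(theta)))
+                          *(cos(pi/3*(1+rho*sin(theta))) - cos(pi/sqrt(3)*rho*cos(theta)))
+   alpha_2(rho,theta) = -2*sin(pi/3*(1+rho*sin(theta)))
+                          *(cos(pi/3*(1+rho*sin(theta))) + cos(pi/sqrt(3)*rho*cos(theta)))
+   m = W - 2*t2*sin(phi)*alpha_2
+   xi^2 = m^2 + t1^2*Omega^2
+
+ hh_integrate computes the integral of rho*m/sqrt(xi^2) over theta in
+ [-pi/6,pi/6] and rho in [0,1/cos(theta-pi/6)], and hh_d_integrate computes
+ the integral of rho/sqrt(xi^2)*(1-m^2/xi^2) over the same domain.
+*/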
diff --git a/src/hh_integral.h b/src/hh_integral.h
new file mode 100644
index 0000000..4db9bf3
--- /dev/null
+++ b/src/hh_integral.h
@@ -0,0 +1,63 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ integrals required for the computation
+*/
+
+#ifndef HH_INTEGRAL_H
+#define HH_INTEGRAL_H
+
+#include "types.h"
+#include <libinum.h>
+#include <mpfr.h>
+
+// extra arguments for the integration over theta (includes the integration options)
+typedef struct hh_argsint1 {
+ array_mpfr abcissa;
+ array_mpfr weights;
+ hh_params params;
+} hh_argsint1;
+
+// extra arguments for integration over rho (includes theta)
+typedef struct hh_argsint2 {
+ mpfr_ptr theta;
+ hh_params params;
+} hh_argsint2;
+
+// the integral
+int hh_integrate(mpfr_t* out, hh_params params, array_mpfr abcissa, array_mpfr weights);
+// integrand of the first integration
+int hh_integrand1(mpfr_t* out, mpfr_t theta, void* args);
+// integrand of the second integration
+int hh_integrand2(mpfr_t* out, mpfr_t rho, void* args);
+
+// derivative of the integral
+int hh_d_integrate(mpfr_t* out, hh_params params, array_mpfr abcissa, array_mpfr weights);
+// derivative of the integrand of the first integration
+int hh_d_integrand1(mpfr_t* out, mpfr_t theta, void* args);
+// derivative of the integrand of the second integration
+int hh_d_integrand2(mpfr_t* out, mpfr_t rho, void* args);
+
+// functions
+// Omega^2 and alpha_2
+int hh_Omega2_alpha2(mpfr_t Omega2, mpfr_t alpha2, mpfr_t rho, mpfr_t theta, mpfr_t tmp1, mpfr_t tmp2);
+// m
+int hh_m(mpfr_t out, mpfr_t W, mpfr_t t2, mpfr_t sinphi, mpfr_t alpha2);
+// xi^2
+int hh_xi2(mpfr_t out, mpfr_t Omega2, mpfr_t m, mpfr_t t1, mpfr_t tmp);
+
+#endif
diff --git a/src/hh_integral_double.c b/src/hh_integral_double.c
new file mode 100644
index 0000000..2776849
--- /dev/null
+++ b/src/hh_integral_double.c
@@ -0,0 +1,107 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "hh_integral_double.h"
+
+#include <math.h>
+
+#define PI 3.1415926535897932385L
+
+// compute the integral
+int hh_integrate_double(long double* out, hh_params_double params, array_ldouble abcissa, array_ldouble weights){
+ hh_argsint1_double args;
+ int ret;
+
+ args.params=params;
+ args.abcissa=abcissa;
+ args.weights=weights;
+
+ ret=integrate_gauss_ldouble(out, &hh_integrand1_double, -PI/6, PI/6, abcissa, weights, &args);
+
+ return(ret);
+}
+
+// integrand of the integral over theta
+int hh_integrand1_double(long double* out, long double theta, void* args){
+ hh_argsint2_double nargs;
+ int ret;
+ hh_argsint1_double* argument=(hh_argsint1_double*)args;
+
+ nargs.params=argument->params;
+ nargs.theta=theta;
+
+ ret=integrate_gauss_ldouble(out, &hh_integrand2_double, 0, 1.L/cosl(theta-PI/6), argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over rho
+int hh_integrand2_double(long double* out, long double rho, void* args){
+ hh_argsint2_double* argument=(hh_argsint2_double*)args;
+ long double m, xi, alpha2, O2;
+
+ alpha2=-2*sinl(PI/3*(1+rho*sinl(argument->theta)))*(cosl(PI/3*(1+rho*sinl(argument->theta)))+cosl(PI/sqrtl(3.)*rho*cosl(argument->theta)));
+ O2=1+4*cosl(PI/3*(1+rho*sinl(argument->theta)))*(cosl(PI/3*(1+rho*sinl(argument->theta)))-cosl(PI/sqrtl(3.)*rho*cosl(argument->theta)));
+ m=argument->params.W-2*argument->params.t2*argument->params.sinphi*alpha2;
+ xi=sqrtl(m*m+argument->params.t1*argument->params.t1*O2);
+ *out=rho*m/xi;
+
+ return(0);
+}
+
+
+// derivative
+int hh_d_integrate_double(long double* out, hh_params_double params, array_ldouble abcissa, array_ldouble weights){
+ hh_argsint1_double args;
+ int ret;
+
+ args.params=params;
+ args.abcissa=abcissa;
+ args.weights=weights;
+
+ ret=integrate_gauss_ldouble(out, &hh_d_integrand1_double, -PI/6, PI/6, abcissa, weights, &args);
+
+ return(ret);
+}
+
+// derivative of the integrand of the integral over theta
+int hh_d_integrand1_double(long double* out, long double theta, void* args){
+ hh_argsint2_double nargs;
+ int ret;
+ hh_argsint1_double* argument=(hh_argsint1_double*)args;
+
+ nargs.params=argument->params;
+ nargs.theta=theta;
+
+ ret=integrate_gauss_ldouble(out, &hh_d_integrand2_double, 0, 1.L/cosl(theta-PI/6), argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// derivative of the integrand of the integral over rho
+int hh_d_integrand2_double(long double* out, long double rho, void* args){
+ hh_argsint2_double* argument=(hh_argsint2_double*)args;
+ long double m, xi, alpha2, O2;
+
+ alpha2=-2*sinl(PI/3*(1+rho*sinl(argument->theta)))*(cosl(PI/3*(1+rho*sinl(argument->theta)))+cosl(PI/sqrtl(3.)*rho*cosl(argument->theta)));
+ O2=1+4*cosl(PI/3*(1+rho*sinl(argument->theta)))*(cosl(PI/3*(1+rho*sinl(argument->theta)))-cosl(PI/sqrtl(3.)*rho*cosl(argument->theta)));
+ m=argument->params.W-2*argument->params.t2*argument->params.sinphi*alpha2;
+ xi=sqrtl(m*m+argument->params.t1*argument->params.t1*O2);
+ *out=rho/xi*(1-m*m/xi/xi);
+
+ return(0);
+}
+
diff --git a/src/hh_integral_double.h b/src/hh_integral_double.h
new file mode 100644
index 0000000..9f631cb
--- /dev/null
+++ b/src/hh_integral_double.h
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef HH_INTEGRAL_DOUBLE_H
+#define HH_INTEGRAL_DOUBLE_H
+
+#include "double_util.h"
+#include "libinum.h"
+
+// arguments for the integral over theta
+typedef struct hh_argsint1_double{
+ hh_params_double params;
+ array_ldouble weights;
+ array_ldouble abcissa;
+} hh_argsint1_double;
+// arguments for the integral over rho
+typedef struct hh_argsint2_double{
+ hh_params_double params;
+ long double theta;
+} hh_argsint2_double;
+
+
+// compute the integral
+int hh_integrate_double(long double* out, hh_params_double params, array_ldouble abcissa, array_ldouble weights);
+
+// integrand of the integral over theta
+int hh_integrand1_double(long double* out, long double theta, void* args);
+
+// integrand of the integral over rho
+int hh_integrand2_double(long double* out, long double rho, void* args);
+
+// derivative
+int hh_d_integrate_double(long double* out, hh_params_double params, array_ldouble abcissa, array_ldouble weights);
+
+// derivative of the integrand of the integral over theta
+int hh_d_integrand1_double(long double* out, long double theta, void* args);
+
+// derivative of the integrand of the integral over rho
+int hh_d_integrand2_double(long double* out, long double rho, void* args);
+
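+/*
+ Minimal calling sketch (illustrative): compute the Gauss-Legendre nodes with
+ libinum and evaluate the integral in long double precision, where 'params' is
+ an hh_params_double and 'order', 'tolerance', 'maxiter' are the integration
+ settings. Error handling (LIBINUM_ERROR_MAXITER, LIBINUM_ERROR_NAN) is
+ omitted here; see hhtop.c.
+
+   array_ldouble abcissa, weights;
+   long double val;
+   gauss_legendre_weights_ldouble(order, tolerance, maxiter, &abcissa, &weights);
+   hh_integrate_double(&val, params, abcissa, weights);
+   array_ldouble_free(abcissa);
+   array_ldouble_free(weights);
+*/
+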
+#endif
diff --git a/src/hh_root.c b/src/hh_root.c
new file mode 100644
index 0000000..6df4c82
--- /dev/null
+++ b/src/hh_root.c
@@ -0,0 +1,144 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "hh_root.h"
+
+#include <mpfr.h>
+
+#include "hh_integral.h"
+
+// wrapper for the integration function, used for the Newton scheme
+int integration_wrapper(mpfr_t* out, mpfr_t in, void* extra_args){
+ mpfr_t tmp;
+ hh_params params;
+ int ret;
+
+ mpfr_init(tmp);
+
+ mpfr_set(((args_integration*)extra_args)->params.W, in, MPFR_RNDN);
+
+ // out = I+
+ ret=hh_integrate(out, ((args_integration*)extra_args)->params, ((args_integration*)extra_args)->abcissa, ((args_integration*)extra_args)->weights);
+ if(ret<0){
+ mpfr_clear(tmp);
+ return(ret);
+ }
+
+ // clone params (to set sinphi=-sinphi in order to compute I_-)
+ mpfr_inits(params.t1, params.t2, params.lambda, params.W, params.sinphi, NULL);
+ mpfr_set(params.t1, ((args_integration*)extra_args)->params.t1, MPFR_RNDN);
+ mpfr_set(params.t2, ((args_integration*)extra_args)->params.t2, MPFR_RNDN);
+ mpfr_set(params.lambda, ((args_integration*)extra_args)->params.lambda, MPFR_RNDN);
+ mpfr_set(params.W, ((args_integration*)extra_args)->params.W, MPFR_RNDN);
+ mpfr_neg(params.sinphi, ((args_integration*)extra_args)->params.sinphi, MPFR_RNDN);
+
+ // tmp = I-
+ ret=hh_integrate(&tmp, params, ((args_integration*)extra_args)->abcissa, ((args_integration*)extra_args)->weights);
+ if(ret<0){
+ mpfr_clear(tmp);
+ mpfr_clears(params.t1, params.t2, params.lambda, params.W, params.sinphi, NULL);
+ return(ret);
+ }
+
+ mpfr_clears(params.t1, params.t2, params.lambda, params.W, params.sinphi, NULL);
+
+ // out=I+ + I-
+ mpfr_add(*out, *out, tmp, MPFR_RNDN);
+ //// tmp free
+
+ // tmp = sqrt(3)
+ mpfr_sqrt_ui(tmp, 3, MPFR_RNDN);
+
+ // out = W-sqrt(3)*lambda/6*(I+ + I-)
+ mpfr_mul(*out, *out, ((args_integration*)extra_args)->params.lambda, MPFR_RNDN);
+ mpfr_div_ui(*out, *out, 6, MPFR_RNDN);
+ mpfr_mul(*out, *out, tmp, MPFR_RNDN);
+ mpfr_sub(*out, ((args_integration*)extra_args)->params.W, *out, MPFR_RNDN);
+
+ // tmp = 3*sqrt(3)*t2*sin(phi)
+ mpfr_mul_ui(tmp, tmp, 3, MPFR_RNDN);
+ mpfr_mul(tmp, tmp, ((args_integration*)extra_args)->params.t2, MPFR_RNDN);
+ mpfr_mul(tmp, tmp, ((args_integration*)extra_args)->params.sinphi, MPFR_RNDN);
+
+ // out = W+omega*3*sqrt(3)*t2*sin(phi)-sqrt(3)*lambda/6*(I+ + I-)
+ if(((args_integration*)extra_args)->params.omega==1){
+ mpfr_add(*out, *out, tmp, MPFR_RNDN);
+ }
+ else{
+ mpfr_sub(*out, *out, tmp, MPFR_RNDN);
+ }
+ //// tmp free
+
+ mpfr_clear(tmp);
+ return(0);
+}
+
+// wrapper for the derivative of the integration function, used for the Newton scheme
+int d_integration_wrapper(mpfr_t* out, mpfr_t in, void* extra_args){
+ mpfr_t tmp;
+ hh_params params;
+ int ret;
+
+ mpfr_init(tmp);
+
+ mpfr_set(((args_integration*)extra_args)->params.W, in, MPFR_RNDN);
+
+ // out = dI+
+ ret=hh_d_integrate(out, ((args_integration*)extra_args)->params, ((args_integration*)extra_args)->abcissa, ((args_integration*)extra_args)->weights);
+ if(ret<0){
+ mpfr_clear(tmp);
+ return(ret);
+ }
+
+ // clone params (to set sinphi=-sinphi in order to compute dI_-)
+ mpfr_inits(params.t1, params.t2, params.lambda, params.W, params.sinphi, params.phi, NULL);
+ mpfr_set(params.t1, ((args_integration*)extra_args)->params.t1, MPFR_RNDN);
+ mpfr_set(params.t2, ((args_integration*)extra_args)->params.t2, MPFR_RNDN);
+ mpfr_set(params.lambda, ((args_integration*)extra_args)->params.lambda, MPFR_RNDN);
+ mpfr_set(params.W, ((args_integration*)extra_args)->params.W, MPFR_RNDN);
+ params.omega=((args_integration*)extra_args)->params.omega;
+ mpfr_neg(params.sinphi, ((args_integration*)extra_args)->params.sinphi, MPFR_RNDN);
+ mpfr_neg(params.phi, ((args_integration*)extra_args)->params.phi, MPFR_RNDN);
+
+ // tmp = dI-
+ ret=hh_d_integrate(&tmp, params, ((args_integration*)extra_args)->abcissa, ((args_integration*)extra_args)->weights);
+ if(ret<0){
+ mpfr_clear(tmp);
+ mpfr_clears(params.t1, params.t2, params.lambda, params.W, params.sinphi, params.phi, NULL);
+ return(ret);
+ }
+
+ mpfr_clears(params.t1, params.t2, params.lambda, params.W, params.sinphi, params.phi, NULL);
+
+ // out=dI+ + dI-
+ mpfr_add(*out, *out, tmp, MPFR_RNDN);
+ //// tmp free
+
+ // tmp = sqrt(3)
+ mpfr_sqrt_ui(tmp, 3, MPFR_RNDN);
+
+ // out = 1-sqrt(3)*lambda/6*(dI+ + dI-)
+ mpfr_mul(*out, *out, ((args_integration*)extra_args)->params.lambda, MPFR_RNDN);
+ mpfr_div_ui(*out, *out, 6, MPFR_RNDN);
+ mpfr_mul(*out, *out, tmp, MPFR_RNDN);
+ //// tmp free
+ mpfr_ui_sub(*out, 1, *out, MPFR_RNDN);
+
+ mpfr_clear(tmp);
+
+ return(0);
+}
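+
+// In summary (transcribed from the comments above): the Newton scheme searches
+// for the value of W at which
+//   W + omega*3*sqrt(3)*t2*sin(phi) - sqrt(3)*lambda/6*(I_+ + I_-) = 0
+// where I_+ and I_- denote hh_integrate evaluated with sinphi and -sinphi
+// respectively, and d_integration_wrapper returns the W-derivative of the
+// left-hand side,
+//   1 - sqrt(3)*lambda/6*(dI_+ + dI_-).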
+
diff --git a/src/hh_root.h b/src/hh_root.h
new file mode 100644
index 0000000..2546fa3
--- /dev/null
+++ b/src/hh_root.h
@@ -0,0 +1,39 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ wrappers for the functions used to find the desired roots
+*/
+
+#ifndef HH_ROOT_H
+#define HH_ROOT_H
+
+#include "types.h"
+#include <libinum.h>
+
+// the arguments for the integration wrapper
+typedef struct args_integration{
+ array_mpfr abcissa;
+ array_mpfr weights;
+ hh_params params;
+} args_integration;
+
+// wrapper for the integration function, used for the Newton scheme
+int integration_wrapper(mpfr_t* out, mpfr_t in, void* extra_args);
+// wrapper for the derivative of the integration function, used for the Newton scheme
+int d_integration_wrapper(mpfr_t* out, mpfr_t in, void* extra_args);
+
+#endif
diff --git a/src/hh_root_double.c b/src/hh_root_double.c
new file mode 100644
index 0000000..5474bcb
--- /dev/null
+++ b/src/hh_root_double.c
@@ -0,0 +1,75 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "hh_root_double.h"
+#include <math.h>
+#include "hh_integral_double.h"
+
+// wrapper for the integration function, used for the Newton scheme
+int hh_integration_wrapper_double(long double* out, long double in, void* args){
+ hh_args_integration_double* argument=(hh_args_integration_double*)args;
+ long double val;
+ int ret;
+
+ argument->params.W=in;
+
+ ret=hh_integrate_double(&val, argument->params, argument->abcissa, argument->weights);
+
+ if(ret<0){
+ return(ret);
+ }
+
+ // repeat with -sinphi
+ argument->params.sinphi=-argument->params.sinphi;
+
+ ret=hh_integrate_double(out, argument->params, argument->abcissa, argument->weights);
+
+ // reset sinphi
+ argument->params.sinphi=-argument->params.sinphi;
+
+ *out=argument->params.W+3*sqrtl(3)*argument->params.omega*argument->params.t2*argument->params.sinphi-argument->params.lambda*sqrtl(3)/6*(*out+val);
+
+ return(ret);
+}
+
+// wrapper for the derivative of the integration function, used for the Newton scheme
+int hh_d_integration_wrapper_double(long double* out, long double in, void* args){
+ hh_args_integration_double* argument=(hh_args_integration_double*)args;
+ long double val;
+ int ret;
+
+ argument->params.W=in;
+
+ ret=hh_d_integrate_double(&val, argument->params, argument->abcissa, argument->weights);
+
+ if(ret<0){
+ return(ret);
+ }
+
+ // repeat with -sinphi
+ argument->params.sinphi=-argument->params.sinphi;
+ argument->params.phi=-argument->params.phi;
+
+ ret=hh_d_integrate_double(out, argument->params, argument->abcissa, argument->weights);
+
+ // reset sinphi
+ argument->params.sinphi=-argument->params.sinphi;
+ argument->params.phi=-argument->params.phi;
+
+ *out=1-argument->params.lambda*sqrtl(3)/6*(*out+val);
+
+ return(ret);
+}
diff --git a/src/hh_root_double.h b/src/hh_root_double.h
new file mode 100644
index 0000000..2201322
--- /dev/null
+++ b/src/hh_root_double.h
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef HH_ROOT_DOUBLE_H
+#define HH_ROOT_DOUBLE_H
+
+#include <libinum.h>
+#include "double_util.h"
+
+typedef struct hh_args_integration_double{
+ hh_params_double params;
+ array_ldouble abcissa;
+ array_ldouble weights;
+} hh_args_integration_double;
+
+// wrapper for the integration function, used for the Newton scheme
+int hh_integration_wrapper_double(long double* out, long double in, void* args);
+
+// wrapper for the derivative of the integration function, used for the Newton scheme
+int hh_d_integration_wrapper_double(long double* out, long double in, void* args);
+
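+/*
+ Illustrative sketch of how these wrappers are plugged into libinum's Newton
+ solver; this mirrors compute_hh_double in hhtop.c ('order', 'tol' and
+ 'maxiter' are the integration settings and 'params' an hh_params):
+
+   hh_args_integration_double args;
+   long double val;
+   hh_params_todouble(&args.params, params);
+   gauss_legendre_weights_ldouble(order, tol, maxiter, &args.abcissa, &args.weights);
+   val=-args.params.omega*3*sqrtl(3)*args.params.t2*args.params.sinphi;
+   root_newton_inplace_ldouble(&val, &hh_integration_wrapper_double, &hh_d_integration_wrapper_double, tol, maxiter, &args);
+   array_ldouble_free(args.abcissa);
+   array_ldouble_free(args.weights);
+*/
+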
+#endif
diff --git a/src/hhtop.c b/src/hhtop.c
new file mode 100644
index 0000000..0889b76
--- /dev/null
+++ b/src/hhtop.c
@@ -0,0 +1,597 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdarg.h>
+// define MPFR_USE_VA_LIST to enable the use of mpfr_inits and mpfr_clears
+#define MPFR_USE_VA_LIST
+#include <mpfr.h>
+#include <math.h>
+#include <libinum.h>
+
+#include "types.h"
+#include "hh_integral.h"
+#include "hh_root.h"
+#include "hh_integral_double.h"
+#include "hh_root_double.h"
+#include "ss_integral_double.h"
+#include "zz_integral.h"
+#include "zz_integral_double.h"
+#include "parser.h"
+#include "definitions.h"
+
+// usage message
+int print_usage();
+// read arguments
+int read_args(int argc, char* argv[], hh_params* params, mpfr_t tolerance, unsigned int* maxiter, unsigned int* order, unsigned int* computation_nr, unsigned int* threads, unsigned int* use_double);
+
+// compute the first loop correction to the topological phase diagram
+int compute_hh(hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order);
+// using doubles instead of mpfr
+int compute_hh_double(hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order);
+
+// compute the sunrise diagram
+int compute_ss(TYPE_I, hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order, unsigned int threads);
+// using doubles instead of mpfr
+int compute_ss_double(TYPE_I_DOUBLE, hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order, unsigned int threads);
+
+// codes for possible computations
+#define COMPUTATION_PHASE 1
+#define COMPUTATION_ZZ 2
+#define COMPUTATION_ZZZZ 3
+
+int main(int argc, char* argv[]){
+ hh_params params;
+ mpfr_t tolerance;
+ unsigned int maxiter;
+ unsigned int order;
+ int ret;
+ unsigned int computation_nr;
+ unsigned int threads;
+ unsigned int use_double;
+
+ // default computation: phase diagram
+ computation_nr=COMPUTATION_PHASE;
+
+ mpfr_inits(params.W, params.sinphi, params.phi, params.t1, params.t2, params.lambda, tolerance, NULL);
+ // read command line arguments
+ ret=read_args(argc, argv, &params, tolerance, &maxiter, &order, &computation_nr, &threads, &use_double);
+ if(ret<0){
+ mpfr_clears(params.W, params.sinphi, params.phi, params.t1, params.t2, params.lambda, tolerance, NULL);
+ return(-1);
+ }
+ if(ret>0){
+ mpfr_clears(params.W, params.sinphi, params.phi, params.t1, params.t2, params.lambda, tolerance, NULL);
+ return(0);
+ }
+
+ // phase diagram
+ if(computation_nr==COMPUTATION_PHASE){
+ // compute the first-loop correction to the phase diagram
+ if(use_double==0){
+ compute_hh(params, tolerance, maxiter, order);
+ }
+ else{
+ compute_hh_double(params, tolerance, maxiter, order);
+ }
+ }
+ else if(computation_nr==COMPUTATION_ZZ){
+ // compute second-order correction to z1-z2
+ if(use_double==0){
+ compute_ss(&zz_I, params, tolerance, maxiter, order, threads);
+ }
+ else{
+ compute_ss_double(&zz_I_double, params, tolerance, maxiter, order, threads);
+ }
+ }
+ else if(computation_nr==COMPUTATION_ZZZZ){
+ // compute second-order correction to z1+z2
+ if(use_double==0){
+ compute_ss(&ZZ_I, params, tolerance, maxiter, order, threads);
+ }
+ else{
+ compute_ss_double(&ZZ_I_double, params, tolerance, maxiter, order, threads);
+ }
+ }
+
+ mpfr_clears(params.W, params.sinphi, params.phi, params.t1, params.t2, params.lambda, tolerance, NULL);
+
+ return(0);
+}
+
+// usage message
+int print_usage(){
+ fprintf(stderr, "usage:\n hhtop phase [-p params] [-v] [-O order] [-t tolerance] [-N maxiter] [-P precision] [-E emax]\n hhtop z1-z2 [-p params] [-v] [-O order] [-t tolerance] [-N maxiter] [-P precision] [-E emax] [-C threads]\n hhtop z1+z2 [-p params] [-v] [-O order] [-t tolerance] [-N maxiter] [-P precision] [-E emax] [-C threads]\n\n hhtop -D phase [-p params] [-v] [-O order] [-t tolerance] [-N maxiter]\n hhtop -D z1-z2 [-p params] [-v] [-O order] [-t tolerance] [-N maxiter] [-C threads]\n\n hhtop -V [-v]\n\n");
+ return(0);
+}
+
+// read command line arguments
+#define CP_FLAG_PARAMS 1
+#define CP_FLAG_ORDER 2
+#define CP_FLAG_TOLERANCE 3
+#define CP_FLAG_MAXITER 4
+#define CP_FLAG_MPFR_PREC 5
+#define CP_FLAG_MPFR_EXP 6
+#define CP_FLAG_THREADS 7
+int read_args(int argc, char* argv[], hh_params* params, mpfr_t tolerance, unsigned int* maxiter, unsigned int* order, unsigned int* computation_nr, unsigned int* threads, unsigned int* use_double){
+ int i;
+ int ret;
+ // temporary long int
+ long int tmp_lint;
+ // temporary unsigned int
+ unsigned int tmp_uint;
+ // pointers
+ char* ptr;
+ // flag that indicates what argument is being read
+ int flag=0;
+ // pointer to various arguments
+ char* tolerance_str=NULL;
+ char* params_str=NULL;
+ // whether to print the variables after they are read
+ int print_vars=0;
+ // keep track of which flags were used (to check for incompatibilities)
+ unsigned char pflag=0;
+ unsigned char Oflag=0;
+ unsigned char tflag=0;
+ unsigned char Nflag=0;
+ unsigned char Pflag=0;
+ unsigned char Eflag=0;
+ unsigned char vflag=0;
+ unsigned char Cflag=0;
+ unsigned char Dflag=0;
+ unsigned char Vflag=0;
+
+ // defaults
+ mpfr_set_d(params->t1, 1., MPFR_RNDN);
+ mpfr_set_d(params->t2, .1, MPFR_RNDN);
+ mpfr_set_d(params->lambda, .01, MPFR_RNDN);
+ mpfr_set_d(params->sinphi, 1., MPFR_RNDN);
+ mpfr_set_d(tolerance, 1e-11, MPFR_RNDN);
+ *maxiter=1000000;
+ *order=10;
+ params->omega=+1;
+ *threads=1;
+ *use_double=0;
+
+ mpfr_const_pi(params->phi, MPFR_RNDN);
+ mpfr_div_ui(params->phi, params->phi, 2, MPFR_RNDN);
+
+ // default W=-3*sqrt(3)*t2*sin(phi)
+ mpfr_sqrt_ui(params->W, 3, MPFR_RNDN);
+ mpfr_mul_si(params->W, params->W, -3, MPFR_RNDN);
+ mpfr_mul(params->W, params->W, params->sinphi, MPFR_RNDN);
+ mpfr_mul(params->W, params->W, params->t2, MPFR_RNDN);
+
+ // loop over arguments
+ for(i=1;i<argc;i++){
+ // flag
+ if(argv[i][0]=='-'){
+ for(ptr=((char*)argv[i])+1;*ptr!='\0';ptr++){
+ switch(*ptr){
+ // parameters
+ case 'p':
+ flag=CP_FLAG_PARAMS;
+ pflag=1;
+ break;
+ // order of the integration
+ case 'O':
+ flag=CP_FLAG_ORDER;
+ Oflag=1;
+ break;
+ // tolerance
+ case 't':
+ flag=CP_FLAG_TOLERANCE;
+ tflag=1;
+ break;
+ // maximal number of Newton steps
+ case 'N':
+ flag=CP_FLAG_MAXITER;
+ Nflag=1;
+ break;
+ // mpfr precision
+ case 'P':
+ flag=CP_FLAG_MPFR_PREC;
+ Pflag=1;
+ break;
+ // mpfr emax
+ case 'E':
+ flag=CP_FLAG_MPFR_EXP;
+ Eflag=1;
+ break;
+ // print value of variables
+ case 'v':
+ print_vars=1;
+ vflag=1;
+ break;
+ // number of threads
+ case 'C':
+ flag=CP_FLAG_THREADS;
+ Cflag=1;
+ break;
+ // use doubles instead of mpfr
+ case 'D':
+ *use_double=1;
+ Dflag=1;
+ // set prec to that of long doubles (for consistency when reading cli arguments with many digits)
+ mpfr_set_default_prec(64);
+ break;
+ // print version
+ case 'V':
+ Vflag=1;
+ break;
+ default:
+ fprintf(stderr, "unrecognized option '-%c'\n", *ptr);
+ print_usage();
+ return(-1);
+ break;
+ }
+ }
+ }
+ // parameters
+ else if(flag==CP_FLAG_PARAMS){
+ // read str later (after having set the MPFR precision and emax)
+ params_str=argv[i];
+ flag=0;
+ }
+ // order of the integration
+ else if(flag==CP_FLAG_ORDER){
+ ret=sscanf(argv[i],"%u",&tmp_uint);
+ if(ret!=1){
+ fprintf(stderr, "error: '-O' should be followed by an unsigned int\n got '%s'\n",argv[i]);
+ return(-1);
+ }
+ *order=tmp_uint;
+ flag=0;
+ }
+ // tolerance
+ else if(flag==CP_FLAG_TOLERANCE){
+ // read str later (after having set the MPFR precision and emax)
+ tolerance_str=argv[i];
+ flag=0;
+ }
+ // maximal number of Newton steps
+ else if(flag==CP_FLAG_MAXITER){
+ ret=sscanf(argv[i],"%u",maxiter);
+ if(ret!=1){
+ fprintf(stderr, "error: '-N' should be followed by a positive int\n got '%s'\n",argv[i]);
+ return(-1);
+ }
+ flag=0;
+ }
+ // mpfr precision
+ else if(flag==CP_FLAG_MPFR_PREC){
+ ret=sscanf(argv[i],"%ld",&tmp_lint);
+ if(ret!=1){
+ fprintf(stderr, "error: '-P' should be followed by a long int\n got '%s'\n",argv[i]);
+ return(-1);
+ }
+ mpfr_set_default_prec(tmp_lint);
+ flag=0;
+ }
+ // mpfr emax
+ else if(flag==CP_FLAG_MPFR_EXP){
+ ret=sscanf(argv[i],"%ld",&tmp_lint);
+ if(ret!=1){
+ fprintf(stderr, "error: '-E' should be followed by a long int\n got '%s'\n",argv[i]);
+ return(-1);
+ }
+ mpfr_set_emax(tmp_lint);
+ flag=0;
+ }
+ // number of threads to use for the computation
+ else if(flag==CP_FLAG_THREADS){
+ ret=sscanf(argv[i],"%u",threads);
+ if(ret!=1){
+ fprintf(stderr, "error: '-C' should be followed by a positive int\n got '%s'\n",argv[i]);
+ return(-1);
+ }
+ flag=0;
+ }
+ // computation to run
+ else{
+ if(str_cmp(argv[i], "phase")==1){
+ *computation_nr=COMPUTATION_PHASE;
+ }
+ else if(str_cmp(argv[i], "z1-z2")==1){
+ *computation_nr=COMPUTATION_ZZ;
+ }
+ else if(str_cmp(argv[i], "z1+z2")==1){
+ *computation_nr=COMPUTATION_ZZZZ;
+ }
+ else{
+ fprintf(stderr, "error: unrecognized computation: '%s'\n",argv[i]);
+ print_usage();
+ return(-1);
+ }
+ flag=0;
+ }
+ }
+ if(tolerance_str!=NULL){
+ ret=mpfr_set_str(tolerance, tolerance_str, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "error: '-t' should be followed by an MPFR floating point number\n got '%s'\n", tolerance_str);
+ return(-1);
+ }
+ }
+ if(params_str!=NULL){
+ ret=read_params(params, params_str);
+ if(ret<0){
+ return(ret);
+ }
+ }
+
+ // check for incompatible flags
+ if((Vflag==1 && (pflag!=0 || Oflag!=0 || tflag!=0 || Nflag!=0 || Pflag!=0 || Eflag!=0 || Cflag!=0 || Dflag!=0)) || \
+ (*computation_nr!=COMPUTATION_ZZ && *computation_nr!=COMPUTATION_ZZZZ && Cflag==1) || \
+ (Dflag==1 && (Pflag==1 || Eflag==1)) \
+ ){
+ print_usage();
+ return(-1);
+ }
+
+ // print version and exit
+ if(Vflag==1){
+ printf("hhtop " VERSION "\n");
+ printf("libinum " LIBINUM_VERSION "\n");
+ if(vflag==1){
+ // print datatype information
+ printf("\n\n");
+ print_datatype_info(stdout);
+ }
+ return(1);
+ }
+
+ // print variables
+ if(print_vars==1){
+ fprintf(stderr, "t1=");
+ fprint_mpfr(stderr,params->t1);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "t2=");
+ fprint_mpfr(stderr,params->t2);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "lambda=");
+ fprint_mpfr(stderr,params->lambda);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "phi=");
+ fprint_mpfr(stderr,params->phi);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "sinphi=");
+ fprint_mpfr(stderr,params->sinphi);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "W=");
+ fprint_mpfr(stderr,params->W);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "omega=%d\n",params->omega);
+
+ fprintf(stderr, "\ntolerance=");
+ fprint_mpfr(stderr,tolerance);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, "order of integration=%d\n", *order);
+
+ fprintf(stderr, "\nMPFR precision=%ld\n", mpfr_get_default_prec());
+ fprintf(stderr, "MPFR emax=%ld\n", mpfr_get_emax());
+
+ fprintf(stderr, "\n");
+ }
+
+ return(0);
+}
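+
+/*
+ Example invocations (illustrative values):
+
+   hhtop phase -p "t2=0.1;lambda=0.02;phi=1.2;" -O 20 -t 1e-15 -P 128
+   hhtop -D phase -p "sinphi=1;omega=-1;" -N 100
+   hhtop z1-z2 -C 4
+
+ The string following '-p' is parsed by read_params (see parser.c); '-P' and
+ '-E' are incompatible with '-D', and '-C' is only accepted for the z1-z2 and
+ z1+z2 computations.
+*/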
+
+// compute the first loop correction to the topological phase diagram
+int compute_hh(hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order){
+ mpfr_t val;
+ int ret;
+ args_integration args_int;
+
+ // compute weights
+ ret=gauss_legendre_weights_mpfr(order, tolerance, maxiter, &(args_int.abcissa), &(args_int.weights));
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the integration abcissa\n try increasing the precision or the tolerance\n");
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered when computing the integration abcissa\n");
+ return(ret);
+ }
+
+ // set args
+ args_int.params=params;
+ mpfr_init(args_int.params.W);
+
+ mpfr_init(val);
+ // initial value
+ mpfr_sqrt_ui(val, 3, MPFR_RNDN);
+ mpfr_mul_ui(val, val, 3, MPFR_RNDN);
+ mpfr_mul(val, val, params.sinphi, MPFR_RNDN);
+ mpfr_mul(val, val, params.t2, MPFR_RNDN);
+ if(params.omega==1){
+ mpfr_neg(val, val, MPFR_RNDN);
+ }
+
+ // compute root
+ ret=root_newton_inplace_mpfr(&val, &integration_wrapper, &d_integration_wrapper, tolerance, maxiter, &args_int);
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the solution to m=0\n try increasing the precision or the tolerance\n");
+ mpfr_clear(val);
+ mpfr_clear(args_int.params.W);
+ array_mpfr_free(args_int.abcissa);
+ array_mpfr_free(args_int.weights);
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered: either the integrand is singular or the derivative of the integral vanishes at a point of the Newton iteration\n");
+ mpfr_clear(val);
+ mpfr_clear(args_int.params.W);
+ array_mpfr_free(args_int.abcissa);
+ array_mpfr_free(args_int.weights);
+ return(ret);
+ }
+
+ fprint_mpfr(stdout, val);
+ printf("\n");
+
+ mpfr_clear(val);
+ mpfr_clear(args_int.params.W);
+ array_mpfr_free(args_int.abcissa);
+ array_mpfr_free(args_int.weights);
+
+ return(0);
+}
+// using double instead of mpfr
+int compute_hh_double(hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order){
+ long double tolerance_d;
+ hh_args_integration_double args_int;
+ long double val;
+ int ret;
+
+ // convert mpfr to double
+ tolerance_d=mpfr_get_ld(tolerance, MPFR_RNDN);
+ hh_params_todouble(&(args_int.params), params);
+
+ // compute weights
+ ret=gauss_legendre_weights_ldouble(order, tolerance_d, maxiter, &(args_int.abcissa), &(args_int.weights));
+
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the integration abcissa\n");
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered when computing the integration abcissa\n");
+ return(ret);
+ }
+
+
+ // initial value
+ val=-args_int.params.omega*3*sqrtl(3)*args_int.params.t2*args_int.params.sinphi;
+
+ ret=root_newton_inplace_ldouble(&val, &hh_integration_wrapper_double, &hh_d_integration_wrapper_double, tolerance_d, maxiter, &args_int);
+
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the solution to m=0\n");
+ array_ldouble_free(args_int.abcissa);
+ array_ldouble_free(args_int.weights);
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered: either the integrand is singular or the derivative of the integral vanishes at a point of the Newton iteration\n");
+ array_ldouble_free(args_int.abcissa);
+ array_ldouble_free(args_int.weights);
+ return(ret);
+ }
+
+ printf("% .19Le\n",val);
+
+ array_ldouble_free(args_int.abcissa);
+ array_ldouble_free(args_int.weights);
+
+ return(0);
+}
+
+// compute the sunrise diagram
+int compute_ss(TYPE_I, hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order, unsigned int threads){
+ mpfr_t val;
+ int ret;
+ array_mpfr abcissa;
+ array_mpfr weights;
+
+ // compute weights
+ ret=gauss_legendre_weights_mpfr(order, tolerance, maxiter, &abcissa, &weights);
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the integration abcissa\n try increasing the precision, the tolerance, or the maximal number of Newton steps\n");
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered when computing the integration abcissa\n");
+ return(ret);
+ }
+
+ mpfr_init(val);
+
+ // compute integral
+ ret=ss_integrate(&val, I, params, abcissa, weights, threads);
+ // return codes
+ if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered: the integrand is singular\n");
+ mpfr_clear(val);
+ array_mpfr_free(abcissa);
+ array_mpfr_free(weights);
+ return(ret);
+ }
+
+ fprint_mpfr(stdout, val);
+ printf("\n");
+
+ mpfr_clear(val);
+ array_mpfr_free(abcissa);
+ array_mpfr_free(weights);
+
+ return(0);
+}
+// using doubles instead of mpfr
+int compute_ss_double(TYPE_I_DOUBLE, hh_params params, mpfr_t tolerance, unsigned int maxiter, unsigned int order, unsigned int threads){
+ long double tolerance_d;
+ hh_params_double params_d;
+ long double val;
+ int ret;
+ array_ldouble abcissa;
+ array_ldouble weights;
+
+ // convert mpfr to double
+ tolerance_d=mpfr_get_ld(tolerance, MPFR_RNDN);
+ hh_params_todouble(&params_d, params);
+
+ // compute weights
+ ret=gauss_legendre_weights_ldouble(order, tolerance_d, maxiter, &abcissa, &weights);
+ // return codes
+ if(ret==LIBINUM_ERROR_MAXITER){
+ fprintf(stderr, "error: maximum number of iterations reached when computing the integration abcissa\n try increasing the tolerance, or the maximal number of Newton steps\n");
+ return(ret);
+ }
+ else if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered when computing the integration abcissa\n");
+ return(ret);
+ }
+
+ // compute integral
+ ret=ss_integrate_double(&val, I, params_d, abcissa, weights, threads);
+ // return codes
+ if(ret==LIBINUM_ERROR_NAN){
+ fprintf(stderr, "error: infinity encountered: the integrand is singular\n");
+ array_ldouble_free(abcissa);
+ array_ldouble_free(weights);
+ return(ret);
+ }
+
+ printf("% .19Le\n",val);
+
+ array_ldouble_free(abcissa);
+ array_ldouble_free(weights);
+
+ return(0);
+}
+
diff --git a/src/parser.c b/src/parser.c
new file mode 100644
index 0000000..9c4801a
--- /dev/null
+++ b/src/parser.c
@@ -0,0 +1,274 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "parser.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+// define MPFR_USE_VA_LIST to enable the use of mpfr_inits and mpfr_clears
+#define MPFR_USE_VA_LIST
+#include <mpfr.h>
+
+// read parameter string
+#define P_VAR_T1 1
+#define P_VAR_T2 2
+#define P_VAR_LAMBDA 3
+#define P_VAR_OMEGA 4
+#define P_VAR_SINPHI 5
+#define P_VAR_W 6
+#define P_VAR_PHI 7
+int read_params(hh_params* params, char* str){
+ char* ptr;
+ char* buffer;
+ char* buffer_ptr;
+ int ret;
+ int var_flag=0;
+ unsigned int reset_W=0;
+ unsigned int set_phi=0;
+ unsigned int set_sinphi=0;
+ mpfr_t tmp, tmp2;
+
+ // +1 for the terminating '\0' written by str_addchar after the last character
+ buffer=calloc(str_len(str)+1, sizeof(char));
+ buffer_ptr=buffer;
+ *buffer_ptr='\0';
+
+
+ for(ptr=str; *ptr!='\0'; ptr++){
+ switch(*ptr){
+ // left side of equation
+ case '=':
+ if(str_cmp(buffer, "t1")==1){
+ var_flag=P_VAR_T1;
+ }
+ else if(str_cmp(buffer, "t2")==1){
+ var_flag=P_VAR_T2;
+ }
+ else if(str_cmp(buffer, "lambda")==1){
+ var_flag=P_VAR_LAMBDA;
+ }
+ else if(str_cmp(buffer, "omega")==1){
+ var_flag=P_VAR_OMEGA;
+ }
+ else if(str_cmp(buffer, "sinphi")==1){
+ var_flag=P_VAR_SINPHI;
+ // reset W to -3*omega*sqrt(3)*t2*sin(phi) provided W is not set explicitly
+ if(reset_W==0){
+ reset_W=1;
+ }
+ // do not allow setting both sinphi and phi
+ set_sinphi=1;
+ }
+ else if(str_cmp(buffer, "phi")==1){
+ var_flag=P_VAR_PHI;
+ // reset W to -3*omega*sqrt(3)*t2*sin(phi) provided W is not set explicitly
+ if(reset_W==0){
+ reset_W=1;
+ }
+ // do not allow setting both sinphi and phi
+ set_phi=1;
+ }
+ else if(str_cmp(buffer, "W")==1){
+ var_flag=P_VAR_W;
+ // do not reset W
+ reset_W=2;
+ }
+ else{
+ fprintf(stderr, "parsing error: unrecognized token '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+
+ // reset buffer
+ buffer_ptr=buffer;
+ *buffer_ptr='\0';
+ break;
+
+ // assign value
+ case ';':
+ if(var_flag==P_VAR_T1){
+ ret=mpfr_set_str(params->t1, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: t1 must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ }
+ else if(var_flag==P_VAR_T2){
+ ret=mpfr_set_str(params->t2, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: t2 must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ }
+ else if(var_flag==P_VAR_LAMBDA){
+ ret=mpfr_set_str(params->lambda, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: lambda must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ }
+ else if(var_flag==P_VAR_SINPHI){
+ if(set_phi==1){
+ fprintf(stderr, "error: cannot set both phi and sinphi\n");
+ free(buffer);
+ return(-1);
+ }
+
+ ret=mpfr_set_str(params->sinphi, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: sinphi must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ // check value
+ if(mpfr_cmp_ui(params->sinphi,1)>0 || mpfr_cmp_si(params->sinphi,-1)<0){
+ fprintf(stderr, "error: sinphi must be in [-1,1]\n");
+ free(buffer);
+ return(-1);
+ }
+ // set phi
+ mpfr_asin(params->phi, params->sinphi, MPFR_RNDN);
+ }
+ else if(var_flag==P_VAR_PHI){
+ if(set_sinphi==1){
+ fprintf(stderr, "error: cannot set both phi and sinphi\n");
+ free(buffer);
+ return(-1);
+ }
+
+ ret=mpfr_set_str(params->phi, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: phi must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ // set sinphi
+ mpfr_sin(params->sinphi, params->phi, MPFR_RNDN);
+ }
+ else if(var_flag==P_VAR_W){
+ ret=mpfr_set_str(params->W, buffer, 10, MPFR_RNDN);
+ if(ret<0){
+ fprintf(stderr, "parsing error: W must be assigned to an MPFR floating point number\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ }
+ else if(var_flag==P_VAR_OMEGA){
+ ret=sscanf(buffer, "%d", &(params->omega));
+ if(ret!=1){
+ fprintf(stderr, "parsing error: omega must be assigned to an integer\n got '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ // check value
+ if(params->omega!=1 && params->omega!=-1){
+ fprintf(stderr, "error: omega must be either +1 or -1\n");
+ free(buffer);
+ return(-1);
+ }
+ }
+ else{
+ fprintf(stderr, "parsing error: read right side of equation, but the matching token was not found\n");
+ free(buffer);
+ return(-1);
+ }
+
+ var_flag=0;
+
+ // reset buffer
+ buffer_ptr=buffer;
+ *buffer_ptr='\0';
+ break;
+
+ // add to buffer
+ default:
+ buffer_ptr=str_addchar(buffer_ptr, *ptr);
+ break;
+ }
+ }
+
+ // check that the parameter string was fully consumed
+ if(*buffer!='\0'){
+ fprintf(stderr, "parsing error: trailing characters: '%s'\n", buffer);
+ free(buffer);
+ return(-1);
+ }
+ if(var_flag!=0){
+ fprintf(stderr, "parsing error: empty assignment at the end of the string\n");
+ free(buffer);
+ return(-1);
+ }
+
+ // check that 3*abs(t2)<abs(t1)
+ mpfr_inits(tmp, tmp2, NULL);
+ mpfr_abs(tmp, params->t2, MPFR_RNDN);
+ mpfr_mul_ui(tmp, tmp, 3, MPFR_RNDN);
+ mpfr_abs(tmp2, params->t1, MPFR_RNDN);
+ if(mpfr_cmp(tmp, tmp2)>0){
+ fprintf(stderr, "error: |t2| must be smaller than |t1|/3\n");
+ mpfr_clears(tmp, tmp2, NULL);
+ free(buffer);
+ return(-1);
+ }
+
+ // if W was not set, reset its default to -3*omega*sqrt(3)*t2*sin(phi)
+ if(reset_W==1){
+ mpfr_sqrt_ui(params->W, 3, MPFR_RNDN);
+ mpfr_mul_ui(params->W, params->W, 3, MPFR_RNDN);
+ mpfr_mul(params->W, params->W, params->sinphi, MPFR_RNDN);
+ mpfr_mul(params->W, params->W, params->t2, MPFR_RNDN);
+ if(params->omega==1){
+ mpfr_neg(params->W, params->W, MPFR_RNDN);
+ }
+ }
+
+ mpfr_clears(tmp, tmp2, NULL);
+ free(buffer);
+ return(0);
+}
+
+
+// length of a string
+int str_len(char* str){
+ char* ptr=str;
+ int ret=0;
+ while(*ptr!='\0'){ret++;ptr++;}
+ return(ret);
+}
+
+// compare strings
+int str_cmp(char* str1, char* str2){
+ char* ptr1=str1;
+ char* ptr2=str2;
+ while(*ptr1==*ptr2 && *ptr1!='\0' && *ptr2!='\0'){
+ ptr1++;
+ ptr2++;
+ }
+ if(*ptr1=='\0' && *ptr2=='\0'){
+ return(1);
+ }
+ else{
+ return(0);
+ }
+}
+
+// append a character to the end of a string at the location pointed at by 'ptr'
+char* str_addchar(char* ptr, const char c){
+ *ptr=c;
+ ptr++;
+ *ptr='\0';
+ return(ptr);
+}
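+
+/*
+ Note on the parameter-string format accepted by read_params: a sequence of
+ assignments "name=value;", each terminated by ';', e.g.
+
+   "t1=1;t2=0.05;lambda=0.01;omega=-1;phi=1.2;"
+
+ Recognized names are t1, t2, lambda, omega, sinphi, phi and W. Setting both
+ phi and sinphi is rejected, and W defaults to -3*omega*sqrt(3)*t2*sin(phi)
+ unless it is set explicitly.
+*/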
+
diff --git a/src/parser.h b/src/parser.h
new file mode 100644
index 0000000..874889c
--- /dev/null
+++ b/src/parser.h
@@ -0,0 +1,37 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ parser for hhtop
+*/
+
+#ifndef PARSER_H
+#define PARSER_H
+
+#include "types.h"
+
+// read parameter string
+int read_params(hh_params* params, char* str);
+
+// utilities
+// length of a string
+int str_len(char* str);
+// compare strings
+int str_cmp(char* str1, char* str2);
+// append a character to the end of a string at the location pointed at by 'ptr'
+char* str_addchar(char* ptr, const char c);
+
+#endif
diff --git a/src/ss_integral.c b/src/ss_integral.c
new file mode 100644
index 0000000..cb39ea8
--- /dev/null
+++ b/src/ss_integral.c
@@ -0,0 +1,958 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "ss_integral.h"
+#include <pthread.h>
+
+// compute the integral
+// takes a pointer to an integrand function 'I' as an argument. 'I' must have the following arguments
+// int I(mpfr_t out, mpfr_t p1, mpfr_t p2, mpfr_t q1, mpfr_t q2, mpfr_t F1, mpfr_t F2, mpfr_t t1, mpfr_t t2, mpfr_t phi, mpfr_t W, array_mpfr* tmps, struct ss_cache cache)
+int ss_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, unsigned int threads){
+ mpfr_t val;
+ array_2_mpfr tmpss;
+ struct ss_cache cache;
+ unsigned int i;
+
+ // compute pi and sqrt3
+ mpfr_inits(cache.pi, cache.sqrt3, NULL);
+ mpfr_const_pi(cache.pi, MPFR_RNDN);
+ mpfr_sqrt_ui(cache.sqrt3, 3, MPFR_RNDN);
+
+ mpfr_init(val);
+
+ // init tmps
+ array_2_mpfr_init(&tmpss, 8*threads);
+ tmpss.length=8*threads;
+ for(i=0;i<threads;i++){
+ // 8 array_mpfr's per thread
+ // do not allocate too much memory (I'm trying to keep a modicum of optimality)
+ array_mpfr_init(tmpss.values+8*i+0, 2);
+ array_mpfr_init(tmpss.values+8*i+1, 4);
+ array_mpfr_init(tmpss.values+8*i+2, 3);
+ array_mpfr_init(tmpss.values+8*i+3, 4);
+ array_mpfr_init(tmpss.values+8*i+4, 2);
+ array_mpfr_init(tmpss.values+8*i+5, 4);
+ array_mpfr_init(tmpss.values+8*i+6, 12);
+ array_mpfr_init(tmpss.values+8*i+7, 9);
+ }
+
+ // compute A_FF, A_RF and A_RR
+ ssFF_integrate(out, I, params, abcissa, weights, cache, tmpss, threads);
+
+ ssRF_integrate(&val, I, params, abcissa, weights, cache, tmpss, threads);
+ mpfr_mul_ui(val, val, 2, MPFR_RNDN);
+ mpfr_add(*out, *out, val, MPFR_RNDN);
+
+ ssRR_integrate(&val, I, params, abcissa, weights, cache, tmpss, threads);
+ mpfr_add(*out, *out, val, MPFR_RNDN);
+
+ mpfr_clears(cache.pi, cache.sqrt3, NULL);
+ mpfr_clear(val);
+
+ array_2_mpfr_free(tmpss);
+ return(0);
+}
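+
+// As assembled above, the full integral is the sum of three pieces,
+//   out = A_FF + 2*A_RF + A_RR,
+// computed by ssFF_integrate, ssRF_integrate and ssRR_integrate, which share
+// the cached constants (pi, sqrt(3)) and 8 scratch array_mpfr's per thread.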
+
+
+// compute the integral A_FF
+int ssFF_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss, unsigned int threads){
+ ssFF_argsint_rho args;
+ int ret;
+ mpfr_t upper, lower;
+ array_pthread_t thread_ids;
+
+ // init
+ thread_ids.length=0;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+ args.cache=cache;
+ args.tmpss=tmpss;
+ args.thread_ids=&thread_ids;
+
+ // bounds
+ mpfr_inits(upper, lower, NULL);
+ mpfr_set_ui(lower, 0, MPFR_RNDN);
+ mpfr_set_ui(upper, 1, MPFR_RNDN);
+
+ if(threads==1){
+ ret=integrate_gauss_mpfr(out, &ssFF_integrand_rho, lower, upper, abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_mpfr(out, &ssFF_integrand_rho, lower, upper, abcissa, weights, &args, threads, args.thread_ids);
+ array_pthread_t_free(*(args.thread_ids));
+ }
+
+ // extra factor
+ mpfr_mul_ui(*out, *out, 6, MPFR_RNDN);
+
+ mpfr_clears(upper, lower, NULL);
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_FF
+int ssFF_integrand_rho(mpfr_t* out, mpfr_t rho, void* args){
+ ssFF_argsint_theta nargs;
+ int ret;
+ ssFF_argsint_rho* argument=(ssFF_argsint_rho*)args;
+ array_mpfr* tmps1;
+ array_mpfr* tmps2;
+ int thread_id;
+
+ // the number of the thread
+ if(argument->thread_ids->length>1){
+ thread_id=array_pthread_t_find(pthread_self(), *(argument->thread_ids));
+ if(thread_id<0){
+ fprintf(stderr, "error: thread not found\nIf you see this message, then you've uncovered a serious bug...\n");
+ return(-1);
+ }
+ }
+ else{
+ thread_id=0;
+ }
+
+ // tmps used in this function (not passed to integrand_theta)
+ tmps1=argument->tmpss.values+8*thread_id;
+ tmps2=argument->tmpss.values+8*thread_id+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.rho=rho;
+
+ // copy tmps
+ // only pass tmps reserved for this thread
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+8*thread_id+2;
+ // 6 tmps in integrand_theta
+ nargs.tmpss.length=6;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_div_si(tmps1->values[0], nargs.cache.pi, -6, MPFR_RNDN);
+ mpfr_div_ui(tmps1->values[1], nargs.cache.pi, 2, MPFR_RNDN);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssFF_integrand_theta, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over theta in A_FF
+int ssFF_integrand_theta(mpfr_t* out, mpfr_t theta, void* args){
+ ssFF_argsint_psi nargs;
+ int ret;
+ ssFF_argsint_theta* argument=(ssFF_argsint_theta*)args;
+ unsigned int a;
+ // tmps used in this function (not passed to integrand_psi)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.rho=argument->rho;
+ nargs.theta=theta;
+
+ // copy tmps
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 4 tmps in integrand_psi
+ nargs.tmpss.length=4;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(3, tmps1);
+
+ // split the integral into four parts to improve the precision
+ mpfr_set_ui(*out, 0, MPFR_RNDN);
+ for(a=0;a<4;a++){
+ // bounds
+ mpfr_set_ui(tmps1->values[0], a, MPFR_RNDN);
+ mpfr_sub_ui(tmps1->values[0], tmps1->values[0], 2, MPFR_RNDN);
+ mpfr_div_ui(tmps1->values[0], tmps1->values[0], 4, MPFR_RNDN);
+ mpfr_mul(tmps1->values[0], tmps1->values[0], nargs.cache.pi, MPFR_RNDN);
+
+ mpfr_set_ui(tmps1->values[1], a, MPFR_RNDN);
+ mpfr_sub_ui(tmps1->values[1], tmps1->values[1], 1, MPFR_RNDN);
+ mpfr_div_ui(tmps1->values[1], tmps1->values[1], 4, MPFR_RNDN);
+ mpfr_mul(tmps1->values[1], tmps1->values[1], nargs.cache.pi, MPFR_RNDN);
+
+
+ ret=integrate_gauss_smarttmp_mpfr(tmps1->values+2, &ssFF_integrand_psi, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+ if(ret<0){
+ break;
+ }
+ mpfr_add(*out, *out, tmps1->values[2], MPFR_RNDN);
+ }
+
+ return(ret);
+}
+
+// integrand of the integral over psi in A_FF
+int ssFF_integrand_psi(mpfr_t* out, mpfr_t psi, void* args){
+ ssFF_argsint_z nargs;
+ int ret;
+ ssFF_argsint_psi* argument=(ssFF_argsint_psi*)args;
+ // tmps used in this function (not passed to integrand_z)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.cache=argument->cache;
+ nargs.rho=argument->rho;
+ nargs.theta=argument->theta;
+ nargs.psi=psi;
+
+ // copy tmps
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 2 tmps in integrand_z
+ nargs.tmpss.length=2;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_set_ui(tmps1->values[0], 0, MPFR_RNDN);
+ mpfr_set_ui(tmps1->values[1], 1, MPFR_RNDN);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssFF_integrand_z, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over z in A_FF
+int ssFF_integrand_z(mpfr_t* out, mpfr_t z, void* args){
+ ssFF_argsint_z* a=(ssFF_argsint_z*)args;
+ mpfr_t* tmps;
+ // tmps used in this function
+ array_mpfr* tmps1=a->tmpss.values;
+ array_mpfr* tmps2=a->tmpss.values+1;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(7, tmps1);
+
+ tmps=tmps1->values;
+
+ // r (store in tmps[0])
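+	// r = (1-z)*(1+z*(1-cos(psi))/(1+cos(psi)))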
+ mpfr_cos(tmps[0], a->psi, MPFR_RNDN);
+ mpfr_add_ui(tmps[1], tmps[0], 1, MPFR_RNDN);
+ mpfr_ui_sub(tmps[0], 1, tmps[0], MPFR_RNDN);
+ mpfr_div(tmps[0], tmps[0], tmps[1], MPFR_RNDN);
+ mpfr_mul(tmps[0], tmps[0], z, MPFR_RNDN);
+ mpfr_add_ui(tmps[0], tmps[0], 1, MPFR_RNDN);
+ mpfr_ui_sub(tmps[1], 1, z, MPFR_RNDN);
+ mpfr_mul(tmps[0], tmps[0], tmps[1], MPFR_RNDN);
+
+ // varphi (store in tmps[1])
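+	// varphi = acos(cos(2*psi) - r*sin(2*psi)^2/2), negated if psi<0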
+ mpfr_mul_ui(tmps[1], a->psi, 2, MPFR_RNDN);
+ mpfr_sin(tmps[2], tmps[1], MPFR_RNDN);
+ mpfr_cos(tmps[1], tmps[1], MPFR_RNDN);
+ mpfr_pow_ui(tmps[2], tmps[2], 2, MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[2], tmps[0], MPFR_RNDN);
+ mpfr_div_ui(tmps[2], tmps[2], 2, MPFR_RNDN);
+ mpfr_sub(tmps[1], tmps[1], tmps[2], MPFR_RNDN);
+ mpfr_acos(tmps[1], tmps[1], MPFR_RNDN);
+ if(mpfr_cmp_ui(a->psi, 0)<0){
+ mpfr_neg(tmps[1], tmps[1], MPFR_RNDN);
+ }
+
+ // jacobian
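+	// jacobian = 4*rho^3*r*(1+r*cos(2*psi))/((1+cos(psi))*sqrt(1+r*cos(psi)^2))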
+ mpfr_mul_ui(*out, a->psi, 2, MPFR_RNDN);
+ mpfr_cos(*out, *out, MPFR_RNDN);
+ mpfr_mul(*out, *out, tmps[0], MPFR_RNDN);
+ mpfr_add_ui(*out, *out, 1, MPFR_RNDN);
+ mpfr_cos(tmps[2], a->psi, MPFR_RNDN);
+ mpfr_pow_ui(tmps[3], tmps[2], 2, MPFR_RNDN);
+ mpfr_mul(tmps[3], tmps[3], tmps[0], MPFR_RNDN);
+ mpfr_add_ui(tmps[3], tmps[3], 1, MPFR_RNDN);
+ mpfr_sqrt(tmps[3], tmps[3], MPFR_RNDN);
+ mpfr_div(*out, *out, tmps[3], MPFR_RNDN);
+ mpfr_add_ui(tmps[2], tmps[2], 1, MPFR_RNDN);
+ mpfr_div(*out, *out, tmps[2], MPFR_RNDN);
+ mpfr_mul(*out, *out, tmps[0], MPFR_RNDN);
+ mpfr_mul(*out, *out, a->rho, MPFR_RNDN);
+ mpfr_mul(*out, *out, a->rho, MPFR_RNDN);
+ mpfr_mul(*out, *out, a->rho, MPFR_RNDN);
+ mpfr_mul_ui(*out, *out, 4, MPFR_RNDN);
+
+ // cutoffs
+ ss_cutoff(tmps[2], a->rho, tmps[3], tmps[4]);
+ mpfr_mul(*out, *out, tmps[2], MPFR_RNDN);
+ mpfr_mul(tmps[2], a->rho, tmps[0], MPFR_RNDN);
+ ss_cutoff(tmps[2], tmps[2], tmps[3], tmps[4]);
+ mpfr_mul(*out, *out, tmps[2], MPFR_RNDN);
+
+ // q1 (store in tmps[2])
+ mpfr_add(tmps[2], a->theta, tmps[1], MPFR_RNDN);
+ mpfr_cos(tmps[2], tmps[2], MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[2], tmps[0], MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[2], a->rho, MPFR_RNDN);
+ mpfr_add(tmps[2], tmps[2], a->cache.sqrt3, MPFR_RNDN);
+ // q2 (store in tmps[3])
+ mpfr_add(tmps[3], a->theta, tmps[1], MPFR_RNDN);
+ mpfr_sin(tmps[3], tmps[3], MPFR_RNDN);
+ mpfr_mul(tmps[3], tmps[3], tmps[0], MPFR_RNDN);
+ mpfr_mul(tmps[3], tmps[3], a->rho, MPFR_RNDN);
+ mpfr_add_ui(tmps[3], tmps[3], 1, MPFR_RNDN);
+ if(a->params.omega==-1){
+ mpfr_neg(tmps[3], tmps[3], MPFR_RNDN);
+ }
+ // p1 (store in tmps[0])
+ mpfr_cos(tmps[0], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[0], tmps[0], a->rho, MPFR_RNDN);
+ mpfr_add(tmps[0], tmps[0], a->cache.sqrt3, MPFR_RNDN);
+ // p2 (store in tmps[1])
+ mpfr_sin(tmps[1], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[1], tmps[1], a->rho, MPFR_RNDN);
+ mpfr_add_ui(tmps[1], tmps[1], 1, MPFR_RNDN);
+ if(a->params.omega==-1){
+ mpfr_neg(tmps[1], tmps[1], MPFR_RNDN);
+ }
+ // F1 (store in tmps[4])
+ mpfr_add(tmps[4], tmps[0], tmps[2], MPFR_RNDN);
+ mpfr_sub(tmps[4], tmps[4], a->cache.sqrt3, MPFR_RNDN);
+ // F2 (store in tmps[5])
+ mpfr_add(tmps[5], tmps[1], tmps[3], MPFR_RNDN);
+ mpfr_sub_si(tmps[5], tmps[5], a->params.omega, MPFR_RNDN);
+
+ // I
+ (*(a->I))(tmps[6], tmps[0], tmps[1], tmps[2], tmps[3], tmps[4], tmps[5], a->params.t1, a->params.t2, a->params.phi, a->params.W, tmps2, a->cache);
+ mpfr_mul(*out, *out, tmps[6], MPFR_RNDN);
+
+ return(0);
+}
+
+
+// compute the integral A_RF
+int ssRF_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss, unsigned int threads){
+ ssRF_argsint_theta args;
+ int ret;
+ mpfr_t val;
+ mpfr_t lower, upper;
+ unsigned int a;
+ array_pthread_t thread_ids;
+
+ // init
+ thread_ids.length=0;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+ args.cache=cache;
+ args.tmpss=tmpss;
+ args.thread_ids=&thread_ids;
+
+ mpfr_inits(val, lower, upper, NULL);
+
+	// split the integral into three parts to improve the precision
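+	// theta ranges over [(4a-1)*pi/6, (4a+3)*pi/6] for a=0,1,2, covering [-pi/6, 11*pi/6]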
+ mpfr_set_ui(*out, 0, MPFR_RNDN);
+ for(a=0;a<3;a++){
+ // bounds
+ mpfr_set_ui(lower, a, MPFR_RNDN);
+ mpfr_mul_ui(lower, lower, 4, MPFR_RNDN);
+ mpfr_sub_ui(lower, lower, 1, MPFR_RNDN);
+ mpfr_div_ui(lower, lower, 6, MPFR_RNDN);
+ mpfr_mul(lower, lower, args.cache.pi, MPFR_RNDN);
+
+ mpfr_set_ui(upper, a, MPFR_RNDN);
+ mpfr_mul_ui(upper, upper, 4, MPFR_RNDN);
+ mpfr_add_ui(upper, upper, 3, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ mpfr_mul(upper, upper, args.cache.pi, MPFR_RNDN);
+
+ if(threads==1){
+ ret=integrate_gauss_mpfr(&val, &ssRF_integrand_theta, lower, upper, abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_mpfr(&val, &ssRF_integrand_theta, lower, upper, abcissa, weights, &args, threads, args.thread_ids);
+ array_pthread_t_free(*(args.thread_ids));
+ }
+ if(ret<0){
+ break;
+ }
+
+ mpfr_add(*out, *out, val, MPFR_RNDN);
+ }
+
+ // extra factor
+ mpfr_mul_ui(*out, *out, 3, MPFR_RNDN);
+
+ mpfr_clears(val, lower, upper, NULL);
+ return(ret);
+}
+
+// integrand of the integral over theta in A_RF
+int ssRF_integrand_theta(mpfr_t* out, mpfr_t theta, void* args){
+ ssRF_argsint_varphi nargs;
+ int ret;
+ ssRF_argsint_theta* argument=(ssRF_argsint_theta*)args;
+ // tmps used in this function (not passed to integrand_varphi)
+ array_mpfr* tmps1;
+ array_mpfr* tmps2;
+ int thread_id;
+
+ // the number of the thread
+ if(argument->thread_ids->length>1){
+ thread_id=array_pthread_t_find(pthread_self(), *(argument->thread_ids));
+ if(thread_id<0){
+ fprintf(stderr, "error: thread not found\nIf you see this message, then you've uncovered a serious bug...\n");
+ return(-1);
+ }
+ }
+ else{
+ thread_id=0;
+ }
+
+ // tmps used in this function (not passed to integrand_varphi)
+ tmps1=argument->tmpss.values+8*thread_id;
+ tmps2=argument->tmpss.values+8*thread_id+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.theta=theta;
+
+ // copy tmps
+ // only pass tmps reserved for this thread
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+8*thread_id+2;
+ // 6 tmps in integrand_varphi
+ nargs.tmpss.length=6;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_div_si(tmps1->values[0], nargs.cache.pi, -6, MPFR_RNDN);
+ mpfr_div_ui(tmps1->values[1], nargs.cache.pi, 2, MPFR_RNDN);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRF_integrand_varphi, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over varphi in A_RF
+int ssRF_integrand_varphi(mpfr_t* out, mpfr_t varphi, void* args){
+ ssRF_argsint_r nargs;
+ int ret;
+ ssRF_argsint_varphi* argument=(ssRF_argsint_varphi*)args;
+ // tmps used in this function (not passed to integrand_r)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.theta=argument->theta;
+ nargs.varphi=varphi;
+
+ // copy tmps
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 4 tmps in integrand_r
+ nargs.tmpss.length=4;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_set_ui(tmps1->values[0], 0, MPFR_RNDN);
+ mpfr_set_ui(tmps1->values[1], 1, MPFR_RNDN);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRF_integrand_r, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over r in A_RF
+int ssRF_integrand_r(mpfr_t* out, mpfr_t r, void* args){
+ ssRF_argsint_rho nargs;
+ int ret;
+ ssRF_argsint_r* argument=(ssRF_argsint_r*)args;
+ // tmps used in this function (not passed to integrand_rho)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.cache=argument->cache;
+ nargs.theta=argument->theta;
+ nargs.varphi=argument->varphi;
+ nargs.r=r;
+
+ // copy tmps
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 2 tmps in integrand_rho
+ nargs.tmpss.length=2;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_set_ui(tmps1->values[0], 0, MPFR_RNDN);
+ ss_R(tmps1->values[1], nargs.varphi, 0, nargs.cache);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRF_integrand_rho, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_RF
+int ssRF_integrand_rho(mpfr_t* out, mpfr_t rho, void* args){
+ int nu;
+ ssRF_argsint_rho* a=(ssRF_argsint_rho*)args;
+ mpfr_t* tmps;
+ // tmps used in this function
+ array_mpfr* tmps1=a->tmpss.values;
+ array_mpfr* tmps2=a->tmpss.values+1;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(11, tmps1);
+
+ tmps=tmps1->values;
+
+ mpfr_set_ui(*out, 0, MPFR_RNDN);
+ for(nu=-1;nu<=1;nu=nu+2){
+ // q1 (store in tmps[2])
+ mpfr_cos(tmps[2], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[2], a->r, MPFR_RNDN);
+ mpfr_add(tmps[2], tmps[2], a->cache.sqrt3, MPFR_RNDN);
+ // q2 (store in tmps[3])
+ mpfr_sin(tmps[3], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[3], tmps[3], a->r, MPFR_RNDN);
+ mpfr_add_ui(tmps[3], tmps[3], 1, MPFR_RNDN);
+ if(a->params.omega==-1){
+ mpfr_neg(tmps[3], tmps[3], MPFR_RNDN);
+ }
+ // F1 (store in tmps[4])
+ mpfr_cos(tmps[4], a->varphi, MPFR_RNDN);
+ mpfr_mul(tmps[4], tmps[4], rho, MPFR_RNDN);
+ mpfr_add(tmps[4], tmps[4], a->cache.sqrt3, MPFR_RNDN);
+ // F2 (store in tmps[5])
+ mpfr_sin(tmps[5], a->varphi, MPFR_RNDN);
+ mpfr_mul(tmps[5], tmps[5], rho, MPFR_RNDN);
+ mpfr_add_ui(tmps[5], tmps[5], 1, MPFR_RNDN);
+ if(nu==-1){
+ mpfr_neg(tmps[5], tmps[5], MPFR_RNDN);
+ }
+ // p1 (store in tmps[0])
+ mpfr_sub(tmps[0], tmps[4], tmps[2], MPFR_RNDN);
+ mpfr_add(tmps[0], tmps[0], a->cache.sqrt3, MPFR_RNDN);
+ // p2 (store in tmps[1])
+ mpfr_sub(tmps[1], tmps[5], tmps[3], MPFR_RNDN);
+ mpfr_add_si(tmps[1], tmps[1], a->params.omega, MPFR_RNDN);
+
+ // cutoff
+ // tmps[6]=(1-ss_cutoff(ss_norm(F1-q1,F2-q2)))*ss_cutoff(r)
+ mpfr_sub(tmps[6], tmps[4], tmps[2], MPFR_RNDN);
+ mpfr_sub(tmps[7], tmps[5], tmps[3], MPFR_RNDN);
+ ss_norm(tmps[8], tmps[6], tmps[7], a->cache, tmps[9], tmps[10]);
+ ss_cutoff(tmps[6], tmps[8], tmps[7], tmps[9]);
+ mpfr_ui_sub(tmps[6], 1, tmps[6], MPFR_RNDN);
+ ss_cutoff(tmps[7], a->r, tmps[8], tmps[9]);
+ mpfr_mul(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+
+ (*(a->I))(tmps[7], tmps[0], tmps[1], tmps[2], tmps[3], tmps[4], tmps[5], a->params.t1, a->params.t2, a->params.phi, a->params.W, tmps2, a->cache);
+
+ // r*rho*I*cutoffs
+ mpfr_mul(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+ mpfr_mul(tmps[6], tmps[6], a->r, MPFR_RNDN);
+ mpfr_mul(tmps[6], tmps[6], rho, MPFR_RNDN);
+
+ // add to *out
+ mpfr_add(*out, *out, tmps[6], MPFR_RNDN);
+ }
+
+ return(0);
+}
+
+
+// compute the integral A_RR
+int ssRR_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss, unsigned int threads){
+ ssRR_argsint_theta args;
+ int ret;
+ unsigned int a;
+ mpfr_t lower, upper;
+ mpfr_t val;
+ array_pthread_t thread_ids;
+
+ // init
+ thread_ids.length=0;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+ args.cache=cache;
+ args.tmpss=tmpss;
+ args.thread_ids=&thread_ids;
+
+ mpfr_inits(val, lower, upper, NULL);
+
+	// split the integral into three parts
+ mpfr_set_ui(*out, 0, MPFR_RNDN);
+ for(a=0;a<3;a++){
+ // bounds
+ mpfr_set_ui(lower, a, MPFR_RNDN);
+ mpfr_mul_ui(lower, lower, 4, MPFR_RNDN);
+ mpfr_sub_ui(lower, lower, 1, MPFR_RNDN);
+ mpfr_div_ui(lower, lower, 6, MPFR_RNDN);
+ mpfr_mul(lower, lower, args.cache.pi, MPFR_RNDN);
+
+ mpfr_set_ui(upper, a, MPFR_RNDN);
+ mpfr_mul_ui(upper, upper, 4, MPFR_RNDN);
+ mpfr_add_ui(upper, upper, 3, MPFR_RNDN);
+ mpfr_div_ui(upper, upper, 6, MPFR_RNDN);
+ mpfr_mul(upper, upper, args.cache.pi, MPFR_RNDN);
+
+ // save sector
+ args.sector_theta=a;
+
+ if(threads==1){
+ ret=integrate_gauss_mpfr(&val, &ssRR_integrand_theta, lower, upper, abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_mpfr(&val, &ssRR_integrand_theta, lower, upper, abcissa, weights, &args, threads, args.thread_ids);
+ array_pthread_t_free(*(args.thread_ids));
+ }
+ if(ret<0){
+ break;
+ }
+
+ mpfr_add(*out, *out, val, MPFR_RNDN);
+ }
+ // extra factor
+ mpfr_mul_ui(*out, *out, 3, MPFR_RNDN);
+
+ mpfr_clears(val, lower, upper, NULL);
+
+ return(ret);
+}
+
+// integrand of the integral over theta in A_RR
+int ssRR_integrand_theta(mpfr_t* out, mpfr_t theta, void* args){
+ ssRR_argsint_varphi nargs;
+ int ret;
+ ssRR_argsint_theta* argument=(ssRR_argsint_theta*)args;
+ array_mpfr* tmps1;
+ array_mpfr* tmps2;
+ int thread_id;
+
+ // the number of the thread
+ if(argument->thread_ids->length>1){
+ thread_id=array_pthread_t_find(pthread_self(), *(argument->thread_ids));
+ if(thread_id<0){
+ fprintf(stderr, "error: thread not found\nIf you see this message, then you've uncovered a serious bug...\n");
+ return(-1);
+ }
+ }
+ else{
+ thread_id=0;
+ }
+
+ // tmps used in this function (not passed to integrand_varphi)
+ tmps1=argument->tmpss.values+8*thread_id;
+ tmps2=argument->tmpss.values+8*thread_id+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.sector_theta=argument->sector_theta;
+ nargs.theta=theta;
+
+ // copy tmps
+ // only pass tmps reserved for this thread
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+8*thread_id+2;
+ // 6 tmps in integrand_varphi
+ nargs.tmpss.length=6;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_div_si(tmps1->values[0], nargs.cache.pi, -6, MPFR_RNDN);
+ mpfr_div_ui(tmps1->values[1], nargs.cache.pi, 2, MPFR_RNDN);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRR_integrand_varphi, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over varphi in A_RR
+int ssRR_integrand_varphi(mpfr_t* out, mpfr_t varphi, void* args){
+ ssRR_argsint_r nargs;
+ int ret;
+ ssRR_argsint_varphi* argument=(ssRR_argsint_varphi*)args;
+ // tmps used in this function (not passed to integrand_r)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.cache=argument->cache;
+ nargs.theta=argument->theta;
+ nargs.varphi=varphi;
+
+ // copy tmps
+ // only pass tmps reserved for this thread
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 4 tmps in integrand_r
+ nargs.tmpss.length=4;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_set_ui(tmps1->values[0], 0, MPFR_RNDN);
+ ss_R(tmps1->values[1], nargs.theta, argument->sector_theta, nargs.cache);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRR_integrand_r, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over r in A_RR
+int ssRR_integrand_r(mpfr_t* out, mpfr_t r, void* args){
+ ssRR_argsint_rho nargs;
+ int ret;
+ ssRR_argsint_r* argument=(ssRR_argsint_r*)args;
+ // tmps used in this function (not passed to integrand_rho)
+ array_mpfr* tmps1=argument->tmpss.values;
+ array_mpfr* tmps2=argument->tmpss.values+1;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.cache=argument->cache;
+ nargs.theta=argument->theta;
+ nargs.varphi=argument->varphi;
+ nargs.r=r;
+
+ // copy tmps
+ // only pass tmps reserved for this thread
+ // warning: 'nargs.tmpss' must not be freed or resized
+ nargs.tmpss.values=argument->tmpss.values+2;
+ // 2 tmps in integrand_rho
+ nargs.tmpss.length=2;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(2, tmps1);
+
+ // bounds
+ mpfr_set_ui(tmps1->values[0], 0, MPFR_RNDN);
+ ss_R(tmps1->values[1], nargs.varphi, 0, nargs.cache);
+
+ ret=integrate_gauss_smarttmp_mpfr(out, &ssRR_integrand_rho, tmps1->values[0], tmps1->values[1], argument->abcissa, argument->weights, tmps2, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_RR
+int ssRR_integrand_rho(mpfr_t* out, mpfr_t rho, void* args){
+ int eta, nu;
+ ssRR_argsint_rho* a=(ssRR_argsint_rho*)args;
+ mpfr_t* tmps;
+ // tmps used in this function (not passed to integrand_rho)
+ array_mpfr* tmps1=a->tmpss.values;
+ array_mpfr* tmps2=a->tmpss.values+1;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(12, tmps1);
+
+ tmps=tmps1->values;
+
+ mpfr_set_ui(*out, 0, MPFR_RNDN);
+ for(eta=-1;eta<=1;eta=eta+2){
+ for(nu=-1;nu<=1;nu=nu+2){
+ // q1 (store in tmps[2])
+ mpfr_cos(tmps[2], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[2], a->r, MPFR_RNDN);
+ mpfr_add(tmps[2], tmps[2], a->cache.sqrt3, MPFR_RNDN);
+ // q2 (store in tmps[3])
+ mpfr_sin(tmps[3], a->theta, MPFR_RNDN);
+ mpfr_mul(tmps[3], tmps[3], a->r, MPFR_RNDN);
+ mpfr_add_ui(tmps[3], tmps[3], 1, MPFR_RNDN);
+ if(eta==-1){
+ mpfr_neg(tmps[3], tmps[3], MPFR_RNDN);
+ }
+ // F1 (store in tmps[4])
+ mpfr_cos(tmps[4], a->varphi, MPFR_RNDN);
+ mpfr_mul(tmps[4], tmps[4], rho, MPFR_RNDN);
+ mpfr_add(tmps[4], tmps[4], a->cache.sqrt3, MPFR_RNDN);
+ // F2 (store in tmps[5])
+ mpfr_sin(tmps[5], a->varphi, MPFR_RNDN);
+ mpfr_mul(tmps[5], tmps[5], rho, MPFR_RNDN);
+ mpfr_add_ui(tmps[5], tmps[5], 1, MPFR_RNDN);
+ if(nu==-1){
+ mpfr_neg(tmps[5], tmps[5], MPFR_RNDN);
+ }
+ // p1 (store in tmps[0])
+ mpfr_sub(tmps[0], tmps[4], tmps[2], MPFR_RNDN);
+ mpfr_add(tmps[0], tmps[0], a->cache.sqrt3, MPFR_RNDN);
+ // p2 (store in tmps[1])
+ mpfr_sub(tmps[1], tmps[5], tmps[3], MPFR_RNDN);
+ mpfr_add_si(tmps[1], tmps[1], a->params.omega, MPFR_RNDN);
+
+ // cutoff
+ // tmps[6]=(1-ss_cutoff(ss_norm(F1-q1,F2-q2)))*(1-ss_cutoff(ss_norm(q1-SQRT3,q2-omega)))
+ mpfr_sub(tmps[6], tmps[4], tmps[2], MPFR_RNDN);
+ mpfr_sub(tmps[7], tmps[5], tmps[3], MPFR_RNDN);
+ ss_norm(tmps[8], tmps[6], tmps[7], a->cache, tmps[9], tmps[10]);
+ ss_cutoff(tmps[6], tmps[8], tmps[7], tmps[9]);
+ mpfr_ui_sub(tmps[6], 1, tmps[6], MPFR_RNDN);
+ mpfr_sub(tmps[7], tmps[2], a->cache.sqrt3, MPFR_RNDN);
+ mpfr_sub_si(tmps[8], tmps[3], a->params.omega, MPFR_RNDN);
+ ss_norm(tmps[9], tmps[7], tmps[8], a->cache, tmps[10], tmps[11]);
+ ss_cutoff(tmps[7], tmps[9], tmps[8], tmps[10]);
+ mpfr_ui_sub(tmps[7], 1, tmps[7], MPFR_RNDN);
+ mpfr_mul(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+
+ (*(a->I))(tmps[7], tmps[0], tmps[1], tmps[2], tmps[3], tmps[4], tmps[5], a->params.t1, a->params.t2, a->params.phi, a->params.W, tmps2, a->cache);
+
+ // r*rho*I*cutoffs
+ mpfr_mul(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+ mpfr_mul(tmps[6], tmps[6], a->r, MPFR_RNDN);
+ mpfr_mul(tmps[6], tmps[6], rho, MPFR_RNDN);
+
+ // add to *out
+ mpfr_add(*out, *out, tmps[6], MPFR_RNDN);
+ }
+ }
+
+ return(0);
+}
+
+
+// R(theta)
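+// R(theta) = 1/cos(theta + (8*sector-1)*pi/6)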
+int ss_R(mpfr_t out, mpfr_t theta, unsigned int sector, struct ss_cache cache){
+ mpfr_set_ui(out, sector, MPFR_RNDN);
+ mpfr_mul_ui(out, out, 8, MPFR_RNDN);
+ mpfr_sub_ui(out, out, 1, MPFR_RNDN);
+ mpfr_mul(out, out, cache.pi, MPFR_RNDN);
+ mpfr_div_ui(out, out, 6, MPFR_RNDN);
+ mpfr_add(out, out, theta, MPFR_RNDN);
+ mpfr_cos(out, out, MPFR_RNDN);
+ mpfr_ui_div(out, 1, out, MPFR_RNDN);
+ return(0);
+}
+
+// cutoff function
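+// smooth step: returns 1 for x<=1/2, 0 for x>=1, and
+// e1/(e2+e1*(1-e2)) with e1=exp(-1/2/(1-x)), e2=exp(-1/2/(x-1/2)) in between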
+int ss_cutoff(mpfr_t out, mpfr_t x, mpfr_t tmp1, mpfr_t tmp2){
+
+ if(mpfr_cmp_d(x,0.5)<=0){
+ mpfr_set_ui(out, 1, MPFR_RNDN);
+ return(0);
+ }
+ if(mpfr_cmp_ui(x,1)>=0){
+ mpfr_set_ui(out, 0, MPFR_RNDN);
+ return(0);
+ }
+
+ mpfr_ui_sub(tmp1, 1, x, MPFR_RNDN);
+ mpfr_d_div(tmp1, -0.5, tmp1, MPFR_RNDN);
+ mpfr_exp(tmp1, tmp1, MPFR_RNDN);
+
+ mpfr_sub_d(tmp2, x, 0.5, MPFR_RNDN);
+ mpfr_d_div(tmp2, -0.5, tmp2, MPFR_RNDN);
+ mpfr_exp(tmp2, tmp2, MPFR_RNDN);
+
+ mpfr_ui_sub(out, 1, tmp2, MPFR_RNDN);
+ mpfr_mul(out, out, tmp1, MPFR_RNDN);
+ mpfr_add(out, out, tmp2, MPFR_RNDN);
+ mpfr_div(out, tmp1, out, MPFR_RNDN);
+
+ return(0);
+}
+
+// periodic norm
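+// norm of (k1,k2) translated back to the fundamental domain of the lattice generated by (sqrt3,3) and (sqrt3,-3)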
+int ss_norm(mpfr_t out, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1, mpfr_t tmp2){
+ // split R^2 into equilateral triangles and find which triangle (k1,k2) is in
+ // tmp1=floor(k1/sqrt3)+1
+ // tmp2=floor(k1/2/sqrt3-k2/2)+1
+ // out=floor(k1/2/sqrt3+k2/2)+1
+ mpfr_div(tmp1, k1, cache.sqrt3, MPFR_RNDN);
+ mpfr_sub(tmp2, tmp1, k2, MPFR_RNDN);
+ mpfr_add(out, tmp1, k2, MPFR_RNDN);
+ mpfr_floor(tmp1, tmp1);
+ mpfr_add_ui(tmp1, tmp1, 1, MPFR_RNDN);
+ mpfr_div_ui(tmp2, tmp2, 2, MPFR_RNDN);
+ mpfr_floor(tmp2, tmp2);
+ mpfr_add_ui(tmp2, tmp2, 1, MPFR_RNDN);
+ mpfr_div_ui(out, out, 2, MPFR_RNDN);
+ mpfr_floor(out, out);
+ mpfr_add_ui(out, out, 1, MPFR_RNDN);
+
+	// translate (k1,k2) by -a*(SQRT3,3)-b*(SQRT3,-3)
+	// tmp2=b-a=floor((tmp2-out+1)/3)
+ mpfr_sub(tmp2, tmp2, out, MPFR_RNDN);
+ mpfr_add_ui(tmp2, tmp2, 1, MPFR_RNDN);
+ mpfr_div_ui(tmp2, tmp2, 3, MPFR_RNDN);
+ mpfr_floor(tmp2, tmp2);
+ // tmp1=b=floor((tmp1+b-a)/2)
+ mpfr_add(tmp1, tmp1, tmp2, MPFR_RNDN);
+ mpfr_div_ui(tmp1, tmp1, 2, MPFR_RNDN);
+ mpfr_floor(tmp1, tmp1);
+ // tmp2=a
+ mpfr_sub(tmp2, tmp1, tmp2, MPFR_RNDN);
+
+ // out=(k1-sqrt3*(a+b))**2
+ mpfr_add(out, tmp1, tmp2, MPFR_RNDN);
+ mpfr_mul(out, out, cache.sqrt3, MPFR_RNDN);
+ mpfr_sub(out, k1, out, MPFR_RNDN);
+ mpfr_pow_ui(out, out, 2, MPFR_RNDN);
+ // tmp1=(k2-3*(a-b))**2
+ mpfr_sub(tmp1, tmp2, tmp1, MPFR_RNDN);
+ mpfr_mul_ui(tmp1, tmp1, 3, MPFR_RNDN);
+ mpfr_sub(tmp1, k2, tmp1, MPFR_RNDN);
+ mpfr_pow_ui(tmp1, tmp1, 2, MPFR_RNDN);
+
+ // out=sqrt((k1-sqrt3*(a+b))**2+(k2-3*(a-b))**2)
+ mpfr_add(out, out, tmp1, MPFR_RNDN);
+ mpfr_sqrt(out, out, MPFR_RNDN);
+
+ return(0);
+}
diff --git a/src/ss_integral.h b/src/ss_integral.h
new file mode 100644
index 0000000..fb30ddd
--- /dev/null
+++ b/src/ss_integral.h
@@ -0,0 +1,200 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ Computation of the sunrise diagram
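+
+  The integrand function 'I' passed to these routines must match the TYPE_I prototype defined in types.h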
+*/
+
+#ifndef SS_INTEGRAL_H
+#define SS_INTEGRAL_H
+
+#include <libinum.h>
+#include "types.h"
+
+// compute pi and sqrt3 ahead of time
+struct ss_cache {
+ mpfr_t pi;
+ mpfr_t sqrt3;
+};
+
+// extra arguments for the integrations
+// A_FF
+typedef struct ssFF_argsint_rho {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ array_pthread_t* thread_ids;
+} ssFF_argsint_rho;
+typedef struct ssFF_argsint_theta {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr rho;
+} ssFF_argsint_theta;
+typedef struct ssFF_argsint_psi {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr rho;
+ mpfr_ptr theta;
+} ssFF_argsint_psi;
+typedef struct ssFF_argsint_z {
+ hh_params params;
+ TYPE_I;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr rho;
+ mpfr_ptr theta;
+ mpfr_ptr psi;
+} ssFF_argsint_z;
+
+// A_RF
+typedef struct ssRF_argsint_theta {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ array_pthread_t* thread_ids;
+} ssRF_argsint_theta;
+typedef struct ssRF_argsint_varphi {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr theta;
+} ssRF_argsint_varphi;
+typedef struct ssRF_argsint_r {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr theta;
+ mpfr_ptr varphi;
+} ssRF_argsint_r;
+typedef struct ssRF_argsint_rho {
+ hh_params params;
+ TYPE_I;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr theta;
+ mpfr_ptr varphi;
+ mpfr_ptr r;
+} ssRF_argsint_rho;
+
+// A_RR
+typedef struct ssRR_argsint_theta {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ array_pthread_t* thread_ids;
+ int sector_theta;
+} ssRR_argsint_theta;
+typedef struct ssRR_argsint_varphi {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ int sector_theta;
+ mpfr_ptr theta;
+} ssRR_argsint_varphi;
+typedef struct ssRR_argsint_r {
+ hh_params params;
+ TYPE_I;
+ array_mpfr abcissa;
+ array_mpfr weights;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr theta;
+ mpfr_ptr varphi;
+} ssRR_argsint_r;
+typedef struct ssRR_argsint_rho {
+ hh_params params;
+ TYPE_I;
+ struct ss_cache cache;
+ array_2_mpfr tmpss;
+ mpfr_ptr theta;
+ mpfr_ptr varphi;
+ mpfr_ptr r;
+} ssRR_argsint_rho;
+
+// compute the integral
+int ss_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, unsigned int threads);
+
+// compute the integral A_FF
+int ssFF_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss , unsigned int threads);
+// integrand of the integral over rho in A_FF
+int ssFF_integrand_rho(mpfr_t* out, mpfr_t rho, void* args);
+// integrand of the integral over theta in A_FF
+int ssFF_integrand_theta(mpfr_t* out, mpfr_t theta, void* args);
+// integrand of the integral over psi in A_FF
+int ssFF_integrand_psi(mpfr_t* out, mpfr_t psi, void* args);
+// integrand of the integral over z in A_FF
+int ssFF_integrand_z(mpfr_t* out, mpfr_t z, void* args);
+
+// compute the integral A_RF
+int ssRF_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss, unsigned int threads);
+// integrand of the integral over theta in A_RF
+int ssRF_integrand_theta(mpfr_t* out, mpfr_t theta, void* args);
+// integrand of the integral over varphi in A_RF
+int ssRF_integrand_varphi(mpfr_t* out, mpfr_t varphi, void* args);
+// integrand of the integral over r in A_RF
+int ssRF_integrand_r(mpfr_t* out, mpfr_t r, void* args);
+// integrand of the integral over rho in A_RF
+int ssRF_integrand_rho(mpfr_t* out, mpfr_t rho, void* args);
+
+// compute the integral A_RR
+int ssRR_integrate(mpfr_t* out, TYPE_I, hh_params params, array_mpfr abcissa, array_mpfr weights, struct ss_cache cache, array_2_mpfr tmpss, unsigned int threads);
+// integrand of the integral over theta in A_RR
+int ssRR_integrand_theta(mpfr_t* out, mpfr_t theta, void* args);
+// integrand of the integral over varphi in A_RR
+int ssRR_integrand_varphi(mpfr_t* out, mpfr_t varphi, void* args);
+// integrand of the integral over r in A_RR
+int ssRR_integrand_r(mpfr_t* out, mpfr_t r, void* args);
+// integrand of the integral over rho in A_RR
+int ssRR_integrand_rho(mpfr_t* out, mpfr_t rho, void* args);
+
+// R(theta)
+int ss_R(mpfr_t out, mpfr_t theta, unsigned int sector, struct ss_cache cache);
+
+// cutoff function
+int ss_cutoff(mpfr_t out, mpfr_t x, mpfr_t tmp1, mpfr_t tmp2);
+
+// periodic norm
+int ss_norm(mpfr_t out, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1, mpfr_t tmp2);
+
+#endif
+
+
diff --git a/src/ss_integral_double.c b/src/ss_integral_double.c
new file mode 100644
index 0000000..00ab637
--- /dev/null
+++ b/src/ss_integral_double.c
@@ -0,0 +1,437 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "ss_integral_double.h"
+#include <math.h>
+
+#define PI 3.1415926535897932385L
+#define SQRT3 1.7320508075688772935L
+
+// compute the integral
+int ss_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads){
+ long double val;
+
+ // compute A_FF, A_RF and A_RR
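+	// A = A_FF + 2*A_RF + A_RR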
+ ssFF_integrate_double(out, I, params, abcissa, weights, threads);
+ ssRF_integrate_double(&val, I, params, abcissa, weights, threads);
+ *out+=2*val;
+ ssRR_integrate_double(&val, I, params, abcissa, weights, threads);
+ *out+=val;
+
+ return(0);
+}
+
+
+// compute the integral A_FF
+int ssFF_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads){
+ ssFF_argsint_rho_double args;
+ int ret;
+ array_pthread_t thread_ids;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+
+ if(threads==1){
+ ret=integrate_gauss_ldouble(out, &ssFF_integrand_rho_double, 0., 1., abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_ldouble(out, &ssFF_integrand_rho_double, 0., 1., abcissa, weights, &args, threads, &thread_ids);
+ array_pthread_t_free(thread_ids);
+ }
+
+ // extra factor
+ *out*=6;
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_FF
+int ssFF_integrand_rho_double(long double* out, long double rho, void* args){
+ ssFF_argsint_theta_double nargs;
+ int ret;
+ ssFF_argsint_rho_double* argument=(ssFF_argsint_rho_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.rho=rho;
+
+ ret=integrate_gauss_ldouble(out, &ssFF_integrand_theta_double, -PI/6, PI/2, argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over theta in A_FF
+int ssFF_integrand_theta_double(long double* out, long double theta, void* args){
+ ssFF_argsint_psi_double nargs;
+ int ret;
+ ssFF_argsint_theta_double* argument=(ssFF_argsint_theta_double*)args;
+ long double val;
+ int a;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.rho=argument->rho;
+ nargs.theta=theta;
+
+ // split the integral into four parts to improve the precision
+ *out=0;
+ for(a=0;a<4;a++){
+ ret=integrate_gauss_ldouble(&val, &ssFF_integrand_psi_double, PI/4*(a-2), PI/4*(a-1), argument->abcissa, argument->weights, &nargs);
+ if(ret<0){
+ break;
+ }
+ *out+=val;
+ }
+
+ return(ret);
+}
+
+// integrand of the integral over psi in A_FF
+int ssFF_integrand_psi_double(long double* out, long double psi, void* args){
+ ssFF_argsint_z_double nargs;
+ int ret;
+ ssFF_argsint_psi_double* argument=(ssFF_argsint_psi_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.rho=argument->rho;
+ nargs.theta=argument->theta;
+ nargs.psi=psi;
+
+ ret=integrate_gauss_ldouble(out, &ssFF_integrand_z_double, 0., 1., argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over z in A_FF
+int ssFF_integrand_z_double(long double* out, long double z, void* args){
+ ssFF_argsint_z_double* argument=(ssFF_argsint_z_double*)args;
+ long double p1,p2,q1,q2,F1,F2;
+
+ // r and varphi (store in p1 and p2)
+ p1=(1-z)*(1+z*(1-cosl(argument->psi))/(1+cosl(argument->psi)));
+	p2=acosl(cosl(2*argument->psi)-p1/2*sinl(2*argument->psi)*sinl(2*argument->psi));
+ if(argument->psi<0.){
+ p2=-p2;
+ }
+
+ // jacobian
+ *out=4*argument->rho*argument->rho*argument->rho*p1*(1+p1*cosl(2*argument->psi))/(1+cosl(argument->psi))/sqrtl(1+p1*cosl(argument->psi)*cosl(argument->psi));
+
+ // cutoffs
+ *out*=ss_cutoff_double(argument->rho)*ss_cutoff_double(argument->rho*p1);
+
+ q1=SQRT3+argument->rho*p1*cosl(argument->theta+p2);
+ q2=argument->params.omega*(1+argument->rho*p1*sinl(argument->theta+p2));
+ p1=SQRT3+argument->rho*cosl(argument->theta);
+ p2=argument->params.omega*(1+argument->rho*sinl(argument->theta));
+ F1=p1+q1-SQRT3;
+ F2=p2+q2-argument->params.omega;
+
+ *out*=(*(argument->I))(p1, p2, q1, q2, F1, F2, argument->params.t1, argument->params.t2, argument->params.phi, argument->params.W);
+
+ return(0);
+}
+
+
+// compute the integral A_RF
+int ssRF_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads){
+ ssRF_argsint_theta_double args;
+ int ret;
+ long double val;
+ int a;
+ array_pthread_t thread_ids;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+
+	// split the integral into three parts to improve the precision
+ *out=0;
+ for(a=0;a<3;a++){
+ if(threads==1){
+ ret=integrate_gauss_ldouble(&val, &ssRF_integrand_theta_double, (4*a-1)*PI/6, (4*a+3)*PI/6, abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_ldouble(&val, &ssRF_integrand_theta_double, (4*a-1)*PI/6, (4*a+3)*PI/6, abcissa, weights, &args, threads, &thread_ids);
+ array_pthread_t_free(thread_ids);
+ }
+ if(ret<0){
+ break;
+ }
+
+ *out+=val;
+ }
+
+ // extra factor
+ *out*=3;
+
+ return(ret);
+}
+
+// integrand of the integral over theta in A_RF
+int ssRF_integrand_theta_double(long double* out, long double theta, void* args){
+ ssRF_argsint_varphi_double nargs;
+ int ret;
+ ssRF_argsint_theta_double* argument=(ssRF_argsint_theta_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.theta=theta;
+
+ ret=integrate_gauss_ldouble(out, &ssRF_integrand_varphi_double, -PI/6, PI/2, argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over varphi in A_RF
+int ssRF_integrand_varphi_double(long double* out, long double varphi, void* args){
+ ssRF_argsint_r_double nargs;
+ int ret;
+ ssRF_argsint_varphi_double* argument=(ssRF_argsint_varphi_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.theta=argument->theta;
+ nargs.varphi=varphi;
+
+ ret=integrate_gauss_ldouble(out, &ssRF_integrand_r_double, 0., 1., argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over r in A_RF
+int ssRF_integrand_r_double(long double* out, long double r, void* args){
+ ssRF_argsint_rho_double nargs;
+ int ret;
+ ssRF_argsint_r_double* argument=(ssRF_argsint_r_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.theta=argument->theta;
+ nargs.varphi=argument->varphi;
+ nargs.r=r;
+
+ ret=integrate_gauss_ldouble(out, &ssRF_integrand_rho_double, 0., ss_R_double(nargs.varphi, 0), argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_RF
+int ssRF_integrand_rho_double(long double* out, long double rho, void* args){
+ int nu;
+ ssRF_argsint_rho_double* argument=(ssRF_argsint_rho_double*)args;
+ long double p1,p2,q1,q2,F1,F2;
+
+ *out=0.;
+ for(nu=-1;nu<=1;nu=nu+2){
+ q1=SQRT3+argument->r*cosl(argument->theta);
+ q2=argument->params.omega*(1+argument->r*sinl(argument->theta));
+ F1=SQRT3+rho*cosl(argument->varphi);
+ F2=nu*(1+rho*sinl(argument->varphi));
+ p1=SQRT3+F1-q1;
+ p2=argument->params.omega+F2-q2;
+
+ *out+=argument->r*rho*(*(argument->I))(p1, p2, q1, q2, F1, F2, argument->params.t1, argument->params.t2, argument->params.phi, argument->params.W)*(1-ss_cutoff_double(ss_norm_double(F1-q1,F2-q2)))*ss_cutoff_double(argument->r);
+ }
+
+ return(0);
+}
+
+
+// compute the integral A_RR
+int ssRR_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads){
+ ssRR_argsint_theta_double args;
+ int ret;
+ int a;
+ long double lower, upper;
+ long double val;
+ array_pthread_t thread_ids;
+
+ args.params=params;
+ args.I=I;
+ args.abcissa=abcissa;
+ args.weights=weights;
+
+	// split the integral into three parts
+ *out=0;
+ for(a=0;a<3;a++){
+ lower=(4*a-1)*PI/6;
+ upper=(4*a+3)*PI/6;
+
+ // save sector
+ args.sector_theta=a;
+
+ if(threads==1){
+ ret=integrate_gauss_ldouble(&val, &ssRR_integrand_theta_double, lower, upper, abcissa, weights, &args);
+ }
+ else{
+ ret=integrate_gauss_multithread_ldouble(&val, &ssRR_integrand_theta_double, lower, upper, abcissa, weights, &args, threads, &thread_ids);
+ array_pthread_t_free(thread_ids);
+ }
+ if(ret<0){
+ break;
+ }
+
+ *out+=val;
+ }
+ // extra factor
+ *out*=3;
+
+ return(ret);
+}
+
+// integrand of the integral over theta in A_RR
+int ssRR_integrand_theta_double(long double* out, long double theta, void* args){
+ ssRR_argsint_varphi_double nargs;
+ int ret;
+ ssRR_argsint_theta_double* argument=(ssRR_argsint_theta_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.sector_theta=argument->sector_theta;
+ nargs.theta=theta;
+
+ ret=integrate_gauss_ldouble(out, &ssRR_integrand_varphi_double, -PI/6, PI/2, argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over varphi in A_RR
+int ssRR_integrand_varphi_double(long double* out, long double varphi, void* args){
+ ssRR_argsint_r_double nargs;
+ int ret;
+ ssRR_argsint_varphi_double* argument=(ssRR_argsint_varphi_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.abcissa=argument->abcissa;
+ nargs.weights=argument->weights;
+ nargs.theta=argument->theta;
+ nargs.varphi=varphi;
+
+ ret=integrate_gauss_ldouble(out, &ssRR_integrand_r_double, 0., ss_R_double(nargs.theta, argument->sector_theta), argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over r in A_RR
+int ssRR_integrand_r_double(long double* out, long double r, void* args){
+ ssRR_argsint_rho_double nargs;
+ int ret;
+ ssRR_argsint_r_double* argument=(ssRR_argsint_r_double*)args;
+
+ // new argument
+ nargs.params=argument->params;
+ nargs.I=argument->I;
+ nargs.theta=argument->theta;
+ nargs.varphi=argument->varphi;
+ nargs.r=r;
+
+ ret=integrate_gauss_ldouble(out, &ssRR_integrand_rho_double, 0., ss_R_double(nargs.varphi, 0), argument->abcissa, argument->weights, &nargs);
+
+ return(ret);
+}
+
+// integrand of the integral over rho in A_RR
+int ssRR_integrand_rho_double(long double* out, long double rho, void* args){
+ int eta, nu;
+ ssRR_argsint_rho_double* argument=(ssRR_argsint_rho_double*)args;
+ long double p1,p2,q1,q2,F1,F2;
+
+ *out=0.;
+ for(eta=-1;eta<=1;eta=eta+2){
+ for(nu=-1;nu<=1;nu=nu+2){
+ q1=SQRT3+argument->r*cosl(argument->theta);
+ q2=eta*(1+argument->r*sinl(argument->theta));
+ F1=SQRT3+rho*cosl(argument->varphi);
+ F2=nu*(1+rho*sinl(argument->varphi));
+ p1=SQRT3+F1-q1;
+ p2=argument->params.omega+F2-q2;
+
+ *out+=argument->r*rho*(*(argument->I))(p1, p2, q1, q2, F1, F2, argument->params.t1, argument->params.t2, argument->params.phi, argument->params.W)*(1-ss_cutoff_double(ss_norm_double(F1-q1,F2-q2)))*(1-ss_cutoff_double(ss_norm_double(q1-SQRT3,q2-argument->params.omega)));
+ }
+ }
+
+ return(0);
+}
+
+
+// R(theta)
+long double ss_R_double(long double theta, int sector){
+ return(1./cosl(theta+(8*sector-1)*PI/6));
+}
+
+// cutoff function
+long double ss_cutoff_double(long double x){
+ long double e1,e2;
+
+ if(x<=0.5){
+ return(1.);
+ }
+ if(x>=1){
+ return(0.);
+ }
+
+ e1=expl(-0.5/(1-x));
+ e2=expl(-0.5/(x-0.5));
+ return(e1/(e2+e1*(1-e2)));
+}
+
+// periodic norm
+long double ss_norm_double(long double k1, long double k2){
+ long double n1, n2, n3;
+ long double tmp1, tmp2;
+ long double t1, t2;
+
+ // split R^2 into equilateral triangles and find which triangle (k1,k2) is in
+ n1=floorl(k1/SQRT3)+1;
+ n2=floorl(k1/2/SQRT3-k2/2)+1;
+ n3=floorl(k1/2/SQRT3+k2/2)+1;
+
+	// translate (k1,k2) by -a*(SQRT3,3)-b*(SQRT3,-3)
+ // a-b
+ tmp1=-floorl((n2-n3+1)/3);
+ // b
+ tmp2=floorl((n1-tmp1)/2);
+ // a
+ tmp1=tmp1+tmp2;
+ t1=k1-SQRT3*(tmp1+tmp2);
+ t2=k2-3*(tmp1-tmp2);
+
+ return(sqrtl(t1*t1+t2*t2));
+}
diff --git a/src/ss_integral_double.h b/src/ss_integral_double.h
new file mode 100644
index 0000000..67ec1de
--- /dev/null
+++ b/src/ss_integral_double.h
@@ -0,0 +1,170 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ Computation of the sunrise diagram
+
+  Takes a pointer to an integrand function 'I' as an argument. 'I' must have the following signature:
+  long double I(long double p1, long double p2, long double q1, long double q2, long double F1, long double F2, long double t1, long double t2, long double phi, long double W)
+*/
+
+#ifndef SS_INTEGRAL_DOUBLE_H
+#define SS_INTEGRAL_DOUBLE_H
+
+#include <libinum.h>
+#include "double_util.h"
+
+// extra arguments for the integrations
+// A_FF
+typedef struct ssFF_argsint_rho_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+} ssFF_argsint_rho_double;
+typedef struct ssFF_argsint_theta_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ long double rho;
+} ssFF_argsint_theta_double;
+typedef struct ssFF_argsint_psi_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ long double rho;
+ long double theta;
+} ssFF_argsint_psi_double;
+typedef struct ssFF_argsint_z_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ long double rho;
+ long double theta;
+ long double psi;
+} ssFF_argsint_z_double;
+
+// A_RF
+typedef struct ssRF_argsint_theta_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+} ssRF_argsint_theta_double;
+typedef struct ssRF_argsint_varphi_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ long double theta;
+} ssRF_argsint_varphi_double;
+typedef struct ssRF_argsint_r_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ long double theta;
+ long double varphi;
+} ssRF_argsint_r_double;
+typedef struct ssRF_argsint_rho_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ long double theta;
+ long double varphi;
+ long double r;
+} ssRF_argsint_rho_double;
+
+// A_RR
+typedef struct ssRR_argsint_theta_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ int sector_theta;
+} ssRR_argsint_theta_double;
+typedef struct ssRR_argsint_varphi_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ int sector_theta;
+ long double theta;
+} ssRR_argsint_varphi_double;
+typedef struct ssRR_argsint_r_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ array_ldouble abcissa;
+ array_ldouble weights;
+ long double theta;
+ long double varphi;
+} ssRR_argsint_r_double;
+typedef struct ssRR_argsint_rho_double {
+ hh_params_double params;
+ TYPE_I_DOUBLE;
+ long double theta;
+ long double varphi;
+ long double r;
+} ssRR_argsint_rho_double;
+
+
+// compute the integral
+int ss_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads);
+
+// compute the integral A_FF
+int ssFF_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads);
+// integrand of the integral over rho in A_FF
+int ssFF_integrand_rho_double(long double* out, long double rho, void* args);
+// integrand of the integral over theta in A_FF
+int ssFF_integrand_theta_double(long double* out, long double theta, void* args);
+// integrand of the integral over psi in A_FF
+int ssFF_integrand_psi_double(long double* out, long double psi, void* args);
+// integrand of the integral over z in A_FF
+int ssFF_integrand_z_double(long double* out, long double z, void* args);
+
+// compute the integral A_RF
+int ssRF_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads);
+// integrand of the integral over theta in A_RF
+int ssRF_integrand_theta_double(long double* out, long double theta, void* args);
+// integrand of the integral over varphi in A_RF
+int ssRF_integrand_varphi_double(long double* out, long double varphi, void* args);
+// integrand of the integral over r in A_RF
+int ssRF_integrand_r_double(long double* out, long double r, void* args);
+// integrand of the integral over rho in A_RF
+int ssRF_integrand_rho_double(long double* out, long double rho, void* args);
+
+// compute the integral A_RR
+int ssRR_integrate_double(long double* out, TYPE_I_DOUBLE, hh_params_double params, array_ldouble abcissa, array_ldouble weights, unsigned int threads);
+// integrand of the integral over theta in A_RR
+int ssRR_integrand_theta_double(long double* out, long double theta, void* args);
+// integrand of the integral over varphi in A_RR
+int ssRR_integrand_varphi_double(long double* out, long double varphi, void* args);
+// integrand of the integral over r in A_RR
+int ssRR_integrand_r_double(long double* out, long double r, void* args);
+// integrand of the integral over rho in A_RR
+int ssRR_integrand_rho_double(long double* out, long double rho, void* args);
+
+// R(theta)
+long double ss_R_double(long double theta, int sector);
+
+// cutoff function
+long double ss_cutoff_double(long double x);
+
+// periodic norm
+long double ss_norm_double(long double k1, long double k2);
+
+#endif
+
diff --git a/src/types.h b/src/types.h
new file mode 100644
index 0000000..0dec1fc
--- /dev/null
+++ b/src/types.h
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef TYPES_H
+#define TYPES_H
+
+#include <mpfr.h>
+
+typedef struct hh_params {
+ int omega;
+ mpfr_t t1;
+ mpfr_t t2;
+ mpfr_t lambda;
+ mpfr_t sinphi;
+ mpfr_t phi;
+ mpfr_t W;
+} hh_params;
+
+// format for I function (used to compute sunrise diagrams)
+#define TYPE_I int (*I)(mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, mpfr_ptr, array_mpfr*, struct ss_cache)
+
+#endif
diff --git a/src/zz_integral.c b/src/zz_integral.c
new file mode 100644
index 0000000..ee42355
--- /dev/null
+++ b/src/zz_integral.c
@@ -0,0 +1,237 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "zz_integral.h"
+
+
+// I for z1-z2
+int zz_I(mpfr_t out, mpfr_t p1, mpfr_t p2, mpfr_t q1, mpfr_t q2, mpfr_t F1, mpfr_t F2, mpfr_t t1, mpfr_t t2, mpfr_t phi, mpfr_t W, array_mpfr* tmps1, struct ss_cache cache){
+ mpfr_t* tmps;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(9, tmps1);
+
+ tmps=tmps1->values;
+
+ // tmps[0]=alpha1(p1,p2)
+ zz_alpha1(tmps[0], p1, p2, cache, tmps[1]);
+ // tmps[1]=m(p1,p2)
+ zz_m(tmps[1], p1, p2, t2, phi, W, cache, tmps[2]);
+ // tmps[2]=alpha1(q1,q2)
+ zz_alpha1(tmps[2], q1, q2, cache, tmps[3]);
+ // tmps[3]=m(q1,q2)
+ zz_m(tmps[3], q1, q2, t2, phi, W, cache, tmps[4]);
+ // tmps[4]=alpha1(F1,F2)
+ zz_alpha1(tmps[4], F1, F2, cache, tmps[5]);
+ // tmps[5]=m(F1,F2)
+ zz_m(tmps[5], F1, F2, t2, phi, W, cache, tmps[6]);
+
+ // tmps[6]=zeta(p)
+ zz_zeta(tmps[6], tmps[0], t2, phi);
+ // tmps[7]=zeta(q)
+ zz_zeta(tmps[7], tmps[2], t2, phi);
+ // tmps[8]=zeta(F)
+ zz_zeta(tmps[8], tmps[4], t2, phi);
+ // tmps[6]=Z=zeta(p)+zeta(q)-zeta(F)
+ mpfr_add(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+ mpfr_sub(tmps[6], tmps[6], tmps[8], MPFR_RNDN);
+
+ // tmps[0]=xi(p)
+ zz_xi(tmps[0], tmps[1], tmps[0], t1, tmps[7]);
+ // tmps[2]=xi(q)
+ zz_xi(tmps[2], tmps[3], tmps[2], t1, tmps[7]);
+ // tmps[4]=xi(F)
+ zz_xi(tmps[4], tmps[5], tmps[4], t1, tmps[7]);
+
+ // tmps[1]=m(p)/xi(p)
+ zz_mxi(tmps[1], tmps[1], tmps[0]);
+ // tmps[3]=m(q)/xi(q)
+ zz_mxi(tmps[3], tmps[3], tmps[2]);
+ // tmps[5]=m(F)/xi(F)
+ zz_mxi(tmps[5], tmps[5], tmps[4]);
+
+ // I=(xip+xiq+xiF)*(mp/xip+mq/xiq-mF/xiF-mp*mq*mF/xip/xiq/xiF)*Z/(Z**2-(xip+xiq+xiF)**2)**2/108
+ mpfr_add(tmps[0], tmps[0], tmps[2], MPFR_RNDN);
+ mpfr_add(tmps[0], tmps[0], tmps[4], MPFR_RNDN);
+ mpfr_add(tmps[2], tmps[1], tmps[3], MPFR_RNDN);
+ mpfr_sub(tmps[2], tmps[2], tmps[5], MPFR_RNDN);
+ mpfr_mul(tmps[1], tmps[1], tmps[3], MPFR_RNDN);
+ mpfr_mul(tmps[1], tmps[1], tmps[5], MPFR_RNDN);
+ mpfr_sub(tmps[1], tmps[2], tmps[1], MPFR_RNDN);
+ mpfr_mul(tmps[1], tmps[0], tmps[1], MPFR_RNDN);
+ mpfr_mul(tmps[1], tmps[1], tmps[6], MPFR_RNDN);
+ mpfr_pow_ui(tmps[6], tmps[6], 2, MPFR_RNDN);
+ mpfr_pow_ui(tmps[0], tmps[0], 2, MPFR_RNDN);
+ mpfr_sub(tmps[0], tmps[6], tmps[0], MPFR_RNDN);
+ mpfr_pow_ui(tmps[0], tmps[0], 2, MPFR_RNDN);
+ mpfr_div(out, tmps[1], tmps[0], MPFR_RNDN);
+ mpfr_div_ui(out, out, 108, MPFR_RNDN);
+
+ return(0);
+}
+
+
+// I for z1+z2
+int ZZ_I(mpfr_t out, mpfr_t p1, mpfr_t p2, mpfr_t q1, mpfr_t q2, mpfr_t F1, mpfr_t F2, mpfr_t t1, mpfr_t t2, mpfr_t phi, mpfr_t W, array_mpfr* tmps1, struct ss_cache cache){
+ mpfr_t* tmps;
+
+ // alloc tmps if needed
+ array_mpfr_alloc_tmps(9, tmps1);
+
+ tmps=tmps1->values;
+
+ // tmps[0]=alpha1(p1,p2)
+ zz_alpha1(tmps[0], p1, p2, cache, tmps[1]);
+ // tmps[1]=m(p1,p2)
+ zz_m(tmps[1], p1, p2, t2, phi, W, cache, tmps[2]);
+ // tmps[2]=alpha1(q1,q2)
+ zz_alpha1(tmps[2], q1, q2, cache, tmps[3]);
+ // tmps[3]=m(q1,q2)
+ zz_m(tmps[3], q1, q2, t2, phi, W, cache, tmps[4]);
+ // tmps[4]=alpha1(F1,F2)
+ zz_alpha1(tmps[4], F1, F2, cache, tmps[5]);
+ // tmps[5]=m(F1,F2)
+ zz_m(tmps[5], F1, F2, t2, phi, W, cache, tmps[6]);
+
+ // tmps[6]=zeta(p)
+ zz_zeta(tmps[6], tmps[0], t2, phi);
+ // tmps[7]=zeta(q)
+ zz_zeta(tmps[7], tmps[2], t2, phi);
+ // tmps[8]=zeta(F)
+ zz_zeta(tmps[8], tmps[4], t2, phi);
+ // tmps[6]=Z**2=(zeta(p)+zeta(q)-zeta(F))**2
+ mpfr_add(tmps[6], tmps[6], tmps[7], MPFR_RNDN);
+ mpfr_sub(tmps[6], tmps[6], tmps[8], MPFR_RNDN);
+ mpfr_pow_ui(tmps[6], tmps[6], 2, MPFR_RNDN);
+
+ // tmps[0]=xi(p)
+ zz_xi(tmps[0], tmps[1], tmps[0], t1, tmps[7]);
+ // tmps[2]=xi(q)
+ zz_xi(tmps[2], tmps[3], tmps[2], t1, tmps[7]);
+ // tmps[4]=xi(F)
+ zz_xi(tmps[4], tmps[5], tmps[4], t1, tmps[7]);
+
+ // tmps[1]=m(p)/xi(p)
+ zz_mxi(tmps[1], tmps[1], tmps[0]);
+ // tmps[3]=m(q)/xi(q)
+ zz_mxi(tmps[3], tmps[3], tmps[2]);
+ // tmps[5]=m(F)/xi(F)
+ zz_mxi(tmps[5], tmps[5], tmps[4]);
+
+ // I=(1-mp*mF/xip/xiF-mq*mF/xiq/xiF+mp*mq/xip/xiq)*(Z**2+(xip+xiq+xiF)**2)/(Z**2-(xip+xiq+xiF)**2)**2/216
+ mpfr_add(tmps[0], tmps[0], tmps[2], MPFR_RNDN);
+ mpfr_add(tmps[0], tmps[0], tmps[4], MPFR_RNDN);
+ mpfr_pow_ui(tmps[0], tmps[0], 2, MPFR_RNDN);
+ mpfr_mul(tmps[2], tmps[1], tmps[3], MPFR_RNDN);
+ mpfr_mul(tmps[4], tmps[3], tmps[5], MPFR_RNDN);
+ mpfr_sub(tmps[2], tmps[2], tmps[4], MPFR_RNDN);
+ mpfr_mul(tmps[4], tmps[1], tmps[5], MPFR_RNDN);
+ mpfr_sub(tmps[2], tmps[2], tmps[4], MPFR_RNDN);
+ mpfr_add_ui(out, tmps[2], 1, MPFR_RNDN);
+ mpfr_add(tmps[1], tmps[6], tmps[0], MPFR_RNDN);
+ mpfr_mul(out, out, tmps[1], MPFR_RNDN);
+ mpfr_sub(tmps[0], tmps[6], tmps[0], MPFR_RNDN);
+ mpfr_pow_ui(tmps[0], tmps[0], 2, MPFR_RNDN);
+ mpfr_div(out, out, tmps[0], MPFR_RNDN);
+ mpfr_div_ui(out, out, 216, MPFR_RNDN);
+
+ return(0);
+}
+
+// zeta(k1,k2)
+// requires alpha1 to be computed beforehand
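+// zeta = 2*t2*cos(phi)*alpha1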
+int zz_zeta(mpfr_t zeta, mpfr_t alpha1, mpfr_t t2, mpfr_t phi){
+ mpfr_cos(zeta, phi, MPFR_RNDN);
+ mpfr_mul(zeta, zeta, t2, MPFR_RNDN);
+ mpfr_mul_ui(zeta, zeta, 2, MPFR_RNDN);
+ mpfr_mul(zeta, zeta, alpha1, MPFR_RNDN);
+ return(0);
+}
+// xi(k1,k2)
+// requires m and alpha1 to be computed beforehand
+// requires one initialized tmp number
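+// xi = sqrt(m^2 + 2*t1^2*alpha1)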
+int zz_xi(mpfr_t xi, mpfr_t m, mpfr_t alpha1, mpfr_t t1, mpfr_t tmp){
+ mpfr_pow_ui(tmp, m, 2, MPFR_RNDN);
+
+ mpfr_mul(xi, alpha1, t1, MPFR_RNDN);
+ mpfr_mul(xi, xi, t1, MPFR_RNDN);
+ mpfr_mul_ui(xi, xi, 2, MPFR_RNDN);
+
+ mpfr_add(xi, xi, tmp, MPFR_RNDN);
+ mpfr_sqrt(xi, xi, MPFR_RNDN);
+ return(0);
+}
+// m(k1,k2)
+// requires one initialized tmp number
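+// m = W - 2*t2*sin(phi)*alpha2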
+int zz_m(mpfr_t m, mpfr_t k1, mpfr_t k2, mpfr_t t2, mpfr_t phi, mpfr_t W, struct ss_cache cache, mpfr_t tmp1){
+ zz_alpha2(m, k1, k2, cache, tmp1);
+ mpfr_sin(tmp1, phi, MPFR_RNDN);
+ mpfr_mul(m, m, tmp1, MPFR_RNDN);
+ mpfr_mul(m, m, t2, MPFR_RNDN);
+ mpfr_mul_ui(m, m, 2, MPFR_RNDN);
+ mpfr_sub(m, W, m, MPFR_RNDN);
+ return(0);
+}
+// m(k1,k2)/xi(k1,k2)
+int zz_mxi(mpfr_t mxi, mpfr_t m, mpfr_t xi){
+ // if xi=0, then return 0 (m/xi->0 at pF)
+ if(mpfr_cmp_ui(xi,0)==0){
+ mpfr_set_ui(mxi, 0, MPFR_RNDN);
+ return(0);
+ }
+ mpfr_div(mxi, m, xi, MPFR_RNDN);
+
+ return(0);
+}
+
+// alpha1(k1,k2)
+// requires one initialized tmp number
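+// alpha1 = 1/2 + 2*cos(pi*k2/3)*(cos(pi*k1/sqrt3) + cos(pi*k2/3))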
+int zz_alpha1(mpfr_t alpha1, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1){
+ mpfr_mul(tmp1, k2, cache.pi, MPFR_RNDN);
+ mpfr_div_ui(tmp1, tmp1, 3, MPFR_RNDN);
+ mpfr_cos(tmp1, tmp1, MPFR_RNDN);
+
+ mpfr_mul(alpha1, k1, cache.pi, MPFR_RNDN);
+ mpfr_div(alpha1, alpha1, cache.sqrt3, MPFR_RNDN);
+ mpfr_cos(alpha1, alpha1, MPFR_RNDN);
+
+ mpfr_add(alpha1, alpha1, tmp1, MPFR_RNDN);
+ mpfr_mul(alpha1, alpha1, tmp1, MPFR_RNDN);
+ mpfr_mul_ui(alpha1, alpha1, 2, MPFR_RNDN);
+ mpfr_add_d(alpha1, alpha1, 0.5, MPFR_RNDN);
+ return(0);
+}
+// alpha2(k1,k2)
+// requires one initialized tmp number
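+// alpha2 = 2*sin(pi*k2/3)*(cos(pi*k1/sqrt3) - cos(pi*k2/3))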
+int zz_alpha2(mpfr_t alpha2, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1){
+ mpfr_mul(tmp1, k2, cache.pi, MPFR_RNDN);
+ mpfr_div_ui(tmp1, tmp1, 3, MPFR_RNDN);
+ mpfr_cos(tmp1, tmp1, MPFR_RNDN);
+
+ mpfr_mul(alpha2, k1, cache.pi, MPFR_RNDN);
+ mpfr_div(alpha2, alpha2, cache.sqrt3, MPFR_RNDN);
+ mpfr_cos(alpha2, alpha2, MPFR_RNDN);
+
+ mpfr_sub(alpha2, alpha2, tmp1, MPFR_RNDN);
+
+ mpfr_mul(tmp1, k2, cache.pi, MPFR_RNDN);
+ mpfr_div_ui(tmp1, tmp1, 3, MPFR_RNDN);
+ mpfr_sin(tmp1, tmp1, MPFR_RNDN);
+
+ mpfr_mul(alpha2, alpha2, tmp1, MPFR_RNDN);
+ mpfr_mul_ui(alpha2, alpha2, 2, MPFR_RNDN);
+ return(0);
+}
diff --git a/src/zz_integral.h b/src/zz_integral.h
new file mode 100644
index 0000000..b23c88a
--- /dev/null
+++ b/src/zz_integral.h
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ Computation of z1-z2 and z1+z2
+*/
+
+#ifndef ZZ_INTEGRAL_H
+#define ZZ_INTEGRAL_H
+
+#include "ss_integral.h"
+#include <libinum.h>
+#include <mpfr.h>
+
+// I for z1-z2
+int zz_I(mpfr_t out, mpfr_t p1, mpfr_t p2, mpfr_t q1, mpfr_t q2, mpfr_t F1, mpfr_t F2, mpfr_t t1, mpfr_t t2, mpfr_t phi, mpfr_t W, array_mpfr* tmps1, struct ss_cache cache);
+
+// I for z1+z2
+int ZZ_I(mpfr_t out, mpfr_t p1, mpfr_t p2, mpfr_t q1, mpfr_t q2, mpfr_t F1, mpfr_t F2, mpfr_t t1, mpfr_t t2, mpfr_t phi, mpfr_t W, array_mpfr* tmps1, struct ss_cache cache);
+
+// zeta(k1,k2)
+int zz_zeta(mpfr_t zeta, mpfr_t alpha1, mpfr_t t2, mpfr_t phi);
+
+// xi(k1,k2)
+int zz_xi(mpfr_t xi, mpfr_t m, mpfr_t alpha1, mpfr_t t1, mpfr_t tmp);
+
+// m(k1,k2)
+int zz_m(mpfr_t m, mpfr_t k1, mpfr_t k2, mpfr_t t2, mpfr_t phi, mpfr_t W, struct ss_cache cache, mpfr_t tmp1);
+
+// m(k1,k2)/xi(k1,k2)
+int zz_mxi(mpfr_t mxi, mpfr_t m, mpfr_t xi);
+
+// alpha1(k1,k2)
+int zz_alpha1(mpfr_t alpha1, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1);
+
+// alpha2(k1,k2)
+int zz_alpha2(mpfr_t alpha2, mpfr_t k1, mpfr_t k2, struct ss_cache cache, mpfr_t tmp1);
+
+#endif
+
diff --git a/src/zz_integral_double.c b/src/zz_integral_double.c
new file mode 100644
index 0000000..9330ad1
--- /dev/null
+++ b/src/zz_integral_double.c
@@ -0,0 +1,89 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "zz_integral_double.h"
+#include <math.h>
+
+#define PI 3.1415926535897932385L
+#define SQRT3 1.7320508075688772935L
+
+// I for z1-z2
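+// long double counterpart of zz_I (see zz_integral.c)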
+long double zz_I_double(long double p1, long double p2, long double q1, long double q2, long double F1, long double F2, long double t1, long double t2, long double phi, long double W){
+	long double xip,xiq,xiF,mp,mq,mF,zetap,zetaq,zetaF,Z,X;
+
+ // alpha1 (store in xi)
+ xip=zz_alpha1_double(p1,p2);
+ xiq=zz_alpha1_double(q1,q2);
+ xiF=zz_alpha1_double(F1,F2);
+
+ // alpha2 (store in m)
+ mp=zz_alpha2_double(p1,p2);
+ mq=zz_alpha2_double(q1,q2);
+ mF=zz_alpha2_double(F1,F2);
+
+ zetap=2*t2*cosl(phi)*xip;
+ zetaq=2*t2*cosl(phi)*xiq;
+ zetaF=2*t2*cosl(phi)*xiF;
+
+ mp=W-2*t2*sinl(phi)*mp;
+ mq=W-2*t2*sinl(phi)*mq;
+ mF=W-2*t2*sinl(phi)*mF;
+
+ xip=sqrtl(mp*mp+2*t1*t1*xip);
+ xiq=sqrtl(mq*mq+2*t1*t1*xiq);
+ xiF=sqrtl(mF*mF+2*t1*t1*xiF);
+
+	Z=zetap+zetaq-zetaF;
+	X=xip+xiq+xiF;
+
+	// I=X*(mp/xip+mq/xiq-mF/xiF-mp*mq*mF/xip/xiq/xiF)*Z/(Z**2-X**2)**2/108
+	return(X*(mp/xip+mq/xiq-mF/xiF-mp*mq*mF/xip/xiq/xiF)*Z/((Z*Z-X*X)*(Z*Z-X*X))/108);
+}
+
+// I for z1+z2
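+// long double counterpart of ZZ_I (see zz_integral.c)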
+long double ZZ_I_double(long double p1, long double p2, long double q1, long double q2, long double F1, long double F2, long double t1, long double t2, long double phi, long double W){
+	long double xip,xiq,xiF,mp,mq,mF,zetap,zetaq,zetaF,Z,X;
+
+ // alpha1 (store in xi)
+ xip=zz_alpha1_double(p1,p2);
+ xiq=zz_alpha1_double(q1,q2);
+ xiF=zz_alpha1_double(F1,F2);
+
+ // alpha2 (store in m)
+ mp=zz_alpha2_double(p1,p2);
+ mq=zz_alpha2_double(q1,q2);
+ mF=zz_alpha2_double(F1,F2);
+
+ zetap=2*t2*cosl(phi)*xip;
+ zetaq=2*t2*cosl(phi)*xiq;
+ zetaF=2*t2*cosl(phi)*xiF;
+
+ mp=W-2*t2*sinl(phi)*mp;
+ mq=W-2*t2*sinl(phi)*mq;
+ mF=W-2*t2*sinl(phi)*mF;
+
+ xip=sqrtl(mp*mp+2*t1*t1*xip);
+ xiq=sqrtl(mq*mq+2*t1*t1*xiq);
+ xiF=sqrtl(mF*mF+2*t1*t1*xiF);
+
+	Z=zetap+zetaq-zetaF;
+	X=xip+xiq+xiF;
+
+	// I=(1-mp*mF/xip/xiF-mq*mF/xiq/xiF+mp*mq/xip/xiq)*(Z**2+X**2)/(Z**2-X**2)**2/216
+	return((1-mp/xip*mF/xiF-mq/xiq*mF/xiF+mp/xip*mq/xiq)*(Z*Z+X*X)/((Z*Z-X*X)*(Z*Z-X*X))/216);
+}
+
+// bar alpha_1(k1,k2)
+long double zz_alpha1_double(long double k1, long double k2){
+ return(2*cosl(PI/3*k2)*(cosl(PI/SQRT3*k1)+cosl(PI/3*k2))+0.5);
+}
+// bar alpha_2(k1,k2)
+long double zz_alpha2_double(long double k1, long double k2){
+ return(2*sinl(PI/3*k2)*(cosl(PI/SQRT3*k1)-cosl(PI/3*k2)));
+}
+
diff --git a/src/zz_integral_double.h b/src/zz_integral_double.h
new file mode 100644
index 0000000..da21ef3
--- /dev/null
+++ b/src/zz_integral_double.h
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 Ian Jauslin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+  Computation of z1-z2 and z1+z2 (using long double arithmetic)
+*/
+
+#ifndef ZZ_INTEGRAL_DOUBLE_H
+#define ZZ_INTEGRAL_DOUBLE_H
+
+// I for z1-z2
+long double zz_I_double(long double p1, long double p2, long double q1, long double q2, long double F1, long double F2, long double t1, long double t2, long double phi, long double W);
+
+// I for z1+z2
+long double ZZ_I_double(long double p1, long double p2, long double q1, long double q2, long double F1, long double F2, long double t1, long double t2, long double phi, long double W);
+
+// bar alpha_1(k1,k2)
+long double zz_alpha1_double(long double k1, long double k2);
+// bar alpha_2(k1,k2)
+long double zz_alpha2_double(long double k1, long double k2);
+
+#endif