From ebfeabc87c06a4ac73d1e59325c11894b7b3e220 Mon Sep 17 00:00:00 2001 From: Ye Meng <60788399+yeoveryeah@users.noreply.github.com> Date: Sun, 28 Nov 2021 21:08:16 +0800 Subject: [PATCH 1/4] draft to jss-essay - concepts of Bayes EL added but need further work to fit with previous sections. - TBC: which example to include, and how detailed it's supposed to be with stanEL package. --- vignettes/jss/jss-draft.Rmd | 96 ++++- vignettes/jss/jss-draft.log | 758 +++++++++++++++++++++++++++++++++ vignettes/jss/jss-includes.tex | 28 ++ vignettes/jss/references.bib | 24 ++ 4 files changed, 904 insertions(+), 2 deletions(-) create mode 100644 vignettes/jss/jss-draft.log diff --git a/vignettes/jss/jss-draft.Rmd b/vignettes/jss/jss-draft.Rmd index 28cdca3..d3a8d2f 100644 --- a/vignettes/jss/jss-draft.Rmd +++ b/vignettes/jss/jss-draft.Rmd @@ -7,6 +7,10 @@ author: affiliation: University of Waterloo - name: Martin Lysy affiliation: University of Waterloo + - name: Ye Meng + affiliation: University of Waterloo + - name: Yunfeng Yang + affiliation: University of Waterloo address: - 200 University Avenue West - Ontario, Canada @@ -530,9 +534,97 @@ Alternatively, a user can implement the `G` matrix and gradient calculations in ## Bayesian EL -- Good to illustrate with \proglang{Stan} NUTS algorithm, because we've implemented it already and \proglang{Stan} is a really good autodiff engine + best NUTS implementation. + + +The Bayesian EL(BayesEL) combines the prior information on parameters and the data-driven likelihood to draw inference on the estimand that is defined by estimating equations. 
With the prior density $\pi(\tth)$ over the support $\Theta$, the posterior is defined as: + +$$ +\pi(\tth\mid\xx) = \frac{L(\tth\mid\xx)\pi(\tth)}{\int L(\tth\mid\xx)\pi(\tth) \text{d}\tth} +\propto \left[ \prod_{i=1}^{n}w_i \right] \cdot \pi(\tth) +$$ +As the analytic form of $\pi(\tth\mid\xx)$ is generally absent, MCMC sampling methods can be used to evaluate the behaviour of the posterior distribution and to make inferences. However, the fact that the posterior distribution is non-parametric and that its support can be irregular complicates the adoption of Gibbs sampling and random-walk Metropolis sampling. @chaudhuri-et-al2017 utilized results from convex analysis and adapted HMC sampling, a gradient-based method, to overcome these restraints and solve problems with minimal assumptions. To ensure that HMC sampling works for the BayesEL procedures, the following assumptions must hold: + +* $\Theta$ is an open set. + +* $g$ is a smooth function of $\tth$ in $\Theta$, $q \leq d$^[Note that @qin1994empirical assumed $q \geq d$.] and $\Theta_1 = \{\tth \in \Theta: L(\tth) > 0\}$ is non-empty. + + If $\Theta_1$ were empty, the EL would never be positive and the BayesEL would no longer be beneficial for making statistical inference. + +* The sample size $n > q$ and the matrix $\GG$ has full row rank for any $\tth\in\Theta$. + +* For any fixed $\xx$, suppose at least $q$ elements of $\w$ are non-zero; then for any fixed $\tth\in\Theta$, the matrix $\sum_{i=1}^n w_i \nabla{\GG(\xx_i)}$ has full row rank, where $\nabla \GG(\xx_i)$ is the $q\times d$ Jacobian matrix. + +* Consider a sequence of points in $\Theta_1$, $\{\tth^{(k)}\}$, such that $\tth^{(k)}$ converges to a boundary point $\tth^{(0)}$ of $\Theta_1$. Assume that $\tth^{(0)}$ lies within $\Theta$ and $L(\tth^{(k)})$ strictly decreases to $L(\tth^{(0)})$. 
Then, for some constant $b(n, \tth^{(0)}) > -1$, we have +$$ +\liminf_{k\to \infty} \frac{\log\left[\pi(\tth^{(k-1)})\right]-\log\left[\pi(\tth^{(k)})\right]}{\log\left[L(\tth^{(k-1)})\right]-\log\left[L(\tth^{(k)})\right]} \geq b(n, \tth^{(0)}) +$$ + + With this assumption, the gradient of the log posterior diverges as the parameter values approach the boundary of $\Theta_1$ from the interior of the support, which is a sufficient condition for the leapfrog integrator to propose parameter values inside $\Theta_1$. + +Extensive discussion and mathematical proofs of these imperative assumptions, together with their implications, can be found in @chaudhuri-et-al2017. In the next section, we discuss why HMC is of primary interest and how it manages to deal with complex and non-convex boundaries. + +### Hamiltonian Monte Carlo Sampling for Empirical Likelihood + +The Hamiltonian Monte Carlo (HMC) sampling employs Hamiltonian dynamics in the Metropolis algorithm and explores the target distribution rapidly. @Neal2012 visualized the Hamiltonian dynamics as a frictionless puck sliding over a smooth height-varying surface. The potential energy ($\Ua(\theta)$) the puck possesses is proportional to its height ($\theta$), while the kinetic energy is explicitly defined in physics by $\Ka(p) = \frac{p^2}{2m}$, where $p$ is the momentum at height ($\theta$) and $m$ is the mass of the puck. The major characteristic of Hamiltonian dynamics is energy conservation: the puck neither gains nor loses energy, so $\Ha(\theta, p)$ remains constant over time, while the allocation of the energy between $\Ua(\theta)$ and $\Ka(p)$ may vary with the movement of the puck over time. + +The $d$-dimensional Hamiltonian system consists of $d$ particles obeying Hamiltonian dynamics. 
The state of the system at time $t$ can be described by the trajectory $\GGa_t = (\tth_t, \pp_t)$ with position variables $\tth = (\rv \theta d)$ and auxiliary momentum variables $\pp = (\rv p d )$. Then, the Hamiltonian system is characterized by the conservative total energy: +$$ +\Ha (\tth, \pp) = \Ua(\tth) + \Ka (\pp) +$$ + +To use HMC sampling to sample from the posterior with empirical likelihood $\pi(\tth\mid \xx)$, the potential energy is therefore defined by: + +$$ +\Ua(\tth) = - \log\{\pi(\tth\mid \xx)\} = - \log\{L(\tth)\} - \log\{\pi(\tth)\} - \log{\Ca(\xx)} +$$ +where $\Ca(\xx)$ is a normalizing constant. The kinetic energy is given by +$$ + \Ka (\pp) = \frac{\pp^T \MM ^{-1}\pp}{2} +$$ +where the mass matrix $\MM$ is a symmetric positive definite matrix and is usually set to be a scalar multiple of the identity matrix; as a result, $\Ka(\pp)$ corresponds to the negative log probability density of a Gaussian distribution (up to an additive constant). + +The Hamiltonian function $\Ha(\tth, \pp)$ can be described by the set of differential equations, also known as Hamilton's equations: +\begin{equation} +\der t\tth(t) = + \del{\pp} \Ha(\tth, \pp)=\MM^{-1}\pp, \qquad \der t \pp(t) = - \del{\tth} \Ha(\tth,\pp) = \frac{\nabla \pi(\tth\mid\xx)}{ \pi(\tth\mid\xx)} +(\#eq:HEQ) +\end{equation} +The three fundamental properties of Hamiltonian dynamics that grant a feasible and efficient HMC proposal are: reversibility, conservation, and volume preservation. The simple intuition behind reversibility is that the initial state can be traced back from any time $t$, and it guarantees the convergence of HMC. The conservation property keeps $\Ha(\tth, \pp)$ constant over the phase-space trajectory $\GGa_t = (\tth_t, \pp_t)$, which makes the theoretical acceptance rate of the HMC algorithm equal to 1. 
Mathematically, volume preservation means that the change of variables $\GGa_0 \to \GGa_t$ has a Jacobian determinant of 1: + +$$ +\left| \frac{\partial \GGa_t}{\partial \GGa_0} \right|= 1 +$$ + +This implies that the joint distribution of $\GGa_t = (\tth_t, \pp_t)$ is invariant to the Hamiltonian change-of-variables from $\GGa_0$ to $\GGa_t$. + +Given the current state $\GGa_\curr = (\tth_\curr, \pp_\curr)$, the HMC sampling starts with a random draw of the artificial momentum vector $\pp_0 \sim\N(\bz, \MM)$. Then, from the initial state $\GGa_0 = (\tth_\curr, \pp_0)$, the HMC proposal is made based on the solution to Hamilton's equations (\ref{eq:HEQ}). However, the ordinary differential equations (\ref{eq:HEQ}) cannot be solved analytically on the phase space except in a few simple cases. The leapfrog integration approximates the differential equations by discretizing time with a small step size $\eps = t/L$, where ($L, \eps$) are parameters of the algorithm to be tuned. Then, for each one of the $L$ steps, the evolution kernel $\hat \IH_\eps$ is given by +\begin{equation} +\begin{split} +\tilde \pp_\eps & = \pp + (\eps/2) \cdot \frac{\nabla \pi(\tth\mid\xx)}{ \pi(\tth\mid\xx)} \\ +\tilde \tth & = \tth + \eps \cdot \MM^{-1} \tilde \pp_\eps \\ +\tilde \pp & = \tilde \pp_\eps + (\eps/2) \cdot \frac{\nabla \pi(\tilde\tth\mid\xx)}{ \pi(\tilde\tth\mid\xx)} +\end{split} +(\#eq:leapfrog) +\end{equation} +The leapfrog integrator is a second-order symplectic integrator that guarantees the generated trajectories comply with the three fundamental properties discussed earlier. The HMC proposal $\GGa_\prop$ is accepted with probability + +$$ +\alpha=\min\left\{1, \frac{p(\GGa_\prop)}{p(\GGa_0)}\right\}= \min\left\{1, \frac{\exp(\Ha(\GGa_0))}{\exp(\Ha(\GGa_\prop))}\right\} +$$ + +Theoretically, energy conservation states that $\Ha(\GGa_t) = \Ha(\GGa_0)$. 
Therefore, if Hamilton's equations (\ref{eq:HEQ}) could be solved analytically, the proposal would always be accepted. Although the leapfrog integrator is highly accurate, the numerical approximation biases the proposed trajectory such that the Hamiltonian will not be exactly conserved. Besides, the approximation will not be perfectly reversible in practice due to rounding errors. In this sense, $\Ha(\GGa_t)$ will depart from $\Ha(\GGa_0)$ and the acceptance rate would not achieve 1. By treating the proposal as a stochastic one, the Metropolis acceptance step is employed to compensate for this deviation. However, this deviation is fairly small and the resulting acceptance probability is relatively high, which makes HMC efficient. + +@Betancourt2017 explains the geometry of the phase space, i.e., the collection of all possible values of $\GGa_t$, and how HMC sampling manages to explore regions with large curvature and draw samples from the posterior efficiently. @chaudhuri-et-al2017 derives sufficient assumptions to make the HMC algorithm work in principle and to keep the successive positions proposed by the leapfrog integrator inside the support of the posterior $\pi(\tth\mid\xx)$. For a posterior with complex and non-convex support, HMC sampling will keep the proposed trajectories inside the support due to the high absolute gradient values of the log-posterior at the boundaries. This makes HMC sampling a powerful alternative for exploring a BayesEL posterior. + +The HMC has three parameters to be tuned: the mass matrix $\MM$, the discretization step size $\eps$ and the number of steps taken $L$. @Neal2012 gives a detailed account of the underlying geometry and shows that the inverse mass matrix corresponds to the variance matrix of the target distribution. In light of this fact, the mass matrix can be roughly estimated from the inverse variance of the posterior distribution. 
In practice, it is common to simply set $\MM$ to the identity matrix. Intuitively, if the mass matrix is poorly estimated, a smaller step size has to be adopted to ensure the accuracy of the proposal, and hence more leapfrog steps have to be taken to maintain efficiency. In particular, @Matthew2011 pointed out that HMC's performance is highly sensitive to the step size $\eps$ and the number of steps $L$: if $L$ is too small then the algorithm exhibits undesirable random-walk behavior, while if $L$ is too large the algorithm wastes computation. The No-U-Turn Sampler (NUTS) proposed by @Matthew2011 is an extension to HMC that eliminates the need to set the number of steps $L$. Generally speaking, NUTS uses a recursive algorithm to build a set of likely candidate points that spans a wide swath of the target distribution, stopping automatically when it starts to double back and retrace its steps. As shown in @Matthew2011, NUTS empirically performs as efficiently as a well-tuned HMC algorithm. + +### stanEL pkg + +- to-do: job-satis example here!!!! + +- How to accommodate the job-satis example, and likewise the application of the stanEL pkg, into this section? -- Can illustrate with the example from the JRSSB EL-HMC paper. # Conclusion diff --git a/vignettes/jss/jss-draft.log b/vignettes/jss/jss-draft.log new file mode 100644 index 0000000..508d8e1 --- /dev/null +++ b/vignettes/jss/jss-draft.log @@ -0,0 +1,758 @@ +This is pdfTeX, Version 3.141592653-2.6-1.40.23 (TeX Live 2022/dev) (preloaded format=pdflatex 2021.11.26) 28 NOV 2021 21:04 +entering extended mode + restricted \write18 enabled. + %&-line parsing enabled. 
+**jss-draft.tex +(./jss-draft.tex +LaTeX2e <2021-11-15> +L3 programming layer <2021-11-22> (./jss.cls +Document Class: jss 2021/05/23 3.3 jss class by Achim Zeileis +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/article.cls +Document Class: article 2021/10/04 v1.4n Standard LaTeX document class +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/size11.clo +File: size11.clo 2021/10/04 v1.4n Standard LaTeX file (size option) +) +\c@part=\count183 +\c@section=\count184 +\c@subsection=\count185 +\c@subsubsection=\count186 +\c@paragraph=\count187 +\c@subparagraph=\count188 +\c@figure=\count189 +\c@table=\count190 +\abovecaptionskip=\skip47 +\belowcaptionskip=\skip48 +\bibindent=\dimen138 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/graphicx.sty +Package: graphicx 2021/09/16 v1.2d Enhanced LaTeX Graphics (DPC,SPQR) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/keyval.sty +Package: keyval 2014/10/28 v1.15 key=value parser (DPC) +\KV@toks@=\toks16 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/graphics.sty +Package: graphics 2021/03/04 v1.4d Standard LaTeX Graphics (DPC,SPQR) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/trig.sty +Package: trig 2021/08/11 v1.11 sin cos tan (DPC) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-cfg/graphics.c +fg +File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration +) +Package graphics Info: Driver file: pdftex.def on input line 107. 
+ +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-def/pdftex.def +File: pdftex.def 2020/10/05 v1.2a Graphics/color driver for pdftex +)) +\Gin@req@height=\dimen139 +\Gin@req@width=\dimen140 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/xcolor/xcolor.sty +Package: xcolor 2021/10/31 v2.13 LaTeX color extensions (UK) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-cfg/color.cfg +File: color.cfg 2016/01/02 v1.6 sample color configuration +) +Package xcolor Info: Driver file: pdftex.def on input line 227. +Package xcolor Info: Model `cmy' substituted by `cmy0' on input line 1352. +Package xcolor Info: Model `hsb' substituted by `rgb' on input line 1356. +Package xcolor Info: Model `RGB' extended on input line 1368. +Package xcolor Info: Model `HTML' substituted by `rgb' on input line 1370. +Package xcolor Info: Model `Hsb' substituted by `hsb' on input line 1371. +Package xcolor Info: Model `tHsb' substituted by `hsb' on input line 1372. +Package xcolor Info: Model `HSB' substituted by `hsb' on input line 1373. +Package xcolor Info: Model `Gray' substituted by `gray' on input line 1374. +Package xcolor Info: Model `wave' substituted by `hsb' on input line 1375. 
+) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/ae/ae.sty +Package: ae 2001/02/12 1.3 Almost European Computer Modern +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty +Package: fontenc 2021/04/29 v2.0v Standard LaTeX package +)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/fancyvrb/fancyvrb.st +y +Package: fancyvrb 2021/11/24\nobreakspace {} 4.1a\nobreakspace {}verbatim text +(tvz,hv) +\FV@CodeLineNo=\count191 +\FV@InFile=\read2 +\FV@TabBox=\box50 +\c@FancyVerbLine=\count192 +\FV@StepNumber=\count193 +\FV@OutFile=\write3 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty +Package: fontenc 2021/04/29 v2.0v Standard LaTeX package +LaTeX Font Info: Trying to load font information for T1+aer on input line 11 +2. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/ae/t1aer.fd +File: t1aer.fd 1997/11/16 Font definitions for T1/aer. +)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/lmodern.sty +Package: lmodern 2015/05/01 v1.6.1 Latin Modern Fonts +LaTeX Font Info: Overwriting symbol font `operators' in version `normal' +(Font) OT1/cmr/m/n --> OT1/lmr/m/n on input line 22. +LaTeX Font Info: Overwriting symbol font `letters' in version `normal' +(Font) OML/cmm/m/it --> OML/lmm/m/it on input line 23. +LaTeX Font Info: Overwriting symbol font `symbols' in version `normal' +(Font) OMS/cmsy/m/n --> OMS/lmsy/m/n on input line 24. +LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal' +(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 25. +LaTeX Font Info: Overwriting symbol font `operators' in version `bold' +(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 26. +LaTeX Font Info: Overwriting symbol font `letters' in version `bold' +(Font) OML/cmm/b/it --> OML/lmm/b/it on input line 27. +LaTeX Font Info: Overwriting symbol font `symbols' in version `bold' +(Font) OMS/cmsy/b/n --> OMS/lmsy/b/n on input line 28. 
+LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold' +(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 29. +LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal' +(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 31. +LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal' +(Font) OT1/cmss/m/n --> OT1/lmss/m/n on input line 32. +LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal' +(Font) OT1/cmr/m/it --> OT1/lmr/m/it on input line 33. +LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal' +(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 34. +LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold' +(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 35. +LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold' +(Font) OT1/cmss/bx/n --> OT1/lmss/bx/n on input line 36. +LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold' +(Font) OT1/cmr/bx/it --> OT1/lmr/bx/it on input line 37. +LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold' +(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 38. +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/natbib/natbib.sty +Package: natbib 2010/09/13 8.31b (PWD, AO) +\bibhang=\skip49 +\bibsep=\skip50 +LaTeX Info: Redefining \cite on input line 694. 
+\c@NAT@ctr=\count194 +) +\footerskip=\skip51 +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hyperref.sty +Package: hyperref 2021-06-07 v7.00m Hypertext links for LaTeX +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty +Package: ltxcmds 2020-05-10 v1.25 LaTeX kernel commands for general use (HO) +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/iftex.sty +Package: iftex 2020/03/06 v1.0d TeX engine tests +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/pdftexcmds/pdftexcmds +.sty +Package: pdftexcmds 2020-06-27 v0.33 Utility functions of pdfTeX for LuaTeX (HO +) + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/infwarerr/infwarerr.s +ty +Package: infwarerr 2019/12/03 v1.5 Providing info/warning/error messages (HO) +) +Package pdftexcmds Info: \pdf@primitive is available. +Package pdftexcmds Info: \pdf@ifprimitive is available. +Package pdftexcmds Info: \pdfdraftmode found. +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/kvsetkeys/kvsetkeys.s +ty +Package: kvsetkeys 2019/12/15 v1.18 Key value parser (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/kvdefinekeys/kvdefine +keys.sty +Package: kvdefinekeys 2019-12-19 v1.6 Define keys (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/pdfescape/pdfescape.s +ty +Package: pdfescape 2019/12/09 v1.15 Implements pdfTeX's escape features (HO) +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hycolor/hycolor.sty +Package: hycolor 2020-01-27 v1.10 Color options for hyperref/bookmark (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/letltxmacro/letltxmacro +.sty +Package: letltxmacro 2019/12/03 v1.6 Let assignment for LaTeX macros (HO) +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/auxhook/auxhook.sty +Package: auxhook 2019-12-17 v1.6 Hooks for auxiliary files (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/kvoptions/kvoptions.sty +Package: 
kvoptions 2020-10-07 v3.14 Key value format for package options (HO) +) +\@linkdim=\dimen141 +\Hy@linkcounter=\count195 +\Hy@pagecounter=\count196 +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/pd1enc.def +File: pd1enc.def 2021-06-07 v7.00m Hyperref: PDFDocEncoding definition (HO) +Now handling font encoding PD1 ... +... no UTF-8 mapping file for font encoding PD1 +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hyperref-langp +atches.def +File: hyperref-langpatches.def 2021-06-07 v7.00m Hyperref: patches for babel la +nguages +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/intcalc/intcalc.sty +Package: intcalc 2019/12/15 v1.3 Expandable calculations with integers (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/etexcmds/etexcmds.sty +Package: etexcmds 2019/12/15 v1.7 Avoid name clashes with e-TeX commands (HO) +) +\Hy@SavedSpaceFactor=\count197 +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/puenc.def +File: puenc.def 2021-06-07 v7.00m Hyperref: PDF Unicode definition (HO) +Now handling font encoding PU ... +... no UTF-8 mapping file for font encoding PU +) +Package hyperref Info: Hyper figures OFF on input line 4192. +Package hyperref Info: Link nesting OFF on input line 4197. +Package hyperref Info: Hyper index ON on input line 4200. +Package hyperref Info: Plain pages OFF on input line 4207. +Package hyperref Info: Backreferencing OFF on input line 4212. +Package hyperref Info: Implicit mode ON; LaTeX internals redefined. +Package hyperref Info: Bookmarks ON on input line 4445. +\c@Hy@tempcnt=\count198 +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/url/url.sty +\Urlmuskip=\muskip16 +Package: url 2013/09/16 ver 3.4 Verb mode for urls, etc. +) +LaTeX Info: Redefining \url on input line 4804. 
+\XeTeXLinkMargin=\dimen142 +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/bitset/bitset.sty +Package: bitset 2019/12/09 v1.3 Handle bit-vector datatype (HO) + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/bigintcalc/bigintcalc +.sty +Package: bigintcalc 2019/12/15 v1.5 Expandable calculations on big integers (HO +) +)) +\Fld@menulength=\count199 +\Field@Width=\dimen143 +\Fld@charsize=\dimen144 +Package hyperref Info: Hyper figures OFF on input line 6076. +Package hyperref Info: Link nesting OFF on input line 6081. +Package hyperref Info: Hyper index ON on input line 6084. +Package hyperref Info: backreferencing OFF on input line 6091. +Package hyperref Info: Link coloring OFF on input line 6096. +Package hyperref Info: Link coloring with OCG OFF on input line 6101. +Package hyperref Info: PDF/A mode OFF on input line 6106. +LaTeX Info: Redefining \ref on input line 6146. +LaTeX Info: Redefining \pageref on input line 6150. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/atbegshi-ltx.sty +Package: atbegshi-ltx 2021/01/10 v1.0c Emulation of the original atbegshi +package with kernel methods +) +\Hy@abspage=\count266 +\c@Item=\count267 +\c@Hfootnote=\count268 +) +Package hyperref Info: Driver (autodetected): hpdftex. 
+(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hpdftex.def +File: hpdftex.def 2021-06-07 v7.00m Hyperref driver for pdfTeX +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/atveryend-ltx.sty +Package: atveryend-ltx 2020/08/19 v1.0a Emulation of the original atveryend pac +kage +with kernel methods +) +\Fld@listcount=\count269 +\c@bookmark@seq@number=\count270 + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/rerunfilecheck/rerunfil +echeck.sty +Package: rerunfilecheck 2019/12/05 v1.9 Rerun checks for auxiliary files (HO) + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/uniquecounter/uniquec +ounter.sty +Package: uniquecounter 2019/12/15 v1.4 Provide unlimited unique counter (HO) +) +Package uniquecounter Info: New unique counter `rerunfilecheck' on input line 2 +86. +) +\Hy@SectionHShift=\skip52 +) +\preXLskip=\skip53 +\preLskip=\skip54 +\preMskip=\skip55 +\preSskip=\skip56 +\postMskip=\skip57 +\postSskip=\skip58 + +Package hyperref Warning: Option `hyperindex' has already been used, +(hyperref) setting the option has no effect on input line 447. + +Package hyperref Info: Option `colorlinks' set `true' on input line 447. +Package hyperref Info: Option `linktocpage' set `true' on input line 447. +Package hyperref Info: Option `plainpages' set `false' on input line 447. +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/amssymb.sty +Package: amssymb 2013/01/14 v3.01 AMS font symbols +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/amsfonts.sty +Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support +\@emptytoks=\toks17 +\symAMSa=\mathgroup4 +\symAMSb=\mathgroup5 +LaTeX Font Info: Redeclaring math symbol \hbar on input line 98. +LaTeX Font Info: Overwriting math alphabet `\mathfrak' in version `bold' +(Font) U/euf/m/n --> U/euf/b/n on input line 106. 
+)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsmath.sty +Package: amsmath 2021/10/15 v2.17l AMS math features +\@mathmargin=\skip59 +For additional information on amsmath, use the `?' option. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amstext.sty +Package: amstext 2021/08/26 v2.01 AMS text +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 generic functions +\@emptytoks=\toks18 +\ex@=\dimen145 +)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d Bold Symbols +\pmbraise@=\dimen146 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsopn.sty +Package: amsopn 2021/08/26 v2.02 operator names +) +\inf@bad=\count271 +LaTeX Info: Redefining \frac on input line 234. +\uproot@=\count272 +\leftroot@=\count273 +LaTeX Info: Redefining \overline on input line 399. +\classnum@=\count274 +\DOTSCASE@=\count275 +LaTeX Info: Redefining \ldots on input line 496. +LaTeX Info: Redefining \dots on input line 499. +LaTeX Info: Redefining \cdots on input line 620. +\Mathstrutbox@=\box51 +\strutbox@=\box52 +\big@size=\dimen147 +LaTeX Font Info: Redeclaring font encoding OML on input line 743. +LaTeX Font Info: Redeclaring font encoding OMS on input line 744. +\macc@depth=\count276 +\c@MaxMatrixCols=\count277 +\dotsspace@=\muskip17 +\c@parentequation=\count278 +\dspbrk@lvl=\count279 +\tag@help=\toks19 +\row@=\count280 +\column@=\count281 +\maxfields@=\count282 +\andhelp@=\toks20 +\eqnshift@=\dimen148 +\alignsep@=\dimen149 +\tagshift@=\dimen150 +\tagwidth@=\dimen151 +\totwidth@=\dimen152 +\lineht@=\dimen153 +\@envbody=\toks21 +\multlinegap=\skip60 +\multlinetaggap=\skip61 +\mathdisplay@stack=\toks22 +LaTeX Info: Redefining \[ on input line 2938. +LaTeX Info: Redefining \] on input line 2939. 
+) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/ifxetex.sty +Package: ifxetex 2019/10/25 v0.7 ifxetex legacy package. Use iftex instead. +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/ifluatex.sty +Package: ifluatex 2019/10/25 v1.5 ifluatex legacy package. Use iftex instead. +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fixltx2e.sty +Package: fixltx2e 2016/12/29 v2.1a fixes to LaTeX (obsolete) +Applying: [2015/01/01] Old fixltx2e package on input line 46. + +Package fixltx2e Warning: fixltx2e is not required with releases after 2015 +(fixltx2e) All fixes are now in the LaTeX kernel. +(fixltx2e) See the latexrelease package for details. + +Already applied: [0000/00/00] Old fixltx2e package on input line 53. +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty +Package: fontenc 2021/04/29 v2.0v Standard LaTeX package +LaTeX Font Info: Trying to load font information for T1+lmr on input line 11 +2. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmr.fd +File: t1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/inputenc.sty +Package: inputenc 2021/02/14 v1.3d Input encoding file +\inpenc@prehook=\toks23 +\inpenc@posthook=\toks24 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/framed/framed.sty +Package: framed 2011/10/22 v 0.96: framed or shaded text with page breaks +\OuterFrameSep=\skip62 +\fb@frw=\dimen154 +\fb@frh=\dimen155 +\FrameRule=\dimen156 +\FrameSep=\dimen157 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/tools/longtable.sty +Package: longtable 2021-09-01 v4.17 Multi-page Table package (DPC) +\LTleft=\skip63 +\LTright=\skip64 +\LTpre=\skip65 +\LTpost=\skip66 +\LTchunksize=\count283 +\LTcapwidth=\dimen158 +\LT@head=\box53 +\LT@firsthead=\box54 +\LT@foot=\box55 +\LT@lastfoot=\box56 +\LT@gbox=\box57 +\LT@cols=\count284 +\LT@rows=\count285 +\c@LT@tables=\count286 
+\c@LT@chunks=\count287 +\LT@p@ftn=\toks25 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/booktabs/booktabs.sty +Package: booktabs 2020/01/12 v1.61803398 Publication quality tables +\heavyrulewidth=\dimen159 +\lightrulewidth=\dimen160 +\cmidrulewidth=\dimen161 +\belowrulesep=\dimen162 +\belowbottomsep=\dimen163 +\aboverulesep=\dimen164 +\abovetopsep=\dimen165 +\cmidrulesep=\dimen166 +\cmidrulekern=\dimen167 +\defaultaddspace=\dimen168 +\@cmidla=\count288 +\@cmidlb=\count289 +\@aboverulesep=\dimen169 +\@belowrulesep=\dimen170 +\@thisruleclass=\count290 +\@lastruleclass=\count291 +\@thisrulewidth=\dimen171 +) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/caption.sty +Package: caption 2020/10/26 v3.5g Customizing captions (AR) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/caption3.sty +Package: caption3 2020/10/21 v2.2e caption3 kernel (AR) +\captionmargin=\dimen172 +\captionmargin@=\dimen173 +\captionwidth=\dimen174 +\caption@tempdima=\dimen175 +\caption@indent=\dimen176 +\caption@parindent=\dimen177 +\caption@hangindent=\dimen178 +Package caption Info: Standard document class detected. +) +\c@caption@flags=\count292 +\c@continuedfloat=\count293 +Package caption Info: hyperref package is loaded. +Package caption Info: longtable package is loaded. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/ltcaption.sty +Package: ltcaption 2020/05/30 v1.4b longtable captions (AR) +)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/tools/bm.sty +Package: bm 2021/04/25 v1.2e Bold Symbol Support (DPC/FMi) +\symboldoperators=\mathgroup6 +\symboldletters=\mathgroup7 +\symboldsymbols=\mathgroup8 +Package bm Info: No bold for \OMX/lmex/m/n, using \pmb. +Package bm Info: No bold for \U/msa/m/n, using \pmb. +Package bm Info: No bold for \U/msb/m/n, using \pmb. +LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 149. 
+) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/l3backend/l3backend-pdf +tex.def +File: l3backend-pdftex.def 2021-10-18 L3 backend support: PDF output (pdfTeX) +\l__color_backend_stack_int=\count294 +\l__pdf_internal_box=\box58 +) (./jss-draft.aux) +\openout1 = `jss-draft.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. +LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 199. +LaTeX Font Info: ... okay on input line 199. + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-b +ase.sty +Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf +Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4 +85. + +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/latexconfig/epstopdf-sy +s.cfg +File: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Liv +e +)) +Package hyperref Info: Link coloring ON on input line 199. 
+(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/nameref.sty +Package: nameref 2021-04-02 v2.47 Cross-referencing by name of section +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/refcount/refcount.sty +Package: refcount 2019/12/15 v3.6 Data extraction from label references (HO) +) +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/gettitlestring/gettit +lestring.sty +Package: gettitlestring 2019/12/15 v1.6 Cleanup title references (HO) +) +\c@section@level=\count295 +) +LaTeX Info: Redefining \ref on input line 199. +LaTeX Info: Redefining \pageref on input line 199. +LaTeX Info: Redefining \nameref on input line 199. +(./jss-draft.out) (./jss-draft.out) +\@outlinefile=\write4 +\openout4 = `jss-draft.out'. + + +File: jsslogo.jpg Graphic file (type jpg) + +Package pdftex.def Info: jsslogo.jpg used on input line 199. +(pdftex.def) Requested size: 89.80768pt x 65.43607pt. +LaTeX Font Info: Trying to load font information for T1+pzc on input line 19 +9. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/psnfss/t1pzc.fd +File: t1pzc.fd 2020/03/25 font definitions for T1/pzc. +) +LaTeX Font Info: Font shape `T1/pzc/m/n' in size <28> not available +(Font) Font shape `T1/pzc/m/it' tried instead on input line 199. +LaTeX Font Info: Trying to load font information for OT1+lmr on input line 1 +99. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/ot1lmr.fd +File: ot1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +LaTeX Font Info: Trying to load font information for OML+lmm on input line 1 +99. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omllmm.fd +File: omllmm.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +LaTeX Font Info: Trying to load font information for OMS+lmsy on input line +199. 
+(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omslmsy.fd +File: omslmsy.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +LaTeX Font Info: Trying to load font information for OMX+lmex on input line +199. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omxlmex.fd +File: omxlmex.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +LaTeX Font Info: External font `lmex10' loaded for size +(Font) <10.95> on input line 199. +LaTeX Font Info: External font `lmex10' loaded for size +(Font) <8> on input line 199. +LaTeX Font Info: External font `lmex10' loaded for size +(Font) <6> on input line 199. +LaTeX Font Info: Trying to load font information for U+msa on input line 199 +. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/umsa.fd +File: umsa.fd 2013/01/14 v3.01 AMS symbols A +) +LaTeX Font Info: Trying to load font information for U+msb on input line 199 +. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/umsb.fd +File: umsb.fd 2013/01/14 v3.01 AMS symbols B +) +LaTeX Font Info: Trying to load font information for T1+lmss on input line 1 +99. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmss.fd +File: t1lmss.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +Package caption Info: Begin \AtBeginDocument code. +Package caption Info: End \AtBeginDocument code. 
+[1 + +{/Users/yeoveryeah/Library/TinyTeX/texmf-var/fonts/map/pdftex/updmap/pdftex.map +} <./jsslogo.jpg>] +Underfull \vbox (badness 10000) has occurred while \output is active [] + + +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 2 [] + [] + +[2] +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 3 + [] + +[3] +Underfull \vbox (badness 1735) has occurred while \output is active [] + + +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 4 [] + [] + +[4] +LaTeX Font Info: Trying to load font information for U+euf on input line 322 +. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/ueuf.fd +File: ueuf.fd 2013/01/14 v3.01 Euler Fraktur +) +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 5 + [] + +[5] +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 6 [] + [] + +[6] +LaTeX Font Info: Trying to load font information for TS1+lmr on input line 4 +09. +(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/ts1lmr.fd +File: ts1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 7 + [] + +[7] + +File: smoothfun.png Graphic file (type png) + +Package pdftex.def Info: smoothfun.png used on input line 469. +(pdftex.def) Requested size: 221.32687pt x 225.5158pt. + +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 8 [] + [] + +[8] +LaTeX Font Info: Trying to load font information for T1+lmtt on input line 4 +92. 
+(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmtt.fd +File: t1lmtt.fd 2015/05/01 v1.6.1 Font defs for Latin Modern +) +Overfull \hbox (5.47499pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 9 + [] + +[9 <./smoothfun.png>] +LaTeX Font Info: Font shape `T1/lmtt/bx/n' in size <10.95> not available +(Font) Font shape `T1/lmtt/b/n' tried instead on input line 540. + +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 10 [] + [] + +[10] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 11 + [] + +[11] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 12 [] + [] + +[12] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 13 + [] + +[13] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 14 [] + [] + +[14] +Underfull \hbox (badness 1178) in paragraph at lines 812--813 +\T1/lmr/m/n/10.95 and fol-lows the ex-am-ple in the mean re-gres-sion sec-tion +where in-stead of in-clud-ing + [] + + +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 15 + [] + +[15] +LaTeX Font Info: External font `lmex10' loaded for size +(Font) <9> on input line 829. +LaTeX Font Info: External font `lmex10' loaded for size +(Font) <5> on input line 829. + +Package natbib Warning: Citation `qin1994empirical' on page 16 undefined on inp +ut line 829. 
+ + +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 16 [] + [] + +[16] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 17 + [] + +[17] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 18 [] + [] + +[18] (./jss-draft.bbl +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 19 + [] + +[19] +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +\T1/lmr/m/n/10.95 20 [] + [] + +[20]) + +Package natbib Warning: There were undefined citations. + + +Underfull \hbox (badness 10000) in paragraph at lines 917--917 + + [] + +LaTeX Font Info: Font shape `T1/pzc/m/n' in size <13> not available +(Font) Font shape `T1/pzc/m/it' tried instead on input line 917. + +Package caption Warning: Unused \captionsetup[table] on input line 137. +See the caption package documentation for explanation. + + +Overfull \hbox (10.94998pt too wide) has occurred while \output is active +[] \T1/lmr/m/n/10.95 21 + [] + +[21] (./jss-draft.aux) +Package rerunfilecheck Info: File `jss-draft.out' has not changed. +(rerunfilecheck) Checksum: 34055E8F364508FD5A64AE323899DD99;3818. 
+ ) +Here is how much of TeX's memory you used: + 13151 strings out of 480313 + 211393 string characters out of 5896198 + 504533 words of memory out of 5000000 + 30643 multiletter control sequences out of 15000+600000 + 523309 words of font info for 107 fonts, out of 8000000 for 9000 + 14 hyphenation exceptions out of 8191 + 75i,19n,77p,1550b,473s stack positions out of 5000i,500n,10000p,200000b,80000s +{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathsy.en +c}{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathit.en +c}{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-ec.enc}{/ +Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-ts1.enc}{/Use +rs/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-rm.enc}{/Users/y +eoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathex.enc}{/Users/y +eoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/base/8r.enc} + +Output written on jss-draft.pdf (21 pages, 600231 bytes). +PDF statistics: + 564 PDF objects out of 1000 (max. 8388607) + 472 compressed objects within 5 object streams + 129 named destinations out of 1000 (max. 500000) + 139 words of extra memory for PDF output out of 10000 (max. 
10000000) + diff --git a/vignettes/jss/jss-includes.tex b/vignettes/jss/jss-includes.tex index 2a9fb61..6636391 100644 --- a/vignettes/jss/jss-includes.tex +++ b/vignettes/jss/jss-includes.tex @@ -4,11 +4,15 @@ \usepackage{amsmath} \newcommand{\s}{\sigma} \newcommand{\XX}{\bm X} +\newcommand{\GG}{\bm G} +\newcommand{\MM}{{\bm M}} +\newcommand{\mm}{{\bm m}} \newcommand{\xx}{\bm x} \newcommand{\yy}{\bm y} \newcommand{\zz}{\bm z} \newcommand{\uu}{\bm u} \newcommand{\qq}{\bm q} +\newcommand{\pp}{{\bm p}} \newcommand{\ee}{\bm e} \newcommand{\tth}{\bm \theta} \newcommand{\lla}{\bm \lambda} @@ -16,6 +20,8 @@ \newcommand{\gga}{\bm \gamma} \newcommand{\eep}{\bm \epsilon} \newcommand{\oom}{\bm \omega} +\newcommand{\GGa}{{\bm \Gamma}} +\newcommand{\TTh}{{\bm \Theta}} \newcommand{\R}{\mathbb R} \renewcommand{\gg}{\bm g} \newcommand{\w}{\omega} @@ -29,6 +35,7 @@ \newcommand{\str}[1]{{#1}^{\star}} \DeclareMathOperator*{\argmax}{arg\,max} \DeclareMathOperator*{\argmin}{arg\,min} +\DeclareMathOperator{\diag}{diag} % diagonal matrix % \newcommand{\cx}{\bm{{\scriptstyle \mathcal X}}} \newcommand{\e}{\varepsilon} \newcommand{\iid}{\stackrel {\textrm{iid}}{\sim}} @@ -36,3 +43,24 @@ \newcommand{\sha}[1]{{#1}^{\sharp}} \newcommand{\ud}{\mathop{}\!\mathrm{d}} \newcommand{\dth}{\frac{\ud}{\ud\tth}} +\newcommand{\Ua}{{\mathcal{U}}} +\newcommand{\Ha}{{\mathcal{H}}} +\newcommand{\Ka}{{\mathcal{K}}} +\newcommand{\IH}{{\mathfrak P}} +\newcommand{\Ca}{{\mathcal C}} +\newcommand{\pv}{p_{\text{v}}} % p-value +\newcommand{\eps}{\varepsilon} % epsilon +\newcommand{\curr}{{\text{curr}}} +\newcommand{\prop}{{\text{prop}}} +\newcommand{\rv}[3][1]{#2_{#1},\ldots,#2_{#3}} +% derivatives and partials +\newcommand{\del}[2][]{\frac{\partial^{#1}}{\partial {#2}^{#1}}} +\newcommand{\der}[2][]{\frac{\textnormal{d}^{#1}}{\textnormal{d} {#2}^{#1}}} +% bold zero +\newcommand{\bz}{{\bm 0}} +% normal distribution +\newcommand{\N}{\mathcal{N}} + + + + diff --git a/vignettes/jss/references.bib 
b/vignettes/jss/references.bib index 7284d13..10b4460 100644 --- a/vignettes/jss/references.bib +++ b/vignettes/jss/references.bib @@ -222,3 +222,27 @@ @article{chen2007 year={2007}, publisher={Taylor \& Francis} } + +@article{Matthew2011, + title={The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}, + author={Matthew D. Hoffman and Andrew Gelman}, + year={2011}, + archivePrefix={arXiv}, + primaryClass={stat.CO} +} + +@article{Neal2012, + title={MCMC using Hamiltonian dynamics}, + author={Radford M. Neal}, + year={2012}, + archivePrefix={arXiv}, + primaryClass={stat.CO} +} + +@article{Betancourt2017, + title={A Conceptual Introduction to Hamiltonian Monte Carlo}, + author={Michael Betancourt}, + year={2017}, + archivePrefix={arXiv}, + primaryClass={stat.ME} +} \ No newline at end of file From 31d4a6d03a0fc3ec73524c2742eca904ed3b1007 Mon Sep 17 00:00:00 2001 From: Ye Meng <60788399+yeoveryeah@users.noreply.github.com> Date: Sun, 12 Dec 2021 23:43:49 +0800 Subject: [PATCH 2/4] update with Bayesian EL parth deleting some theory parts to make it more like a sub-section; (todo) add logitstic model exmaple in the end/ --- vignettes/jss/jss-draft.Rmd | 52 ++- vignettes/jss/jss-draft.log | 758 --------------------------------- vignettes/jss/jss-includes.tex | 2 + 3 files changed, 31 insertions(+), 781 deletions(-) delete mode 100644 vignettes/jss/jss-draft.log diff --git a/vignettes/jss/jss-draft.Rmd b/vignettes/jss/jss-draft.Rmd index d3a8d2f..0813998 100644 --- a/vignettes/jss/jss-draft.Rmd +++ b/vignettes/jss/jss-draft.Rmd @@ -538,36 +538,43 @@ Alternatively, a user can implement the `G` matrix and gradient calculations in - Can illustrate with the example from the JRSSB EL-HMC paper.--> -The Bayesian EL(BayesEL) combines the prior information on parameters and the data-driven likelihood to draw inference on the estimand that is defined by estimating equations. 
With the prior density $\pi(\tth)$ over the support $\Theta$, the posterior is defined as: +The following sections will present the Bayesian EL (BayesEL) estimation, as well as an example incorporating \pkg{flexEL} and \proglang{Stan}. + +The BayesEL method combines the prior information on parameters and the data-driven likelihood to draw inference on the estimand that is defined by estimating equations. With the prior density $\pi(\tth)$ over the support $\Theta$, the posterior is defined as: $$ \pi(\tth\mid\xx) = \frac{L(\tth\mid\xx)\pi(\tth)}{\int L(\tth\mid\xx)\pi(\tth) \text{d}\tth} \propto \left[ \prod_{i=1}^{n}w_i \right] \cdot \pi(\tth) $$ -As the analytic form of $\pi(\tth\mid\xx)$ is generally absent, the MCMC sampling methods can be used to evaluate the behaviour of the posterior distribution and making the inference. However, the fact that the posterior distribution is in a non-parametric manner and the support of the posterior can be irregular confronts the adoption of the Gibbs sampling and the Random walk Metropolis sampling. @chaudhuri-et-al2017 utilized the results from convex analysis and adapted the HMC sampling, a gradient-based method, to overcoming these restraints and solving problems with minimal assumptions. To ensure the HMC sampling work for the BayesEL procedures, the following assumptions must hold: +As the analytic form of $\pi(\tth\mid\xx)$ is generally absent, Markov chain Monte Carlo (MCMC) sampling can be applied to make inference from the posterior distribution. However, the posterior distribution is usually non-parametric and its support can be irregular, which hinders the adoption of Gibbs sampling and random-walk Metropolis sampling. @chaudhuri-et-al2017 utilized the results from convex analysis and adapted the Hamiltonian Monte Carlo (HMC) sampling, a gradient-based method, to overcome these restraints and solve problems with minimal assumptions. 
+ - With this assumption, the gradient of the log posterior diverges as the parameter values approach the boundary of $\Theta_1$ from the interior of the support, which is sufficient condition for the leapfrog integrator to propose parameter values inside $\Theta_1$. - -For the underlying connotation and implications, extensive discussion and mathematical proof on these imperative assumptions can be found in @chaudhuri-et-al2017. In the next section, we'll discuss why the HMC is of primary interest and how it manages to deal with complex and non-convex boundaries. - -### Hamiltonian Monte Carlo Sampling for Empirical Likelihood - -The Hamiltonian Monte Carlo(HMC) sampling employs the Hamiltonian dynamics in the Metropolis algorithm and explores the target distribution rapidly. @Neal2012 visualized the Hamiltonian dynamics as a frictionless puck sliding over a smooth height-varying surface. The potential energy ($\Ua(\theta)$) the puck possesses is proportional to its height ($\theta$), at which the kinetic energy is explicitly defined in physics by $\Ka(p) = \frac{p^2}{2m}$, where $p$ is the momentum at height ($\theta$) and $m$ is the mass of the puck. The major characteristic of the Hamiltonian dynamics is energy conservation: no energy gain/loss of the puck such that $\Ha(\theta, p)$ will remain constant over time, while the allocation of the energy into $\Ua(\theta)$ and $\Ka(p)$ may vary with the movement of the puck over time. +The HMC sampling employs the Hamiltonian dynamics in the Metropolis algorithm and explores the target distribution rapidly. @Neal2012 visualized the Hamiltonian dynamics as a frictionless puck sliding over a smooth height-varying surface. The potential energy ($\Ua(\theta)$) the puck possesses is proportional to its height ($\theta$), at which the kinetic energy is explicitly defined in physics by $\Ka(p) = \frac{p^2}{2m}$, where $p$ is the momentum at height ($\theta$) and $m$ is the mass of the puck. 
The major characteristic of the Hamiltonian dynamics is energy conservation: $\Ha(\theta, p)$ will remain constant over time, while the allocation of the energy into $\Ua(\theta)$ and $\Ka(p)$ may vary with the movement of the puck over time. The $d$-dimensional Hamiltonian system consists of $d$ particles deferring to Hamiltonian dynamics. The state of the system at time $t$ can be described by the trajectory $\GGa_t = (\tth_t, \pp_t)$ with position variables $\tth = (\rv \theta d)$ and auxiliary momentum variables $\pp = (\rv p d )$. Then, the Hamiltonian system is characterized by the conservative total energy: $$ @@ -590,7 +597,7 @@ The Hamiltonian function $\Ha(\tth, \pp, t)$ can be described by the set of diff \der t\tth(t) = + \del{\pp} \Ha(\tth, \pp)=\MM^{-1}\pp, \qquad \der t \pp(t) = - \del{\tth} \Ha(\tth,\pp) = \frac{\nabla \pi(\tth\mid\xx)}{ \pi(\tth\mid\xx)} (\#eq:HEQ) \end{equation} -The three fundamental properties of the Hamiltonian dynamics granting the feasible and efficient HMC proposal are: reversible, conservative and volume preserving. The simple intuition behind the reversibility is that the initial state can be traced back from some time $t$, and it guarantees the convergence of HMC. +The three fundamental properties of the Hamiltonian dynamics that grant the feasible and efficient HMC proposal are: reversible, conservative and volume preserving. The simple intuition behind the reversibility is that the initial state can be traced back from some time $t$, and it guarantees the convergence of HMC. 
The conservation property makes $\Ha(\tth, \pp)$ constant over the phase space trajectory $\GGa_t = (\tth_t, \pp_t)$, which makes the theoretical acceptance rate of HMC algorithm to be 1. Mathematically, the volume preservation deciphers that the change of variables $\GGa_0 \to \GGa_t$ has a Jacobian of 1 $$ \left| \frac{\partial \GGa_t}{\partial \GGa_0} \right|= 1 @@ -613,18 +620,17 @@ $$ \alpha=\min\left\{1, \frac{p(\GGa_\prop)}{p(\GGa_0)}\right\}= \min\left\{1, \frac{\exp(\Ha(\GGa_0))}{\exp(\Ha(\GGa_\prop))}\right\} $$ -Theoretically, the energy conservation states that $\Ha(\GGa_t) = \Ha(\GGa_0)$. Therefore, if the Hamilton's equations (\ref{eq:HEQ}) can be solved analytically such that the proposal will always be accepted. Though the leapfrog integrator is highly accurate, the numerical approximation will bias the proposed trajectory such that the Hamiltonian will not be exactly conservative. Besides, the approximation will not be perfectly reversible in practice due to rounding calculation. In this sense, the $\Ha(\GGa_t)$ will depart away from $\Ha(\GGa_0)$ and the acceptance rate would not achieve 1. By treating the proposal as a stochastic one, the Metropolis acceptance step is embraced to compensate for this deviation. However, this deviation is fairly small and the resulting acceptance probability is relatively high, which makes HMC efficient. +The energy conservation suggests that if the Hamilton's equations (\ref{eq:HEQ}) can be solved analytically, the proposal will always be accepted. Though the leapfrog integrator is highly accurate, the numerical approximation will bias the proposed trajectory such that the Hamiltonian will not be exactly conservative. Besides, the approximation will not be perfectly reversible in practice due to rounding calculation. In this sense, the $\Ha(\GGa_t)$ will depart away from $\Ha(\GGa_0)$ and the acceptance rate would not achieve 1. 
By treating the proposal as a stochastic one, the Metropolis acceptance step is embraced to compensate for this deviation. However, this deviation is fairly small and the resulting acceptance probability is relatively high, which makes HMC efficient. @Betancourt2017 explains the geometry of the phase space, i.e the collection of all possible values of the $\GGa_t$ and how the HMC sampling manage to explore regions with large curvature and draw samples from the posterior efficiently. @chaudhuri-et-al2017 derives the sufficient assumptions to make the HMC algorithm works in principle and to keep the successive positions proposed by the leapfrog integrator inside the support of the posterior $\pi(\tth\mid\xx)$. For a posterior with complex and non-convex support, the HMC sampling will keep the proposed trajectories inside the support due to high absolute gradient values of the log-posterior at the boundaries. This makes HMC sampling a powerful alternative to explore a BayesEL posterior. -The HMC has three parameters to be tuned: the mass matrix $\MM$, the discretization step size $\eps$ and the number of steps taken $L$. @Neal2012 gives detailed geometry underlying and discloses the message that the inverse mass matrix corresponds to the variance matrix of the target distribution. In the light of this fact, the mass can be roughly estimated from the inverse variance of the posterior distribution. In practice, it is good to simply set $\diag(\mm)$ as an identity matrix. Intuitively, if the mass matrix is poorly estimated, a smaller step size has to be adopted to ensure the accuracy of the proposal and hence more leapfrog steps have to be taken to ensure the efficiency. In particular, @Matthew2011 pointed out that since HMC's performance is highly sensitive to the a step size $\eps$ and a desired number of steps $L$. If $L$ is too small then the algorithm exhibits undesirable random walk behavior, while if $L$ is too large the algorithm wastes computation. 
The No-U-Turn Sampler(NUTS) proposed by @Matthew2011 is an extension to HMC that eliminates the need to set the number of steps $L$. Generally speaking, NUTS uses a recursive algorithm to build a set of likely candidate points that spans a wide swath of the target distribution, stopping automatically when it starts to double back and retrace its steps. As shown in @Matthew2011, NUTS empirically has an efficient performance as a HMC algorithm that is well tuned. - -### stanEL pkg +The HMC has three parameters to be tuned: the mass matrix $\MM$, the discretization step size $\eps$ and the number of steps taken $L$. @Neal2012 gives a detailed account of the underlying geometry and shows that the inverse mass matrix corresponds to the variance matrix of the target distribution. In light of this fact, the mass can be roughly estimated from the inverse variance of the posterior distribution. In practice, it is good to simply set $\MM$ as an identity matrix. On the other hand, if the mass matrix is poorly estimated, a smaller step size has to be adopted to assure the accuracy of the proposal and, as a result, more leapfrog steps will be taken to ensure the efficiency. -- to-do: job-satis example here!!!! +@Matthew2011 pointed out that the performance of HMC is highly sensitive to the step size $\eps$ and the number of steps $L$. If $L$ is too small, the algorithm exhibits undesirable random walk behavior; yet, a large $L$ will lead to a waste of computation. The No-U-Turn Sampler (NUTS), a variant of HMC, will automatically make an appropriate choice of $L$ in each iteration. Generally speaking, NUTS uses a recursive algorithm to build a set of likely candidate points that spans a wide swath of the target distribution, stopping automatically when it starts to double back and retrace its steps. As shown in @Matthew2011, NUTS empirically has an efficient performance as an HMC algorithm that is well tuned. 
-- How to accommodate the job-satis example and so as the application of the stanEL pkg into this section? +\proglang{Stan} primarily implements NUTS to make Bayesian inference. The following is an BayesEL example that takes advantage of both \pkg{flexEL} and \proglang{Stan}. +- todo: add logistic model here. # Conclusion diff --git a/vignettes/jss/jss-draft.log b/vignettes/jss/jss-draft.log deleted file mode 100644 index 508d8e1..0000000 --- a/vignettes/jss/jss-draft.log +++ /dev/null @@ -1,758 +0,0 @@ -This is pdfTeX, Version 3.141592653-2.6-1.40.23 (TeX Live 2022/dev) (preloaded format=pdflatex 2021.11.26) 28 NOV 2021 21:04 -entering extended mode - restricted \write18 enabled. - %&-line parsing enabled. -**jss-draft.tex -(./jss-draft.tex -LaTeX2e <2021-11-15> -L3 programming layer <2021-11-22> (./jss.cls -Document Class: jss 2021/05/23 3.3 jss class by Achim Zeileis -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/article.cls -Document Class: article 2021/10/04 v1.4n Standard LaTeX document class -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/size11.clo -File: size11.clo 2021/10/04 v1.4n Standard LaTeX file (size option) -) -\c@part=\count183 -\c@section=\count184 -\c@subsection=\count185 -\c@subsubsection=\count186 -\c@paragraph=\count187 -\c@subparagraph=\count188 -\c@figure=\count189 -\c@table=\count190 -\abovecaptionskip=\skip47 -\belowcaptionskip=\skip48 -\bibindent=\dimen138 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/graphicx.sty -Package: graphicx 2021/09/16 v1.2d Enhanced LaTeX Graphics (DPC,SPQR) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/keyval.sty -Package: keyval 2014/10/28 v1.15 key=value parser (DPC) -\KV@toks@=\toks16 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/graphics.sty -Package: graphics 2021/03/04 v1.4d Standard LaTeX Graphics (DPC,SPQR) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics/trig.sty -Package: trig 2021/08/11 v1.11 
sin cos tan (DPC) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-cfg/graphics.c -fg -File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration -) -Package graphics Info: Driver file: pdftex.def on input line 107. - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-def/pdftex.def -File: pdftex.def 2020/10/05 v1.2a Graphics/color driver for pdftex -)) -\Gin@req@height=\dimen139 -\Gin@req@width=\dimen140 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/xcolor/xcolor.sty -Package: xcolor 2021/10/31 v2.13 LaTeX color extensions (UK) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/graphics-cfg/color.cfg -File: color.cfg 2016/01/02 v1.6 sample color configuration -) -Package xcolor Info: Driver file: pdftex.def on input line 227. -Package xcolor Info: Model `cmy' substituted by `cmy0' on input line 1352. -Package xcolor Info: Model `hsb' substituted by `rgb' on input line 1356. -Package xcolor Info: Model `RGB' extended on input line 1368. -Package xcolor Info: Model `HTML' substituted by `rgb' on input line 1370. -Package xcolor Info: Model `Hsb' substituted by `hsb' on input line 1371. -Package xcolor Info: Model `tHsb' substituted by `hsb' on input line 1372. -Package xcolor Info: Model `HSB' substituted by `hsb' on input line 1373. -Package xcolor Info: Model `Gray' substituted by `gray' on input line 1374. -Package xcolor Info: Model `wave' substituted by `hsb' on input line 1375. 
-) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/ae/ae.sty -Package: ae 2001/02/12 1.3 Almost European Computer Modern -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2021/04/29 v2.0v Standard LaTeX package -)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/fancyvrb/fancyvrb.st -y -Package: fancyvrb 2021/11/24\nobreakspace {} 4.1a\nobreakspace {}verbatim text -(tvz,hv) -\FV@CodeLineNo=\count191 -\FV@InFile=\read2 -\FV@TabBox=\box50 -\c@FancyVerbLine=\count192 -\FV@StepNumber=\count193 -\FV@OutFile=\write3 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2021/04/29 v2.0v Standard LaTeX package -LaTeX Font Info: Trying to load font information for T1+aer on input line 11 -2. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/ae/t1aer.fd -File: t1aer.fd 1997/11/16 Font definitions for T1/aer. -)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/lmodern.sty -Package: lmodern 2015/05/01 v1.6.1 Latin Modern Fonts -LaTeX Font Info: Overwriting symbol font `operators' in version `normal' -(Font) OT1/cmr/m/n --> OT1/lmr/m/n on input line 22. -LaTeX Font Info: Overwriting symbol font `letters' in version `normal' -(Font) OML/cmm/m/it --> OML/lmm/m/it on input line 23. -LaTeX Font Info: Overwriting symbol font `symbols' in version `normal' -(Font) OMS/cmsy/m/n --> OMS/lmsy/m/n on input line 24. -LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal' -(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 25. -LaTeX Font Info: Overwriting symbol font `operators' in version `bold' -(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 26. -LaTeX Font Info: Overwriting symbol font `letters' in version `bold' -(Font) OML/cmm/b/it --> OML/lmm/b/it on input line 27. -LaTeX Font Info: Overwriting symbol font `symbols' in version `bold' -(Font) OMS/cmsy/b/n --> OMS/lmsy/b/n on input line 28. 
-LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold' -(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 29. -LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal' -(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 31. -LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal' -(Font) OT1/cmss/m/n --> OT1/lmss/m/n on input line 32. -LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal' -(Font) OT1/cmr/m/it --> OT1/lmr/m/it on input line 33. -LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal' -(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 34. -LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold' -(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 35. -LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold' -(Font) OT1/cmss/bx/n --> OT1/lmss/bx/n on input line 36. -LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold' -(Font) OT1/cmr/bx/it --> OT1/lmr/bx/it on input line 37. -LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold' -(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 38. -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/natbib/natbib.sty -Package: natbib 2010/09/13 8.31b (PWD, AO) -\bibhang=\skip49 -\bibsep=\skip50 -LaTeX Info: Redefining \cite on input line 694. 
-\c@NAT@ctr=\count194 -) -\footerskip=\skip51 -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hyperref.sty -Package: hyperref 2021-06-07 v7.00m Hypertext links for LaTeX -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty -Package: ltxcmds 2020-05-10 v1.25 LaTeX kernel commands for general use (HO) -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/iftex.sty -Package: iftex 2020/03/06 v1.0d TeX engine tests -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/pdftexcmds/pdftexcmds -.sty -Package: pdftexcmds 2020-06-27 v0.33 Utility functions of pdfTeX for LuaTeX (HO -) - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/infwarerr/infwarerr.s -ty -Package: infwarerr 2019/12/03 v1.5 Providing info/warning/error messages (HO) -) -Package pdftexcmds Info: \pdf@primitive is available. -Package pdftexcmds Info: \pdf@ifprimitive is available. -Package pdftexcmds Info: \pdfdraftmode found. -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/kvsetkeys/kvsetkeys.s -ty -Package: kvsetkeys 2019/12/15 v1.18 Key value parser (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/kvdefinekeys/kvdefine -keys.sty -Package: kvdefinekeys 2019-12-19 v1.6 Define keys (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/pdfescape/pdfescape.s -ty -Package: pdfescape 2019/12/09 v1.15 Implements pdfTeX's escape features (HO) -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hycolor/hycolor.sty -Package: hycolor 2020-01-27 v1.10 Color options for hyperref/bookmark (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/letltxmacro/letltxmacro -.sty -Package: letltxmacro 2019/12/03 v1.6 Let assignment for LaTeX macros (HO) -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/auxhook/auxhook.sty -Package: auxhook 2019-12-17 v1.6 Hooks for auxiliary files (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/kvoptions/kvoptions.sty -Package: 
kvoptions 2020-10-07 v3.14 Key value format for package options (HO) -) -\@linkdim=\dimen141 -\Hy@linkcounter=\count195 -\Hy@pagecounter=\count196 -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/pd1enc.def -File: pd1enc.def 2021-06-07 v7.00m Hyperref: PDFDocEncoding definition (HO) -Now handling font encoding PD1 ... -... no UTF-8 mapping file for font encoding PD1 -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hyperref-langp -atches.def -File: hyperref-langpatches.def 2021-06-07 v7.00m Hyperref: patches for babel la -nguages -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/intcalc/intcalc.sty -Package: intcalc 2019/12/15 v1.3 Expandable calculations with integers (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/etexcmds/etexcmds.sty -Package: etexcmds 2019/12/15 v1.7 Avoid name clashes with e-TeX commands (HO) -) -\Hy@SavedSpaceFactor=\count197 -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/puenc.def -File: puenc.def 2021-06-07 v7.00m Hyperref: PDF Unicode definition (HO) -Now handling font encoding PU ... -... no UTF-8 mapping file for font encoding PU -) -Package hyperref Info: Hyper figures OFF on input line 4192. -Package hyperref Info: Link nesting OFF on input line 4197. -Package hyperref Info: Hyper index ON on input line 4200. -Package hyperref Info: Plain pages OFF on input line 4207. -Package hyperref Info: Backreferencing OFF on input line 4212. -Package hyperref Info: Implicit mode ON; LaTeX internals redefined. -Package hyperref Info: Bookmarks ON on input line 4445. -\c@Hy@tempcnt=\count198 -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/url/url.sty -\Urlmuskip=\muskip16 -Package: url 2013/09/16 ver 3.4 Verb mode for urls, etc. -) -LaTeX Info: Redefining \url on input line 4804. 
-\XeTeXLinkMargin=\dimen142 -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/bitset/bitset.sty -Package: bitset 2019/12/09 v1.3 Handle bit-vector datatype (HO) - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/bigintcalc/bigintcalc -.sty -Package: bigintcalc 2019/12/15 v1.5 Expandable calculations on big integers (HO -) -)) -\Fld@menulength=\count199 -\Field@Width=\dimen143 -\Fld@charsize=\dimen144 -Package hyperref Info: Hyper figures OFF on input line 6076. -Package hyperref Info: Link nesting OFF on input line 6081. -Package hyperref Info: Hyper index ON on input line 6084. -Package hyperref Info: backreferencing OFF on input line 6091. -Package hyperref Info: Link coloring OFF on input line 6096. -Package hyperref Info: Link coloring with OCG OFF on input line 6101. -Package hyperref Info: PDF/A mode OFF on input line 6106. -LaTeX Info: Redefining \ref on input line 6146. -LaTeX Info: Redefining \pageref on input line 6150. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/atbegshi-ltx.sty -Package: atbegshi-ltx 2021/01/10 v1.0c Emulation of the original atbegshi -package with kernel methods -) -\Hy@abspage=\count266 -\c@Item=\count267 -\c@Hfootnote=\count268 -) -Package hyperref Info: Driver (autodetected): hpdftex. 
-(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/hpdftex.def -File: hpdftex.def 2021-06-07 v7.00m Hyperref driver for pdfTeX -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/atveryend-ltx.sty -Package: atveryend-ltx 2020/08/19 v1.0a Emulation of the original atveryend pac -kage -with kernel methods -) -\Fld@listcount=\count269 -\c@bookmark@seq@number=\count270 - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/rerunfilecheck/rerunfil -echeck.sty -Package: rerunfilecheck 2019/12/05 v1.9 Rerun checks for auxiliary files (HO) - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/uniquecounter/uniquec -ounter.sty -Package: uniquecounter 2019/12/15 v1.4 Provide unlimited unique counter (HO) -) -Package uniquecounter Info: New unique counter `rerunfilecheck' on input line 2 -86. -) -\Hy@SectionHShift=\skip52 -) -\preXLskip=\skip53 -\preLskip=\skip54 -\preMskip=\skip55 -\preSskip=\skip56 -\postMskip=\skip57 -\postSskip=\skip58 - -Package hyperref Warning: Option `hyperindex' has already been used, -(hyperref) setting the option has no effect on input line 447. - -Package hyperref Info: Option `colorlinks' set `true' on input line 447. -Package hyperref Info: Option `linktocpage' set `true' on input line 447. -Package hyperref Info: Option `plainpages' set `false' on input line 447. -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/amssymb.sty -Package: amssymb 2013/01/14 v3.01 AMS font symbols -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/amsfonts.sty -Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support -\@emptytoks=\toks17 -\symAMSa=\mathgroup4 -\symAMSb=\mathgroup5 -LaTeX Font Info: Redeclaring math symbol \hbar on input line 98. -LaTeX Font Info: Overwriting math alphabet `\mathfrak' in version `bold' -(Font) U/euf/m/n --> U/euf/b/n on input line 106. 
-)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsmath.sty -Package: amsmath 2021/10/15 v2.17l AMS math features -\@mathmargin=\skip59 -For additional information on amsmath, use the `?' option. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amstext.sty -Package: amstext 2021/08/26 v2.01 AMS text -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsgen.sty -File: amsgen.sty 1999/11/30 v2.0 generic functions -\@emptytoks=\toks18 -\ex@=\dimen145 -)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsbsy.sty -Package: amsbsy 1999/11/29 v1.2d Bold Symbols -\pmbraise@=\dimen146 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsmath/amsopn.sty -Package: amsopn 2021/08/26 v2.02 operator names -) -\inf@bad=\count271 -LaTeX Info: Redefining \frac on input line 234. -\uproot@=\count272 -\leftroot@=\count273 -LaTeX Info: Redefining \overline on input line 399. -\classnum@=\count274 -\DOTSCASE@=\count275 -LaTeX Info: Redefining \ldots on input line 496. -LaTeX Info: Redefining \dots on input line 499. -LaTeX Info: Redefining \cdots on input line 620. -\Mathstrutbox@=\box51 -\strutbox@=\box52 -\big@size=\dimen147 -LaTeX Font Info: Redeclaring font encoding OML on input line 743. -LaTeX Font Info: Redeclaring font encoding OMS on input line 744. -\macc@depth=\count276 -\c@MaxMatrixCols=\count277 -\dotsspace@=\muskip17 -\c@parentequation=\count278 -\dspbrk@lvl=\count279 -\tag@help=\toks19 -\row@=\count280 -\column@=\count281 -\maxfields@=\count282 -\andhelp@=\toks20 -\eqnshift@=\dimen148 -\alignsep@=\dimen149 -\tagshift@=\dimen150 -\tagwidth@=\dimen151 -\totwidth@=\dimen152 -\lineht@=\dimen153 -\@envbody=\toks21 -\multlinegap=\skip60 -\multlinetaggap=\skip61 -\mathdisplay@stack=\toks22 -LaTeX Info: Redefining \[ on input line 2938. -LaTeX Info: Redefining \] on input line 2939. 
-) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/ifxetex.sty -Package: ifxetex 2019/10/25 v0.7 ifxetex legacy package. Use iftex instead. -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/iftex/ifluatex.sty -Package: ifluatex 2019/10/25 v1.5 ifluatex legacy package. Use iftex instead. -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fixltx2e.sty -Package: fixltx2e 2016/12/29 v2.1a fixes to LaTeX (obsolete) -Applying: [2015/01/01] Old fixltx2e package on input line 46. - -Package fixltx2e Warning: fixltx2e is not required with releases after 2015 -(fixltx2e) All fixes are now in the LaTeX kernel. -(fixltx2e) See the latexrelease package for details. - -Already applied: [0000/00/00] Old fixltx2e package on input line 53. -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2021/04/29 v2.0v Standard LaTeX package -LaTeX Font Info: Trying to load font information for T1+lmr on input line 11 -2. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmr.fd -File: t1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/base/inputenc.sty -Package: inputenc 2021/02/14 v1.3d Input encoding file -\inpenc@prehook=\toks23 -\inpenc@posthook=\toks24 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/framed/framed.sty -Package: framed 2011/10/22 v 0.96: framed or shaded text with page breaks -\OuterFrameSep=\skip62 -\fb@frw=\dimen154 -\fb@frh=\dimen155 -\FrameRule=\dimen156 -\FrameSep=\dimen157 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/tools/longtable.sty -Package: longtable 2021-09-01 v4.17 Multi-page Table package (DPC) -\LTleft=\skip63 -\LTright=\skip64 -\LTpre=\skip65 -\LTpost=\skip66 -\LTchunksize=\count283 -\LTcapwidth=\dimen158 -\LT@head=\box53 -\LT@firsthead=\box54 -\LT@foot=\box55 -\LT@lastfoot=\box56 -\LT@gbox=\box57 -\LT@cols=\count284 -\LT@rows=\count285 -\c@LT@tables=\count286 
-\c@LT@chunks=\count287 -\LT@p@ftn=\toks25 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/booktabs/booktabs.sty -Package: booktabs 2020/01/12 v1.61803398 Publication quality tables -\heavyrulewidth=\dimen159 -\lightrulewidth=\dimen160 -\cmidrulewidth=\dimen161 -\belowrulesep=\dimen162 -\belowbottomsep=\dimen163 -\aboverulesep=\dimen164 -\abovetopsep=\dimen165 -\cmidrulesep=\dimen166 -\cmidrulekern=\dimen167 -\defaultaddspace=\dimen168 -\@cmidla=\count288 -\@cmidlb=\count289 -\@aboverulesep=\dimen169 -\@belowrulesep=\dimen170 -\@thisruleclass=\count290 -\@lastruleclass=\count291 -\@thisrulewidth=\dimen171 -) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/caption.sty -Package: caption 2020/10/26 v3.5g Customizing captions (AR) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/caption3.sty -Package: caption3 2020/10/21 v2.2e caption3 kernel (AR) -\captionmargin=\dimen172 -\captionmargin@=\dimen173 -\captionwidth=\dimen174 -\caption@tempdima=\dimen175 -\caption@indent=\dimen176 -\caption@parindent=\dimen177 -\caption@hangindent=\dimen178 -Package caption Info: Standard document class detected. -) -\c@caption@flags=\count292 -\c@continuedfloat=\count293 -Package caption Info: hyperref package is loaded. -Package caption Info: longtable package is loaded. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/caption/ltcaption.sty -Package: ltcaption 2020/05/30 v1.4b longtable captions (AR) -)) (/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/tools/bm.sty -Package: bm 2021/04/25 v1.2e Bold Symbol Support (DPC/FMi) -\symboldoperators=\mathgroup6 -\symboldletters=\mathgroup7 -\symboldsymbols=\mathgroup8 -Package bm Info: No bold for \OMX/lmex/m/n, using \pmb. -Package bm Info: No bold for \U/msa/m/n, using \pmb. -Package bm Info: No bold for \U/msb/m/n, using \pmb. -LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 149. 
-) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/l3backend/l3backend-pdf -tex.def -File: l3backend-pdftex.def 2021-10-18 L3 backend support: PDF output (pdfTeX) -\l__color_backend_stack_int=\count294 -\l__pdf_internal_box=\box58 -) (./jss-draft.aux) -\openout1 = `jss-draft.aux'. - -LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. -LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 199. -LaTeX Font Info: ... okay on input line 199. - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-b -ase.sty -Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf -Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4 -85. - -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/latexconfig/epstopdf-sy -s.cfg -File: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Liv -e -)) -Package hyperref Info: Link coloring ON on input line 199. 
-(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/hyperref/nameref.sty -Package: nameref 2021-04-02 v2.47 Cross-referencing by name of section -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/refcount/refcount.sty -Package: refcount 2019/12/15 v3.6 Data extraction from label references (HO) -) -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/generic/gettitlestring/gettit -lestring.sty -Package: gettitlestring 2019/12/15 v1.6 Cleanup title references (HO) -) -\c@section@level=\count295 -) -LaTeX Info: Redefining \ref on input line 199. -LaTeX Info: Redefining \pageref on input line 199. -LaTeX Info: Redefining \nameref on input line 199. -(./jss-draft.out) (./jss-draft.out) -\@outlinefile=\write4 -\openout4 = `jss-draft.out'. - - -File: jsslogo.jpg Graphic file (type jpg) - -Package pdftex.def Info: jsslogo.jpg used on input line 199. -(pdftex.def) Requested size: 89.80768pt x 65.43607pt. -LaTeX Font Info: Trying to load font information for T1+pzc on input line 19 -9. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/psnfss/t1pzc.fd -File: t1pzc.fd 2020/03/25 font definitions for T1/pzc. -) -LaTeX Font Info: Font shape `T1/pzc/m/n' in size <28> not available -(Font) Font shape `T1/pzc/m/it' tried instead on input line 199. -LaTeX Font Info: Trying to load font information for OT1+lmr on input line 1 -99. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/ot1lmr.fd -File: ot1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -LaTeX Font Info: Trying to load font information for OML+lmm on input line 1 -99. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omllmm.fd -File: omllmm.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -LaTeX Font Info: Trying to load font information for OMS+lmsy on input line -199. 
-(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omslmsy.fd -File: omslmsy.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -LaTeX Font Info: Trying to load font information for OMX+lmex on input line -199. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/omxlmex.fd -File: omxlmex.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -LaTeX Font Info: External font `lmex10' loaded for size -(Font) <10.95> on input line 199. -LaTeX Font Info: External font `lmex10' loaded for size -(Font) <8> on input line 199. -LaTeX Font Info: External font `lmex10' loaded for size -(Font) <6> on input line 199. -LaTeX Font Info: Trying to load font information for U+msa on input line 199 -. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/umsa.fd -File: umsa.fd 2013/01/14 v3.01 AMS symbols A -) -LaTeX Font Info: Trying to load font information for U+msb on input line 199 -. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/umsb.fd -File: umsb.fd 2013/01/14 v3.01 AMS symbols B -) -LaTeX Font Info: Trying to load font information for T1+lmss on input line 1 -99. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmss.fd -File: t1lmss.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -Package caption Info: Begin \AtBeginDocument code. -Package caption Info: End \AtBeginDocument code. 
-[1 - -{/Users/yeoveryeah/Library/TinyTeX/texmf-var/fonts/map/pdftex/updmap/pdftex.map -} <./jsslogo.jpg>] -Underfull \vbox (badness 10000) has occurred while \output is active [] - - -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 2 [] - [] - -[2] -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 3 - [] - -[3] -Underfull \vbox (badness 1735) has occurred while \output is active [] - - -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 4 [] - [] - -[4] -LaTeX Font Info: Trying to load font information for U+euf on input line 322 -. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/amsfonts/ueuf.fd -File: ueuf.fd 2013/01/14 v3.01 Euler Fraktur -) -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 5 - [] - -[5] -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 6 [] - [] - -[6] -LaTeX Font Info: Trying to load font information for TS1+lmr on input line 4 -09. -(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/ts1lmr.fd -File: ts1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 7 - [] - -[7] - -File: smoothfun.png Graphic file (type png) - -Package pdftex.def Info: smoothfun.png used on input line 469. -(pdftex.def) Requested size: 221.32687pt x 225.5158pt. - -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 8 [] - [] - -[8] -LaTeX Font Info: Trying to load font information for T1+lmtt on input line 4 -92. 
-(/Users/yeoveryeah/Library/TinyTeX/texmf-dist/tex/latex/lm/t1lmtt.fd -File: t1lmtt.fd 2015/05/01 v1.6.1 Font defs for Latin Modern -) -Overfull \hbox (5.47499pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 9 - [] - -[9 <./smoothfun.png>] -LaTeX Font Info: Font shape `T1/lmtt/bx/n' in size <10.95> not available -(Font) Font shape `T1/lmtt/b/n' tried instead on input line 540. - -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 10 [] - [] - -[10] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 11 - [] - -[11] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 12 [] - [] - -[12] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 13 - [] - -[13] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 14 [] - [] - -[14] -Underfull \hbox (badness 1178) in paragraph at lines 812--813 -\T1/lmr/m/n/10.95 and fol-lows the ex-am-ple in the mean re-gres-sion sec-tion -where in-stead of in-clud-ing - [] - - -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 15 - [] - -[15] -LaTeX Font Info: External font `lmex10' loaded for size -(Font) <9> on input line 829. -LaTeX Font Info: External font `lmex10' loaded for size -(Font) <5> on input line 829. - -Package natbib Warning: Citation `qin1994empirical' on page 16 undefined on inp -ut line 829. 
- - -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 16 [] - [] - -[16] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 17 - [] - -[17] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 18 [] - [] - -[18] (./jss-draft.bbl -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 19 - [] - -[19] -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -\T1/lmr/m/n/10.95 20 [] - [] - -[20]) - -Package natbib Warning: There were undefined citations. - - -Underfull \hbox (badness 10000) in paragraph at lines 917--917 - - [] - -LaTeX Font Info: Font shape `T1/pzc/m/n' in size <13> not available -(Font) Font shape `T1/pzc/m/it' tried instead on input line 917. - -Package caption Warning: Unused \captionsetup[table] on input line 137. -See the caption package documentation for explanation. - - -Overfull \hbox (10.94998pt too wide) has occurred while \output is active -[] \T1/lmr/m/n/10.95 21 - [] - -[21] (./jss-draft.aux) -Package rerunfilecheck Info: File `jss-draft.out' has not changed. -(rerunfilecheck) Checksum: 34055E8F364508FD5A64AE323899DD99;3818. 
- ) -Here is how much of TeX's memory you used: - 13151 strings out of 480313 - 211393 string characters out of 5896198 - 504533 words of memory out of 5000000 - 30643 multiletter control sequences out of 15000+600000 - 523309 words of font info for 107 fonts, out of 8000000 for 9000 - 14 hyphenation exceptions out of 8191 - 75i,19n,77p,1550b,473s stack positions out of 5000i,500n,10000p,200000b,80000s -{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathsy.en -c}{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathit.en -c}{/Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-ec.enc}{/ -Users/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-ts1.enc}{/Use -rs/yeoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-rm.enc}{/Users/y -eoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/lm/lm-mathex.enc}{/Users/y -eoveryeah/Library/TinyTeX/texmf-dist/fonts/enc/dvips/base/8r.enc} - -Output written on jss-draft.pdf (21 pages, 600231 bytes). -PDF statistics: - 564 PDF objects out of 1000 (max. 8388607) - 472 compressed objects within 5 object streams - 129 named destinations out of 1000 (max. 500000) - 139 words of extra memory for PDF output out of 10000 (max. 
10000000) - diff --git a/vignettes/jss/jss-includes.tex b/vignettes/jss/jss-includes.tex index 6636391..c55e631 100644 --- a/vignettes/jss/jss-includes.tex +++ b/vignettes/jss/jss-includes.tex @@ -61,6 +61,8 @@ % normal distribution \newcommand{\N}{\mathcal{N}} +\newtheorem{assump}{Assumption} + From 27141dc08f959e8824a3d45460c62608ac388462 Mon Sep 17 00:00:00 2001 From: Ye Meng <60788399+yeoveryeah@users.noreply.github.com> Date: Sun, 19 Dec 2021 21:15:14 +0800 Subject: [PATCH 3/4] jss draft --- vignettes/jss/jss-draft.Rmd | 35 +++++++++- vignettes/jss/jss-includes.tex | 1 + vignettes/jss/logEL_opt_stan.hpp | 106 +++++++++++++++++++++++++++++++ vignettes/jss/logistic_el.stan | 43 +++++++++++++ 4 files changed, 182 insertions(+), 3 deletions(-) create mode 100644 vignettes/jss/logEL_opt_stan.hpp create mode 100644 vignettes/jss/logistic_el.stan diff --git a/vignettes/jss/jss-draft.Rmd b/vignettes/jss/jss-draft.Rmd index 0813998..da3b8e9 100644 --- a/vignettes/jss/jss-draft.Rmd +++ b/vignettes/jss/jss-draft.Rmd @@ -626,11 +626,40 @@ The energy conservation suggests that if the Hamilton's equations (\ref{eq:HEQ}) The HMC has three parameters to be tuned: the mass matrix $\MM$, the discretization step size $\eps$ and the number of steps taken $L$. @Neal2012 gives detailed geometry underlying and discloses the message that the inverse mass matrix corresponds to the variance matrix of the target distribution. In the light of this fact, the mass can be roughly estimated from the inverse variance of the posterior distribution. In practice, it is good to simply set $\MM$ as an identity matrix. On the other hand, if the mass matrix is poorly estimated, a smaller step size has to be adopted to assure the accuracy of the proposal and, as a result, more leapfrog steps will be taken to ensure the efficiency. -@Matthew2011 pointed out that the performance of HMC is highly sensitive to the the step size $\eps$ and the number of steps $L$. 
If $L$ is too small, the algorithm exhibits undesirable random walk behavior; yet, a large $L$ will lead to a waste of computation. The No-U-Turn Sampler(NUTS), a variant of HMC, will automatically make an appropriate choice of $L$ in each iteration. Generally speaking, NUTS uses a recursive algorithm to build a set of likely candidate points that spans a wide swath of the target distribution, stopping automatically when it starts to double back and retrace its steps. As shown in @Matthew2011, NUTS empirically has an efficient performance as a HMC algorithm that is well tuned. +@Matthew2011 points out that the performance of HMC is highly sensitive to the the step size $\eps$ and the number of steps $L$. If $L$ is too small, the algorithm exhibits undesirable random walk behavior; yet, a large $L$ will lead to a waste of computation. The No-U-Turn Sampler(NUTS), a variant of HMC, will automatically make an appropriate choice of $L$ in each iteration. Generally speaking, NUTS uses a recursive algorithm to build a set of likely candidate points that spans a wide swath of the target distribution, stopping automatically when it starts to double back and retrace its steps. As shown in @Matthew2011, NUTS empirically has an efficient performance as a HMC algorithm that is well tuned. -\proglang{Stan} primarily implements NUTS to make Bayesian inference. The following is an BayesEL example that takes advantage of both \pkg{flexEL} and \proglang{Stan}. +\pkg{stanEL} deciphered the fast gradient calculation provided by \pkg{flexEL} and gain computational +efficiency as compared to use \proglang{Stan}'s auto differentiation engine. \pkg{stanEL} sources \pkg{flexEL} and wraps the 'logEL' calculation in \proglang{Stan} with the following user defined script. -- todo: add logistic model here. 
+```{r logElopt, echo = FALSE, results = "asis"} +cat("```hpp", readLines("logEL_opt_stan.hpp"), "```", sep = "\n") +``` + +Consider the logistic model with $Y_{i} \sim BIN(1, \rho_i)$, where $n$ is the number of observation: + +$$ +\logit \rho_i = \xx_{i}'\bbe. +$$ + +The Bartlett moment conditions are given by +$$ +E[y_{i} - \rho_{i} ] = 0, \qquad E\left[\frac{(y_{i} - \rho_{i})^2}{\rho_{i}(1-\rho_{i})} - 1 \right] = 0. +$$ + +The $\bm{G}_{n\times 2}$ matrix is therefore given by: + +$$ +\bm{G}(\bbe) = \begin{bmatrix} +y_1 - \rho_1 & \frac{(y_1 - \rho_1)^2}{\rho_1(1 - \rho_1)} -1 \\ +\vdots & \vdots\\ +y_n - \rho_n & \frac{(y_n - \rho_n)^2}{\rho_n(1 - \rho_n)} - 1 \\ +\end{bmatrix}, +$$ +Assume the prior distribution is $\bbe \sim \N(0, \sigma_{\beta}^2)$, the following \proglang{Stan} script concludes the corresponding logistic model, where the log posterior calculation will be managed by 'logEL' in \pkg{flexEL}. + +```{r logisticStan, echo = FALSE, results = "asis"} +cat("```stan", readLines("logistic_el.stan"), "```", sep = "\n") +``` # Conclusion diff --git a/vignettes/jss/jss-includes.tex b/vignettes/jss/jss-includes.tex index c55e631..56df33d 100644 --- a/vignettes/jss/jss-includes.tex +++ b/vignettes/jss/jss-includes.tex @@ -62,6 +62,7 @@ \newcommand{\N}{\mathcal{N}} \newtheorem{assump}{Assumption} +\DeclareMathOperator{\logit}{logit} diff --git a/vignettes/jss/logEL_opt_stan.hpp b/vignettes/jss/logEL_opt_stan.hpp new file mode 100644 index 0000000..4756d6f --- /dev/null +++ b/vignettes/jss/logEL_opt_stan.hpp @@ -0,0 +1,106 @@ +/// @file logEL_opt_stan.hpp +/// @brief Example of wrapping the `logEL()` and its gradient in Stan. 
+ +#ifndef stanEL_logEL_opt_stan_hpp +#define stanEL_logEL_opt_stan_hpp 1 + +// this line tells the compiler to look for files in the installed flexEL +//[[Rcpp::depends(flexEL)]] +// this lines tells the compiler specifically which file to look for +#include +// link to the stan math library +#include +#include + +template +inline auto logEL_opt(const Eigen::Matrix& G, + const T_m& max_iter, + const T_r& rel_tol, + const T_s& supp_adj, + const T_d& dnc_inf) { + + // import stan names + using stan::math::operands_and_partials; + using stan::partials_return_t; + // create the return type + using T_partials_return = partials_return_t; + // create the gradient wrt Eigen::Matrix type (i.e., stan matrix) + using T_partials_matrix = Eigen::Matrix; + + // Dimensions of input G matrix + // input for flexEL::InnerEL + int n_obs = G.cols(); + int n_eqs = G.rows(); + + // create the return object containing the function and its gradients + operands_and_partials, T_r> + ops_partials(G, rel_tol); + // Evaluate the function and its gradient, + // we'll only evaluate the gradient if necessary. + bool return_dldG = !stan::is_constant_all::value; + T_partials_matrix dldG; + // dummy variable for nonexistent gradient through rel_tol. 
+ T_partials_return dummy_zero = 0; + // only allocate memory if necessary + if(return_dldG) dldG = Eigen::MatrixXd::Zero(n_eqs, n_obs); + double logel; + + // The function object: + flexEL::GenEL IL(n_obs, n_eqs); + //Specified values for options: + int max_iter_ = value_of(max_iter); + double rel_tol_ = value_of(rel_tol); + bool supp_adj_ = value_of(supp_adj) > 0; + bool dnc_inf_ = value_of(dnc_inf) > 0; + IL.set_max_iter(max_iter_); + IL.set_rel_tol(rel_tol_); + IL.set_supp_adj(supp_adj_); + + // wrap input as Eigen::MatrixXd + const auto& G_ = value_of(G); + + // Output: + logel = IL.logel_grad(dldG, G_); + if(dnc_inf_) { + // return -Inf if NR has not converged + int nr_iter; + double nr_err; + IL.get_diag(nr_iter, nr_err); + if((nr_iter >= max_iter_) && (nr_err > rel_tol_)) { + if(return_dldG) { + dldG.setZero(); + ops_partials.edge1_.partials_ = dldG; + } + return ops_partials.build(negative_infinity()); + } + } + if(return_dldG) { + // put the gradient into the return object + ops_partials.edge1_.partials_ = dldG; + } + + if(!stan::is_constant_all::value) { + ops_partials.edge2_.partials_[0] = dummy_zero; + } + + // put the function value into the return object + T_partials_return ans(logel); + return ops_partials.build(ans); +} + +template +typename boost::math::tools::promote_args::type +logEL_opt(const Eigen::Matrix& G, + const int& max_iter, + const T2__& rel_tol, + const int& supp_adj, + const int& dnc_inf, std::ostream* pstream__) { + return logEL_opt(G, max_iter, rel_tol, supp_adj, dnc_inf); +} + +#endif diff --git a/vignettes/jss/logistic_el.stan b/vignettes/jss/logistic_el.stan new file mode 100644 index 0000000..16d50c7 --- /dev/null +++ b/vignettes/jss/logistic_el.stan @@ -0,0 +1,43 @@ +/// @file logistic_el.stan +/// +/// @brief Logistic regression by empirical likelihood +/// using Bartlett moment conditions. + +functions { +#include "include/stanEL.stan" +} + +data { + int n_obs; // Number of observations. + int n_cov; // Number of covariates. 
+ vector[n_obs] y; // Number of successes per area/observation. + vector[n_obs] offset; // Offset term. + matrix[n_obs, n_cov] X; // Common covariate matrix. + real beta_sd; // Common standard deviation for `beta`. + // EL options: + int max_iter; + real rel_tol; + int supp_adj; +} + +parameters { + vector[n_cov] beta_raw; +} +transformed parameters { + vector[n_cov] beta = beta_sd * beta_raw; +} +model { + int dnc_inf = 1; + // G matrix: + vector[n_obs] Xb = X * beta + offset; + vector[n_obs] rho = inv_logit(Xb); + matrix[2, n_obs] G = rep_matrix(0, 2, n_obs); + G[1] = (y - rho)'; + G[2] = ((y - rho) .* (y - rho) ./ (rho .* (1 - rho)) - 1)'; + target += logEL_opt(G, max_iter, rel_tol, supp_adj, dnc_inf); + // prior: + beta_raw ~ std_normal(); +} + + + From ff245cce5f6f5b907b2b4f6be6f459c9dea6555d Mon Sep 17 00:00:00 2001 From: mlysy Date: Tue, 21 Dec 2021 14:04:51 -0500 Subject: [PATCH 4/4] added convergence check to GenEL --- DESCRIPTION | 2 +- R/RcppExports.R | 24 +- R/class_GenEL.R | 121 ++-- inst/include/flexEL/gen_el.h | 8 + man/GenEL.Rd | 25 +- src/GenELExports.cpp | 121 ++-- src/RcppExports.cpp | 52 +- tests/dontrun/api.R | 37 + tests/testthat/el_rfuns.R | 80 ++- tests/testthat/test-GenEL.R | 1276 ++++++++++++++++++---------------- 10 files changed, 995 insertions(+), 751 deletions(-) create mode 100644 tests/dontrun/api.R diff --git a/DESCRIPTION b/DESCRIPTION index 7e38204..c05fa3b 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -11,7 +11,7 @@ Description: Various tools for implementing and calibrating empirical likelihood License: GPL (>= 3) Imports: Rcpp (>= 0.12.13), numDeriv LinkingTo: Rcpp, RcppEigen -RoxygenNote: 7.1.1 +RoxygenNote: 7.1.2 Encoding: UTF-8 Suggests: testthat, MASS, optimCheck, knitr, rmarkdown, RcppEigen VignetteBuilder: knitr diff --git a/R/RcppExports.R b/R/RcppExports.R index 5c21e5b..44d16c6 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -89,28 +89,32 @@ GenEL_get_supp_adj <- function(pGEL) { 
.Call(`_flexEL_GenEL_get_supp_adj`, pGEL) } -GenEL_lambda_nr <- function(pGEL, G, weights, verbose) { - .Call(`_flexEL_GenEL_lambda_nr`, pGEL, G, weights, verbose) +GenEL_get_diag <- function(pGEL) { + .Call(`_flexEL_GenEL_get_diag`, pGEL) +} + +GenEL_lambda_nr <- function(pGEL, G, weights, check_conv) { + .Call(`_flexEL_GenEL_lambda_nr`, pGEL, G, weights, check_conv) } GenEL_omega_hat <- function(pGEL, lambda, G, weights) { .Call(`_flexEL_GenEL_omega_hat`, pGEL, lambda, G, weights) } -GenEL_logel <- function(pGEL, G, verbose) { - .Call(`_flexEL_GenEL_logel`, pGEL, G, verbose) +GenEL_logel <- function(pGEL, G, check_conv) { + .Call(`_flexEL_GenEL_logel`, pGEL, G, check_conv) } -GenEL_weighted_logel <- function(pGEL, G, weights, verbose) { - .Call(`_flexEL_GenEL_weighted_logel`, pGEL, G, weights, verbose) +GenEL_weighted_logel <- function(pGEL, G, weights, check_conv) { + .Call(`_flexEL_GenEL_weighted_logel`, pGEL, G, weights, check_conv) } -GenEL_logel_grad <- function(pGEL, G, verbose) { - .Call(`_flexEL_GenEL_logel_grad`, pGEL, G, verbose) +GenEL_logel_grad <- function(pGEL, G, check_conv) { + .Call(`_flexEL_GenEL_logel_grad`, pGEL, G, check_conv) } -GenEL_weighted_logel_grad <- function(pGEL, G, weights, verbose) { - .Call(`_flexEL_GenEL_weighted_logel_grad`, pGEL, G, weights, verbose) +GenEL_weighted_logel_grad <- function(pGEL, G, weights, check_conv) { + .Call(`_flexEL_GenEL_weighted_logel_grad`, pGEL, G, weights, check_conv) } MeanReg_evalG <- function(y, X, beta) { diff --git a/R/class_GenEL.R b/R/class_GenEL.R index 30b8df9..9b2bf65 100644 --- a/R/class_GenEL.R +++ b/R/class_GenEL.R @@ -23,14 +23,25 @@ GenEL <- R6::R6Class( n_obs <- GenEL_get_n_obs(private$.GEL) n_eqs <- GenEL_get_n_eqs(private$.GEL) if (nrow(G) != n_obs | ncol(G) != n_eqs) { - stop("Dimension of G matrix must be `n_obs` x `n_eqs`.") + stop("Dimension of `G` matrix must be `n_obs` x `n_eqs`.") + } + }, + + #' @description Check the dimension of weights is as expected. 
+ #' @param weights A numeric vector. + check_weights = function(weights) { + if (length(weights) != GenEL_get_n_obs(private$.GEL)) { + stop("Length of `weights` does not equal to the number of obserations.") + } + if (any(weights < 0)) { + stop("`weights` should contain only non-negative values.") } } ), active = list( - #' @field max_iter Access or reset the value of the maximum number of + #' @field max_iter Access or reset the value of the maximum number of #' iterations. The value must be a positive integer (default to 100). max_iter = function(value) { if (missing(value)) private$.max_iter @@ -43,8 +54,8 @@ GenEL <- R6::R6Class( } }, - #' @field rel_tol Access or reset the value of relative tolerance - #' controlling the accuracy at convergence. The value must be a small + #' @field rel_tol Access or reset the value of relative tolerance + #' controlling the accuracy at convergence. The value must be a small #' positive number (default to 1e-7). rel_tol = function(value) { if (missing(value)) private$.rel_tol @@ -57,7 +68,7 @@ GenEL <- R6::R6Class( } }, - #' @field lambda0 Access or reset the initial value of lambda. The value + #' @field lambda0 Access or reset the initial value of lambda. The value #' must be a vector of length `n_eqs` (default to a vector of 0). lambda0 = function(value) { if (missing(value)) private$.lambda0 @@ -71,8 +82,8 @@ GenEL <- R6::R6Class( } }, - #' @field supp_adj Access or reset the support correction flag. The value - #' must be a boolean indicating whether to conduct support correction or + #' @field supp_adj Access or reset the support correction flag. The value + #' must be a boolean indicating whether to conduct support correction or #' not (default to FALSE). supp_adj = function(value) { if (missing(value)) private$.supp_adj @@ -87,7 +98,7 @@ GenEL <- R6::R6Class( } }, - #' @field supp_adj_a Access or reset the value of support corection factor. + #' @field supp_adj_a Access or reset the value of support corection factor. 
#' The value must be a scalar (defaults to `max(1.0, log(n_obs)/2)`). supp_adj_a = function(value) { if (missing(value)) private$.supp_adj_a @@ -103,8 +114,8 @@ GenEL <- R6::R6Class( } }, - #' @field weight_adj Access or reset the value of the weight for the - #' additional fake observation under support correction. The value must + #' @field weight_adj Access or reset the value of the weight for the + #' additional fake observation under support correction. The value must #' be a scalar (default to NULL). weight_adj = function(value) { if (missing(value)) private$.weight_adj @@ -150,99 +161,101 @@ GenEL <- R6::R6Class( #' @param supp_adj A boolean indicating whether to conduct support correction or not. #' @param supp_adj_a Support adjustment factor. Defaults to `max(1.0, log(n_obs)/2)`. #' @param weight_adj Weight under weighted log EL (default to 1.0). - #' @param lambda0 Initialization vector of size `n_eqs`. + #' @param lambda0 Initialization vector of size `n_eqs`. Defaults to a vector of zeros. set_opts = function(max_iter = 100, rel_tol = 1e-7, supp_adj = FALSE, supp_adj_a = NULL, weight_adj = NULL, - lambda0 = rep(0, GenEL_get_n_eqs(private$.GEL))) { + lambda0 = NULL) { self$max_iter <- max_iter self$rel_tol <- rel_tol self$set_supp_adj(supp_adj = supp_adj, supp_adj_a = supp_adj_a, weight_adj = weight_adj) + if(is.null(lambda0)) lambda0 <- rep(0, GenEL_get_n_eqs(private$.GEL)) self$lambda0 <- lambda0 }, #' @description Calculate the solution of the dual problem of maximum log EL problem. #' @param G A numeric matrix of dimension `n_obs x n_eqs`. #' @param weights A numeric vector of length `n_obs` containing non-negative values. - #' @param verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. + #' @param check_conv If `TRUE`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns a vector of `NaN`s. 
#' @return A numeric vector of length `n_eqs`. - lambda_nr = function(G, weights, verbose = FALSE) { + lambda_nr = function(G, weights, check_conv = TRUE) { private$check_G(G) n_obs <- GenEL_get_n_obs(private$.GEL) if (missing(weights) || is.null(weights)) { weights <- rep(1.0, n_obs) } - if (length(weights) != n_obs) { - stop("Length of `weights` does not equal to the number of obserations.") - } - if (any(weights < 0)) { - stop("`weights` should contain only non-negative values.") - } - GenEL_lambda_nr(private$.GEL, t(G), weights, verbose) + private$check_weights(weights) + GenEL_lambda_nr(private$.GEL, t(G), weights, check_conv) }, #' @description Calculate the probability vector base on the given G matrix. #' @param G A numeric matrix of dimension `n_obs x n_eqs`. #' @param weights A numeric vector of length `n_obs` containing non-negative values. - #' @param verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. + #' @param check_conv If `TRUE`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns a vector of `NaN`s. #' @return A probability vector of length `n_obs + supp_adj`. 
- omega_hat = function(G, weights, verbose = FALSE) { + omega_hat = function(G, weights, check_conv = TRUE) { private$check_G(G) n_obs <- GenEL_get_n_obs(private$.GEL) if (missing(weights) || is.null(weights)) { weights <- rep(1.0, n_obs) } - if (length(weights) != n_obs) { - stop("Length of `weights` does not equal to the number of obserations.") - } - if (any(weights < 0)) { - stop("`weights` should contain only non-negative values.") - } - lambda <- self$lambda_nr(G = G, weights = weights, verbose = verbose) + ## if (length(weights) != n_obs) { + ## stop("Length of `weights` does not equal to the number of obserations.") + ## } + ## if (any(weights < 0)) { + ## stop("`weights` should contain only non-negative values.") + ## } + private$check_weights(weights) + lambda <- self$lambda_nr(G = G, weights = weights, + check_conv = check_conv) GenEL_omega_hat(private$.GEL, lambda, t(G), weights) }, - + #' @description Calculate the log empirical likelihood base on the given G matrix. #' @param G A numeric matrix of dimension `n_eqs x n_obs`. #' @param weights A numeric vector of length `n_obs` containing non-negative values. - #' @param verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. + #' @param check_conv If `TRUE`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns `-Inf`. #' @return A scalar. 
- logel = function(G, weights, verbose = FALSE) { + logel = function(G, weights, check_conv = TRUE) { private$check_G(G) if (missing(weights) || is.null(weights)) { - GenEL_logel(private$.GEL, t(G), verbose) + ans <- GenEL_logel(private$.GEL, t(G), check_conv) } else { - n_obs <- GenEL_get_n_obs(private$.GEL) - if (length(weights) != n_obs) { - stop("Length of `weights` does not equal to the number of obserations.") - } - if (any(weights < 0)) { - stop("`weights` should contain only non-negative values.") - } - GenEL_weighted_logel(private$.GEL, t(G), weights, verbose) + ## n_obs <- GenEL_get_n_obs(private$.GEL) + ## if (length(weights) != n_obs) { + ## stop("Length of `weights` does not equal to the number of obserations.") + ## } + ## if (any(weights < 0)) { + ## stop("`weights` should contain only non-negative values.") + ## } + private$check_weights(weights) + GenEL_weighted_logel(private$.GEL, t(G), weights, check_conv) } }, - + #' @description Calculate log EL and the derivative of log EL w.r.t. G evaluated at G. #' @param G A numeric matrix of dimension `n_obs x n_eqs`. #' @param weights A numeric vector of length `n_obs` containing non-negative values. - #' @param verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. + #' @param check_conv If `TRUE`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, sets the log EL to negative infinity and the gradient to a matrix of `NaN`s. #' @return A list of two elements. 
- logel_grad = function(G, weights, verbose = FALSE) { + logel_grad = function(G, weights, check_conv = TRUE) { private$check_G(G) if (missing(weights) || is.null(weights)) { - GenEL_logel_grad(private$.GEL, t(G), verbose) + ans <- GenEL_logel_grad(private$.GEL, t(G), check_conv) } else { - n_obs <- GenEL_get_n_obs(private$.GEL) - if (length(weights) != n_obs) { - stop("Length of `weights` does not equal to the number of obserations.") - } - if (any(weights < 0)) { - stop("`weights` should contain only non-negative values.") - } - GenEL_weighted_logel_grad(private$.GEL, t(G), weights, verbose) + ## n_obs <- GenEL_get_n_obs(private$.GEL) + ## if (length(weights) != n_obs) { + ## stop("Length of `weights` does not equal to the number of obserations.") + ## } + ## if (any(weights < 0)) { + ## stop("`weights` should contain only non-negative values.") + ## } + private$check_weights(weights) + ans <- GenEL_weighted_logel_grad(private$.GEL, t(G), weights, check_conv) } + ans$dldG <- t(ans$dldG) + ans } ) ) diff --git a/inst/include/flexEL/gen_el.h b/inst/include/flexEL/gen_el.h index 3de6ab5..3e1c723 100644 --- a/inst/include/flexEL/gen_el.h +++ b/inst/include/flexEL/gen_el.h @@ -126,6 +126,8 @@ namespace flexEL { const Ref& G, const Ref& weights); // const Ref& norm_weights); + /// Check whether Newton-Raphson algorithm has converged. + bool has_converged_nr(); /// Calculate the profile probability weights. void omega_hat(Ref omega, const Ref& lambda, @@ -249,6 +251,12 @@ namespace flexEL { return; } + /// @return Whether the desired error tolerance `rel_tol` has been reached in less than `max_iter` Newton-Raphson steps. + /// @warning Assumes that lambda_nr has been run at least once. + inline bool GenEL::has_converged_nr() { + return (nr_err_ > rel_tol_) && (nr_iter_ == max_iter_); + } + /// @param[in] G Moment matrix of size `n_eqs x n_obs` or `n_eqs x (n_obs + 1)`. If `supp_adj = false`, the former is required. 
If `supp_adj = true` and the former is provided, support adjustment is performed. If `supp_adj = true` and `G.cols() == n_obs + 1`, assumes that support has already been corrected. inline Ref GenEL::supp_G(const Ref& G) { if(supp_adj_ && G.cols() == n_obs_) { diff --git a/man/GenEL.Rd b/man/GenEL.Rd index c12933f..f8eeced 100644 --- a/man/GenEL.Rd +++ b/man/GenEL.Rd @@ -52,6 +52,9 @@ be a scalar (default to NULL).} Check the dimension of G is what is expected. +Check the dimension of weights is as expected. + + Create a new \code{GenEL} object. \subsection{Usage}{ \if{html}{\out{
}}\preformatted{GenEL$new(n_obs, n_eqs)}\if{html}{\out{
}} @@ -65,6 +68,8 @@ Create a new \code{GenEL} object. \item{\code{n_eqs}}{Number of (moment constraint) equations.} \item{\code{G}}{A numeric matrix.} + +\item{\code{weights}}{A numeric vector.} } \if{html}{\out{}} } @@ -105,7 +110,7 @@ Set more than one options together. supp_adj = FALSE, supp_adj_a = NULL, weight_adj = NULL, - lambda0 = rep(0, GenEL_get_n_eqs(private$.GEL)) + lambda0 = NULL )}\if{html}{\out{}} } @@ -122,7 +127,7 @@ Set more than one options together. \item{\code{weight_adj}}{Weight under weighted log EL (default to 1.0).} -\item{\code{lambda0}}{Initialization vector of size \code{n_eqs}.} +\item{\code{lambda0}}{Initialization vector of size \code{n_eqs}. Defaults to a vector of zeros.} } \if{html}{\out{}} } @@ -133,7 +138,7 @@ Set more than one options together. \subsection{Method \code{lambda_nr()}}{ Calculate the solution of the dual problem of maximum log EL problem. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{GenEL$lambda_nr(G, weights, verbose = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{GenEL$lambda_nr(G, weights, check_conv = TRUE)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -143,7 +148,7 @@ Calculate the solution of the dual problem of maximum log EL problem. \item{\code{weights}}{A numeric vector of length \code{n_obs} containing non-negative values.} -\item{\code{verbose}}{A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm.} +\item{\code{check_conv}}{If \code{TRUE}, checks whether the desired \code{rel_tol} was reached within the given maximum number of Newton-Raphson iterations. If not, returns a vector of \code{NaN}s.} } \if{html}{\out{}} } @@ -157,7 +162,7 @@ A numeric vector of length \code{n_eqs}. \subsection{Method \code{omega_hat()}}{ Calculate the probability vector base on the given G matrix. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{GenEL$omega_hat(G, weights, verbose = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{GenEL$omega_hat(G, weights, check_conv = TRUE)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -167,7 +172,7 @@ Calculate the probability vector base on the given G matrix. \item{\code{weights}}{A numeric vector of length \code{n_obs} containing non-negative values.} -\item{\code{verbose}}{A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm.} +\item{\code{check_conv}}{If \code{TRUE}, checks whether the desired \code{rel_tol} was reached within the given maximum number of Newton-Raphson iterations. If not, returns a vector of \code{NaN}s.} } \if{html}{\out{}} } @@ -181,7 +186,7 @@ A probability vector of length \code{n_obs + supp_adj}. \subsection{Method \code{logel()}}{ Calculate the log empirical likelihood base on the given G matrix. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{GenEL$logel(G, weights, verbose = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{GenEL$logel(G, weights, check_conv = TRUE)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -191,7 +196,7 @@ Calculate the log empirical likelihood base on the given G matrix. \item{\code{weights}}{A numeric vector of length \code{n_obs} containing non-negative values.} -\item{\code{verbose}}{A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm.} +\item{\code{check_conv}}{If \code{TRUE}, checks whether the desired \code{rel_tol} was reached within the given maximum number of Newton-Raphson iterations. If not, returns \code{-Inf}.} } \if{html}{\out{}} } @@ -205,7 +210,7 @@ A scalar. \subsection{Method \code{logel_grad()}}{ Calculate log EL and the derivative of log EL w.r.t. G evaluated at G. \subsection{Usage}{ -\if{html}{\out{
}}\preformatted{GenEL$logel_grad(G, weights, verbose = FALSE)}\if{html}{\out{
}} +\if{html}{\out{
}}\preformatted{GenEL$logel_grad(G, weights, check_conv = TRUE)}\if{html}{\out{
}} } \subsection{Arguments}{ @@ -215,7 +220,7 @@ Calculate log EL and the derivative of log EL w.r.t. G evaluated at G. \item{\code{weights}}{A numeric vector of length \code{n_obs} containing non-negative values.} -\item{\code{verbose}}{A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm.} +\item{\code{check_conv}}{If \code{TRUE}, checks whether the desired \code{rel_tol} was reached within the given maximum number of Newton-Raphson iterations. If not, sets the log EL to negative infinity and the gradient to a matrix of \code{NaN}s.} } \if{html}{\out{}} } diff --git a/src/GenELExports.cpp b/src/GenELExports.cpp index a74f3ec..4198c8c 100644 --- a/src/GenELExports.cpp +++ b/src/GenELExports.cpp @@ -121,17 +121,36 @@ bool GenEL_get_supp_adj(SEXP pGEL) { return supp_adj; } +/// Get diagnostics for the last Newton-Raphson run. +/// +/// @param[in] pGEL `externalptr` pointer to GenEL object. +/// +/// @return A list with elements `n_iter` and `max_err` giving the number of Newton-Raphson iterations and the maximum componentwise (relative) error between the last to iterations of `lambda`. +/// +// [[Rcpp::export]] +Rcpp::List GenEL_get_diag(SEXP pGEL) { + Rcpp::XPtr GEL(pGEL); + int n_iter = 0; + double max_err = 0; + GEL->get_diag(n_iter, max_err); + return Rcpp::List::create(Rcpp::Named("n_iter") = n_iter, + Rcpp::Named("max_err") = max_err); +} + /// Solve the dual problem via Newton-Raphson algorithm. /// /// @param[in] pGEL `externalptr` pointer to GenEL object. /// @param[in] G Moment matrix of size `n_eqs x n_obs` or `n_eqs x (n_obs + supp_adj)`. If `supp_adj = false`, the former is required. If `supp_adj = true` and the former is provided, support adjustment is performed. If `supp_adj = true` and `G.cols() == n_obs + 1`, assumes that support has already been corrected. 
/// @param[in] verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. -/// +/// @param[in] check_conv If `true`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns a vector of `NaN`s. +/// +/// @return A vector of length `n_eqs` corresponding to the last step of the Newton-Raphson algorithm. +/// // [[Rcpp::export]] Eigen::VectorXd GenEL_lambda_nr(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, - bool verbose) { + bool check_conv) { Rcpp::XPtr GEL(pGEL); // bool supp_adj = GEL->get_supp_adj(); // int n_obs = G.cols(); @@ -140,28 +159,32 @@ Eigen::VectorXd GenEL_lambda_nr(SEXP pGEL, // Eigen::VectorXd norm_weights = Eigen::VectorXd::Constant(n_obs+supp_adj, 1.0/(n_obs+supp_adj)); // Eigen::VectorXd norm_weights = weights/weights.sum(); GEL->lambda_nr(lambda, G, weights); - int n_iter; - double max_err; - bool not_conv; - GEL->get_diag(n_iter, max_err); - not_conv = (n_iter == GEL->get_max_iter()) && (max_err > GEL->get_rel_tol()); - if(verbose) { - Rprintf("n_iter = %i, max_err = %f\n", n_iter, max_err); - } + bool not_conv = check_conv ? GEL->has_converged_nr() : false; + // bool not_conv = false; + // if(check_conv) { + // int n_iter; + // double max_err; + // GEL->get_diag(n_iter, max_err); + // not_conv = (n_iter == GEL->get_max_iter()) && + // (max_err > GEL->get_rel_tol()); + // } if (not_conv) { - for (int ii=0; ii < lambda.size(); ii++) { - lambda(ii) = std::numeric_limits::quiet_NaN(); - } + lambda.setConstant(std::numeric_limits::quiet_NaN()); + // for (int ii=0; ii < lambda.size(); ii++) { + // lambda(ii) = std::numeric_limits::quiet_NaN(); + // } } return lambda; } -/// Calculate the probability vector base on the given G matrix. +/// Calculate the probability vector based on the given `G` matrix. /// /// @param[in] pGEL `externalptr` pointer to GenEL object. 
/// @param[in] lambda Dual problem vector of size `n_eqs`. /// @param[in] G Moment matrix of size `n_eqs x n_obs` or `n_eqs x (n_obs + supp_adj)`. If `supp_adj = false`, the former is required. If `supp_adj = true` and the former is provided, support adjustment is performed. If `supp_adj = true` and `G.cols() == n_obs + 1`, assumes that support has already been corrected. /// +/// @return A vector of length `n_obs` or `n_obs+1` (depending on the value of `supp_adj`) giving the probability vector `omega_hat`. +/// // [[Rcpp::export]] Eigen::VectorXd GenEL_omega_hat(SEXP pGEL, Eigen::VectorXd lambda, @@ -177,22 +200,23 @@ Eigen::VectorXd GenEL_omega_hat(SEXP pGEL, return omega; } -/// Calculate the log empirical likelihood base on the given G matrix. +/// Calculate the log empirical likelihood base on the given `G` matrix. /// /// @param[in] pGEL `externalptr` pointer to GenEL object. -/// @param[in] omega Probability vector of length `n_obs + supp_adj`. -/// +/// @param[in] G Moment matrix of size `n_eqs x n_obs` or `n_eqs x (n_obs + supp_adj)`. If `supp_adj = false`, the former is required. If `supp_adj = true` and the former is provided, support adjustment is performed. If `supp_adj = true` and `G.cols() == n_obs + 1`, assumes that support has already been corrected. +/// @param[in] check_conv If `true`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns negative infinity. +/// +/// @return Scalar value of the log empirical likelihood function. +/// // [[Rcpp::export]] double GenEL_logel(SEXP pGEL, Eigen::MatrixXd G, - bool verbose) { + bool check_conv) { Rcpp::XPtr GEL(pGEL); double log_el = GEL->logel(G); - if(verbose) { - int n_iter; - double max_err; - GEL->get_diag(n_iter, max_err); - Rprintf("n_iter = %i, max_err = %f\n", n_iter, max_err); + bool not_conv = check_conv ? 
GEL->has_converged_nr() : false; + if(not_conv) { + log_el = -std::numeric_limits::infinity(); } return log_el; } @@ -202,71 +226,74 @@ double GenEL_logel(SEXP pGEL, /// @param[in] pGEL `externalptr` pointer to GenEL object. /// @param[in] G Moment matrix of size `n_eqs x (n_obs + supp_adj)`. /// @param[in] weights Weight vector of length `n_obs`. +/// @param[in] check_conv If `true`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, returns negative infinity. +/// +/// @return Scalar value of the log empirical likelihood function. /// // [[Rcpp::export]] double GenEL_weighted_logel(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, - bool verbose) { + bool check_conv) { Rcpp::XPtr GEL(pGEL); double log_el = GEL->logel(G, weights); - if(verbose) { - int n_iter; - double max_err; - GEL->get_diag(n_iter, max_err); - Rprintf("n_iter = %i, max_err = %f\n", n_iter, max_err); + bool not_conv = check_conv ? GEL->has_converged_nr() : false; + if(not_conv) { + log_el = -std::numeric_limits::infinity(); } return log_el; } -/// Calculate the probability vector, log EL, and the derivative of log EL w.r.t. G evaluated at G. +/// Calculate log EL and its gradient with respect to `G`. /// /// @param[in] pGEL `externalptr` pointer to GenEL object. /// @param[in] G Moment matrix of size `n_eqs x n_obs`. -/// @param[in] verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. +/// @param[in] check_conv If `true`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, sets the log EL to negative infinity and the gradient to a matrix of `NaN`s. +/// +/// @return A list with elements `log_el` and `dldG` corresponding to the log EL and its gradient (a matrix of size `n_eqs x n_obs`). 
/// // [[Rcpp::export]] -Rcpp::List GenEL_logel_grad(SEXP pGEL, Eigen::MatrixXd G, bool verbose) { +Rcpp::List GenEL_logel_grad(SEXP pGEL, Eigen::MatrixXd G, bool check_conv) { Rcpp::XPtr GEL(pGEL); int n_eqs = GEL->get_n_eqs(); int n_obs = GEL->get_n_obs(); Eigen::MatrixXd dldG(n_eqs, n_obs); double log_el = GEL->logel_grad(dldG, G); - if (verbose) { - int n_iter; - double max_err; - GEL->get_diag(n_iter, max_err); - Rprintf("n_iter = %i, max_err = %f\n", n_iter, max_err); + bool not_conv = check_conv ? GEL->has_converged_nr() : false; + if(not_conv) { + log_el = -std::numeric_limits::infinity(); + dldG.setConstant(std::numeric_limits::quiet_NaN()); } return Rcpp::List::create(Rcpp::Named("logel") = log_el, - Rcpp::Named("dldG") = dldG.transpose()); + Rcpp::Named("dldG") = dldG); } -/// Calculate the probability vector, log EL, and the derivative of log EL w.r.t. G evaluated at G. +/// Calculate the weighted log EL and its gradient with respect to `G`. /// /// @param[in] pGEL `externalptr` pointer to GenEL object. /// @param[in] G Moment matrix of size `n_eqs x n_obs`. /// @param[in] weights Weight vector of length `n_obs`. -/// @param[in] verbose A boolean indicating whether to print out number of iterations and maximum error at the end of the Newton-Raphson algorithm. +/// @param[in] check_conv If `true`, checks whether the desired `rel_tol` was reached within the given maximum number of Newton-Raphson iterations. If not, sets the log EL to negative infinity and the gradient to a matrix of `NaN`s. +/// +/// @return A list with elements `log_el` and `dldG` corresponding to the log EL and its gradient (a matrix of size `n_eqs x n_obs`). 
/// // [[Rcpp::export]] Rcpp::List GenEL_weighted_logel_grad(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, - bool verbose) { + bool check_conv) { Rcpp::XPtr GEL(pGEL); int n_eqs = GEL->get_n_eqs(); int n_obs = GEL->get_n_obs(); Eigen::MatrixXd dldG(n_eqs, n_obs); double log_el = GEL->logel_grad(dldG, G, weights); - if (verbose) { - int n_iter; - double max_err; - GEL->get_diag(n_iter, max_err); - Rprintf("n_iter = %i, max_err = %f\n", n_iter, max_err); + bool not_conv = check_conv ? GEL->has_converged_nr() : false; + if(not_conv) { + log_el = -std::numeric_limits::infinity(); + dldG.setConstant(std::numeric_limits::quiet_NaN()); } return Rcpp::List::create(Rcpp::Named("logel") = log_el, - Rcpp::Named("dldG") = dldG.transpose()); + Rcpp::Named("dldG") = dldG); } // /// Calculate the probability vector, log EL, and the derivative of log EL w.r.t. G evaluated at G. diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp index 3d5db5e..b720077 100644 --- a/src/RcppExports.cpp +++ b/src/RcppExports.cpp @@ -268,17 +268,28 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// GenEL_get_diag +Rcpp::List GenEL_get_diag(SEXP pGEL); +RcppExport SEXP _flexEL_GenEL_get_diag(SEXP pGELSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< SEXP >::type pGEL(pGELSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_get_diag(pGEL)); + return rcpp_result_gen; +END_RCPP +} // GenEL_lambda_nr -Eigen::VectorXd GenEL_lambda_nr(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool verbose); -RcppExport SEXP _flexEL_GenEL_lambda_nr(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP verboseSEXP) { +Eigen::VectorXd GenEL_lambda_nr(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool check_conv); +RcppExport SEXP _flexEL_GenEL_lambda_nr(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP check_convSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP 
>::type pGEL(pGELSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type G(GSEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type weights(weightsSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - rcpp_result_gen = Rcpp::wrap(GenEL_lambda_nr(pGEL, G, weights, verbose)); + Rcpp::traits::input_parameter< bool >::type check_conv(check_convSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_lambda_nr(pGEL, G, weights, check_conv)); return rcpp_result_gen; END_RCPP } @@ -297,56 +308,56 @@ BEGIN_RCPP END_RCPP } // GenEL_logel -double GenEL_logel(SEXP pGEL, Eigen::MatrixXd G, bool verbose); -RcppExport SEXP _flexEL_GenEL_logel(SEXP pGELSEXP, SEXP GSEXP, SEXP verboseSEXP) { +double GenEL_logel(SEXP pGEL, Eigen::MatrixXd G, bool check_conv); +RcppExport SEXP _flexEL_GenEL_logel(SEXP pGELSEXP, SEXP GSEXP, SEXP check_convSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type pGEL(pGELSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type G(GSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - rcpp_result_gen = Rcpp::wrap(GenEL_logel(pGEL, G, verbose)); + Rcpp::traits::input_parameter< bool >::type check_conv(check_convSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_logel(pGEL, G, check_conv)); return rcpp_result_gen; END_RCPP } // GenEL_weighted_logel -double GenEL_weighted_logel(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool verbose); -RcppExport SEXP _flexEL_GenEL_weighted_logel(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP verboseSEXP) { +double GenEL_weighted_logel(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool check_conv); +RcppExport SEXP _flexEL_GenEL_weighted_logel(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP check_convSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type pGEL(pGELSEXP); Rcpp::traits::input_parameter< 
Eigen::MatrixXd >::type G(GSEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type weights(weightsSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - rcpp_result_gen = Rcpp::wrap(GenEL_weighted_logel(pGEL, G, weights, verbose)); + Rcpp::traits::input_parameter< bool >::type check_conv(check_convSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_weighted_logel(pGEL, G, weights, check_conv)); return rcpp_result_gen; END_RCPP } // GenEL_logel_grad -Rcpp::List GenEL_logel_grad(SEXP pGEL, Eigen::MatrixXd G, bool verbose); -RcppExport SEXP _flexEL_GenEL_logel_grad(SEXP pGELSEXP, SEXP GSEXP, SEXP verboseSEXP) { +Rcpp::List GenEL_logel_grad(SEXP pGEL, Eigen::MatrixXd G, bool check_conv); +RcppExport SEXP _flexEL_GenEL_logel_grad(SEXP pGELSEXP, SEXP GSEXP, SEXP check_convSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type pGEL(pGELSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type G(GSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - rcpp_result_gen = Rcpp::wrap(GenEL_logel_grad(pGEL, G, verbose)); + Rcpp::traits::input_parameter< bool >::type check_conv(check_convSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_logel_grad(pGEL, G, check_conv)); return rcpp_result_gen; END_RCPP } // GenEL_weighted_logel_grad -Rcpp::List GenEL_weighted_logel_grad(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool verbose); -RcppExport SEXP _flexEL_GenEL_weighted_logel_grad(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP verboseSEXP) { +Rcpp::List GenEL_weighted_logel_grad(SEXP pGEL, Eigen::MatrixXd G, Eigen::VectorXd weights, bool check_conv); +RcppExport SEXP _flexEL_GenEL_weighted_logel_grad(SEXP pGELSEXP, SEXP GSEXP, SEXP weightsSEXP, SEXP check_convSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type pGEL(pGELSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type 
G(GSEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type weights(weightsSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - rcpp_result_gen = Rcpp::wrap(GenEL_weighted_logel_grad(pGEL, G, weights, verbose)); + Rcpp::traits::input_parameter< bool >::type check_conv(check_convSEXP); + rcpp_result_gen = Rcpp::wrap(GenEL_weighted_logel_grad(pGEL, G, weights, check_conv)); return rcpp_result_gen; END_RCPP } @@ -525,6 +536,7 @@ static const R_CallMethodDef CallEntries[] = { {"_flexEL_GenEL_get_n_obs", (DL_FUNC) &_flexEL_GenEL_get_n_obs, 1}, {"_flexEL_GenEL_get_n_eqs", (DL_FUNC) &_flexEL_GenEL_get_n_eqs, 1}, {"_flexEL_GenEL_get_supp_adj", (DL_FUNC) &_flexEL_GenEL_get_supp_adj, 1}, + {"_flexEL_GenEL_get_diag", (DL_FUNC) &_flexEL_GenEL_get_diag, 1}, {"_flexEL_GenEL_lambda_nr", (DL_FUNC) &_flexEL_GenEL_lambda_nr, 4}, {"_flexEL_GenEL_omega_hat", (DL_FUNC) &_flexEL_GenEL_omega_hat, 4}, {"_flexEL_GenEL_logel", (DL_FUNC) &_flexEL_GenEL_logel, 3}, diff --git a/tests/dontrun/api.R b/tests/dontrun/api.R new file mode 100644 index 0000000..a8ac206 --- /dev/null +++ b/tests/dontrun/api.R @@ -0,0 +1,37 @@ +#--- API examples for flexEL --------------------------------------------------- + +#' Function to calculate `G` for EL regression. +#' +mrG <- function(beta, y, X) { + # code goes here +} + + +#--- Basic API ----------------------------------------------------------------- + +el <- GenEL$new(n_obs, n_eq) +mr_el <- function(beta) { + el$logel(mrG(beta, y, X)) +} + +# can do something similar for G and dG/dbeta. 
+ +#--- Slightly less basic, many for multiple usage ------------------------------ + +MREL <- R6Class( + classname = "MREL", + inherit = "GenEL", + public = list( + initialize = function(X, y) + ) + ) + +#--- desired API for mean regression ------------------------------------------- + +el <- MREL$new(y, X, Z = NULL, delta = NULL, el_opts = NULL) + +to_theta = function(beta, gamma = NULL) {} + +to_coef = function(theta) { + # returns either beta or a list with elements beta and gamma +} diff --git a/tests/testthat/el_rfuns.R b/tests/testthat/el_rfuns.R index 6735bb5..53dd670 100644 --- a/tests/testthat/el_rfuns.R +++ b/tests/testthat/el_rfuns.R @@ -1,5 +1,71 @@ # ---- helper functions ---- +# wrappers to R versions having the same interface as C++ + +lambda_nr_R <- function(G, max_iter = 100, rel_tol = 1e-7, + supp_adj = FALSE, supp_adj_a, weights = NULL) { + if(supp_adj) { + G <- adjG_R(G, an = supp_adj_a) + } + if(is.null(weights)) { + ans <- lambdaNR_R(G, max_iter = max_iter, rel_tol = rel_tol) + } else { + if(supp_adj) weights <- c(weights, 1) + weights <- weights/sum(weights) + ans <- lambdaNRC_R(G, weights = weights, + max_iter = max_iter, rel_tol = rel_tol) + } + ans +} + +omega_hat_R <- function(G, max_iter = 100, rel_tol = 1e-7, + supp_adj = FALSE, supp_adj_a, weights = NULL) { + if(supp_adj) { + G <- adjG_R(G, an = supp_adj_a) + } + if(is.null(weights)) { + ans <- omega_hat_NC_R(G, max_iter = max_iter, rel_tol = rel_tol) + } else { + if(supp_adj) weights <- c(weights, 1) + weights <- weights/sum(weights) + ans <- weighted_omega_hat_R(G, weights = weights, + max_iter = max_iter, rel_tol = rel_tol) + } + ans +} + +logel_R <- function(G, max_iter = 100, rel_tol = 1e-7, + supp_adj = FALSE, supp_adj_a, weights = NULL) { + if(supp_adj) { + G <- adjG_R(G, an = supp_adj_a) + } + if(is.null(weights)) { + omega_hat <- omega_hat_NC_R(G, max_iter = max_iter, rel_tol = rel_tol) + lel <- if(anyNA(omega_hat)) -Inf else sum(log(omega_hat)) + } else { + if(supp_adj) 
weights <- c(weights, 1) + lel <- weighted_logEL_G_R(G, weights = weights, + max_iter = max_iter, rel_tol = rel_tol) + lel <- as.numeric(lel) + } + lel +} + +logel_grad_R <- function(G, max_iter = 100, rel_tol = 1e-7, + supp_adj = FALSE, supp_adj_a, weights = NULL) { + n <- nrow(G) + p <- ncol(G) + fun <- function(G) { + logel_R(G, weights = weights, + max_iter = max_iter, rel_tol = rel_tol, + supp_adj = supp_adj, supp_adj_a = supp_adj_a) + } + dldG <- tryCatch(numDeriv::grad(fun, G), + error = function(e) rep(NaN, n*p)) + matrix(dldG, nrow = n, ncol = p) +} + + # maximum relative error max_rel_err <- function(lambdaNew, lambdaOld) { # Note: for stability (using the same tolerance as in C++ implementation) @@ -112,7 +178,7 @@ omega_hat_NC_R <- function(G, max_iter = 100, rel_tol = 1e-7, verbose = FALSE) { } weighted_omega_hat_R <- function(G, weights, adjust = FALSE, - max_iter = 100, rel_tol = 1e-7, abs_tol = 1e-3, + max_iter = 100, rel_tol = 1e-7, abs_tol = 1e-3, verbose = FALSE) { lambdaOut <- lambdaNRC_R(G = G, weights = weights, max_iter, rel_tol, verbose) conv <- lambdaOut$convergence # 1 if converged @@ -286,7 +352,7 @@ omega_hat_EM_R <- function(G, deltas, epsilons, adjust = FALSE, # omegasOld <- omegas logelOld <- logEL_R(omegas, epsilons, deltas) # message("logelOld = ", logelOld) - + # weights <- evalWeights_R(deltas, omegas, epsilons) # weights <- weights/sum(weights) # print(weights/sum(weights)) @@ -416,7 +482,7 @@ logEL_smooth_R <- function(omegas, epsilons, deltas, s = 10, adjust=FALSE) { } # numerical stability: watch out for extremely small negative values omegas[abs(omegas) < 1e-10/length(omegas)] <- 1e-10 - + weights <- evalWeights_smooth_R(deltas, omegas, epsilons, s, adjust) return(sum(weights*log(omegas))) # return(sum(deltas*log(omegas)+(1-deltas)*log(psos))) @@ -554,7 +620,7 @@ omega_hat_EM_smooth_R <- function(G, deltas, epsilons, s=10, adjust = FALSE, # ---- wrapper functions ---- # wrapper function for censor and non-censor omega_hat 
-omega_hat_R <- function(G, deltas, epsilons, adjust = FALSE, +omega_hat_cens_R <- function(G, deltas, epsilons, adjust = FALSE, max_iter = 100, rel_tol = 1e-7, abs_tol = 1e-3, verbose = FALSE) { if (missing(deltas) && missing(epsilons)) { omegas <- omega_hat_NC_R(G, max_iter=max_iter, rel_tol=rel_tol, verbose=verbose) @@ -586,7 +652,7 @@ logEL_R <- function(omegas, epsilons, deltas, adjust=FALSE) { } # numerical stability: watch out for extremely small negative values omegas[abs(omegas) < 1e-10/length(omegas)] <- 1e-10 - + weights <- evalWeights_R(deltas, omegas, epsilons) # print(weights) # message("new weights logel: ", sum(weights*log(omegas))) @@ -613,7 +679,7 @@ weighted_logEL_G_R <- function(G, weights, max_iter = 100, rel_tol = 1e-7) { sum_weights <- sum(weights) norm_weights <- weights/sum_weights lambda_lst <- lambdaNRC_R(G, norm_weights, max_iter, rel_tol) - if (!lambda_lst$convergence) return(NA) + if (!lambda_lst$convergence) return(-Inf) else { lambda <- lambda_lst$lambda omega <- c(norm_weights/(1 - c(G %*% lambda))) @@ -624,7 +690,7 @@ weighted_logEL_G_R <- function(G, weights, max_iter = 100, rel_tol = 1e-7) { } } -logEL_adjG_R <- function(G, max_iter = 100, rel_tol = 1e-7, +logEL_adjG_R <- function(G, max_iter = 100, rel_tol = 1e-7, an = max(1, 0.5*log(nrow(G)))) { n <- nrow(G) # lambda <- flexEL::lambdaNR(G = G, max_iter = max_iter, rel_tol = rel_tol, support = TRUE) diff --git a/tests/testthat/test-GenEL.R b/tests/testthat/test-GenEL.R index 8ab102e..859cef0 100644 --- a/tests/testthat/test-GenEL.R +++ b/tests/testthat/test-GenEL.R @@ -4,612 +4,684 @@ context("GenEL") source("el_rfuns.R") -ntest <- 3 - -# ---- lambda_nr ----- - -# nconv <- 0 -test_that("lambda_nr with default options", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - lambda_cpp <- gel$lambda_nr(G) - # lambda_cpp - lambda_R_lst <- lambdaNR_R(G) - # lambda_R_lst$lambda - if 
(!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# nconv <- 0 -test_that("lambda_nr with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - lambda_cpp <- gel$lambda_nr(G, verbose = FALSE) - lambda_R_lst <- lambdaNR_R(G, max_iter = max_iter, rel_tol = rel_tol) - if (!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# nconv <- 0 -test_that("lambda_nr with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - lambda_cpp <- gel$lambda_nr(G, verbose = FALSE) - lambda_cpp - lambda_R_lst <- lambdaNR_R(adjG_R(G, adj_a), max_iter = max_iter, rel_tol = rel_tol) - lambda_R_lst$lambda - if (!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# ---- lambda_nr with weights ---- - -# nconv <- 0 -test_that("weighted lambda_nr with default options", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - lambda_cpp <- gel$lambda_nr(G, weights) - # lambda_cpp - lambda_R_lst <- 
lambdaNRC_R(G, weights/sum(weights)) - # lambda_R_lst$lambda - if (!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# nconv <- 0 -test_that("weighted lambda_nr with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(200, 300, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - lambda_cpp <- gel$lambda_nr(G, weights, verbose = FALSE) - # lambda_cpp - lambda_R_lst <- lambdaNRC_R(G, weights/sum(weights), - max_iter = max_iter, rel_tol = rel_tol) - # lambda_R_lst$lambda - if (!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# nconv <- 0 -test_that("weighted lambda_nr with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(200, 300, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - lambda_cpp <- gel$lambda_nr(G, weights, verbose = FALSE) - # lambda_cpp - weights_R <- c(weights, 1) - lambda_R_lst <- lambdaNRC_R(adjG_R(G, adj_a), weights_R/sum(weights_R), - max_iter = max_iter, rel_tol = rel_tol) - # lambda_R_lst$lambda - if (!lambda_R_lst$convergence) { - expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) - } - else { - # nconv <<- nconv + 1 - expect_equal(lambda_cpp, lambda_R_lst$lambda) - } - } -}) - -# ---- omega_hat ---- - -# nconv <- 0 -test_that("omega_hat 
with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - omega_cpp <- gel$omega_hat(G) - omega_R <- omega_hat_R(G) - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# nconv <- 0 -test_that("omega_hat with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - omega_cpp <- gel$omega_hat(G) - omega_R <- omega_hat_R(G, max_iter = max_iter, rel_tol = rel_tol) - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# nconv <- 0 -test_that("omega_hat with default settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - omega_cpp <- gel$omega_hat(G) - omega_R <- omega_hat_R(adjG_R(G, adj_a), adjust = TRUE, - max_iter = max_iter, rel_tol = rel_tol) - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# ---- omega_hat with weights ---- - -# nconv <- 0 -test_that("weighted omega_hat with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- 
matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - weights <- weights/sum(weights) - omega_cpp <- gel$omega_hat(G, weights) - # omega_cpp - omega_R <- weighted_omega_hat_R(G, weights) - # omega_R - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# nconv <- 0 -test_that("weighted omega_hat with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - omega_cpp <- gel$omega_hat(G, weights) - # omega_cpp - omega_R <- weighted_omega_hat_R(G, weights/sum(weights), - max_iter = max_iter, rel_tol = rel_tol) - # omega_R - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R, tolerance = 1e-4) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# nconv <- 0 -test_that("weighted omega_hat with default settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - adj_a <- runif(1, 1, 5) +test_cases <- expand.grid( + set_opts = c(FALSE, TRUE), + supp_adj = c(FALSE, TRUE), + weights = c(FALSE, TRUE), + method = c("lambda_nr", "omega_hat", "logel", "logel_grad") +) +ntest <- nrow(test_cases) + +test_that("R and C++ implementations of methods are the same.", { + for(ii in 1:ntest) { + # setup + n <- sample(20:50,1) + p <- sample(1:3, 1) gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) G <- matrix(rnorm(n*p),n,p) weights <- runif(n, 0, 2) - omega_cpp <- gel$omega_hat(G, weights) - # omega_cpp - 
weights_R <- c(weights, 1) - omega_R <- weighted_omega_hat_R(adjG_R(G, adj_a), weights_R/sum(weights_R), - adjust = TRUE, - max_iter = max_iter, rel_tol = rel_tol) - # omega_R - if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { - # nconv <<- nconv + 1 - expect_equal(omega_cpp, omega_R, tolerance = 1e-4) - } - else { - expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) - } - } -}) - -# ---- logel ---- - -# converged result -check_res <- function(x) { - all(is.finite(x) & !is.na(x)) -} - -# nconv <- 0 -test_that("logel with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - logel_cpp <- gel$logel(G) - omegahat_R <- omega_hat_R(G) - logel_R <- logEL_R(omegahat_R) - if (check_res(logel_R) & check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, logel_R) - } - } -}) - -# nconv <- 0 -test_that("logel with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - logel_cpp <- gel$logel(G) - # logel_cpp - omegahat_R <- omega_hat_R(G, max_iter = max_iter, rel_tol = rel_tol) - logel_R <- logEL_R(omegahat_R) - # logel_R - if (check_res(logel_R) & check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, logel_R) - } - } -}) - -# nconv <- 0 -test_that("logel with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - logel_cpp <- gel$logel(G) - omegahat_R <- omega_hat_R(adjG_R(G, adj_a), 
max_iter = max_iter, rel_tol = rel_tol) - logel_R <- logEL_R(omegahat_R, adjust = TRUE) - if (check_res(logel_R) & check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, logel_R) - } - } -}) - -# ---- logel with weights ---- - -# nconv <- 0 -test_that("weighted logel with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - # weights <- weights/sum(weights) - logel_cpp <- gel$logel(G, weights) - # logel_cpp - logel_R <- weighted_logEL_G_R(G, weights) - # as.numeric(logel_R) - if (check_res(logel_R) & check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, as.numeric(logel_R)) - } - } -}) - -# nconv <- 0 -test_that("weighted logel with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - # weights <- weights/sum(weights) - logel_cpp <- gel$logel(G, weights) - # logel_cpp - logel_R <- weighted_logEL_G_R(G, weights, max_iter, rel_tol) - # as.numeric(logel_R) - if (check_res(logel_R) & check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, as.numeric(logel_R), tolerance = 1e-4) + # construct argument lists from test cases + el_opts <- NULL # list of arguments to set_opts + el_args <- list(G = G) # list of arguments to other methods + if(test_cases$set_opts[ii]) { + # set nr parameters + max_iter <- sample(c(1, 10, 50, 100), 1) + rel_tol <- runif(1, 1e-6, 1e-2) + el_opts <- c(el_opts, list(max_iter = max_iter, rel_tol = rel_tol)) + # check active members + gel$max_iter <- max_iter + gel$rel_tol <- rel_tol + } + if(test_cases$supp_adj[ii]) { + # set support correction + adj_a <- runif(1, 1, 5) + el_opts <- c(el_opts, list(supp_adj = 
TRUE, supp_adj_a = adj_a)) + } + if(test_cases$weights[ii]) { + # weighted version + el_args <- c(el_args, list(weights = weights)) + } + # R and C++ calculations + if(length(el_opts) > 0) { + do.call(gel$set_opts, args = el_opts) + } + if(test_cases$method[ii] == "lambda_nr") { + lambda_cpp <- do.call(gel$lambda_nr, el_args) + lambda_R_lst <- do.call(lambda_nr_R, args = c(el_opts, el_args)) + if (!lambda_R_lst$convergence) { + expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) + } else { + expect_equal(lambda_cpp, lambda_R_lst$lambda) + } + } + if(test_cases$method[ii] == "omega_hat") { + omega_cpp <- do.call(gel$omega_hat, el_args) + omega_R <- do.call(omega_hat_R, args = c(el_opts, el_args)) + if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { + expect_equal(omega_cpp, omega_R, tolerance = 1e-6) + } else { + expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) + } + } + if(test_cases$method[ii] == "logel") { + lel_cpp <- do.call(gel$logel, el_args) + lel_R <- do.call(logel_R, args = c(el_opts, el_args)) + expect_equal(lel_cpp, lel_R) + } + if(test_cases$method[ii] == "logel_grad") { + dldG_cpp <- do.call(gel$logel_grad, el_args)$dldG + dldG_R <- do.call(logel_grad_R, args = c(el_opts, el_args)) + expect_equal(dldG_cpp, dldG_R, tolerance = 1e-4) } } }) -# nconv <- 0 -test_that("weighted logel with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:(n-2), 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-4) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - # weights <- weights/sum(weights) - logel_cpp <- gel$logel(G, weights) - logel_cpp - logel_R <- weighted_logEL_G_R(adjG_R(G, adj_a), c(weights, 1), - max_iter = max_iter, rel_tol = rel_tol) - as.numeric(logel_R) - if (check_res(logel_R) & 
check_res(logel_cpp)) { - # nconv <<- nconv + 1 - expect_equal(logel_cpp, as.numeric(logel_R)) - } - } -}) - -# ---- logel_grad ---- - -# nconv <- 0 -test_that("dldG with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - dldG_cpp <- gel$logel_grad(G)$dldG - dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_G_R, G, method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, nrow(G) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) - } - } -}) - -# nconv <- 0 -test_that("dldG with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - dldG_cpp <- gel$logel_grad(G)$dldG - logEL_G_R_use <- function(G) { - logEL_G_R(G, max_iter = max_iter, rel_tol = rel_tol) - } - dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_G_R_use, G, method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, nrow(G) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) - } - } -}) - -# nconv <- 0 -test_that("dldG with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-5, 1e-3) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - dldG_cpp <- gel$logel_grad(G)$dldG - # dldG_cpp - logEL_adjG_R_use <- function(G) { - logEL_adjG_R(G, max_iter = max_iter, 
rel_tol = rel_tol, an = adj_a) - } - dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_adjG_R_use, G, - method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, (nrow(G) + 1) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - # dldG_nd - range(dldG_cpp-dldG_nd) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-4) - } - } -}) - -# ---- logel_grad with weights ---- - -# nconv <- 0 -test_that("weights dldG with default settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - gel <- GenEL$new(n, p) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - # weights <- rep(1,n) - dldG_cpp <- gel$logel_grad(G, weights)$dldG - weighted_logEL_G_R_use <- function(G) { - weighted_logEL_G_R(G, weights) - } - dldG_nd <- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, - method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, nrow(G) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) - } - } -}) -# nconv - -# nconv <- 0 -test_that("weighted dldG with given convergence settings", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-6, 1e-2) - gel <- GenEL$new(n, p) - gel$max_iter <- max_iter - gel$rel_tol <- rel_tol - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - dldG_cpp <- gel$logel_grad(G, weights)$dldG - weighted_logEL_G_R_use <- function(G) { - weighted_logEL_G_R(G, weights, max_iter, rel_tol) - } - dldG_nd <- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, - method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, nrow(G) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 
1e-3) - } - } -}) - -# nconv <- 0 -test_that("weighted dldG with given convergence settings and support correction", { - for (ii in 1:ntest) { - n <- sample(10:20,1) - p <- sample(1:2, 1) - max_iter <- sample(c(100, 200, 500), 1) - rel_tol <- runif(1, 1e-5, 1e-3) - adj_a <- runif(1, 1, 5) - gel <- GenEL$new(n, p) - gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, - supp_adj = TRUE, supp_adj_a = adj_a) - G <- matrix(rnorm(n*p),n,p) - weights <- runif(n, 0, 2) - dldG_cpp <- gel$logel_grad(G, weights)$dldG - weighted_logEL_G_R_use <- function(G) { - weighted_logEL_G_R(adjG_R(G, adj_a), c(weights, 1), max_iter, rel_tol) - } - dldG_nd <- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, - method.args = list(eps = gel$rel_tol)), - error = function(e) { - rep(NA, nrow(G) * ncol(G)) - }), nrow = nrow(G), ncol = ncol(G)) - range(dldG_cpp-dldG_nd) - if (check_res(dldG_cpp) & check_res(dldG_nd)) { - # nconv <<- nconv + 1 - expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-4) - } - } -}) +#--- old tests ----------------------------------------------------------------- + +## ntest <- 3 + +## # nconv <- 0 +## test_that("lambda_nr with default options", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## lambda_cpp <- gel$lambda_nr(G) +## # lambda_cpp +## lambda_R_lst <- lambdaNR_R(G) +## # lambda_R_lst$lambda +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # nconv <- 0 +## test_that("lambda_nr with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## 
lambda_cpp <- gel$lambda_nr(G) +## lambda_R_lst <- lambdaNR_R(G, max_iter = max_iter, rel_tol = rel_tol) +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # nconv <- 0 +## test_that("lambda_nr with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## lambda_cpp <- gel$lambda_nr(G) +## lambda_cpp +## lambda_R_lst <- lambdaNR_R(adjG_R(G, adj_a), max_iter = max_iter, rel_tol = rel_tol) +## lambda_R_lst$lambda +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # ---- lambda_nr with weights ---- + +## # nconv <- 0 +## test_that("weighted lambda_nr with default options", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## lambda_cpp <- gel$lambda_nr(G, weights) +## # lambda_cpp +## lambda_R_lst <- lambdaNRC_R(G, weights/sum(weights)) +## # lambda_R_lst$lambda +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted lambda_nr with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- 
sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-4) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## lambda_cpp <- gel$lambda_nr(G, weights) +## # lambda_cpp +## lambda_R_lst <- lambdaNRC_R(G, weights/sum(weights), +## max_iter = max_iter, rel_tol = rel_tol) +## # lambda_R_lst$lambda +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted lambda_nr with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## lambda_cpp <- gel$lambda_nr(G, weights) +## # lambda_cpp +## weights_R <- c(weights, 1) +## lambda_R_lst <- lambdaNRC_R(adjG_R(G, adj_a), weights_R/sum(weights_R), +## max_iter = max_iter, rel_tol = rel_tol) +## # lambda_R_lst$lambda +## if (!lambda_R_lst$convergence) { +## expect_equal(all(is.na(lambda_cpp)), all(is.na(lambda_R_lst$lambda))) +## } +## else { +## # nconv <<- nconv + 1 +## expect_equal(lambda_cpp, lambda_R_lst$lambda) +## } +## } +## }) + +## # ---- omega_hat ---- + +## # nconv <- 0 +## test_that("omega_hat with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## omega_cpp <- gel$omega_hat(G) +## omega_R <- omega_hat_R(G) +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R) +## } +## 
else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # nconv <- 0 +## test_that("omega_hat with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## omega_cpp <- gel$omega_hat(G) +## omega_R <- omega_hat_R(G, max_iter = max_iter, rel_tol = rel_tol) +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R) +## } +## else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # nconv <- 0 +## test_that("omega_hat with default settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## omega_cpp <- gel$omega_hat(G) +## omega_R <- omega_hat_R(adjG_R(G, adj_a), adjust = TRUE, +## max_iter = max_iter, rel_tol = rel_tol) +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R) +## } +## else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # ---- omega_hat with weights ---- + +## # nconv <- 0 +## test_that("weighted omega_hat with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## weights <- weights/sum(weights) +## omega_cpp <- gel$omega_hat(G, weights) +## # omega_cpp +## omega_R <- weighted_omega_hat_R(G, weights) +## # 
omega_R +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R) +## } +## else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted omega_hat with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## omega_cpp <- gel$omega_hat(G, weights) +## # omega_cpp +## omega_R <- weighted_omega_hat_R(G, weights/sum(weights), +## max_iter = max_iter, rel_tol = rel_tol) +## # omega_R +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R, tolerance = 1e-4) +## } +## else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted omega_hat with default settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## omega_cpp <- gel$omega_hat(G, weights) +## # omega_cpp +## weights_R <- c(weights, 1) +## omega_R <- weighted_omega_hat_R(adjG_R(G, adj_a), weights_R/sum(weights_R), +## adjust = TRUE, +## max_iter = max_iter, rel_tol = rel_tol) +## # omega_R +## if (!any(is.na(omega_cpp)) & !any(is.na(omega_R))) { +## # nconv <<- nconv + 1 +## expect_equal(omega_cpp, omega_R, tolerance = 1e-4) +## } +## else { +## expect_equal(any(is.na(omega_cpp)), any(is.na(omega_R))) +## } +## } +## }) + +## # 
---- logel ---- + +## # converged result +## check_res <- function(x) { +## all(is.finite(x) & !is.na(x)) +## } + +## # nconv <- 0 +## test_that("logel with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## logel_cpp <- gel$logel(G) +## omegahat_R <- omega_hat_R(G) +## logel_R <- logEL_R(omegahat_R) +## ## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, logel_R) +## ## } +## } +## }) + +## # nconv <- 0 +## test_that("logel with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(1, 10, 50, 100), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## logel_cpp <- gel$logel(G) +## # logel_cpp +## omegahat_R <- omega_hat_R(G, max_iter = max_iter, rel_tol = rel_tol) +## logel_R <- logEL_R(omegahat_R) +## # logel_R +## ## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, logel_R) +## ## } +## } +## }) + +## # nconv <- 0 +## test_that("logel with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## logel_cpp <- gel$logel(G) +## omegahat_R <- omega_hat_R(adjG_R(G, adj_a), max_iter = max_iter, rel_tol = rel_tol) +## logel_R <- logEL_R(omegahat_R, adjust = TRUE) +## ## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, logel_R) +## ## } +## } +## }) + +## # ---- logel with weights 
---- + +## # nconv <- 0 +## test_that("weighted logel with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## # weights <- weights/sum(weights) +## logel_cpp <- gel$logel(G, weights) +## # logel_cpp +## logel_R <- weighted_logEL_G_R(G, weights) +## # as.numeric(logel_R) +## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, as.numeric(logel_R)) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted logel with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-6, 1e-4) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## # weights <- weights/sum(weights) +## logel_cpp <- gel$logel(G, weights) +## # logel_cpp +## logel_R <- weighted_logEL_G_R(G, weights, max_iter, rel_tol) +## # as.numeric(logel_R) +## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, as.numeric(logel_R), tolerance = 1e-4) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted logel with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:(n-2), 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-6, 1e-4) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## # weights <- weights/sum(weights) +## logel_cpp <- gel$logel(G, weights) +## logel_cpp +## logel_R <- weighted_logEL_G_R(adjG_R(G, adj_a), c(weights, 1), +## max_iter = max_iter, rel_tol = rel_tol) +## 
as.numeric(logel_R) +## if (check_res(logel_R) & check_res(logel_cpp)) { +## # nconv <<- nconv + 1 +## expect_equal(logel_cpp, as.numeric(logel_R)) +## } +## } +## }) + +## # ---- logel_grad ---- + +## # nconv <- 0 +## test_that("dldG with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## dldG_cpp <- gel$logel_grad(G)$dldG +## dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_G_R, G, method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, nrow(G) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) +## } +## } +## }) + +## # nconv <- 0 +## test_that("dldG with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## dldG_cpp <- gel$logel_grad(G)$dldG +## logEL_G_R_use <- function(G) { +## logEL_G_R(G, max_iter = max_iter, rel_tol = rel_tol) +## } +## dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_G_R_use, G, method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, nrow(G) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) +## } +## } +## }) + +## # nconv <- 0 +## test_that("dldG with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-5, 1e-3) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = 
TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## dldG_cpp <- gel$logel_grad(G)$dldG +## # dldG_cpp +## logEL_adjG_R_use <- function(G) { +## logEL_adjG_R(G, max_iter = max_iter, rel_tol = rel_tol, an = adj_a) +## } +## dldG_nd <- matrix(tryCatch(numDeriv::grad(logEL_adjG_R_use, G, +## method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, (nrow(G) + 1) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## # dldG_nd +## range(dldG_cpp-dldG_nd) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-4) +## } +## } +## }) + +## # ---- logel_grad with weights ---- + +## # nconv <- 0 +## test_that("weights dldG with default settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## gel <- GenEL$new(n, p) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## # weights <- rep(1,n) +## dldG_cpp <- gel$logel_grad(G, weights)$dldG +## weighted_logEL_G_R_use <- function(G) { +## weighted_logEL_G_R(G, weights) +## } +## dldG_nd <- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, +## method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, nrow(G) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) +## } +## } +## }) +## # nconv + +## # nconv <- 0 +## test_that("weighted dldG with given convergence settings", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-6, 1e-2) +## gel <- GenEL$new(n, p) +## gel$max_iter <- max_iter +## gel$rel_tol <- rel_tol +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## dldG_cpp <- gel$logel_grad(G, weights)$dldG +## weighted_logEL_G_R_use <- function(G) { +## weighted_logEL_G_R(G, weights, max_iter, rel_tol) +## } +## dldG_nd 
<- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, +## method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, nrow(G) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-3) +## } +## } +## }) + +## # nconv <- 0 +## test_that("weighted dldG with given convergence settings and support correction", { +## for (ii in 1:ntest) { +## n <- sample(10:20,1) +## p <- sample(1:2, 1) +## max_iter <- sample(c(100, 200, 500), 1) +## rel_tol <- runif(1, 1e-5, 1e-3) +## adj_a <- runif(1, 1, 5) +## gel <- GenEL$new(n, p) +## gel$set_opts(max_iter = max_iter, rel_tol = rel_tol, +## supp_adj = TRUE, supp_adj_a = adj_a) +## G <- matrix(rnorm(n*p),n,p) +## weights <- runif(n, 0, 2) +## dldG_cpp <- gel$logel_grad(G, weights)$dldG +## weighted_logEL_G_R_use <- function(G) { +## weighted_logEL_G_R(adjG_R(G, adj_a), c(weights, 1), max_iter, rel_tol) +## } +## dldG_nd <- matrix(tryCatch(numDeriv::grad(weighted_logEL_G_R_use, G, +## method.args = list(eps = gel$rel_tol)), +## error = function(e) { +## rep(NA, nrow(G) * ncol(G)) +## }), nrow = nrow(G), ncol = ncol(G)) +## range(dldG_cpp-dldG_nd) +## if (check_res(dldG_cpp) & check_res(dldG_nd)) { +## # nconv <<- nconv + 1 +## expect_equal(dldG_cpp, dldG_nd, tolerance = 1e-4) +## } +## } +## })