grad: Replace numDeriv with homemade gradient functions
continuous-integration/drone/push Build is passing
Details
continuous-integration/drone/push Build is passing
Details
This removes numDeriv as the only dependency of this package. Some undocumented options are still available to use numDeriv or pracma to do derivatives, but they're not fully supported throughout at the moment. Particularly, only rcest::grad is used in the delta method functions. No dependencies + tests means I shouldn't have to worry about breakage from feature changes unless they're in base R. This is a significant enough difference to warrant a bump of the version number.
This commit is contained in:
parent
6e55b6c19c
commit
54721b652a
10
DESCRIPTION
10
DESCRIPTION
|
@ -1,16 +1,14 @@
|
|||
Package: rcest
|
||||
Title: Wrapper functions to ease estimation with choice data
|
||||
Version: 0.8.6
|
||||
Title: Helper package for conducting optimization, particularly maximum likelihood.
|
||||
Version: 0.9.0
|
||||
Authors@R: person("Brian", "Monroe", email = "brian@bamonroe.com", role = c("aut", "cre"))
|
||||
Description: This package provides functions to estimate parameters of "function types" (ftypes), and a variety of helper functions to handle output. These functions allow for parameters to easily be made into linear functions of observable covariates, clustering standard errors around a variable, and delta method functions to do non-linear Wald tests.
|
||||
Depends:
|
||||
R (>= 3.3.1)
|
||||
License: MIT
|
||||
Additional_repositories: https://bamonroe.com/drat/
|
||||
Encoding: UTF-8
|
||||
LazyData: true
|
||||
Imports:
|
||||
numDeriv
|
||||
RoxygenNote: 7.1.1
|
||||
Suggests:
|
||||
testthat (>= 2.1.0)
|
||||
testthat (>= 2.1.0),
|
||||
numDeriv
|
||||
|
|
|
@ -22,7 +22,10 @@ export(get_profile)
|
|||
export(get_rcenv)
|
||||
export(get_transform)
|
||||
export(get_wrap_opts)
|
||||
export(grad)
|
||||
export(hessian)
|
||||
export(init_sanitize)
|
||||
export(jacobian)
|
||||
export(layout_convert)
|
||||
export(lfun_wrap)
|
||||
export(make_parsed)
|
||||
|
|
|
@ -4,15 +4,15 @@
|
|||
#' @param g2 function to transform parameters
|
||||
#' @param mu vector of parameters used to derive statistic
|
||||
#' @param sigma covariance matrix of parameters used to derive statistic
|
||||
#' @param gp1 optional gradient function of g, otherwise numDeriv is used
|
||||
#' @param gp2 optional gradient function of g, otherwise numDeriv is used
|
||||
#' @param gp1 optional gradient function of g, otherwise it is calculated numerically
|
||||
#' @param gp2 optional gradient function of g, otherwise it is calculated numerically
|
||||
#' @param ... additional arguments to g functions
|
||||
#' @export delta_cov
|
||||
delta_cov <- function(g1, g2, mu, sigma, gp1 = NULL, gp2 = NULL, ...) {
|
||||
|
||||
if (is.null(gp1)) {
|
||||
gp1 <- numDeriv::grad(func = g1, x = mu, ...)
|
||||
gp2 <- numDeriv::grad(func = g2, x = mu, ...)
|
||||
gp1 <- grad(fn = g1, x = mu, ...)
|
||||
gp2 <- grad(fn = g2, x = mu, ...)
|
||||
} else {
|
||||
gp1 <- gp1(mu)
|
||||
gp2 <- gp2(mu)
|
||||
|
@ -40,12 +40,12 @@ delta_cov <- function(g1, g2, mu, sigma, gp1 = NULL, gp2 = NULL, ...) {
|
|||
#' @param g function to transform parameters
|
||||
#' @param mu vector of parameters used to derive statistic
|
||||
#' @param sigma covariance matrix of parameters used to derive statistic
|
||||
#' @param gp optional gradient function of g, otherwise numDeriv is used
|
||||
#' @param gp optional gradient function of g, otherwise it is calculated numerically
|
||||
#' @param ... additional arguments to g function
|
||||
#' @export delta_var
|
||||
delta_var <- function(g, mu, sigma, gp = NULL, ...) {
|
||||
if (is.null(gp)) {
|
||||
gp <- numDeriv::grad(func = g, x = mu, ...)
|
||||
gp <- grad(fn = g, x = mu, ...)
|
||||
} else {
|
||||
gp <- gp(mu)
|
||||
}
|
||||
|
|
|
@ -1,22 +1,33 @@
|
|||
deriv_wrap <- function(deriv_type, par, lfun, wrap_opts, ...) {
|
||||
deriv_prov <- getOption("rcest_deriv", default = "numDeriv")
|
||||
deriv_prov <- getOption("rcest_deriv", default = "rcest")
|
||||
|
||||
if (deriv_type == "grad") {
|
||||
if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::grad
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::grad
|
||||
if (deriv_prov == "rcest") deriv_fun <- rcest::grad
|
||||
else if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::grad
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::grad
|
||||
sum_res <- TRUE
|
||||
} else if (deriv_type == "hessian") {
|
||||
if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::hessian
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::hessian
|
||||
if (deriv_prov == "rcest") deriv_fun <- rcest::hessian
|
||||
else if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::hessian
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::hessian
|
||||
sum_res <- TRUE
|
||||
} else if (deriv_type == "jacobian") {
|
||||
if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::jacobian
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::jacobian
|
||||
if (deriv_prov == "rcest") deriv_fun <- rcest::jacobian
|
||||
else if (deriv_prov == "numDeriv") deriv_fun <- numDeriv::jacobian
|
||||
else if (deriv_prov == "pracma") deriv_fun <- pracma::jacobian
|
||||
sum_res <- FALSE
|
||||
} else {
|
||||
stop(paste0("'", deriv_type, "' was passed as a deriv type and this doesn't make sense."))
|
||||
}
|
||||
|
||||
if (deriv_prov == "numDeriv") {
|
||||
if (deriv_prov == "rcest") {
|
||||
deriv_out <- deriv_fun(fn = lfun_wrap,
|
||||
x = par,
|
||||
lfun = lfun,
|
||||
wrap_opts = wrap_opts,
|
||||
sum_res = sum_res,
|
||||
...)
|
||||
} else if (deriv_prov == "numDeriv") {
|
||||
deriv_out <- deriv_fun(func = lfun_wrap,
|
||||
x = par,
|
||||
method = "Richardson",
|
||||
|
|
4
R/grad.R
4
R/grad.R
|
@ -82,8 +82,8 @@ jacobian <- function(fn, x, step = .Machine$double.eps ^ (1 / 4), ...) {
|
|||
#' @param step the size of the step used for the delta. Can be a vector where
|
||||
#' each element matches to a corresponding x
|
||||
#' @param ... named variables passed directly to the function
|
||||
#' @export hess
|
||||
hess <- function(fn, x, step = .Machine$double.eps ^ (1 / 4), ...) {
|
||||
#' @export hessian
|
||||
hessian <- function(fn, x, step = .Machine$double.eps ^ (1 / 4), ...) {
|
||||
gwrap <- function(x, ...) {
|
||||
allow_multi_value <- FALSE
|
||||
grad(fn, x, step = step, ...)
|
||||
|
|
|
@ -17,7 +17,7 @@ set_transform <- function(name, g, gi, gp = NULL) {
|
|||
|
||||
if (is.null(gp)) {
|
||||
gp <- function(x) {
|
||||
numDeriv::grad(func = g, x = x)
|
||||
grad(fn = g, x = x)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -12,8 +12,8 @@ The delta methods were derived because the nlWaldtest package didn't allow for a
|
|||
- [x] Create API to allow users to add any optimizer
|
||||
- Base-R optimizers are pre-built into the package, so work can begin out of the box.
|
||||
- [x] Create API to allow users to provide arbitrary parameter transformations.
|
||||
- "none", and "exp" transforms are pre-built for non-constrained and positive-constrained parameters.
|
||||
- "none", "exp", and "logit" transforms are pre-built for non-constrained, positive-constrained, and 0-1 constrained parameters.
|
||||
- [x] Provide Delta Method functions for arbitrary parameter transformations.
|
||||
- [x] Clean way of correcting covariance matrix for clusters in the data.
|
||||
- [x] Minimize Dependencies
|
||||
- Currently only numDeriv as a dependency
|
||||
- Currently zero dependencies.
|
||||
|
|
|
@ -15,9 +15,9 @@ delta_cov(g1, g2, mu, sigma, gp1 = NULL, gp2 = NULL, ...)
|
|||
|
||||
\item{sigma}{covariance matrix of parameters used to derive statistic}
|
||||
|
||||
\item{gp1}{optional gradient function of g, otherwise numDeriv is used}
|
||||
\item{gp1}{optional gradient function of g, otherwise it is calculated numerically}
|
||||
|
||||
\item{gp2}{optional gradient function of g, otherwise numDeriv is used}
|
||||
\item{gp2}{optional gradient function of g, otherwise it is calculated numerically}
|
||||
|
||||
\item{...}{additional arguments to g functions}
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@ delta_var(g, mu, sigma, gp = NULL, ...)
|
|||
|
||||
\item{sigma}{covariance matrix of parameters used to derive statistic}
|
||||
|
||||
\item{gp}{optional gradient function of g, otherwise numDeriv is used}
|
||||
\item{gp}{optional gradient function of g, otherwise it is calculated numerically}
|
||||
|
||||
\item{...}{additional arguments to g function}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
% Generated by roxygen2: do not edit by hand
|
||||
% Please edit documentation in R/grad.R
|
||||
\name{grad}
|
||||
\alias{grad}
|
||||
\title{Find the gradient of a function}
|
||||
\usage{
|
||||
grad(fn, x, step = .Machine$double.eps^(1/3), ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{fn}{the function being derived}
|
||||
|
||||
\item{x}{the parameters at which the gradient is being calculated}
|
||||
|
||||
\item{step}{the size of the step used for the delta. Can be a vector where
|
||||
each element matches to a corresponding x}
|
||||
|
||||
\item{...}{named variables passed directly to the function}
|
||||
}
|
||||
\description{
|
||||
Get the gradient of a function using a 2-sided delta method.
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
% Generated by roxygen2: do not edit by hand
|
||||
% Please edit documentation in R/grad.R
|
||||
\name{hessian}
|
||||
\alias{hessian}
|
||||
\title{Find the hessian of a function}
|
||||
\usage{
|
||||
hessian(fn, x, step = .Machine$double.eps^(1/4), ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{fn}{the function being derived}
|
||||
|
||||
\item{x}{the parameters at which the gradient is being calculated}
|
||||
|
||||
\item{step}{the size of the step used for the delta. Can be a vector where
|
||||
each element matches to a corresponding x}
|
||||
|
||||
\item{...}{named variables passed directly to the function}
|
||||
}
|
||||
\description{
|
||||
Get the hessian of a function using a 2-sided delta method. The
|
||||
hessian is the gradient of the gradient, so this function mainly provides a
|
||||
convenience wrapper around calls to grad.
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
% Generated by roxygen2: do not edit by hand
|
||||
% Please edit documentation in R/grad.R
|
||||
\name{jacobian}
|
||||
\alias{jacobian}
|
||||
\title{Find the jacobian of a function}
|
||||
\usage{
|
||||
jacobian(fn, x, step = .Machine$double.eps^(1/4), ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{fn}{the function being derived}
|
||||
|
||||
\item{x}{the parameters at which the gradient is being calculated}
|
||||
|
||||
\item{step}{the size of the step used for the delta. Can be a vector where
|
||||
each element matches to a corresponding x}
|
||||
|
||||
\item{...}{named variables passed directly to the function}
|
||||
}
|
||||
\description{
|
||||
Get the jacobian of a function using a 2-sided delta method. The
|
||||
jacobian is the gradient at every observation, so this function mainly provides a
|
||||
convenience wrapper around grad to allow non-scalar functions.
|
||||
}
|
|
@ -99,21 +99,6 @@ test_that("the jacobian of a multi-input/output function is properly calculated"
|
|||
byrow = T, nrow = 3)
|
||||
expect_equal(j, j0)
|
||||
|
||||
x <- round(runif(2), 4)
|
||||
f <- function(x) {
|
||||
y <- vector("numeric", 3)
|
||||
y[1] <- 1 + x[1] ^2 + 1 * x[2]^3
|
||||
y[2] <- 1 + x[1] ^3 + 2 * x[2]^3
|
||||
y[3] <- 1 + x[1] ^4 + 3 * x[2]^3
|
||||
y
|
||||
}
|
||||
j <- jacobian(f, x)
|
||||
j0 <- matrix(c(1 * 2 * x[1], 1 * 3 * x[2]^2,
|
||||
1 * 3 * x[1]^2, 2 * 3 * x[2]^2,
|
||||
1 * 4 * x[1]^3, 3 * 3 * x[2]^2),
|
||||
byrow = T, nrow = 3)
|
||||
expect_equal(j, j0)
|
||||
|
||||
})
|
||||
|
||||
|
||||
|
@ -122,38 +107,38 @@ test_that("the hessian of a function is properly calculated", {
|
|||
|
||||
# First derivative of exp is exp, so easy to test
|
||||
f <- function(x) exp(x)
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
expect_equal(h, f(x))
|
||||
|
||||
# First derivative of a linear function is just the coefficient
|
||||
f <- function(x) 2 + 3 * x
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
expect_equal(h, 0)
|
||||
|
||||
# First derivative of a quadratic function is a little more complicated
|
||||
f <- function(x) 2 + 3 * x ^2
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
expect_equal(h, 6)
|
||||
|
||||
# Multi-parameter function
|
||||
x <- c(1, 2)
|
||||
|
||||
f <- function(x) 2 + 3 * x[1]^2 + 2 * x[2]
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
h0 <- matrix(c(6, 0,
|
||||
0, 0),
|
||||
byrow = T, nrow = 2)
|
||||
expect_equal(h, h0)
|
||||
|
||||
f <- function(x) 2 + 3 * x[1]^2 + 2 * x[2]^3
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
h0 <- matrix(c(6, 0,
|
||||
0, 12 * x[2]),
|
||||
byrow = T, nrow = 2)
|
||||
expect_equal(h, h0)
|
||||
|
||||
f <- function(x) 2 + 3 * x[1]^2 + 2 * x[2]^3 + 4 * x[1]^4 * x[2]^2
|
||||
h <- hess(f, x)
|
||||
h <- hessian(f, x)
|
||||
h0 <- matrix(c(48 * x[1]^2 * x[2]^2 + 6, 32 * x[1]^3 * x[2],
|
||||
32 * x[1]^3 * x[2], 8 * x[1]^4 + 12 * x[2]),
|
||||
byrow = T, nrow = 2)
|
||||
|
|
Loading…
Reference in New Issue