Get rid of Rhpc2, require MPI_cores to be set in options() to use MPI

This commit is contained in:
Brian Albert Monroe 2018-01-28 12:59:22 +02:00
parent eb772ba620
commit e7c92b9164
25 changed files with 26 additions and 78 deletions
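In practice, the MPI worker count is now supplied by the user instead of being queried from Rhpc. A minimal sketch of the new calling convention (the worker count of 8 and the choice of c.lapply are only illustrative):

options(MPI_cores = 8)   # must be set before ctools is loaded
library(ctools)
c.start()                                    # bring up the workers
results <- c.lapply(1:100, function(x) x^2)  # same unified syntax as before
c.done()                                     # shut the workers down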


.gitignore vendored
View File

@ -1,3 +1,4 @@
.Rproj.user
.Rhistory
.RData
tags

View File

@ -1,6 +1,6 @@
Package: ctools
Title: Unified Syntax for Parallel Operations in R
Version: 0.15.0
Version: 0.16.0
Authors@R: person("Brian Albert", "Monroe", email = "brianalbertmonroe@gmail.com", role = c("aut", "cre"))
Description: This package unifies some commonly used functions for parallel computation in R. On Unix-like systems, commands take advantage of forking; on Windows systems a PSOCK cluster is established; and on Unix clusters MPI operations are handled through a custom fork of Rhpc. Users wishing to take advantage of parallel operations in R previously had to use several different syntaxes for different OS's, and again for cluster computing; "ctools" unifies this syntax.
Depends:
@ -8,10 +8,10 @@ Depends:
License: GPL2
Additional_repositories: https://bamonroe.github.io/drat/
LazyData: true
RoxygenNote: 5.0.1
RoxygenNote: 6.0.1
Imports:
parallel,
Rcpp
Suggests:
Rhpc2
Rhpc
LinkingTo: Rcpp
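The unified front end described above is unchanged for the local backends; only the MPI path gains the new option requirement. A rough local-use sketch pieced together from the package's own documentation examples (the core count is arbitrary and the exact start-up sequence is an assumption based on the examples in this diff):

library(ctools)
c.config(cores = 2)                  # forking on Unix-alikes, a PSOCK cluster on Windows
c.lapply(1:10, function(x) x + 1)    # same syntax regardless of backend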

View File

@ -2,6 +2,6 @@
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
array2list <- function(X, dim) {
.Call('ctools_array2list', PACKAGE = 'ctools', X, dim)
.Call('_ctools_array2list', PACKAGE = 'ctools', X, dim)
}

View File

@ -1,10 +1,17 @@
use.MPI <- function(cores = (Rhpc::Rhpc_mpi_universe_size() - 1)) {
# Rhpc requires initialization and worker number setting to be separate processes
Rhpc::Rhpc_initialize()
# Rhpc requires the total worker number, not the total CPU number
cores <- min(max(round(cores), 1), Rhpc::Rhpc_mpi_universe_size() - 1)
mpi_cores <- getOption("MPI_cores")
if (is.null(mpi_cores)) {
stop("To use MPI the option 'MPI_cores' needs to be set to the number of cores available before loading ctools")
}
cores <- mpi_cores
#cores <- min(max(round(cores), 1), Rhpc::Rhpc_mpi_universe_size() - 1)
assign("cores", cores, envir = cluster)
assign("maxcores", cores, envir = cluster)

View File

@ -25,7 +25,7 @@ cluster <- new.env(parent = emptyenv())
assign("mpi.node", mpi.node, envir = cluster)
if ( !mpi.node ) {
if (mpi.check == T) {
use.MPI()
assign("use.mpi", T, envir = cluster)

View File

@ -23,4 +23,3 @@ mat <- matrix(1:12, 3, 4)
c.apply(mat, 1, sum) # a parallel implementation of rowSums
# Note that the serial version is likely faster for this function
}

View File

@ -24,4 +24,3 @@ mat <- matrix(1:12, 3, 4)
c.applyLB(mat, 1, sum) # a parallel implementation of rowSums
# Note that the serial version is likely faster for this function
}

View File

@ -15,4 +15,3 @@
Wrapper function for clusterCall, Rhpc_worker_call and a FORK implementation of
these functions. Note that this is not at all the same as base::call().
}

View File

@ -22,4 +22,3 @@ number can be changed at any time
\examples{
c.config(cores = 2) # Set cores number to 2
}

View File

@ -12,4 +12,3 @@ This function returns the number of cores configured for use with ctools
\examples{
cores <- c.cores() # Return the number of cores in use
}

View File

@ -15,4 +15,3 @@ Wrapper function for parEvalQ() and Rhpc_EvalQ().
\examples{
c.eval(vec <- 1:10) # evaluate the assignment of sequence 1:10 to object vec
}

View File

@ -23,4 +23,3 @@ c.export("vec", push=FALSE) # adds object 'vec' to list of things to export,
c.export(push=T) # export all objects in the to.export list
c.export("vec") # add "vec" to the to.export list, then export all items in list
}

View File

@ -14,4 +14,3 @@
\description{
Drop-in replacement for getOption() function, but called on all cluster nodes and master if cluster is used
}

View File

@ -20,4 +20,3 @@ Wrapper function for parLapply(), mclapply(), and Rhpc_lapply().
vec <- 1:10
c.lapply(vec, function(x) { x + 1}) # adds 1 to every element of vec, returns list
}

View File

@ -20,4 +20,3 @@ Wrapper function for parLapplyLB(), mclapply(mc.preschedule = FALSE), and Rhpc_l
vec <- 1:10
c.lapply(vec, function(x) { x + 1}) # adds 1 to every element of vec, returns list
}

View File

@ -17,4 +17,3 @@ load the packages into all cluster nodes.
\examples{
c.library("ggplot2","reshape2")
}

View File

@ -12,4 +12,3 @@ This function returns the maximum number of cores possible to use with ctools
\examples{
maxcores <- c.maxcores() # Return the maximum number of cores available
}

View File

@ -13,4 +13,3 @@ Options can also be passed by giving a single unnamed argument which is a named
\description{
Drop-in replacement for options() function, but called on all cluster nodes and master if cluster is used
}
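As an illustration of the named-list form mentioned above (not part of this diff; the option names are arbitrary examples):

c.options(digits = 4, scipen = 10)         # named arguments, set on master and all workers
c.options(list(digits = 4, scipen = 10))   # equivalent single named-list call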

View File

@ -20,4 +20,3 @@ Wrapper function for parSapply(), and Rhpc_sapply(), and an implementation of m
vec <- 1:10
c.lapply(vec, function(x) { x + 1}) # adds 1 to every element of vec, returns list
}

View File

@ -20,4 +20,3 @@ Wrapper function for parSapplyLB(), and Rhpc_sapplyLB(), and an implementation
vec <- 1:10
c.lapply(vec, function(x) { x + 1}) # adds 1 to every element of vec, returns list
}

View File

@ -12,4 +12,3 @@ Set seed for random number generators.
\examples{
c.set.seed(42) # set the seed on all workers
}

View File

@ -28,4 +28,3 @@ c.source("/home/user/useful.R", push=FALSE) # adds object 'vec' to list of thi
c.source(push=T) # export all objects in the to.export list
c.source("vec") # add "vec" to the to.export list, then export all items in list
}

View File

@ -28,4 +28,3 @@ c.sourceCpp("/home/user/useful.cpp", push=FALSE) # adds files in FILE to list of
c.sourceCpp(push=T) # source all files in the in the 'sourceCpp' list
c.sourceCpp("vec") # add "vec" to the to.export list, then export all items in list
}

View File

@ -1,52 +0,0 @@
# Using Eratosthenes's Sieve to test MPI
# Load in the helper script for parallel tasks
#source("c-tools.r")
library("ctools")
c.start()
getP <- function(n){
a <- c(2:n)
I <- 2
r <- c()
while(I*I < n){
r <- c(r,a[1])
a <- a[-(which(a%%I == 0))]
I <- a[1]
}
c(r,a)
}
# Need to export this
#c.export("getP")
loopy <- function(n){
for(i in 3:n){
primes <- getP(i)
}
# We actually don't need the primes, we need CPU cycles
# and a way to check which machines are running the code
host <- Sys.info()['nodename']
print(paste("Hostname is",host))
}
# Need to export this
#c.export("loopy")
nums <- 5000:5007
# Need to export this
#c.export("nums")
# Do the actual exporting
c.export(T,"getP","loopy","nums")
#c.export(T)
c.lapply(X=nums,FUN=loopy)
c.done()

View File

@ -7,7 +7,7 @@ using namespace Rcpp;
// array2list
List array2list(NumericMatrix X, int dim);
RcppExport SEXP ctools_array2list(SEXP XSEXP, SEXP dimSEXP) {
RcppExport SEXP _ctools_array2list(SEXP XSEXP, SEXP dimSEXP) {
BEGIN_RCPP
Rcpp::RObject rcpp_result_gen;
Rcpp::RNGScope rcpp_rngScope_gen;
@ -17,3 +17,13 @@ BEGIN_RCPP
return rcpp_result_gen;
END_RCPP
}
static const R_CallMethodDef CallEntries[] = {
{"_ctools_array2list", (DL_FUNC) &_ctools_array2list, 2},
{NULL, NULL, 0}
};
RcppExport void R_init_ctools(DllInfo *dll) {
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
R_useDynamicSymbols(dll, FALSE);
}
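With the routines registered, the wrapper regenerated in R/RcppExports.R (shown earlier in this diff) resolves '_ctools_array2list' through this table. A hedged usage sketch; array2list is an internal helper, so access via ':::' and the meaning of the dim argument are assumptions:

m <- matrix(as.numeric(1:12), nrow = 3, ncol = 4)
ctools:::array2list(m, 2)   # presumably splits the matrix into a list along the given dimension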