r - Computing the approximate entropy of a matrix of time series

Tags: r matrix

Approximate entropy was introduced to quantify the regularity and unpredictability of fluctuations in a time series.

The function

approx_entropy(ts, edim = 2, r = 0.2*sd(ts), elag = 1)

from the package pracma computes the approximate entropy of the time series ts.
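
For example, on a single simulated series the call looks like this (just an illustrative sketch; edim, r and elag use the same settings as above):

library(pracma)

set.seed(1)
x <- rnorm(200)   # one dummy time series
# approximate entropy with embedding dimension 2 and tolerance 0.2*sd(x)
approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)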

I have a matrix mat of time series (one series per row) and I would like to estimate the approximate entropy of each of them, storing the results in a vector. For example:
library(pracma)

N<-nrow(mat)
r<-matrix(0, nrow = N, ncol = 1)
for (i in 1:N){
     r[i]<-approx_entropy(mat[i,], edim = 2, r = 0.2*sd(mat[i,]), elag = 1)
}

However, if N is large, this code can be too slow. Any suggestions to speed it up? Thanks!

Best Answer

I would also say parallelization, since the apply functions clearly do not bring any optimization here.

I tried the approx_entropy() function with:

  • apply
  • lapply
  • ParApply
  • foreach (from @Mankind_008)
  • a combination of data.table and ParApply

    ParApply seems to be slightly more efficient than the other two parallel approaches.

    Since I did not get the same timings as @Mankind_008, I checked them with microbenchmark.
    These are the results of 10 runs:

    Unit: seconds
    expr      min       lq     mean   median       uq      max neval cld
    forloop     4.067308 4.073604 4.117732 4.097188 4.141059 4.244261    10   b
    apply       4.054737 4.092990 4.147449 4.139112 4.188664 4.246629    10   b
    lapply      4.060242 4.068953 4.229806 4.105213 4.198261 4.873245    10   b
    par         2.384788 2.397440 2.646881 2.456174 2.558573 4.134668    10   a 
    parApply    2.289028 2.300088 2.371244 2.347408 2.369721 2.675570    10   a 
    DT_parApply 2.294298 2.322774 2.387722 2.354507 2.466575 2.515141    10   a
    


    [Boxplot: Different Methods (timing comparison of the approaches above)]

    Full code:
    library(pracma)
    library(foreach)
    library(parallel)
    library(doParallel)
    
    
    # dummy random time series data
    ts <- rnorm(100)   # length 100 so that rep(ts, 100) exactly fills the 100 x 100 matrix below
    mat <- matrix(rep(ts,100), nrow = 100, ncol = 100)
    r <- matrix(0, nrow = nrow(mat), ncol = 1)      
    
    
    ## For Loop
    for (i in 1:nrow(mat)){     
      r[i]<-approx_entropy(mat[i,], edim = 2, r = 0.2*sd(mat[i,]), elag = 1)
    }
    
    ## Apply
    r1 = apply(mat, 1, FUN = function(x) approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1))
    
    ## Lapply
    r2 = lapply(1:nrow(mat), FUN = function(x) approx_entropy(mat[x,], edim = 2, r = 0.2*sd(mat[x,]), elag = 1))
    
    ## ParApply
    cl <- makeCluster(getOption("cl.cores", 3))
    r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
      library(pracma); 
      approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
    })
    stopCluster(cl)
    
    ## Foreach
    registerDoParallel(cl = 3, cores = 2)  
    r4 <- foreach(i = 1:nrow(mat), .combine = rbind)  %dopar%  
      pracma::approx_entropy(mat[i,], edim = 2, r = 0.2*sd(mat[i,]), elag = 1)
    stopImplicitCluster()  
    
    ## Data.table
    library(data.table)
    mDT = as.data.table(mat)
    cl <- makeCluster(getOption("cl.cores", 3))
    r5 = parApply(cl = cl, mDT, 1, FUN = function(x) {
      library(pracma); 
      approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
    })
    stopCluster(cl)
    
    
    ## All equal Tests
    all.equal(as.numeric(r), r1)
    all.equal(r1, as.numeric(do.call(rbind, r2)))
    all.equal(r1, r3)
    all.equal(r1, as.numeric(r4))
    all.equal(r1, r5)
    
    
    ## Benchmark
    library(microbenchmark)
    mc <- microbenchmark(times=10,
      forloop = {
        for (i in 1:nrow(mat)){
          r[i]<-approx_entropy(mat[i,], edim = 2, r = 0.2*sd(mat[i,]), elag = 1)
        }
      },
      apply = {
        r1 = apply(mat, 1, FUN = function(x) approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1))
      },
      lapply = {
        r1 = lapply(1:nrow(mat), FUN = function(x) approx_entropy(mat[x,], edim = 2, r = 0.2*sd(mat[x,]), elag = 1))
      },
      par = {
        registerDoParallel(cl = 3, cores = 2)  
        r_par <- foreach(i = 1:nrow(mat), .combine = rbind)  %dopar%  
          pracma::approx_entropy(mat[i,], edim = 2, r = 0.2*sd(mat[i,]), elag = 1)
        stopImplicitCluster()
      }, 
      parApply = {
        cl <- makeCluster(getOption("cl.cores", 3))
        r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
          library(pracma); 
          approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
        })
        stopCluster(cl)
      },
      DT_parApply = {
        mDT = as.data.table(mat)
        cl <- makeCluster(getOption("cl.cores", 3))
        r5 = parApply(cl = cl, mDT, 1, FUN = function(x) {
          library(pracma); 
          approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
        })
        stopCluster(cl)
      }
    )
    
    ## Results
    mc
    Unit: seconds
    expr      min       lq     mean   median       uq      max neval cld
    forloop 4.067308 4.073604 4.117732 4.097188 4.141059 4.244261    10   b
    apply 4.054737 4.092990 4.147449 4.139112 4.188664 4.246629    10   b
    lapply 4.060242 4.068953 4.229806 4.105213 4.198261 4.873245    10   b
    par 2.384788 2.397440 2.646881 2.456174 2.558573 4.134668    10  a 
    parApply 2.289028 2.300088 2.371244 2.347408 2.369721 2.675570    10  a 
    DT_parApply 2.294298 2.322774 2.387722 2.354507 2.466575 2.515141    10  a 
    
    ## Time-Boxplot
    plot(mc)
    

    The number of cores also affects the speed, and more is not always faster: at some point the overhead of shipping work out to all the workers eats up part of the performance gained. I benchmarked the ParApply version with 2 to 7 cores, and on my machine running it with 3 or 4 cores seems to be the best option, although the differences are not large.
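
    If you do not want to hard-code the worker count, a simple option (not part of the benchmark above, just a sketch; the names n_workers and r_auto are only illustrative) is to derive it from parallel::detectCores() and leave one core free:

    ## sketch: pick a worker count automatically (all detected cores minus one, at least 1)
    library(parallel)
    n_workers <- max(1, detectCores() - 1)
    cl <- makeCluster(n_workers)
    r_auto <- parApply(cl = cl, mat, 1, FUN = function(x) {
      library(pracma)
      approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
    })
    stopCluster(cl)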

    mc
    Unit: seconds
    expr      min       lq     mean   median       uq      max neval  cld
    parApply_2 2.670257 2.688115 2.699522 2.694527 2.714293 2.740149    10   c 
    parApply_3 2.312629 2.366021 2.411022 2.399599 2.464568 2.535220    10 a   
    parApply_4 2.358165 2.405190 2.444848 2.433657 2.485083 2.568679    10 a   
    parApply_5 2.504144 2.523215 2.546810 2.536405 2.558630 2.646244    10  b  
    parApply_6 2.687758 2.725502 2.761400 2.747263 2.766318 2.969402    10   c 
    parApply_7 2.906236 2.912945 2.948692 2.919704 2.988599 3.053362    10    d
    


    [Boxplot: Amount of Cores (ParApply timings for 2 to 7 cores)]

    Full code:
    ## Benchmark N-Cores
    library(microbenchmark)
    mc <- microbenchmark(times=10,
                         parApply_2 = {
                           cl <- makeCluster(getOption("cl.cores", 2))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         parApply_3 = {
                           cl <- makeCluster(getOption("cl.cores", 3))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         parApply_4 = {
                           cl <- makeCluster(getOption("cl.cores", 4))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         parApply_5 = {
                           cl <- makeCluster(getOption("cl.cores", 5))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         parApply_6 = {
                           cl <- makeCluster(getOption("cl.cores", 6))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         parApply_7 = {
                           cl <- makeCluster(getOption("cl.cores", 7))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         }
    )
    
    ## Results
    mc
    Unit: seconds
    expr      min       lq     mean   median       uq      max neval  cld
    parApply_2 2.670257 2.688115 2.699522 2.694527 2.714293 2.740149    10   c 
    parApply_3 2.312629 2.366021 2.411022 2.399599 2.464568 2.535220    10 a   
    parApply_4 2.358165 2.405190 2.444848 2.433657 2.485083 2.568679    10 a   
    parApply_5 2.504144 2.523215 2.546810 2.536405 2.558630 2.646244    10  b  
    parApply_6 2.687758 2.725502 2.761400 2.747263 2.766318 2.969402    10   c 
    parApply_7 2.906236 2.912945 2.948692 2.919704 2.988599 3.053362    10    d
    
    ## Plot Results
    plot(mc)
    

    As the matrix gets larger, using ParApply on a data.table seems to become faster than using a matrix. The following example uses a matrix with 500*500 elements, resulting in these timings (for only 2 runs):

    Unit: seconds
    expr      min       lq     mean   median       uq      max neval cld
    ParApply 191.5861 191.5861 192.6157 192.6157 193.6453 193.6453     2   a
    DT_ParAp 135.0570 135.0570 163.4055 163.4055 191.7541 191.7541     2   a
    


    The minimum is much lower, even though the maximum is almost the same, as the box plot below also illustrates nicely:
    [Boxplot: ParApply vs. DT_ParAp timings on the 500 x 500 matrix]

    Full code:
    library(parallel)     # makeCluster(), parApply()
    library(data.table)   # as.data.table()

    # dummy random time series data
    ts <- rnorm(500)
    # mat <- matrix(rep(ts,100), nrow = 100, ncol = 100)
    mat = matrix(rep(ts,500), nrow = 500, ncol = 500, byrow = T)
    r <- matrix(0, nrow = nrow(mat), ncol = 1)      
    
    ## Benchmark
    library(microbenchmark)
    mc <- microbenchmark(times=2,
                         ParApply = {
                           cl <- makeCluster(getOption("cl.cores", 3))
                           r3 = parApply(cl = cl, mat, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         },
                         DT_ParAp = {
                           mDT = as.data.table(mat)
                           cl <- makeCluster(getOption("cl.cores", 3))
                           r5 = parApply(cl = cl, mDT, 1, FUN = function(x) {
                             library(pracma); 
                             approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1)
                           })
                           stopCluster(cl)
                         }
    )
    
    ## Results
    mc
    Unit: seconds
    expr      min       lq     mean   median       uq      max neval cld
    ParApply 191.5861 191.5861 192.6157 192.6157 193.6453 193.6453     2   a
    DT_ParAp 135.0570 135.0570 163.4055 163.4055 191.7541 191.7541     2   a
    
    ## Plot
    plot(mc)
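
    One last side note that is not benchmarked above: instead of calling library(pracma) inside FUN for every row, the package can be attached once per worker with parallel::clusterEvalQ. The timing effect should be small, since library() is cheap after the first load on each worker, but it keeps the worker function a bit leaner. A minimal sketch (r6 is just an illustrative name):

    ## sketch: attach pracma once on every worker, then keep FUN minimal
    cl <- makeCluster(getOption("cl.cores", 3))
    clusterEvalQ(cl, library(pracma))
    r6 <- parApply(cl = cl, mat, 1, FUN = function(x)
      approx_entropy(x, edim = 2, r = 0.2*sd(x), elag = 1))
    stopCluster(cl)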
    

    About "r - computing the approximate entropy of a matrix of time series", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/50945047/
