假设我这里有一个像tibble一样的tb_1
# A tibble: 7 x 2
Grp Srt
<chr> <int>
1 A 10
2 B 4
3 B 7
4 A 5
5 A 1
6 A 3
7 B 2
现将其转录如下:
# Reconstruct the example tibble 'tb_1': 7 rows, a character grouping
# column 'Grp' ("A"/"B") and an integer sorting column 'Srt'.
tb_1 <- structure(
list(
Grp = c("A", "B", "B", "A", "A", "A", "B"),
Srt = c(10L, 4L, 7L, 5L, 1L, 3L, 2L)
),
# Class vector marks this as a tibble ('tbl_df') backed by a data.frame.
class = c("tbl_df", "tbl", "data.frame"),
# c(NA, -7L) is base R's compact encoding for "7 automatic row names".
row.names = c(NA, -7L)
)arrange_groups() i想要一个表演性函数
library(dplyr)
# Desired behavior: sort 'Srt' WITHIN each 'Grp' group while each group
# keeps occupying exactly its original row positions ('arrange_groups()'
# is the hypothetical function this question asks for).
tb_2 <- tb_1 %>%
# Group by 'Grp'.
group_by(Grp) %>%
# Sort by 'Srt' WITHIN each group.
arrange_groups(Srt)在得到的tb_2中,来自"A"组的4个观察应该保持分布在1st、4th、5th和6th行之间;在它们之间按照Srt排序之后。同样,来自"B"组的3个观察应该保持分布在2nd、3rd和7th行之间。
# A tibble: 7 x 2
# Groups: Grp [2]
Grp Srt
<chr> <int>
1 A 1
2 B 2
3 B 4
4 A 3
5 A 5
6 A 10
7 B 7
我在下面转录了tb_2:
# Reconstruct 'tb_2', the expected result: within each group the 'Srt'
# values are now sorted, yet group "A" still occupies rows 1, 4, 5, 6
# and group "B" still occupies rows 2, 3, 7.
tb_2 <- structure(
list(
Grp = c("A", "B", "B", "A", "A", "A", "B"),
Srt = c(1L, 2L, 4L, 3L, 5L, 10L, 7L)
),
# 'grouped_df' in front of the tibble classes marks an active grouping.
class = c("grouped_df", "tbl_df", "tbl", "data.frame"),
row.names = c(NA, -7L),
# 'groups' is dplyr's internal grouping metadata: one row per group,
# with '.rows' holding the row indices that belong to each group.
groups = structure(
list(
Grp = c("A", "B"),
.rows = structure(
list(
c(1L, 4L, 5L, 6L),
c(2L, 3L, 7L)
),
# vctrs list_of: every element is an integer vector.
ptype = integer(0),
class = c("vctrs_list_of", "vctrs_vctr", "list")
)
),
class = c("tbl_df", "tbl", "data.frame"),
row.names = c(NA, -2L),
# .drop = TRUE: unused factor levels do not form groups (dplyr default).
.drop = TRUE
)
)更新
虽然我已经能回答自己的问题,但仍为其他解决方案留出余地。我很想知道还有哪些替代方案,特别是那些性能更好、更有创意,或者能与 data.table 这类不同生态系统配合使用的方案。
走向优化
理想情况下,进一步的解决方案应该:不必为 order(Srt_1, Srt_2, ...) 中的每一个排序列重新对 df 计算分组;并且不要比现有的 data.table 建议更慢。
发布于 2022-01-18 18:00:11
解决方案
在 tidyverse 中,这个目标可以通过一个简单的工作流,或(除其他方案外)以下两个函数来实现。
工作流程
您只需省去 arrange_groups(),改用 mutate() 实现 dplyr 工作流,因为其中的操作(如 order())本来就会在组内应用。
library(dplyr)
# Workflow solution: because mutate() operates within groups, reordering
# every column by order(Srt) shuffles values only among the rows of each
# group — row positions never leave their group.
tb_1 %>%
group_by(Grp) %>%
# Arbitrary sorting variables go HERE:
mutate(across(everything(), ~.[order(Srt)]))
# ^^^ 任意排序变量写在上面这一行
重序函数
这个 arrange_groups_1() 函数先按现有分组排列行,再按 ... 中给出的变量在组内排序;完成组内排序之后,arrange_groups_1() 再把各组映射回它们原来占据的行位置。
# arrange_groups_1(): sort rows within each existing group while keeping
# every group's set of row positions unchanged.
# '.data' is a grouped data frame; '...' are the sorting variables,
# forwarded unevaluated to dplyr::arrange().
arrange_groups_1 <- function(.data, ...) {
# Arrange into group "regions", and sort within each region; then...
dplyr::arrange(.data = .data, ... = ..., .by_group = TRUE)[
# ...map the results back to the original structure.
# order(order(g)) is the rank of each group index under a stable sort,
# i.e. the position each arranged row must take to restore the
# original group layout.
order(order(dplyr::group_indices(.data = .data))),
]
}它与dplyr兼容。
library(dplyr)
# Demonstration: arrange_groups_1() drops into a standard dplyr pipeline.
tb_1 %>%
group_by(Grp) %>%
arrange_groups_1(Srt)突变函数
与arrange_groups_1()相比,arrange_groups_2()解决方案不那么聪明,但更简单,它只是以功能形式实现工作流。
# arrange_groups_2(): same contract as arrange_groups_1(), implemented as
# a functional wrapper around the mutate()/across() workflow above.
# '...' are the sorting variables, captured as quosures so they are
# evaluated in the data mask of each group.
arrange_groups_2 <- function(.data, ...) {
# Capture the symbols for the sorting variables.
dots <- dplyr::enquos(...)
dplyr::mutate(
.data = .data,
dplyr::across(
# Sort across the entire dataset.
.cols = dplyr::everything(),
# Sort each group "in place"; by variables captured from the proper scope.
.fns = ~.[order(!!!dots)]
)
)
}它也与dplyr兼容。
library(dplyr)
# Demonstration: arrange_groups_2() in a dplyr pipeline.
tb_1 %>%
group_by(Grp) %>%
arrange_groups_2(Srt)结果
给定像您这样的tb_1,所有这些解决方案都会产生预期的结果:
# A tibble: 7 x 2
# Groups: Grp [2]
Grp Srt
<chr> <int>
1 A 1
2 B 2
3 B 4
4 A 3
5 A 5
6 A 10
7 B 7
性能
在大型数据集上,性能差异可能变得很大。给出了一个具有100万个观测值和多个变量的df,用于分组(Grp_*)和排序(Srt_*)
# Benchmark fixture: 1,000,000 rows with four grouping variables (Grp_*,
# sampled letters) and four sorting variables (Srt_*, each a distinct
# permutation, so ties never occur). Seeded for reproducibility.
set.seed(0)
df <- data.frame(
Record_ID = 1:1000000,
Grp_1 = sample(x = letters[ 1:6 ] , size = 1000000, replace = TRUE ),
Grp_2 = sample(x = letters[ 7:12] , size = 1000000, replace = TRUE ),
Grp_3 = sample(x = letters[13:18] , size = 1000000, replace = TRUE ),
Grp_4 = sample(x = letters[19:26] , size = 1000000, replace = TRUE ),
Srt_1 = sample(x = 1:1000000, size = 1000000, replace = FALSE),
Srt_2 = sample(x = 1000001:2000000, size = 1000000, replace = FALSE),
Srt_3 = sample(x = 2000001:3000000, size = 1000000, replace = FALSE),
Srt_4 = sample(x = 3000001:4000000, size = 1000000, replace = FALSE)
)这里计算了每个解决方案的相对性能:
library(dplyr)
library(microbenchmark)
# Benchmark the three solutions while scaling from one to four
# grouping/sorting variable pairs; 50 repetitions per expression.
performances <- list(
# One grouping variable, one sorting variable.
one_var = microbenchmark(
arrange_groups_1 = df %>%
group_by(Grp_1) %>%
arrange_groups_1(Srt_1),
arrange_groups_2 = df %>%
group_by(Grp_1) %>%
arrange_groups_2(Srt_1),
workflow = df %>%
group_by(Grp_1) %>%
mutate(across(everything(), ~.[order(Srt_1)])),
times = 50
),
# Two grouping variables, two sorting variables.
two_vars = microbenchmark(
arrange_groups_1 = df %>%
group_by(Grp_1, Grp_2) %>%
arrange_groups_1(Srt_1, Srt_2),
arrange_groups_2 = df %>%
group_by(Grp_1, Grp_2) %>%
arrange_groups_2(Srt_1, Srt_2),
workflow = df %>%
group_by(Grp_1, Grp_2) %>%
mutate(across(everything(), ~.[order(Srt_1, Srt_2)])),
times = 50
),
# Three grouping variables, three sorting variables.
three_vars = microbenchmark(
arrange_groups_1 = df %>%
group_by(Grp_1, Grp_2, Grp_3) %>%
arrange_groups_1(Srt_1, Srt_2, Srt_3),
arrange_groups_2 = df %>%
group_by(Grp_1, Grp_2, Grp_3) %>%
arrange_groups_2(Srt_1, Srt_2, Srt_3),
workflow = df %>%
group_by(Grp_1, Grp_2, Grp_3) %>%
mutate(across(everything(), ~.[order(Srt_1, Srt_2, Srt_3)])),
times = 50
),
# Four grouping variables, four sorting variables.
four_vars = microbenchmark(
arrange_groups_1 = df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
arrange_groups_1(Srt_1, Srt_2, Srt_3, Srt_4),
arrange_groups_2 = df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
arrange_groups_2(Srt_1, Srt_2, Srt_3, Srt_4),
workflow = df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
mutate(across(everything(), ~.[order(Srt_1, Srt_2, Srt_3, Srt_4)])),
times = 50
)
)显然,arrange_groups_1()已经过时了。我怀疑arrange_groups_2()可以与工作流保持一致,在提供更多符合人体工程学的使用的同时,仍然可以看到后者。但是,对于更大的分组和排序变量集,这种怀疑应该在其他(以及更好的)机器上得到验证。
#> performances
$one_var
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_1 2066.4674 2155.8859 2231.3547 2199.7442 2283.5782 2565.0542 50
arrange_groups_2 352.3775 385.1829 435.2595 444.8746 464.1493 607.0927 50
workflow 337.2756 391.0174 428.9049 435.8385 454.7347 546.4498 50
$two_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_1 3580.5395 3688.1506 3842.2048 3799.5430 3979.9716 4317.7100 50
arrange_groups_2 230.1166 239.9141 265.0786 249.3640 287.1006 359.1822 50
workflow 221.6627 234.2732 256.6200 243.3707 281.2269 365.9102 50
$three_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_1 5113.6341 5340.5483 5441.3399 5443.5068 5535.0578 5946.6958 50
arrange_groups_2 261.9329 274.1785 295.6854 282.4638 323.5710 412.0139 50
workflow 224.8709 236.9958 263.2440 252.6042 292.7043 339.6351 50
$four_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_1 6810.3864 7035.7077 7237.6941 7156.7051 7314.4667 8051.8558 50
arrange_groups_2 581.9000 603.7822 640.8977 626.4116 672.6488 859.8239 50
workflow 349.7786 361.6454 391.7517 375.1532 429.3643 485.9227 50
更新
混合函数
受@akrun的回答的启发,这里有一个集成data.table功能的函数.
# arrange_groups_3(): hybrid approach — convert to data.table so the
# reordering runs through data.table's grouped ':=' update, then convert
# back to a tibble.
# NOTE(review): per the prose below, as_tibble() drops the grouping that
# group_by() added, so the result is no longer a grouped_df.
arrange_groups_3 <- function(.data, ...) {
# Name the variables for grouping, and their complement in '.data'.
group_vars <- dplyr::group_vars(.data)
other_vars <- setdiff(names(.data), group_vars)
# For proper scoping, generate here the expression for sorting.
sort_expr <- substitute(order(...))
dplyr::as_tibble(data.table::as.data.table(.data)[,
(other_vars) := lapply(
# Sort each column, using an index...
.SD, \(x, i) x[i],
# ...which we need calculate only once.
i = eval(sort_expr)
),
group_vars
])
}dplyr.的工效学...with
library(dplyr)
# Demonstration: arrange_groups_3() in a dplyr pipeline.
tb_1 %>%
group_by(Grp) %>%
arrange_groups_3(Srt)但是,我的实现删除了.data中的原始分组,因此它仍然是正在进行的工作。
快速突变
这一相当快速的实现受到 @Henrik 建议的启发,使用了 dtplyr —— 一个以 data.table 为后端的 dplyr 接口。
# arrange_groups_4(): the mutate()/across() workflow executed through the
# dtplyr backend, which translates the dplyr verbs into data.table code.
# '...' are the sorting variables; the grouping is taken from '.data'.
arrange_groups_4 <- function(.data, ...) {
# Capture the symbols for the sorting and grouping variables.
sort_syms <- dplyr::enquos(...)
group_syms <- dplyr::groups(.data)
.data |>
# Use a "data.table" backend.
dtplyr::lazy_dt() |>
# Preserve the grouping.
dplyr::group_by(!!!group_syms) |>
# Perform the sorting.
dplyr::mutate(
dplyr::across(
# Sort across the entire dataset.
.cols = dplyr::everything(),
# Sort each group "in place": subscript using the index...
.fns = `[`,
# ...generated when ordering by the sorting variables.
i = order(!!!sort_syms)
)
)
}虽然我还没有对它进行比4分组和排序变量更多的测试,但它似乎是在线性时间内完成的:
$one_var
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_4 30.738 31.8028 46.81692 37.6586 59.8274 95.4703 50
$two_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_4 41.4364 41.9118 52.91332 46.4306 66.1674 80.171 50
$three_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_4 47.8605 48.6225 62.06675 51.9562 71.487 237.0102 50
$four_vars
Unit: milliseconds
expr min lq mean median uq max neval
arrange_groups_4 67.306 69.1426 78.68869 73.81695 88.7874 108.2624 50
发布于 2022-01-18 19:03:36
下面是另一个dplyr解决方案,它依赖于保持行顺序的联接。(当然,可以将id列作为最后一步删除,临时对象不需要单独创建,但该方法在此演示文稿中非常清晰。)
# Join-based solution. 'group_order' tags each row of 'tb_1' with its
# within-group position ('id') in the ORIGINAL row order; 'row_order'
# computes the same 'id' after sorting by 'Srt'. Joining on (Grp, id)
# therefore places each group's i-th smallest 'Srt' into the slot where
# that group's i-th row originally sat.
group_order = tb_1 %>%
select(Grp) %>%
group_by(Grp) %>%
mutate(id = row_number())
row_order = tb_1 %>%
arrange(Srt) %>%
group_by(Grp) %>%
mutate(id = row_number())
# left_join() keeps the row order of its left operand, i.e. the original order.
result = group_order %>% left_join(row_order, by = c("Grp", "id"))
result
# # A tibble: 7 × 3
# # Groups: Grp [2]
# Grp id Srt
# <chr> <int> <int>
# 1 A 1 1
# 2 B 1 2
# 3 B 2 4
# 4 A 2 3
# 5 A 3 5
# 6 A 4 10
# 7 B 3 7基准测试,这比arrange_groups_1更好,但在其他方面不是很好:
# Re-run the four-variable benchmark, now including the join approach
# (10 repetitions only, so results are noisier than the 50-run tables).
four_vars = microbenchmark(
arrange_groups_2 = df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
arrange_groups_2(Srt_1, Srt_2, Srt_3, Srt_4),
workflow = df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
mutate(across(everything(), ~.[order(Srt_1, Srt_2, Srt_3, Srt_4)])),
# The join approach, inlined without the intermediate objects.
join = {
df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
mutate(id = row_number()) %>%
left_join(
df %>%
arrange(Srt_1, Srt_2, Srt_3, Srt_4) %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
mutate(id = row_number()),
by = c("Grp_1", "Grp_2", "Grp_3", "Grp_4", "id")
)
},
times = 10
)
four_vars
# Unit: milliseconds
# expr min lq mean median uq max neval
# arrange_groups_2 356.7114 366.2305 393.7209 377.6245 389.1009 537.6800 10
# workflow 242.6982 245.5079 252.8441 252.3410 257.7656 267.5277 10
# join 366.6957 400.1438 438.5274 443.0696 477.5481 505.2293 10
发布于 2022-01-18 19:20:26
这个问题是关于dplyr的。这里是data.table的一次尝试,因为这也涉及到效率。OP的大数据集'df‘的基准测试如下
library(data.table)
# Timing comparison on the large 'df': the dplyr workflow vs. a pure
# data.table update.
system.time({
df %>%
group_by(Grp_1, Grp_2, Grp_3, Grp_4) %>%
mutate(across(everything(), ~.[order(Srt_1, Srt_2, Srt_3, Srt_4)]))
})
# user system elapsed
# 0.552 0.013 0.564
# data.table version: update the non-grouping columns by reference,
# reordering each one within every group.
# NOTE(review): setDT() converts 'df' in place, so 'df' itself is mutated.
system.time({
grpnms <- grep("Grp", names(df), value = TRUE)
othernms <- setdiff(names(df), grpnms)
setDT(df)[, (othernms) := lapply(.SD, \(x)
x[order(Srt_1, Srt_2, Srt_3, Srt_4)]), grpnms]
})
# user system elapsed
# 0.348 0.012 0.360 https://stackoverflow.com/questions/70760281
复制相似问题