diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index a8c2aa98cd083..37b305d0a8b50 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -477,8 +477,8 @@ fn remove_cycle(
 /// Detects query cycles by using depth first search over all active query jobs.
 /// If a query cycle is found it will break the cycle by finding an edge which
 /// uses a query latch and then resuming that waiter.
-/// There may be multiple cycles involved in a deadlock, so this searches
-/// all active queries for cycles before finally resuming all the waiters at once.
+/// There may be multiple cycles involved in a deadlock, but we only break one
+/// cycle per call, resuming a single waiter each time. See the `FIXME` below.
 pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) {
     let mut wakelist = Vec::new();
     let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
@@ -488,6 +488,19 @@ pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry)
     while jobs.len() > 0 {
         if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
             found_cycle = true;
+
+            // FIXME(#137731): Resuming all the waiters at once may cause deadlocks,
+            // so we resume one waiter at a time for now. It's still unclear whether
+            // this is due to possible issues in rustc-rayon or instead in the
+            // handling of query cycles.
+            // This seems to only appear when multiple query cycle errors are
+            // involved, so this reduction in parallelism, while suboptimal, is not
+            // universal and only the deadlock handler will encounter these cases.
+            // The workaround loses some potential gains, but there are still big
+            // improvements in the common case, and no regressions compared to the
+            // single-threaded case. More investigation is still needed; once this
+            // is fixed, we can wake all the waiters up at once.
+            break;
         }
     }
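
For context, here is a minimal, self-contained sketch of the control flow this patch introduces: detect a cycle, stop searching, and resume a single waiter rather than collecting waiters from every cycle. This is not rustc's implementation; `Waiter`, `find_cycle`, and the plain `Vec<usize>` job list are hypothetical stand-ins for rustc's `QueryMap`, `remove_cycle`, and query latches.

```rust
/// Hypothetical stand-in for a thread parked on a query latch;
/// resuming it lets its cycle make progress (and report a cycle error).
struct Waiter(usize);

impl Waiter {
    fn resume(&self) {
        println!("resuming waiter {}", self.0);
    }
}

/// Stand-in for `remove_cycle`: consumes jobs until it finds a cycle and
/// pushes the waiter chosen to break that cycle onto `wakelist`.
/// Returns true if a cycle was found.
fn find_cycle(jobs: &mut Vec<usize>, wakelist: &mut Vec<Waiter>) -> bool {
    if let Some(job) = jobs.pop() {
        wakelist.push(Waiter(job));
        true
    } else {
        false
    }
}

fn break_query_cycles(mut jobs: Vec<usize>) {
    let mut wakelist = Vec::new();
    let mut found_cycle = false;

    while !jobs.is_empty() {
        if find_cycle(&mut jobs, &mut wakelist) {
            found_cycle = true;
            // The workaround from the diff: stop after the first cycle instead
            // of searching the remaining jobs. Other cycles, if any, are dealt
            // with by later invocations of the deadlock handler, once this
            // waiter has been resumed and its cycle has unwound.
            break;
        }
    }

    assert!(found_cycle, "deadlock handler found no cycle to break");

    // Because of the `break` above, this loop resumes exactly one waiter
    // per call; without it, every cycle's waiter would be woken here at once.
    for waiter in wakelist {
        waiter.resume();
    }
}

fn main() {
    break_query_cycles(vec![1, 2, 3]);
}
```

The design trade-off mirrors the `FIXME`: resuming one waiter per handler invocation serializes recovery when several cycles deadlock together, but it sidesteps the deadlocks observed when all waiters are woken simultaneously, and the single-cycle common case is unaffected.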