Auto merge of #7838 - ehuss:fix-memory-rustc-output, r=alexcrichton
Avoid buffering large amounts of rustc output.

If `rustc` prints out a lot of information (such as with `RUSTC_LOG`, or a huge number of diagnostics), cargo would buffer up large amounts of it in memory. For normal builds, this happens if the terminal does not print fast enough. For "fresh" replay, *everything* was being buffered.

There are two issues:

1. There is no back-pressure on the mpsc queue. If messages come in faster than they can be processed, it grows without bound.
2. The cache-replay code runs in the "fresh" code path, which does not spawn a thread. Thus the main thread was blocked and unable to process `Message`s while the replay was happening.

The solution here is to use a bounded queue, and to always spawn a thread for the "fresh" case.

The main concern is performance. Previously the "fresh" jobs avoided spawning a thread to improve performance. I did a fair bit of profiling to understand the impact, using projects with anywhere from 100 to 500 units. On my macOS machine, I found spawning a thread to be slightly faster (1-5%). On Linux and Windows, it was generally about 0 to 5% slower. It might be helpful for others to profile it on their own systems. I'm on the fence about the cost/benefit here. It seems generally good to reduce memory usage, but the slight performance hit is disappointing. I tried several other approaches to fix this, all with worse trade-offs (I can discuss them if interested).

Fixes #6197
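To make the back-pressure idea concrete, here is a minimal sketch (not code from this PR) using the standard library's `std::sync::mpsc::sync_channel`: with a bounded buffer, a producer that outruns a slow consumer blocks on `send` instead of accumulating output in memory.

```rust
use std::sync::mpsc::sync_channel;
use std::thread;
use std::time::Duration;

fn main() {
    // A bounded channel with capacity 4: `send` blocks once 4 messages are
    // queued, so a fast producer cannot outrun the consumer indefinitely.
    let (tx, rx) = sync_channel::<String>(4);

    let producer = thread::spawn(move || {
        for i in 0..20 {
            // Blocks here whenever the consumer falls behind.
            tx.send(format!("diagnostic line {}", i)).unwrap();
        }
    });

    for line in rx {
        // Simulate a terminal that prints slowly.
        thread::sleep(Duration::from_millis(10));
        println!("{}", line);
    }

    producer.join().unwrap();
}
```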
Showing 5 changed files with 200 additions and 46 deletions.
@@ -0,0 +1,75 @@
use std::collections::VecDeque;
use std::sync::{Condvar, Mutex};
use std::time::Duration;

/// A simple, threadsafe, queue of items of type `T`
///
/// This is a sort of channel where any thread can push to a queue and any
/// thread can pop from a queue.
///
/// This supports both bounded and unbounded operations. `push` will never block,
/// and allows the queue to grow without bounds. `push_bounded` will block if the
/// queue is over capacity, and will resume once there is enough capacity.
pub struct Queue<T> {
    state: Mutex<State<T>>,
    popper_cv: Condvar,
    bounded_cv: Condvar,
    bound: usize,
}

struct State<T> {
    items: VecDeque<T>,
}

impl<T> Queue<T> {
    pub fn new(bound: usize) -> Queue<T> {
        Queue {
            state: Mutex::new(State {
                items: VecDeque::new(),
            }),
            popper_cv: Condvar::new(),
            bounded_cv: Condvar::new(),
            bound,
        }
    }

    /// Pushes an item onto the queue without blocking, ignoring the bound.
    pub fn push(&self, item: T) {
        self.state.lock().unwrap().items.push_back(item);
        self.popper_cv.notify_one();
    }

    /// Pushes an item onto the queue, blocking if the queue is full.
    pub fn push_bounded(&self, item: T) {
        let locked_state = self.state.lock().unwrap();
        let mut state = self
            .bounded_cv
            .wait_while(locked_state, |s| s.items.len() >= self.bound)
            .unwrap();
        state.items.push_back(item);
        self.popper_cv.notify_one();
    }

    /// Pops the next item, waiting up to `timeout` for one to arrive.
    /// Returns `None` if the timeout elapses with the queue still empty.
    pub fn pop(&self, timeout: Duration) -> Option<T> {
        let (mut state, result) = self
            .popper_cv
            .wait_timeout_while(self.state.lock().unwrap(), timeout, |s| s.items.is_empty())
            .unwrap();
        if result.timed_out() {
            None
        } else {
            let value = state.items.pop_front()?;
            if state.items.len() < self.bound {
                // Assumes threads cannot be canceled.
                self.bounded_cv.notify_one();
            }
            Some(value)
        }
    }

    /// Drains and returns every item currently in the queue without blocking,
    /// waking any producers blocked in `push_bounded`.
    pub fn try_pop_all(&self) -> Vec<T> {
        let mut state = self.state.lock().unwrap();
        let result = state.items.drain(..).collect();
        self.bounded_cv.notify_all();
        result
    }
}
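For context, here is a hypothetical usage sketch of this `Queue` (the bound, message type, and counts are illustrative and not taken from the PR): a worker thread pushes with back-pressure via `push_bounded`, while the consumer drains messages with a timed `pop` so it can do other work between messages.

```rust
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn demo() {
    // Illustrative bound; the value cargo actually uses is not shown in this diff.
    let queue: Arc<Queue<String>> = Arc::new(Queue::new(100));

    let producer = {
        let queue = Arc::clone(&queue);
        thread::spawn(move || {
            for i in 0..1_000 {
                // Blocks once 100 messages are waiting, so memory use stays bounded.
                queue.push_bounded(format!("message {}", i));
            }
        })
    };

    let mut received = 0;
    while received < 1_000 {
        // Waits up to 500ms for a message, returning None on timeout.
        if let Some(msg) = queue.pop(Duration::from_millis(500)) {
            println!("{}", msg);
            received += 1;
        }
    }

    producer.join().unwrap();
}
```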