Threadpool blocking #317
@@ -0,0 +1,83 @@
use worker::Worker;

use futures::Poll;

/// Error raised by `blocking`.
#[derive(Debug)]
pub struct BlockingError {
    _p: (),
}

/// Enter a blocking section of code.
///
/// The `blocking` function annotates a section of code that performs a blocking
/// operation, either by issuing a blocking syscall or by performing a long
/// running CPU bound computation.
///
/// When the `blocking` function enters, it hands off the responsibility of
/// processing the current work queue to another thread. Then, it calls the
/// supplied closure. The closure is permitted to block indefinitely.
///
/// If the maximum number of concurrent `blocking` calls has been reached, then
/// `NotReady` is returned and the task is notified once existing `blocking`
/// calls complete. The maximum value is specified when creating a thread pool
/// using [`Builder::max_blocking`][build].
///
/// [build]: struct.Builder.html#method.max_blocking
///
/// # Background
///
/// By default, the Tokio thread pool expects that tasks will only run for short
/// periods at a time before yielding back to the thread pool. This is the basic
/// premise of cooperative multitasking.
///
/// However, it is common to want to perform a blocking operation while
/// processing an asynchronous computation. Examples of blocking operations
/// include:
///
/// * Performing synchronous file operations (reading and writing).
/// * Blocking on acquiring a mutex.
/// * Performing a CPU bound computation, like cryptographic encryption or
///   decryption.
///
/// One option for dealing with blocking operations in an asynchronous context
/// is to use a thread pool dedicated to performing these operations. This is
/// not ideal as it requires bidirectional message passing as well as a channel
/// to communicate, which adds a level of buffering.
///
/// Instead, `blocking` hands off the responsibility of processing the work
/// queue to another thread. This hand off is light compared to a channel and
/// does not require buffering.
///
/// # Panics
///
/// This function panics if not called from the context of a thread pool worker.
pub fn blocking<F, T>(f: F) -> Poll<T, BlockingError>
    where F: FnOnce() -> T,
{
    let res = Worker::with_current(|worker| {
        let worker = worker.expect("not called from a runtime thread");

        // Transition the worker state to blocking. This will exit the fn early
        // with `NotReady` if the pool does not have enough capacity to enter
        // blocking mode.
        worker.transition_to_blocking()
    });

    // If the transition cannot happen, exit early
    try_ready!(res);

    // Currently in blocking mode, so call the inner closure
    let ret = f();

    // Try to transition out of blocking mode. This is a fast path that takes
    // back ownership of the worker if the worker handoff didn't complete yet.
    Worker::with_current(|worker| {
        // Worker must be set since it was above.
        worker.unwrap()
            .transition_from_blocking();
    });

    // Return the result
    Ok(ret.into())
}
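For context, here is a rough sketch of how `blocking` is meant to be driven from inside a task, assuming `blocking` is re-exported at the crate root and that the pool exposes a `shutdown_on_idle` future (neither assumption is part of this diff); the file read inside the closure is purely illustrative:

```rust
extern crate futures;
extern crate tokio_threadpool;

use std::fs;

use futures::Future;
use futures::future::{lazy, poll_fn};
use tokio_threadpool::{blocking, ThreadPool};

fn main() {
    let pool = ThreadPool::new();

    pool.spawn(lazy(|| {
        // `poll_fn` turns the closure into a future, so `blocking` is called
        // from a poll context on a worker thread. If the pool is at its
        // `max_blocking` limit, `blocking` returns `NotReady` and the task is
        // notified again once capacity frees up.
        poll_fn(|| {
            blocking(|| {
                // Illustrative blocking work: a synchronous file read.
                fs::read_to_string("/etc/hostname")
            }).map_err(|_| panic!("the thread pool shut down"))
        })
        .map(|contents| {
            let len = contents.map(|s| s.len()).unwrap_or(0);
            println!("read {} bytes", len);
        })
    }));

    // Wait for the spawned work to finish before exiting.
    pool.shutdown_on_idle().wait().unwrap();
}
```

Because `blocking` can return `NotReady`, it has to live inside something that can be polled repeatedly; that is what `poll_fn` provides here.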
@@ -2,7 +2,7 @@ use callback::Callback;
use config::{Config, MAX_WORKERS};
use park::{BoxPark, BoxedPark, DefaultPark};
use sender::Sender;
-use pool::Pool;
+use pool::{Pool, MAX_BACKUP};
use thread_pool::ThreadPool;
use worker::{self, Worker, WorkerId};

@@ -63,6 +63,10 @@ pub struct Builder {
    /// Number of workers to spawn
    pool_size: usize,

    /// Maximum number of futures that can be in a blocking section
    /// concurrently.
    max_blocking: usize,

    /// Generates the `Park` instances
    new_park: Box<Fn(&WorkerId) -> BoxPark>,
}

@@ -99,11 +103,14 @@ impl Builder {
        Builder {
            pool_size: num_cpus,
            max_blocking: 100,

[Review comment] is there a reason this isn't stored on the […]
[Reply] The […]

            config: Config {
                keep_alive: None,
                name_prefix: None,
                stack_size: None,
                around_worker: None,
                after_start: None,
                before_stop: None,
            },
            new_park,
        }

@@ -138,6 +145,33 @@ impl Builder {
        self
    }

    /// Set the maximum number of concurrent blocking sections.
    ///
    /// This must be a number between 1 and 32,768, though it is advised to keep
    /// this value on the smaller side.

[Review comment] What happens when the number of […]

    ///
    /// The default value is 100.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio_threadpool;
    /// # extern crate futures;
    /// # use tokio_threadpool::Builder;
    ///
    /// # pub fn main() {
    /// // Create a thread pool with default configuration values
    /// let thread_pool = Builder::new()
    ///     .max_blocking(200)
    ///     .build();
    /// # }
    /// ```
    pub fn max_blocking(&mut self, val: usize) -> &mut Self {
        assert!(val <= MAX_BACKUP, "max value is {}", MAX_BACKUP);
        self.max_blocking = val;
        self
    }

    /// Set the worker thread keep alive duration
    ///
    /// If set, a worker thread will wait for up to the specified duration for

@@ -255,6 +289,61 @@ impl Builder {
        self
    }

    /// Execute function `f` after each thread is started but before it starts
    /// doing work.
    ///
    /// This is intended for bookkeeping and monitoring use cases.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio_threadpool;
    /// # extern crate futures;
    /// # use tokio_threadpool::Builder;
    ///
    /// # pub fn main() {
    /// // Create a thread pool with default configuration values
    /// let thread_pool = Builder::new()
    ///     .after_start(|| {
    ///         println!("thread started");
    ///     })
    ///     .build();
    /// # }
    /// ```
    pub fn after_start<F>(&mut self, f: F) -> &mut Self
        where F: Fn() + Send + Sync + 'static
    {
        self.config.after_start = Some(Arc::new(f));
        self
    }

    /// Execute function `f` before each thread stops.
    ///
    /// This is intended for bookkeeping and monitoring use cases.

[Review comment] Presumably this cannot be guaranteed to run (whereas […]
[Reply] If the thread exits, it should run (as long as the threadpool lib itself doesn't panic). What sort of guarantee would you expect?
[Reply] Basically, I'm wondering how a process needs to manage the threadpool during process shutdown. Servers typically are waiting on some set of futures that complete once the process has received a shutdown signal. Once these futures complete, is there a way to shut down the pool gracefully? If the pool is just dropped, are threads guaranteed to exit cleanly before the main thread exits?

    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate tokio_threadpool;
    /// # extern crate futures;
    /// # use tokio_threadpool::Builder;
    ///
    /// # pub fn main() {
    /// // Create a thread pool with default configuration values
    /// let thread_pool = Builder::new()
    ///     .before_stop(|| {
    ///         println!("thread stopping");
    ///     })
    ///     .build();
    /// # }
    /// ```
    pub fn before_stop<F>(&mut self, f: F) -> &mut Self
        where F: Fn() + Send + Sync + 'static
    {
        self.config.before_stop = Some(Arc::new(f));
        self
    }

    /// Customize the `park` instance used by each worker thread.
    ///
    /// The provided closure `f` is called once per worker and returns a `Park`

@@ -331,6 +420,7 @@ impl Builder {
        let inner = Arc::new(
            Pool::new(
                workers.into_boxed_slice(),
                self.max_blocking,
                self.config.clone()));

        // Wrap with `Sender`
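The review thread above asks how the pool should be shut down gracefully so that `before_stop` gets a chance to run before the main thread exits. Here is a minimal sketch of one approach, assuming the pool exposes a `shutdown_on_idle` method that returns a future (that API is not part of this diff):

```rust
extern crate futures;
extern crate tokio_threadpool;

use futures::Future;
use futures::future::lazy;
use tokio_threadpool::Builder;

fn main() {
    // Configure the pool with the hooks added in this change.
    let pool = Builder::new()
        .max_blocking(200)
        .after_start(|| println!("worker thread started"))
        .before_stop(|| println!("worker thread stopping"))
        .build();

    // Spawn some work onto the pool.
    pool.spawn(lazy(|| {
        println!("hello from the pool");
        Ok(())
    }));

    // Block until all spawned work is done and the workers have shut down,
    // giving each worker's `before_stop` hook a chance to run before `main`
    // returns. (`shutdown_on_idle` is an assumed API here.)
    pool.shutdown_on_idle().wait().unwrap();
}
```

Whether simply dropping the pool gives the same guarantee is exactly the open question raised in the thread above.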
@@ -1,18 +1,32 @@
use callback::Callback;

use std::fmt;
use std::sync::Arc;
use std::time::Duration;

/// Thread pool specific configuration values
-#[derive(Debug, Clone)]
+#[derive(Clone)]
pub(crate) struct Config {
    pub keep_alive: Option<Duration>,
    // Used to configure a worker thread
    pub name_prefix: Option<String>,
    pub stack_size: Option<usize>,
    pub around_worker: Option<Callback>,
    pub after_start: Option<Arc<Fn() + Send + Sync>>,
    pub before_stop: Option<Arc<Fn() + Send + Sync>>,
}

/// Max number of workers that can be part of a pool. This is the most that can
/// fit in the scheduler state. Note that this is the max number of **active**
/// threads. There can be more standby threads.
pub(crate) const MAX_WORKERS: usize = 1 << 15;

impl fmt::Debug for Config {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("Config")
            .field("keep_alive", &self.keep_alive)
            .field("name_prefix", &self.name_prefix)
            .field("stack_size", &self.stack_size)
            .finish()
    }
}
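A note on why the `Debug` derive is dropped in this hunk: the new `after_start` and `before_stop` fields are trait objects (`Arc<Fn() + Send + Sync>`), which do not implement `Debug`, so the struct switches to a hand-written impl that prints only the debuggable fields. Below is a standalone sketch of the same pattern (the `Hooks` type and its fields are invented for illustration):

```rust
use std::fmt;
use std::sync::Arc;

struct Hooks {
    name: Option<String>,
    // A stored callback has no useful `Debug` representation, so deriving
    // `Debug` for this struct would not compile.
    on_start: Option<Arc<dyn Fn() + Send + Sync>>,
}

impl fmt::Debug for Hooks {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Print only the fields that are themselves `Debug`; summarize the rest.
        fmt.debug_struct("Hooks")
            .field("name", &self.name)
            .field("has_on_start", &self.on_start.is_some())
            .finish()
    }
}

fn main() {
    let on_start: Arc<dyn Fn() + Send + Sync> = Arc::new(|| println!("started"));
    let hooks = Hooks {
        name: Some("worker".to_string()),
        on_start: Some(on_start),
    };
    println!("{:?}", hooks);
}
```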
[Review comment] "CPU-bound"