From 77c496be4bdb5fd5facebc199308ab4445dc8ba2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 17 Mar 2023 20:13:19 +0100 Subject: [PATCH 001/177] Add a10::Ring to Coordinator Still will be used to power io_uring operations. --- rt/Cargo.toml | 1 + rt/src/coordinator.rs | 27 +++++++++++++++++++++------ rt/src/lib.rs | 4 ++-- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 2e3c15697..55c14f7be 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -17,6 +17,7 @@ edition = "2021" test = ["heph/test"] [dependencies] +a10 = { version = "0.1.0", default-features = false, git = "https://github.com/Thomasdezeeuw/a10" } heph = { version = "0.4.0", path = "../", default-features = false } heph-inbox = { version = "0.2.3", path = "../inbox", default-features = false } log = { version = "0.4.16", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index 2737b479c..c8f0f826e 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -16,6 +16,7 @@ //! [sync worker threads]: crate::sync_worker use std::env::consts::ARCH; +use std::os::fd::{AsFd, AsRawFd}; use std::os::unix::process::parent_id; use std::sync::Arc; use std::time::Instant; @@ -24,6 +25,7 @@ use std::{fmt, io, process}; use heph::actor_ref::{ActorGroup, Delivery}; use log::{as_debug, as_display, debug, error, info, trace}; use mio::event::Event; +use mio::unix::SourceFd; use mio::{Events, Interest, Poll, Registry, Token}; use mio_signals::{SignalSet, Signals}; @@ -38,10 +40,13 @@ use crate::{ /// Token used to receive process signals. const SIGNAL: Token = Token(usize::MAX); +const RING: Token = Token(usize::MAX - 1); /// Coordinator responsible for coordinating the Heph runtime. #[derive(Debug)] pub(super) struct Coordinator { + /// I/O uring. + ring: a10::Ring, /// OS poll, used to poll the status of the (sync) worker threads and /// process `signals`. poll: Poll, @@ -75,6 +80,7 @@ impl Coordinator { worker_wakers: Box<[&'static ThreadWaker]>, trace_log: Option>, ) -> io::Result { + let ring = a10::Ring::config(512).build()?; let poll = Poll::new()?; // NOTE: on Linux this MUST be created before starting the worker // threads. @@ -89,14 +95,15 @@ impl Coordinator { let (host_os, host_name) = host_info()?; let host_id = host_id()?; Ok(Coordinator { - host_os, - host_name, - host_id, - app_name, + ring, poll, signals, internals, start: Instant::now(), + app_name, + host_os, + host_name, + host_id, }) } @@ -195,6 +202,10 @@ impl Coordinator { // polling events. let timing = trace::start(&*trace_log); let registry = self.poll.registry(); + let ring_fd = &self.ring.as_fd().as_raw_fd(); + registry + .register(&mut SourceFd(ring_fd), RING, Interest::READABLE) + .map_err(|err| rt::Error::coordinator(Error::Startup(err)))?; register_workers(registry, workers) .map_err(|err| rt::Error::coordinator(Error::RegisteringWorkers(err)))?; register_sync_workers(registry, sync_workers) @@ -392,11 +403,13 @@ fn handle_sync_worker_event( /// Error running the [`Coordinator`]. #[derive(Debug)] pub(super) enum Error { + /// Error in starting up the Coordinator. + Startup(io::Error), /// Error in [`register_workers`]. RegisteringWorkers(io::Error), /// Error in [`register_sync_workers`]. RegisteringSyncActors(io::Error), - /// Error polling ([`mio::Poll`]). + /// Error polling ([`mio::Poll`] or [`a10::Ring`]). Polling(io::Error), /// Error sending start signal to worker. 
SendingStartSignal(io::Error), @@ -408,6 +421,7 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use Error::*; match self { + Startup(err) => write!(f, "error starting coordinator: {err}"), RegisteringWorkers(err) => write!(f, "error registering worker threads: {err}"), RegisteringSyncActors(err) => { write!(f, "error registering synchronous actor threads: {err}") @@ -423,7 +437,8 @@ impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { use Error::*; match self { - RegisteringWorkers(ref err) + Startup(ref err) + | RegisteringWorkers(ref err) | RegisteringSyncActors(ref err) | Polling(ref err) | SendingStartSignal(ref err) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 64fdd7355..abeeb20b6 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -181,8 +181,8 @@ // the test feature. #![doc(cfg_hide(any(test, feature = "test")))] -#[cfg(not(any(target_os = "linux", target_os = "freebsd", target_os = "macos")))] -compile_error!("Heph currently only supports Linux, FreeBSD and macOS."); +#[cfg(not(any(target_os = "linux")))] +compile_error!("Heph currently only supports Linux."); #[cfg(not(target_pointer_width = "64"))] compile_error!("Heph currently only supports 64 bit architectures."); From 20e52a9b835afdc3e0fb0ebe8660d1b10041e2c6 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 14:24:01 +0100 Subject: [PATCH 002/177] Add I/O uring to RuntimeInternals This is for the local version only, next up is the shared version. --- rt/src/local/mod.rs | 4 ++++ rt/src/worker.rs | 38 ++++++++++++++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/rt/src/local/mod.rs b/rt/src/local/mod.rs index c458c2d5f..7f9c448fd 100644 --- a/rt/src/local/mod.rs +++ b/rt/src/local/mod.rs @@ -30,6 +30,8 @@ pub(super) struct RuntimeInternals { pub(super) scheduler: RefCell, /// OS poll, used for event notifications to support non-blocking I/O. pub(super) poll: RefCell, + /// I/O uring. + pub(super) ring: RefCell, /// Timers, deadlines and timeouts. pub(crate) timers: RefCell, /// Actor references to relay received `Signal`s to. @@ -47,6 +49,7 @@ impl RuntimeInternals { shared_internals: Arc, waker_id: WakerId, poll: Poll, + ring: a10::Ring, cpu: Option, trace_log: Option, ) -> RuntimeInternals { @@ -56,6 +59,7 @@ impl RuntimeInternals { waker_id, scheduler: RefCell::new(Scheduler::new()), poll: RefCell::new(poll), + ring: RefCell::new(ring), timers: RefCell::new(Timers::new()), signal_receivers: RefCell::new(ActorGroup::empty()), cpu, diff --git a/rt/src/worker.rs b/rt/src/worker.rs index a2c360e74..dcca3f5ed 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -17,6 +17,7 @@ use std::cell::RefMut; use std::num::NonZeroUsize; +use std::os::fd::{AsFd, AsRawFd}; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::rc::Rc; use std::sync::Arc; @@ -26,7 +27,8 @@ use std::{fmt, io, thread}; use crossbeam_channel::{self, Receiver}; use heph::actor_ref::{Delivery, SendError}; use log::{as_debug, debug, info, trace}; -use mio::{Events, Poll, Registry, Token}; +use mio::unix::SourceFd; +use mio::{Events, Interest, Poll, Registry, Token}; use crate::error::StringError; use crate::local::waker::{self, WakerId}; @@ -50,12 +52,16 @@ const COMMS: Token = Token(usize::MAX - 1); /// Token used to indicate the shared [`Poll`] (in [`shared::RuntimeInternals`]) /// has events. const SHARED_POLL: Token = Token(usize::MAX - 2); +/// Token used to indicate the I/O uring has events. 
+const RING: Token = Token(usize::MAX - 3); /// Setup a new worker thread. /// /// Use [`WorkerSetup::start`] to spawn the worker thread. pub(super) fn setup(id: NonZeroUsize) -> io::Result<(WorkerSetup, &'static ThreadWaker)> { let poll = Poll::new()?; + // TODO: configure ring. + let ring = a10::Ring::new(512)?; // Setup the waking mechanism. let (waker_sender, waker_events) = crossbeam_channel::unbounded(); @@ -66,6 +72,7 @@ pub(super) fn setup(id: NonZeroUsize) -> io::Result<(WorkerSetup, &'static Threa let setup = WorkerSetup { id, poll, + ring, waker_id, waker_events, }; @@ -79,6 +86,8 @@ pub(super) struct WorkerSetup { /// Poll instance for the worker thread. This is needed before starting the /// thread to initialise the [`rt::local::waker`]. poll: Poll, + /// I/O uring. + ring: a10::Ring, /// Waker id used to create a `Waker` for thread-local actors. waker_id: WakerId, /// Receiving side of the channel for `Waker` events. @@ -211,12 +220,18 @@ impl Worker { // Register the shared poll intance. let poll = setup.poll; - trace!(worker_id = setup.id.get(); "registring shared poll"); + trace!(worker_id = setup.id.get(); "registering I/O uring"); + let ring = setup.ring; + let ring_fd = ring.as_fd().as_raw_fd(); + poll.registry() + .register(&mut SourceFd(&ring_fd), RING, Interest::READABLE) + .map_err(Error::Init)?; + trace!(worker_id = setup.id.get(); "registering shared poll"); shared_internals .register_worker_poll(poll.registry(), SHARED_POLL) .map_err(Error::Init)?; // Register the channel to the coordinator. - trace!(worker_id = setup.id.get(); "registring communication channel"); + trace!(worker_id = setup.id.get(); "registering communication channel"); receiver .register(poll.registry(), COMMS) .map_err(Error::Init)?; @@ -227,6 +242,7 @@ impl Worker { shared_internals, setup.waker_id, poll, + ring, cpu, trace_log, ); @@ -256,6 +272,8 @@ impl Worker { mut receiver: rt::channel::Receiver, ) -> io::Result { let poll = Poll::new()?; + // TODO: configure ring. + let ring = a10::Ring::new(512)?; // TODO: this channel will grow unbounded as the waker implementation // sends pids into it. @@ -266,7 +284,8 @@ impl Worker { receiver.register(poll.registry(), COMMS)?; let id = NonZeroUsize::new(usize::MAX).unwrap(); - let internals = RuntimeInternals::new(id, shared_internals, waker_id, poll, None, None); + let internals = + RuntimeInternals::new(id, shared_internals, waker_id, poll, ring, None, None); Ok(Worker { internals: Rc::new(internals), events: Events::with_capacity(16), @@ -427,6 +446,7 @@ impl Worker { let mut scheduler = self.internals.scheduler.borrow_mut(); let mut check_comms = false; let mut check_shared_poll = false; + let mut check_ring = false; let mut amount = 0; for event in self.events.iter() { trace!(worker_id = self.internals.id.get(); "got OS event: {event:?}"); @@ -434,6 +454,7 @@ impl Worker { WAKER => { /* Need to wake up to handle user space events. 
*/ } COMMS => check_comms = true, SHARED_POLL => check_shared_poll = true, + RING => check_ring = true, token => { let pid = ProcessId::from(token); trace!( @@ -445,6 +466,15 @@ impl Worker { } } } + + if check_ring { + self.internals + .ring + .borrow_mut() + .poll(Some(Duration::ZERO)) + .map_err(Error::Polling)?; + } + trace::finish_rt( self.internals.trace_log.borrow_mut().as_mut(), timing, From 9c19b287dc8ef86382e95864b44e6640f65c0ade Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 14:36:44 +0100 Subject: [PATCH 003/177] Add I/O uring to shared runtime internals And ensure it's polled from the worker threads. --- rt/src/shared/mod.rs | 37 +++++++++++++++++++++++++++++++++++-- rt/src/worker.rs | 20 ++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index f3948ab6b..9b6b75b59 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -2,7 +2,7 @@ use std::cmp::min; use std::future::Future; -use std::os::unix::io::AsRawFd; +use std::os::fd::{AsFd, AsRawFd, RawFd}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex, TryLockError}; @@ -41,6 +41,7 @@ use waker::WakerId; /// be called inside `Arc::new_cyclic`. pub(crate) struct RuntimeSetup { poll: Poll, + ring: a10::Ring, registry: Registry, } @@ -54,11 +55,14 @@ impl RuntimeSetup { ) -> RuntimeInternals { // Needed by `RuntimeInternals::wake_workers`. debug_assert!(worker_wakers.len() >= 1); + let sq = self.ring.submission_queue().clone(); RuntimeInternals { shared_id, worker_wakers, wake_worker_idx: AtomicUsize::new(0), poll: Mutex::new(self.poll), + ring: Mutex::new(self.ring), + sq, registry: self.registry, scheduler: Scheduler::new(), timers: Timers::new(), @@ -80,6 +84,10 @@ pub(crate) struct RuntimeInternals { /// Poll instance for all shared event sources. This is polled by the worker /// thread. poll: Mutex, + /// I/O uring. + ring: Mutex, + /// SubmissionQueue for the `ring`. + sq: a10::SubmissionQueue, /// Registry for the `Coordinator`'s `Poll` instance. registry: Registry, /// Scheduler for thread-safe actors. @@ -108,8 +116,14 @@ impl RuntimeInternals { /// Setup new runtime internals. pub(crate) fn setup() -> io::Result { let poll = Poll::new()?; + // TODO: configure ring. + let ring = a10::Ring::new(512)?; let registry = poll.registry().try_clone()?; - Ok(RuntimeSetup { poll, registry }) + Ok(RuntimeSetup { + poll, + ring, + registry, + }) } /// Returns metrics about the shared scheduler and timers. @@ -146,6 +160,25 @@ impl RuntimeInternals { } } + /// Polls the I/O uring if it's currently not being polled. + pub(crate) fn try_poll_ring(&self) -> io::Result<()> { + match self.ring.try_lock() { + Ok(mut ring) => ring.poll(Some(Duration::ZERO)), + Err(TryLockError::WouldBlock) => Ok(()), + Err(TryLockError::Poisoned(err)) => panic!("failed to lock shared I/O uring: {err}"), + } + } + + /// Return the file descriptor for the I/O uring. + pub(crate) fn ring_fd(&self) -> RawFd { + self.ring.lock().unwrap().as_fd().as_raw_fd() + } + + /// Returns the I/O uring submission queue. + pub(crate) fn submission_queue(&self) -> &a10::SubmissionQueue { + &self.sq + } + /// Register an `event::Source`, see [`mio::Registry::register`]. 
pub(crate) fn register( &self, diff --git a/rt/src/worker.rs b/rt/src/worker.rs index dcca3f5ed..6f3d0cef5 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -54,6 +54,8 @@ const COMMS: Token = Token(usize::MAX - 1); const SHARED_POLL: Token = Token(usize::MAX - 2); /// Token used to indicate the I/O uring has events. const RING: Token = Token(usize::MAX - 3); +/// Token used to indicate the shared I/O uring has events. +const SHARED_RING: Token = Token(usize::MAX - 4); /// Setup a new worker thread. /// @@ -226,6 +228,15 @@ impl Worker { poll.registry() .register(&mut SourceFd(&ring_fd), RING, Interest::READABLE) .map_err(Error::Init)?; + trace!(worker_id = setup.id.get(); "registering shared I/O uring"); + let shared_ring_fd = shared_internals.ring_fd(); + poll.registry() + .register( + &mut SourceFd(&shared_ring_fd), + SHARED_POLL, + Interest::READABLE, + ) + .map_err(Error::Init)?; trace!(worker_id = setup.id.get(); "registering shared poll"); shared_internals .register_worker_poll(poll.registry(), SHARED_POLL) @@ -447,6 +458,7 @@ impl Worker { let mut check_comms = false; let mut check_shared_poll = false; let mut check_ring = false; + let mut check_shared_ring = false; let mut amount = 0; for event in self.events.iter() { trace!(worker_id = self.internals.id.get(); "got OS event: {event:?}"); @@ -455,6 +467,7 @@ impl Worker { COMMS => check_comms = true, SHARED_POLL => check_shared_poll = true, RING => check_ring = true, + SHARED_RING => check_shared_ring = true, token => { let pid = ProcessId::from(token); trace!( @@ -475,6 +488,13 @@ impl Worker { .map_err(Error::Polling)?; } + if check_shared_ring { + self.internals + .shared + .try_poll_ring() + .map_err(Error::Polling)?; + } + trace::finish_rt( self.internals.trace_log.borrow_mut().as_mut(), timing, From 7b5a50380eb0372b71b8c6a548f29958cf3afd25 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 14:37:30 +0100 Subject: [PATCH 004/177] Add submission_queue method to rt::Access To get access to the I/O uring submission queue. --- rt/src/access.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/rt/src/access.rs b/rt/src/access.rs index 3eef3f86d..749745b02 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -78,6 +78,9 @@ mod private { /// Changes the process id to `new_pid`, returning the old process id. fn change_pid(&mut self, new_pid: ProcessId) -> ProcessId; + /// Get access to the `SubmissionQueue`. + fn submission_queue(&self) -> a10::SubmissionQueue; + /// Registers the `source`. fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> where @@ -169,6 +172,10 @@ impl PrivateAccess for ThreadLocal { replace(&mut self.pid, new_pid) } + fn submission_queue(&self) -> a10::SubmissionQueue { + self.rt.internals.ring.borrow().submission_queue().clone() + } + fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> where S: event::Source + ?Sized, @@ -329,6 +336,10 @@ impl PrivateAccess for ThreadSafe { replace(&mut self.pid, new_pid) } + fn submission_queue(&self) -> a10::SubmissionQueue { + self.rt.submission_queue().clone() + } + fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> where S: event::Source + ?Sized, From 233d4e6e67bd95debaee2133675e8013e8516131 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 22:13:59 +0100 Subject: [PATCH 005/177] Add io module Starting with new buffer trait that work with A10. 
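A minimal sketch of how a read operation is expected to drive the new `BufMut` trait
(editor's illustration, not part of this patch): it assumes the trait is exported as
`heph_rt::io::BufMut`, and note that `parts` is renamed to `parts_mut` later in this
series. The `fill` helper is hypothetical and stands in for the kernel writing into the
buffer's spare capacity during an io_uring read.

    use heph_rt::io::BufMut;

    // Copy `src` into the spare capacity of `buf`, returning the buffer and the
    // number of bytes copied. This mimics what a read operation does with the
    // buffer it is handed (hypothetical helper, for illustration only).
    fn fill<B: BufMut>(src: &[u8], mut buf: B) -> (B, usize) {
        // SAFETY: `parts` returns a valid pointer and length for the buffer's
        // spare capacity, and only the bytes actually written are marked as
        // initialised via `update_length`.
        let n = unsafe {
            let (ptr, len) = buf.parts();
            let n = std::cmp::min(src.len(), len);
            std::ptr::copy_nonoverlapping(src.as_ptr(), ptr, n);
            buf.update_length(n);
            n
        };
        (buf, n)
    }

    fn main() {
        // For `Vec<u8>` only the uninitialised capacity is used; bytes already
        // in the vector are left untouched.
        let (buf, n) = fill(b"Hello mars!", Vec::with_capacity(64));
        assert_eq!(n, 11);
        assert_eq!(&buf[..n], b"Hello mars!");
    }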
--- rt/src/io/buf.rs | 409 +++++++++++++++++++++++++++++++++++++++++++++++ rt/src/io/mod.rs | 12 ++ rt/src/lib.rs | 3 +- 3 files changed, 423 insertions(+), 1 deletion(-) create mode 100644 rt/src/io/buf.rs create mode 100644 rt/src/io/mod.rs diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs new file mode 100644 index 000000000..29cbee231 --- /dev/null +++ b/rt/src/io/buf.rs @@ -0,0 +1,409 @@ +//! Buffers. + +use std::mem::MaybeUninit; + +/// Trait that defines the behaviour of buffers used in reading, which requires +/// mutable access. +/// +/// This is implemented for common types such as `Vec`, [see below]. +/// +/// [see below]: #foreign-impls +/// +/// # Safety +/// +/// Unlike normal buffers the buffer implementations for Heph have additional +/// requirements because Heph uses I/O uring. +/// +/// If the operation (that uses this buffer) is not polled to completion, i.e. +/// the `Future` is dropped before it returns `Poll::Ready`, the kernel still +/// has access to the buffer and will still attempt to write into it. This means +/// that we must delay deallocation in such a way that the kernel will not write +/// into memory we don't have access to any more. This makes, for example, stack +/// based buffers unfit to implement `BufMut`. Because we can't delay the +/// deallocation once its dropped and the kernel will overwrite part of your +/// stack (where the buffer used to be)! +pub unsafe trait BufMut: 'static { + /// Returns the writable buffer as pointer and length parts. + /// + /// # Safety + /// + /// Only initialised bytes may be written to the pointer returned. The + /// pointer *may* point to uninitialised bytes, so reading from the pointer + /// is UB. + /// + /// The implementation must ensure that the pointer is valid, i.e. not null + /// and pointing to memory owned by the buffer. Furthermore it must ensure + /// that the returned length is, in combination with the pointer, valid. In + /// other words the memory the pointer and length are pointing to must be a + /// valid memory address and owned by the buffer. + /// + /// # Why not a slice? + /// + /// Returning a slice `&[u8]` would prevent us to use unitialised bytes, + /// meaning we have to zero the buffer before usage, not ideal for + /// performance. So, naturally you would suggest `&[MaybeUninit]`, + /// however that would prevent buffer types with only initialised bytes. + /// Returning a slice with `MaybeUninit` to such as type would be unsound as + /// it would allow the caller to write unitialised bytes without using + /// `unsafe`. + unsafe fn parts(&mut self) -> (*mut u8, usize); + + /// Update the length of the byte slice, marking `n` bytes as initialised. + /// + /// # Safety + /// + /// The caller must ensure that at least the first `n` bytes returned by + /// [`parts`] are initialised. + /// + /// [`parts`]: BufMut::parts + /// + /// # Notes + /// + /// If this method is not implemented correctly methods such as + /// [`TcpStream::recv_n`] will not work correctly (as the buffer will + /// overwrite itself on successive reads). + /// + /// [`TcpStream::recv_n`]: crate::net::TcpStream::recv_n + unsafe fn update_length(&mut self, n: usize); + + /// Returns the length of the buffer as returned by [`parts`]. + /// + /// [`parts`]: BufMut::parts + fn spare_capacity(&self) -> usize; + + /// Returns `true` if the buffer has spare capacity. + fn has_spare_capacity(&self) -> bool { + self.spare_capacity() == 0 + } +} + +/// The implementation for `Vec` only uses the uninitialised capacity of the +/// vector. 
In other words the bytes currently in the vector remain untouched. +/// +/// # Examples +/// +/// The following example shows that the bytes already in the vector remain +/// untouched. +/// +/// ``` +/// use heph_rt::bytes::Bytes; +/// +/// let mut buf = Vec::with_capacity(100); +/// buf.extend(b"Hello world!"); +/// +/// write_bytes(b" Hello mars!", &mut buf); +/// +/// assert_eq!(&*buf, b"Hello world! Hello mars!"); +/// +/// fn write_bytes(src: &[u8], mut buf: B) where B: Bytes { +/// // Writes `src` to `buf`. +/// # let dst = buf.as_bytes(); +/// # let len = std::cmp::min(src.len(), dst.len()); +/// # // Safety: both the src and dst pointers are good. And we've ensured +/// # // that the length is correct, not overwriting data we don't own or +/// # // reading data we don't own. +/// # unsafe { +/// # std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), len); +/// # buf.update_length(len); +/// # } +/// } +/// ``` +// SAFETY: `Vec` manages the allocation of the bytes, so as long as it's +// alive, so is the slice of bytes. When the `Vec`tor is leaked the allocation +// will also be leaked. +unsafe impl BufMut for Vec { + unsafe fn parts(&mut self) -> (*mut u8, usize) { + let slice = self.spare_capacity_mut(); + (slice.as_mut_ptr().cast(), slice.len()) + } + + unsafe fn update_length(&mut self, n: usize) { + let new = self.len() + n; + debug_assert!(self.capacity() >= new); + self.set_len(new); + } + + fn spare_capacity(&self) -> usize { + self.capacity() - self.len() + } + + fn has_spare_capacity(&self) -> bool { + self.capacity() != self.len() + } +} + +/// Trait that defines the behaviour of buffers used in reading using vectored +/// I/O, which requires mutable access. +/// +/// # Notes +/// +/// This trait can only be implemented by internal types. However it is already +/// implemented for arrays and tuples of buffers that implement [`BufMut`], [see below]. +/// +/// [see below]: #foreign-impls +/// +/// # Safety +/// +/// This has the same safety requirements as [`BufMut`], but then for all +/// buffers used. +pub trait BufMutSlice: private::BufMutSlice + 'static {} + +// NOTE: see the `private` module below for the actual trait. + +impl BufMutSlice for [B; N] {} + +// SAFETY: `BufMutSlice` has the same safety requirements as `BufMut` and since +// `B` implements `BufMut` it's safe to implement `BufMutSlice` for an array of +// `B`. +unsafe impl private::BufMutSlice for [B; N] { + unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { + let mut iovecs = MaybeUninit::uninit_array(); + for (buf, iovec) in self.iter_mut().zip(iovecs.iter_mut()) { + let (ptr, len) = buf.parts(); + _ = iovec.write(libc::iovec { + iov_base: ptr.cast(), + iov_len: len, + }); + } + MaybeUninit::array_assume_init(iovecs) + } + + unsafe fn update_length(&mut self, n: usize) { + let mut left = n; + for buf in self.iter_mut() { + let (_, len) = buf.parts(); + if len < left { + // Fully initialised the buffer. + buf.update_length(len); + left -= len; + } else { + // Partially initialised the buffer. + buf.update_length(left); + return; + } + } + unreachable!( + "called BufMutSlice::set_init({n}), with buffers totaling in {} in size", + n - left + ); + } +} + +// NOTE: Also see implementation of `BufMutSlice` for tuples in the macro +// `buf_slice_for_tuple` below. + +/// Trait that defines the behaviour of buffers used in writing, which requires +/// read only access. 
+/// +/// # Safety +/// +/// Unlike normal buffers the buffer implementations for Heph have additional +/// requirements because Heph uses I/O uring. +/// +/// If the operation (that uses this buffer) is not polled to completion, i.e. +/// the `Future` is dropped before it returns `Poll::Ready`, the kernel still +/// has access to the buffer and will still attempt to read from it. This means +/// that we must delay deallocation in such a way that the kernel will not read +/// memory we don't have access to any more. This makes, for example, stack +/// based buffers unfit to implement `Buf`. Because we can't delay the +/// deallocation once its dropped and the kernel will read part of your stack +/// (where the buffer used to be)! This would be a huge security risk. +pub unsafe trait Buf: 'static { + /// Returns the reabable buffer as pointer and length parts. + /// + /// # Safety + /// + /// The implementation must ensure that the pointer is valid, i.e. not null + /// and pointing to memory owned by the buffer. Furthermore it must ensure + /// that the returned length is, in combination with the pointer, valid. In + /// other words the memory the pointer and length are pointing to must be a + /// valid memory address and owned by the buffer. + unsafe fn parts(&self) -> (*const u8, usize); +} + +// SAFETY: `Vec` manages the allocation of the bytes, so as long as it's +// alive, so is the slice of bytes. When the `Vec`tor is leaked the allocation +// will also be leaked. +unsafe impl Buf for Vec { + unsafe fn parts(&self) -> (*const u8, usize) { + let slice = self.as_slice(); + (slice.as_ptr().cast(), slice.len()) + } +} + +// SAFETY: `String` is just a `Vec`, see it's implementation for the safety +// reasoning. +unsafe impl Buf for String { + unsafe fn parts(&self) -> (*const u8, usize) { + let slice = self.as_bytes(); + (slice.as_ptr().cast(), slice.len()) + } +} + +// SAFETY: because the reference has a `'static` lifetime we know the bytes +// can't be deallocated, so it's safe to implement `Buf`. +unsafe impl Buf for &'static [u8] { + unsafe fn parts(&self) -> (*const u8, usize) { + (self.as_ptr(), self.len()) + } +} + +// SAFETY: because the reference has a `'static` lifetime we know the bytes +// can't be deallocated, so it's safe to implement `Buf`. +unsafe impl Buf for &'static str { + unsafe fn parts(&self) -> (*const u8, usize) { + (self.as_bytes().as_ptr(), self.len()) + } +} + +/// Trait that defines the behaviour of buffers used in writing using vectored +/// I/O, which requires read only access. +/// +/// # Notes +/// +/// This trait can only be implemented by internal types. However it is already +/// implemented for arrays and tuples of buffers that implement [`Buf`], [see +/// below]. +/// +/// [see below]: #foreign-impls +/// +/// # Safety +/// +/// This has the same safety requirements as [`Buf`], but then for all buffers +/// used. +pub trait BufSlice: private::BufSlice + 'static {} + +// NOTE: see the `private` module below for the actual trait. + +impl BufSlice for [B; N] {} + +// SAFETY: `BufSlice` has the same safety requirements as `Buf` and since `B` +// implements `Buf` it's safe to implement `BufSlice` for an array of `B`. 
+unsafe impl private::BufSlice for [B; N] { + unsafe fn as_iovec(&self) -> [libc::iovec; N] { + let mut iovecs = MaybeUninit::uninit_array(); + for (buf, iovec) in self.iter().zip(iovecs.iter_mut()) { + let (ptr, len) = buf.parts(); + _ = iovec.write(libc::iovec { + iov_base: ptr as _, + iov_len: len, + }); + } + MaybeUninit::array_assume_init(iovecs) + } +} + +macro_rules! buf_slice_for_tuple { + ( + // Number of values. + $N: expr, + // Generic parameter name and tuple index. + $( $generic: ident . $index: tt ),+ + ) => { + impl<$( $generic: BufMut ),+> BufMutSlice<$N> for ($( $generic ),+) { } + // SAFETY: `BufMutSlice` has the same safety requirements as `BufMut` + // and since all generic buffers must implement `BufMut` it's safe to + // implement `BufMutSlice` for a tuple of all those buffers. + unsafe impl<$( $generic: BufMut ),+> private::BufMutSlice<$N> for ($( $generic ),+) { + unsafe fn as_iovec(&mut self) -> [libc::iovec; $N] { + [ + $({ + let (ptr, len) = self.$index.parts(); + libc::iovec { + iov_base: ptr.cast(), + iov_len: len, + } + }),+ + ] + } + + unsafe fn update_length(&mut self, n: usize) { + let mut left = n; + $({ + let (_, len) = self.$index.parts(); + if len < left { + // Fully initialised the buffer. + self.$index.update_length(len); + left -= len; + } else { + // Partially initialised the buffer. + self.$index.update_length(left); + return; + } + })+ + unreachable!( + "called BufMutSlice::set_init({n}), with buffers totaling in {} in size", + n - left + ); + } + } + + impl<$( $generic: Buf ),+> BufSlice<$N> for ($( $generic ),+) { } + + // SAFETY: `BufSlice` has the same safety requirements as `Buf` and + // since all generic buffers must implement `Buf` it's safe to implement + // `BufSlice` for a tuple of all those buffers. + unsafe impl<$( $generic: Buf ),+> private::BufSlice<$N> for ($( $generic ),+) { + unsafe fn as_iovec(&self) -> [libc::iovec; $N] { + [ + $({ + let (ptr, len) = self.$index.parts(); + libc::iovec { + iov_base: ptr as _, + iov_len: len, + } + }),+ + ] + } + } + }; +} + +buf_slice_for_tuple!(2, A.0, B.1); +buf_slice_for_tuple!(3, A.0, B.1, C.2); +buf_slice_for_tuple!(4, A.0, B.1, C.2, D.3); +buf_slice_for_tuple!(5, A.0, B.1, C.2, D.3, E.4); +buf_slice_for_tuple!(6, A.0, B.1, C.2, D.3, E.4, F.5); +buf_slice_for_tuple!(7, A.0, B.1, C.2, D.3, E.4, F.5, G.6); +buf_slice_for_tuple!(8, A.0, B.1, C.2, D.3, E.4, F.5, G.6, I.7); + +mod private { + /// Private version of [`BufMutSlice`]. + /// + /// [`BufMutSlice`]: crate::io::BufMutSlice + pub unsafe trait BufMutSlice: 'static { + /// Returns the writable buffers as `iovec` structures. + /// + /// # Safety + /// + /// This has the same safety requirements as [`BufMut::parts`], but then for + /// all buffers used. + unsafe fn as_iovec(&mut self) -> [libc::iovec; N]; + + /// Mark `n` bytes as initialised. + /// + /// # Safety + /// + /// The caller must ensure that `n` bytes are initialised in the vectors + /// return by [`BufMutSlice::as_iovec`]. + /// + /// The implementation must ensure that that proper buffer(s) are + /// initialised. For example when this is called with `n = 10` with two + /// buffers of size `8` the implementation should initialise the first + /// buffer with `n = 8` and the second with `n = 10 - 8 = 2`. + unsafe fn update_length(&mut self, n: usize); + } + + /// Private version of [`BufSlice`]. + /// + /// [`BufSlice`]: crate::io::BufSlice + pub unsafe trait BufSlice: 'static { + /// Returns the reabable buffer as `iovec` structures. 
+ /// + /// # Safety + /// + /// This has the same safety requirements as [`Buf::parts`], but then for + /// all buffers used. + unsafe fn as_iovec(&self) -> [libc::iovec; N]; + } +} diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs new file mode 100644 index 000000000..8e21649b4 --- /dev/null +++ b/rt/src/io/mod.rs @@ -0,0 +1,12 @@ +//! Type definitions for I/O functionality. +//! +//! The main types of this module are the [`Buf`] and [`BufMut`] traits, which +//! define the requirements on buffers in use in I/O. Additionally the +//! [`BufSlice`] and [`BufMutSlice`] traits define the behaviour of buffers in +//! vectored I/O. + +// For ease of use within the crate. +pub(crate) use std::io::{Error, Result}; + +mod buf; +pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; diff --git a/rt/src/lib.rs b/rt/src/lib.rs index abeeb20b6..ac96df407 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -207,8 +207,8 @@ use std::convert::TryInto; use std::future::Future; use std::rc::Rc; use std::sync::Arc; +use std::task; use std::time::{Duration, Instant}; -use std::{io, task}; use ::log::{as_debug, debug, warn}; use heph::actor::{self, NewActor, SyncActor}; @@ -222,6 +222,7 @@ pub mod bytes; mod channel; mod coordinator; mod error; +pub mod io; mod local; pub mod log; pub mod net; From 88fe4ce8d3c416814fa587760f199bee8bcd8312 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 22:24:25 +0100 Subject: [PATCH 006/177] Add BufWrapper A type to implement the A10 Buf(Mut)(Slice) traits on using Heph's versions. --- rt/src/io/buf.rs | 81 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 29cbee231..78bc5f685 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -407,3 +407,84 @@ mod private { unsafe fn as_iovec(&self) -> [libc::iovec; N]; } } + +/// Wrapper around `B` to implement [`a10::io::BufMut`] and [`a10::io::Buf`]. 
+pub(crate) struct BufWrapper(pub(crate) B); + +unsafe impl a10::io::BufMut for BufWrapper { + unsafe fn parts(&mut self) -> (*mut u8, u32) { + let (ptr, size) = self.0.parts(); + (ptr, size as u32) + } + + unsafe fn set_init(&mut self, n: usize) { + self.0.update_length(n); + } +} + +unsafe impl BufMut for BufWrapper { + unsafe fn parts(&mut self) -> (*mut u8, usize) { + self.0.parts() + } + + unsafe fn update_length(&mut self, n: usize) { + self.0.update_length(n); + } + + fn spare_capacity(&self) -> usize { + self.0.spare_capacity() + } + + fn has_spare_capacity(&self) -> bool { + self.0.has_spare_capacity() + } +} + +unsafe impl a10::io::Buf for BufWrapper { + unsafe fn parts(&self) -> (*const u8, u32) { + let (ptr, size) = self.0.parts(); + (ptr, size as u32) + } +} + +unsafe impl Buf for BufWrapper { + unsafe fn parts(&self) -> (*const u8, usize) { + self.0.parts() + } +} + +unsafe impl, const N: usize> a10::io::BufMutSlice for BufWrapper { + unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { + self.0.as_iovec() + } + + unsafe fn set_init(&mut self, n: usize) { + self.0.update_length(n) + } +} + +impl, const N: usize> BufMutSlice for BufWrapper {} + +unsafe impl, const N: usize> private::BufMutSlice for BufWrapper { + unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { + self.0.as_iovec() + } + + unsafe fn update_length(&mut self, n: usize) { + self.0.update_length(n) + } +} + +unsafe impl, const N: usize> a10::io::BufSlice for BufWrapper { + unsafe fn as_iovec(&self) -> [libc::iovec; N] { + self.0.as_iovec() + } +} + +impl, const N: usize> BufSlice for BufWrapper {} + +unsafe impl, const N: usize> private::BufSlice for BufWrapper { + unsafe fn as_iovec(&self) -> [libc::iovec; N] { + self.0.as_iovec() + } +} From d0ca2ff8e172a0a6df246926e954a0b1a670ea7f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 22:34:27 +0100 Subject: [PATCH 007/177] Add wrappers around A10's I/O Futures --- rt/src/io/futures.rs | 72 ++++++++++++++++++++++++++++++++++++++++++++ rt/src/io/mod.rs | 7 +++++ 2 files changed, 79 insertions(+) create mode 100644 rt/src/io/futures.rs diff --git a/rt/src/io/futures.rs b/rt/src/io/futures.rs new file mode 100644 index 000000000..e5067b9be --- /dev/null +++ b/rt/src/io/futures.rs @@ -0,0 +1,72 @@ +//! A10 I/O [`Future`] wrappers. + +#![allow(missing_debug_implementations)] + +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{self, Poll}; + +use a10::extract::Extractor; + +use crate::io::buf::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; + +/// [`Future`] behind write implementations. +pub struct Write<'a, B>(pub(crate) Extractor>>); + +impl<'a, B: Buf> Future for Write<'a, B> { + type Output = io::Result<(B, usize)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, len)| (buf.0, len)) + } +} + +/// [`Future`] behind write vectored implementations. +pub struct WriteVectored<'a, B, const N: usize>( + pub(crate) Extractor, N>>, +); + +impl<'a, B: BufSlice, const N: usize> Future for WriteVectored<'a, B, N> { + type Output = io::Result<(B, usize)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, len)| (buf.0, len)) + } +} + +/// [`Future`] behind read implementations. 
+pub struct Read<'a, B>(pub(crate) a10::io::Read<'a, BufWrapper>); + +impl<'a, B: BufMut> Future for Read<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + +/// [`Future`] behind read vectored implementations. +pub struct ReadVectored<'a, B, const N: usize>( + pub(crate) a10::io::ReadVectored<'a, BufWrapper, N>, +); + +impl<'a, B: BufMutSlice, const N: usize> Future for ReadVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 8e21649b4..9af353f56 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -4,9 +4,16 @@ //! define the requirements on buffers in use in I/O. Additionally the //! [`BufSlice`] and [`BufMutSlice`] traits define the behaviour of buffers in //! vectored I/O. +//! +//! Finally this module contains a number of [`Future`] implementation that +//! facilitate I/O operations. // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; mod buf; +pub(crate) use buf::BufWrapper; pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; + +mod futures; +pub use futures::{Read, ReadVectored, Write, WriteVectored}; From f0d190d5affaa3de24262b34c7127f301a7a40ee Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 23:03:18 +0100 Subject: [PATCH 008/177] Register I/O uring in test Worker --- rt/src/worker.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 6f3d0cef5..479e37d07 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -292,6 +292,18 @@ impl Worker { let waker = mio::Waker::new(poll.registry(), WAKER)?; let waker_id = waker::init(waker, waker_sender); + // Register the shared poll intance. + let ring_fd = ring.as_fd().as_raw_fd(); + poll.registry() + .register(&mut SourceFd(&ring_fd), RING, Interest::READABLE)?; + let shared_ring_fd = shared_internals.ring_fd(); + poll.registry().register( + &mut SourceFd(&shared_ring_fd), + SHARED_POLL, + Interest::READABLE, + )?; + shared_internals.register_worker_poll(poll.registry(), SHARED_POLL)?; + // Register the channel to the coordinator. receiver.register(poll.registry(), COMMS)?; let id = NonZeroUsize::new(usize::MAX).unwrap(); From 98730e90dc4db23c2ce1d417e7e135026e40853a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 18 Mar 2023 23:04:33 +0100 Subject: [PATCH 009/177] Ignore interrupts while polling epoll --- rt/src/coordinator.rs | 13 ++++++++++--- rt/src/worker.rs | 7 ++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index c8f0f826e..5f2fd0c70 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -129,9 +129,7 @@ impl Coordinator { let mut events = Events::with_capacity(16); loop { let timing = trace::start(&trace_log); - // Process OS events. 
- self.poll - .poll(&mut events, None) + self.poll_os(&mut events) .map_err(|err| rt::Error::coordinator(Error::Polling(err)))?; trace::finish_rt(trace_log.as_mut(), timing, "Polling for OS events", &[]); @@ -186,6 +184,15 @@ impl Coordinator { } } + fn poll_os(&mut self, events: &mut Events) -> io::Result<()> { + match self.poll.poll(events, None) { + Ok(()) => Ok(()), + // The I/O uring will interrupt us. + Err(ref err) if err.kind() == io::ErrorKind::Interrupted => Ok(()), + Err(err) => Err(err), + } + } + /// Do the pre-[`run`] setup. /// /// [`run`]: Coordinator::run diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 479e37d07..d1ce38f06 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -692,7 +692,12 @@ impl Worker { "Polling for OS events", &[], ); - res + match res { + Ok(()) => Ok(()), + // The I/O uring will interrupt us. + Err(ref err) if err.kind() == io::ErrorKind::Interrupted => Ok(()), + Err(err) => Err(err), + } } /// Determine the timeout to be used in polling. From 2c162a329f578c2577511b3f11077f1d3a9f27d3 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 19 Mar 2023 17:23:27 +0100 Subject: [PATCH 010/177] Add WriteAll and WriteAllVectored Future wrappers --- rt/src/io/futures.rs | 30 ++++++++++++++++++++++++++++++ rt/src/io/mod.rs | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/rt/src/io/futures.rs b/rt/src/io/futures.rs index e5067b9be..a573f0c92 100644 --- a/rt/src/io/futures.rs +++ b/rt/src/io/futures.rs @@ -25,6 +25,20 @@ impl<'a, B: Buf> Future for Write<'a, B> { } } +/// [`Future`] behind write all implementations. +pub struct WriteAll<'a, B>(pub(crate) Extractor>>); + +impl<'a, B: Buf> Future for WriteAll<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind write vectored implementations. pub struct WriteVectored<'a, B, const N: usize>( pub(crate) Extractor, N>>, @@ -41,6 +55,22 @@ impl<'a, B: BufSlice, const N: usize> Future for WriteVectored<'a, B, N> { } } +/// [`Future`] behind write all vectored implementations. +pub struct WriteAllVectored<'a, B, const N: usize>( + pub(crate) Extractor, N>>, +); + +impl<'a, B: BufSlice, const N: usize> Future for WriteAllVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind read implementations. pub struct Read<'a, B>(pub(crate) a10::io::Read<'a, BufWrapper>); diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 9af353f56..4ed868f97 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -16,4 +16,4 @@ pub(crate) use buf::BufWrapper; pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; mod futures; -pub use futures::{Read, ReadVectored, Write, WriteVectored}; +pub use futures::{Read, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored}; From 1eb895396dfaa6fc0d11642e962ac6207d0df5ab Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 19 Mar 2023 17:25:18 +0100 Subject: [PATCH 011/177] Fix doc link --- rt/src/io/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 4ed868f97..552a93b83 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -5,8 +5,10 @@ //! 
[`BufSlice`] and [`BufMutSlice`] traits define the behaviour of buffers in //! vectored I/O. //! -//! Finally this module contains a number of [`Future`] implementation that +//! Finally this module contains a number of [`Future`] implementations that //! facilitate I/O operations. +//! +//! [`Future`]: std::future:::Future // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; From 06cea55f535f789e10b3b08df0a60794cd69658d Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 21 Mar 2023 00:10:21 +0100 Subject: [PATCH 012/177] Update A10 version It renames the Buf trait method, a change I've also made to Heph in this commit. --- rt/src/io/buf.rs | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 78bc5f685..f0be2b7bb 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -46,7 +46,7 @@ pub unsafe trait BufMut: 'static { /// Returning a slice with `MaybeUninit` to such as type would be unsound as /// it would allow the caller to write unitialised bytes without using /// `unsafe`. - unsafe fn parts(&mut self) -> (*mut u8, usize); + unsafe fn parts_mut(&mut self) -> (*mut u8, usize); /// Update the length of the byte slice, marking `n` bytes as initialised. /// @@ -112,7 +112,7 @@ pub unsafe trait BufMut: 'static { // alive, so is the slice of bytes. When the `Vec`tor is leaked the allocation // will also be leaked. unsafe impl BufMut for Vec { - unsafe fn parts(&mut self) -> (*mut u8, usize) { + unsafe fn parts_mut(&mut self) -> (*mut u8, usize) { let slice = self.spare_capacity_mut(); (slice.as_mut_ptr().cast(), slice.len()) } @@ -156,10 +156,10 @@ impl BufMutSlice for [B; N] {} // `B` implements `BufMut` it's safe to implement `BufMutSlice` for an array of // `B`. unsafe impl private::BufMutSlice for [B; N] { - unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N] { let mut iovecs = MaybeUninit::uninit_array(); for (buf, iovec) in self.iter_mut().zip(iovecs.iter_mut()) { - let (ptr, len) = buf.parts(); + let (ptr, len) = buf.parts_mut(); _ = iovec.write(libc::iovec { iov_base: ptr.cast(), iov_len: len, @@ -171,7 +171,7 @@ unsafe impl private::BufMutSlice for [B; N] { unsafe fn update_length(&mut self, n: usize) { let mut left = n; for buf in self.iter_mut() { - let (_, len) = buf.parts(); + let (_, len) = buf.parts_mut(); if len < left { // Fully initialised the buffer. buf.update_length(len); @@ -280,7 +280,7 @@ impl BufSlice for [B; N] {} // SAFETY: `BufSlice` has the same safety requirements as `Buf` and since `B` // implements `Buf` it's safe to implement `BufSlice` for an array of `B`. unsafe impl private::BufSlice for [B; N] { - unsafe fn as_iovec(&self) -> [libc::iovec; N] { + unsafe fn as_iovecs(&self) -> [libc::iovec; N] { let mut iovecs = MaybeUninit::uninit_array(); for (buf, iovec) in self.iter().zip(iovecs.iter_mut()) { let (ptr, len) = buf.parts(); @@ -305,10 +305,10 @@ macro_rules! buf_slice_for_tuple { // and since all generic buffers must implement `BufMut` it's safe to // implement `BufMutSlice` for a tuple of all those buffers. 
unsafe impl<$( $generic: BufMut ),+> private::BufMutSlice<$N> for ($( $generic ),+) { - unsafe fn as_iovec(&mut self) -> [libc::iovec; $N] { + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; $N] { [ $({ - let (ptr, len) = self.$index.parts(); + let (ptr, len) = self.$index.parts_mut(); libc::iovec { iov_base: ptr.cast(), iov_len: len, @@ -320,7 +320,7 @@ macro_rules! buf_slice_for_tuple { unsafe fn update_length(&mut self, n: usize) { let mut left = n; $({ - let (_, len) = self.$index.parts(); + let (_, len) = self.$index.parts_mut(); if len < left { // Fully initialised the buffer. self.$index.update_length(len); @@ -344,7 +344,7 @@ macro_rules! buf_slice_for_tuple { // since all generic buffers must implement `Buf` it's safe to implement // `BufSlice` for a tuple of all those buffers. unsafe impl<$( $generic: Buf ),+> private::BufSlice<$N> for ($( $generic ),+) { - unsafe fn as_iovec(&self) -> [libc::iovec; $N] { + unsafe fn as_iovecs(&self) -> [libc::iovec; $N] { [ $({ let (ptr, len) = self.$index.parts(); @@ -378,7 +378,7 @@ mod private { /// /// This has the same safety requirements as [`BufMut::parts`], but then for /// all buffers used. - unsafe fn as_iovec(&mut self) -> [libc::iovec; N]; + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N]; /// Mark `n` bytes as initialised. /// @@ -404,7 +404,7 @@ mod private { /// /// This has the same safety requirements as [`Buf::parts`], but then for /// all buffers used. - unsafe fn as_iovec(&self) -> [libc::iovec; N]; + unsafe fn as_iovecs(&self) -> [libc::iovec; N]; } } @@ -412,8 +412,8 @@ mod private { pub(crate) struct BufWrapper(pub(crate) B); unsafe impl a10::io::BufMut for BufWrapper { - unsafe fn parts(&mut self) -> (*mut u8, u32) { - let (ptr, size) = self.0.parts(); + unsafe fn parts_mut(&mut self) -> (*mut u8, u32) { + let (ptr, size) = self.0.parts_mut(); (ptr, size as u32) } @@ -423,8 +423,8 @@ unsafe impl a10::io::BufMut for BufWrapper { } unsafe impl BufMut for BufWrapper { - unsafe fn parts(&mut self) -> (*mut u8, usize) { - self.0.parts() + unsafe fn parts_mut(&mut self) -> (*mut u8, usize) { + self.0.parts_mut() } unsafe fn update_length(&mut self, n: usize) { @@ -454,8 +454,8 @@ unsafe impl Buf for BufWrapper { } unsafe impl, const N: usize> a10::io::BufMutSlice for BufWrapper { - unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { - self.0.as_iovec() + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N] { + self.0.as_iovecs_mut() } unsafe fn set_init(&mut self, n: usize) { @@ -466,8 +466,8 @@ unsafe impl, const N: usize> a10::io::BufMutSlice for BufWr impl, const N: usize> BufMutSlice for BufWrapper {} unsafe impl, const N: usize> private::BufMutSlice for BufWrapper { - unsafe fn as_iovec(&mut self) -> [libc::iovec; N] { - self.0.as_iovec() + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N] { + self.0.as_iovecs_mut() } unsafe fn update_length(&mut self, n: usize) { @@ -476,15 +476,15 @@ unsafe impl, const N: usize> private::BufMutSlice for BufWr } unsafe impl, const N: usize> a10::io::BufSlice for BufWrapper { - unsafe fn as_iovec(&self) -> [libc::iovec; N] { - self.0.as_iovec() + unsafe fn as_iovecs(&self) -> [libc::iovec; N] { + self.0.as_iovecs() } } impl, const N: usize> BufSlice for BufWrapper {} unsafe impl, const N: usize> private::BufSlice for BufWrapper { - unsafe fn as_iovec(&self) -> [libc::iovec; N] { - self.0.as_iovec() + unsafe fn as_iovecs(&self) -> [libc::iovec; N] { + self.0.as_iovecs() } } From 114dc68b33a687d2a9497010071bea2f8d5fd844 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: 
Tue, 21 Mar 2023 00:12:10 +0100 Subject: [PATCH 013/177] Add ReadN and ReadNVectored Futures --- rt/src/io/futures.rs | 30 ++++++++++++++++++++++++++++++ rt/src/io/mod.rs | 4 +++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/rt/src/io/futures.rs b/rt/src/io/futures.rs index a573f0c92..f567ab7e4 100644 --- a/rt/src/io/futures.rs +++ b/rt/src/io/futures.rs @@ -100,3 +100,33 @@ impl<'a, B: BufMutSlice, const N: usize> Future for ReadVectored<'a, B, N> { .map_ok(|buf| buf.0) } } + +/// [`Future`] behind read `n` implementations. +pub struct ReadN<'a, B>(pub(crate) a10::io::ReadN<'a, BufWrapper>); + +impl<'a, B: BufMut> Future for ReadN<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + +/// [`Future`] behind read `n` vectored implementations. +pub struct ReadNVectored<'a, B, const N: usize>( + pub(crate) a10::io::ReadNVectored<'a, BufWrapper, N>, +); + +impl<'a, B: BufMutSlice, const N: usize> Future for ReadNVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 552a93b83..a49753105 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -18,4 +18,6 @@ pub(crate) use buf::BufWrapper; pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; mod futures; -pub use futures::{Read, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored}; +pub use futures::{ + Read, ReadN, ReadNVectored, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored, +}; From c3491fb84e9515499497b7cefa03373f1a7d6877 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 21 Mar 2023 00:19:14 +0100 Subject: [PATCH 014/177] Add BufMutSlice::total_spare_capacity and has_spare_capacity methods. Useful to quickly check if the buffers has enoug capacity in read_n calls. --- rt/src/io/buf.rs | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index f0be2b7bb..aed8552cf 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -146,11 +146,27 @@ unsafe impl BufMut for Vec { /// /// This has the same safety requirements as [`BufMut`], but then for all /// buffers used. -pub trait BufMutSlice: private::BufMutSlice + 'static {} +pub trait BufMutSlice: private::BufMutSlice + 'static { + /// Returns the total length of all buffer.s + fn total_spare_capacity(&self) -> usize; + + /// Returns `true` at least one of the buffer has spare capacity. + fn has_spare_capacity(&self) -> bool { + self.total_spare_capacity() == 0 + } +} // NOTE: see the `private` module below for the actual trait. -impl BufMutSlice for [B; N] {} +impl BufMutSlice for [B; N] { + fn total_spare_capacity(&self) -> usize { + self.iter().map(BufMut::spare_capacity).sum() + } + + fn has_spare_capacity(&self) -> bool { + self.iter().any(BufMut::has_spare_capacity) + } +} // SAFETY: `BufMutSlice` has the same safety requirements as `BufMut` and since // `B` implements `BufMut` it's safe to implement `BufMutSlice` for an array of @@ -300,7 +316,18 @@ macro_rules! buf_slice_for_tuple { // Generic parameter name and tuple index. $( $generic: ident . 
$index: tt ),+ ) => { - impl<$( $generic: BufMut ),+> BufMutSlice<$N> for ($( $generic ),+) { } + impl<$( $generic: BufMut ),+> BufMutSlice<$N> for ($( $generic ),+) { + fn total_spare_capacity(&self) -> usize { + $( self.$index.spare_capacity() + )+ + 0 + } + + fn has_spare_capacity(&self) -> bool { + $( self.$index.has_spare_capacity() || )+ + false + } + } + // SAFETY: `BufMutSlice` has the same safety requirements as `BufMut` // and since all generic buffers must implement `BufMut` it's safe to // implement `BufMutSlice` for a tuple of all those buffers. @@ -463,7 +490,15 @@ unsafe impl, const N: usize> a10::io::BufMutSlice for BufWr } } -impl, const N: usize> BufMutSlice for BufWrapper {} +impl, const N: usize> BufMutSlice for BufWrapper { + fn total_spare_capacity(&self) -> usize { + self.0.total_spare_capacity() + } + + fn has_spare_capacity(&self) -> bool { + self.0.has_spare_capacity() + } +} unsafe impl, const N: usize> private::BufMutSlice for BufWrapper { unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N] { From b1863c811776c521b47631e1bd15b74f71d387df Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 21 Mar 2023 00:29:02 +0100 Subject: [PATCH 015/177] Use I/O uring for the Unix pipe implementation Based on the A10 crate. --- rt/src/bytes.rs | 15 -- rt/src/lib.rs | 12 + rt/src/pipe.rs | 481 ++++++------------------------------ rt/tests/functional/pipe.rs | 141 +++-------- 4 files changed, 118 insertions(+), 531 deletions(-) diff --git a/rt/src/bytes.rs b/rt/src/bytes.rs index 13442c2a5..1dd975aec 100644 --- a/rt/src/bytes.rs +++ b/rt/src/bytes.rs @@ -19,7 +19,6 @@ //! [`update_lengths`]: BytesVectored::update_lengths use std::cmp::min; -use std::io::IoSliceMut; use std::mem::MaybeUninit; use std::ops::{Deref, DerefMut}; use std::{fmt, slice}; @@ -210,20 +209,6 @@ impl<'a> MaybeUninitSlice<'a> { // `repr(transparent)` attribute. unsafe { &mut *(bufs as *mut _ as *mut _) } } - - /// Returns `bufs` as [`IoSliceMut`]. - /// - /// # Unsafety - /// - /// This is unsound. - /// - /// Reading from the returned slice is UB. - #[allow(clippy::wrong_self_convention)] - pub(crate) unsafe fn as_io<'b>( - bufs: &'b mut [MaybeUninitSlice<'a>], - ) -> &'b mut [IoSliceMut<'a>] { - &mut *(bufs as *mut _ as *mut _) - } } impl<'a> Deref for MaybeUninitSlice<'a> { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index ac96df407..26d521974 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -203,6 +203,18 @@ macro_rules! try_io { }; } +/// Helper macro to execute a system call that returns an `io::Result`. +macro_rules! syscall { + ($fn: ident ( $($arg: expr),* $(,)? ) ) => {{ + let res = unsafe { libc::$fn($( $arg, )*) }; + if res == -1 { + Err(std::io::Error::last_os_error()) + } else { + Ok(res) + } + }}; +} + use std::convert::TryInto; use std::future::Future; use std::rc::Rc; diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index af59558a3..140b0e727 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -31,18 +31,17 @@ //! //! const DATA: &[u8] = b"Hello, world!"; //! -//! async fn process_handler(mut ctx: actor::Context) -> io::Result<()> +//! async fn process_handler(ctx: actor::Context) -> io::Result<()> //! where RT: rt::Access, //! { -//! let (mut sender, mut receiver) = pipe::new(&mut ctx)?; +//! let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; //! //! // Write some data. //! sender.write_all(DATA).await?; //! drop(sender); // Close the sending side. //! //! // And read the data back. -//! let mut buf = Vec::with_capacity(DATA.len() + 1); -//! 
receiver.read_n(&mut buf, DATA.len()).await?; +//! let buf = receiver.read_n(Vec::with_capacity(DATA.len() + 1), DATA.len()).await?; //! assert_eq!(buf, DATA); //! Ok(()) //! } @@ -69,7 +68,7 @@ //! //! const DATA: &[u8] = b"Hello, world!"; //! -//! async fn process_handler(mut ctx: actor::Context) -> io::Result<()> +//! async fn process_handler(ctx: actor::Context) -> io::Result<()> //! where RT: rt::Access, //! { //! // Spawn the "echo" command that echos everything it reads from standard @@ -81,8 +80,8 @@ //! .spawn()?; //! //! // Create our process standard in and out. -//! let mut stdin = pipe::Sender::from_child_stdin(&mut ctx, process.stdin.take().unwrap())?; -//! let mut stdout = pipe::Receiver::from_child_stdout(&mut ctx, process.stdout.take().unwrap())?; +//! let mut stdin = pipe::Sender::from_child_stdin(ctx.runtime_ref(), process.stdin.take().unwrap())?; +//! let mut stdout = pipe::Receiver::from_child_stdout(ctx.runtime_ref(), process.stdout.take().unwrap())?; //! //! // Write some data. //! stdin.write_all(DATA).await?; @@ -90,8 +89,7 @@ //! # process.wait()?; // Needed to pass the test on macOS. //! //! // And read the data back. -//! let mut buf = Vec::with_capacity(DATA.len() + 1); -//! stdout.read_n(&mut buf, DATA.len()).await?; +//! let buf = stdout.read_n(Vec::with_capacity(DATA.len() + 1), DATA.len()).await?; //! assert_eq!(buf, DATA); //! Ok(()) //! } @@ -105,19 +103,17 @@ //! # heph_rt::test::join(&actor_ref, std::time::Duration::from_secs(1)).unwrap(); //! ``` -use std::future::Future; -use std::io::{self, IoSlice}; -use std::mem::MaybeUninit; -use std::pin::Pin; +use std::io; +use std::os::fd::{IntoRawFd, RawFd}; use std::process::{ChildStderr, ChildStdin, ChildStdout}; -use std::task::{self, Poll}; -use heph::actor; -use mio::unix::pipe; -use mio::Interest; +use a10::{AsyncFd, Extract}; -use crate::bytes::{Bytes, BytesVectored, MaybeUninitSlice}; -use crate::{self as rt, Bound}; +use crate as rt; +use crate::io::{ + Buf, BufMut, BufMutSlice, BufSlice, BufWrapper, Read, ReadN, ReadNVectored, ReadVectored, + Write, WriteAll, WriteAllVectored, WriteVectored, +}; /// Create a new Unix pipe. /// @@ -129,17 +125,18 @@ use crate::{self as rt, Bound}; /// commands to the child process. /// /// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html -pub fn new(ctx: &mut actor::Context) -> io::Result<(Sender, Receiver)> +pub fn new(rt: &RT) -> io::Result<(Sender, Receiver)> where RT: rt::Access, { - let (mut sender, mut receiver) = pipe::new()?; - - let rt = ctx.runtime(); - rt.register(&mut sender, Interest::WRITABLE)?; - rt.register(&mut receiver, Interest::READABLE)?; - - Ok((Sender { inner: sender }, Receiver { inner: receiver })) + let mut fds: [RawFd; 2] = [-1, -1]; + let _ = syscall!(pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC))?; + + let sq = rt.submission_queue(); + // SAFETY: we just initialised the `fds` above. + let r = unsafe { AsyncFd::new(fds[0], sq.clone()) }; + let w = unsafe { AsyncFd::new(fds[1], sq) }; + Ok((Sender { fd: w }, Receiver { fd: r })) } /// Sending end of an Unix pipe. @@ -147,62 +144,35 @@ where /// Created by calling [`new`] or converted from [`ChildStdin`]. #[derive(Debug)] pub struct Sender { - inner: pipe::Sender, + fd: AsyncFd, } impl Sender { /// Convert a [`ChildStdin`] to a `Sender`. 
- pub fn from_child_stdin( - ctx: &mut actor::Context, - stdin: ChildStdin, - ) -> io::Result + pub fn from_child_stdin(rt: &RT, stdin: ChildStdin) -> io::Result where RT: rt::Access, { - let mut sender = pipe::Sender::from(stdin); - sender.set_nonblocking(true)?; - ctx.runtime().register(&mut sender, Interest::WRITABLE)?; - Ok(Sender { inner: sender }) - } - - /// Attempt to write the bytes in `buf` into the pipe. - /// - /// If no bytes can currently be written this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`Sender::write`] or [`Sender::write_all`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_write(&mut self, buf: &[u8]) -> io::Result { - io::Write::write(&mut self.inner, buf) + let sq = rt.submission_queue(); + // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. + let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), sq.clone()) }; + Ok(Sender { fd }) } /// Write the bytes in `buf` into the pipe. /// /// Return the number of bytes written. This may we fewer then the length of /// `buf`. To ensure that all bytes are written use [`Sender::write_all`]. - pub fn write<'a, 'b>(&'a mut self, buf: &'b [u8]) -> Write<'a, 'b> { - Write { sender: self, buf } + pub fn write<'a, B: Buf>(&'a mut self, buf: B) -> Write<'a, B> { + Write(self.fd.write(BufWrapper(buf)).extract()) } /// Write the all bytes in `buf` into the pipe. /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn write_all<'a, 'b>(&'a mut self, buf: &'b [u8]) -> WriteAll<'a, 'b> { - WriteAll { sender: self, buf } - } - - /// Attempt to write the bytes in `bufs` into the pipe. - /// - /// If no bytes can currently be written this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`Sender::write_vectored`] or [`Sender::write_vectored_all`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - io::Write::write_vectored(&mut self.inner, bufs) + pub fn write_all<'a, B: Buf>(&'a mut self, buf: B) -> WriteAll<'a, B> { + WriteAll(self.fd.write_all(BufWrapper(buf)).extract()) } /// Write the bytes in `bufs` intoto the pipe. @@ -210,121 +180,22 @@ impl Sender { /// Return the number of bytes written. This may we fewer then the length of /// `bufs`. To ensure that all bytes are written use /// [`Sender::write_vectored_all`]. - pub fn write_vectored<'a, 'b>( + pub fn write_vectored<'a, B: BufSlice, const N: usize>( &'a mut self, - bufs: &'b mut [IoSlice<'b>], - ) -> WriteVectored<'a, 'b> { - WriteVectored { sender: self, bufs } + bufs: B, + ) -> WriteVectored<'a, B, N> { + WriteVectored(self.fd.write_vectored(BufWrapper(bufs)).extract()) } /// Write the all bytes in `bufs` into the pipe. /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn write_vectored_all<'a, 'b>( + pub fn write_vectored_all<'a, B: BufSlice, const N: usize>( &'a mut self, - bufs: &'b mut [IoSlice<'b>], - ) -> WriteVectoredAll<'a, 'b> { - WriteVectoredAll { sender: self, bufs } - } -} - -/// The [`Future`] behind [`Sender::write`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Write<'a, 'b> { - sender: &'a mut Sender, - buf: &'b [u8], -} - -impl<'a, 'b> Future for Write<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Write { sender, buf } = Pin::into_inner(self); - try_io!(sender.try_write(buf)) - } -} - -/// The [`Future`] behind [`Sender::write_all`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteAll<'a, 'b> { - sender: &'a mut Sender, - buf: &'b [u8], -} - -impl<'a, 'b> Future for WriteAll<'a, 'b> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let WriteAll { sender, buf } = Pin::into_inner(self); - loop { - match sender.try_write(buf) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) if buf.len() <= n => return Poll::Ready(Ok(())), - Ok(n) => { - *buf = &buf[n..]; - // Try to write some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// The [`Future`] behind [`Sender::write_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteVectored<'a, 'b> { - sender: &'a mut Sender, - bufs: &'b mut [IoSlice<'b>], -} - -impl<'a, 'b> Future for WriteVectored<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let WriteVectored { sender, bufs } = Pin::into_inner(self); - try_io!(sender.try_write_vectored(bufs)) - } -} - -/// The [`Future`] behind [`Sender::write_vectored_all`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct WriteVectoredAll<'a, 'b> { - sender: &'a mut Sender, - bufs: &'b mut [IoSlice<'b>], -} - -impl<'a, 'b> Future for WriteVectoredAll<'a, 'b> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let WriteVectoredAll { sender, bufs } = Pin::into_inner(self); - while !bufs.is_empty() { - match sender.try_write_vectored(bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => IoSlice::advance_slices(bufs, n), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - Poll::Ready(Ok(())) - } -} - -impl Bound for Sender { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.inner, Interest::WRITABLE) + bufs: B, + ) -> WriteAllVectored<'a, B, N> { + WriteAllVectored(self.fd.write_all_vectored(BufWrapper(bufs)).extract()) } } @@ -334,74 +205,35 @@ impl Bound for Sender { /// [`ChildStderr`]. #[derive(Debug)] pub struct Receiver { - inner: pipe::Receiver, + fd: AsyncFd, } impl Receiver { /// Convert a [`ChildStdout`] to a `Receiver`. 
- pub fn from_child_stdout( - ctx: &mut actor::Context, - stdout: ChildStdout, - ) -> io::Result + pub fn from_child_stdout(rt: &RT, stdout: ChildStdout) -> io::Result where RT: rt::Access, { - let mut receiver = pipe::Receiver::from(stdout); - receiver.set_nonblocking(true)?; - ctx.runtime().register(&mut receiver, Interest::READABLE)?; - Ok(Receiver { inner: receiver }) + let sq = rt.submission_queue(); + // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. + let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), sq.clone()) }; + Ok(Receiver { fd }) } /// Convert a [`ChildStderr`] to a `Receiver`. - pub fn from_child_stderr( - ctx: &mut actor::Context, - stderr: ChildStderr, - ) -> io::Result + pub fn from_child_stderr(rt: &RT, stderr: ChildStderr) -> io::Result where RT: rt::Access, { - let mut receiver = pipe::Receiver::from(stderr); - receiver.set_nonblocking(true)?; - ctx.runtime().register(&mut receiver, Interest::READABLE)?; - Ok(Receiver { inner: receiver }) - } - - /// Attempt to read bytes from the pipe, writing them into `buf`. - /// - /// If no bytes can currently be read this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`Receiver::read`] or [`Receiver::read_n`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_read(&mut self, mut buf: B) -> io::Result - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `Receiver::try_read` with an empty buffer" - ); - // SAFETY: This is unsound. - // However Mio passes the buffer directly to the OS without reading any - // bytes, so it shouldn't invoke any UB. - let buf_bytes = unsafe { &mut *(buf.as_bytes() as *mut [MaybeUninit] as *mut [u8]) }; - io::Read::read(&mut self.inner, buf_bytes).map(|read| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - read - }) + let sq = rt.submission_queue(); + // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. + let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), sq.clone()) }; + Ok(Receiver { fd }) } /// Read bytes from the pipe, writing them into `buf`. - pub fn read<'a, B>(&'a mut self, buf: B) -> Read<'a, B> - where - B: Bytes, - { - Read { - receiver: self, - buf, - } + pub fn read<'a, B: BufMut>(&'a mut self, buf: B) -> Read<'a, B> { + Read(self.fd.read(BufWrapper(buf))) } /// Read at least `n` bytes from the pipe, writing them into `buf`. @@ -409,203 +241,32 @@ impl Receiver { /// This returns a [`Future`] that receives at least `n` bytes from the /// `Receiver` and writes them into buffer `B`, or returns /// [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. - pub fn read_n<'a, B>(&'a mut self, buf: B, n: usize) -> ReadN<'a, B> - where - B: Bytes, - { + pub fn read_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> ReadN<'a, B> { debug_assert!( buf.spare_capacity() >= n, - "called `Reader::read_n` with a buffer smaller then `n`", - ); - ReadN { - receiver: self, - buf, - left: n, - } - } - - /// Attempt to read bytes from the pipe, writing them into `bufs`. - /// - /// If no bytes can currently be read this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`Receiver::read_vectored`] or [`Receiver::read_n_vectored`]. 
- /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_read_vectored(&mut self, mut bufs: B) -> io::Result - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `Receiver::try_read_vectored` with empty buffers" + "called `Receiver::read_n` with a buffer smaller then `n`", ); - let mut buffers = bufs.as_bufs(); - let bufs_bytes = unsafe { MaybeUninitSlice::as_io(buffers.as_mut()) }; - match io::Read::read_vectored(&mut self.inner, bufs_bytes) { - Ok(read) => { - drop(buffers); - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - Ok(read) - } - Err(err) => Err(err), - } + ReadN(self.fd.read_n(BufWrapper(buf), n)) } /// Read bytes from the pipe, writing them into `bufs`. - pub fn read_vectored(&mut self, bufs: B) -> ReadVectored<'_, B> - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `Receiver::read_vectored` with empty buffers" - ); - ReadVectored { - receiver: self, - bufs, - } + pub fn read_vectored<'a, B: BufMutSlice, const N: usize>( + &'a mut self, + bufs: B, + ) -> ReadVectored<'a, B, N> { + ReadVectored(self.fd.read_vectored(BufWrapper(bufs))) } /// Read at least `n` bytes from the pipe, writing them into `bufs`. - pub fn read_n_vectored(&mut self, bufs: B, n: usize) -> ReadNVectored<'_, B> - where - B: BytesVectored, - { + pub fn read_n_vectored<'a, B: BufMutSlice, const N: usize>( + &'a mut self, + bufs: B, + n: usize, + ) -> ReadNVectored<'a, B, N> { debug_assert!( - bufs.spare_capacity() >= n, - "called `Receiver::read_n_vectored` with a buffer smaller then `n`" + bufs.total_spare_capacity() >= n, + "called `Receiver::read_n_vectored` with buffers smaller then `n`" ); - ReadNVectored { - receiver: self, - bufs, - left: n, - } - } -} - -/// The [`Future`] behind [`Receiver::read`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Read<'b, B> { - receiver: &'b mut Receiver, - buf: B, -} - -impl<'b, B> Future for Read<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Read { receiver, buf } = Pin::into_inner(self); - try_io!(receiver.try_read(&mut *buf)) - } -} - -/// The [`Future`] behind [`Receiver::read_n`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadN<'b, B> { - receiver: &'b mut Receiver, - buf: B, - left: usize, -} - -impl<'b, B> Future for ReadN<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let ReadN { - receiver, - buf, - left, - } = Pin::into_inner(self); - loop { - match receiver.try_read(&mut *buf) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())), - Ok(n) if n >= *left => return Poll::Ready(Ok(())), - Ok(n) => { - *left -= n; - // Try to read some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// The [`Future`] behind [`Receiver::read_vectored`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadVectored<'b, B> { - receiver: &'b mut Receiver, - bufs: B, -} - -impl<'b, B> Future for ReadVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let ReadVectored { receiver, bufs } = Pin::into_inner(self); - try_io!(receiver.try_read_vectored(&mut *bufs)) - } -} - -/// The [`Future`] behind [`Receiver::read_n_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ReadNVectored<'b, B> { - receiver: &'b mut Receiver, - bufs: B, - left: usize, -} - -impl<'b, B> Future for ReadNVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let ReadNVectored { - receiver, - bufs, - left, - } = Pin::into_inner(self); - loop { - match receiver.try_read_vectored(&mut *bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())), - Ok(n) if n >= *left => return Poll::Ready(Ok(())), - Ok(n) => { - *left -= n; - // Try to read some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -impl Bound for Receiver { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.inner, Interest::READABLE) + ReadNVectored(self.fd.read_n_vectored(BufWrapper(bufs), n)) } } diff --git a/rt/tests/functional/pipe.rs b/rt/tests/functional/pipe.rs index 03076e039..98d6b9417 100644 --- a/rt/tests/functional/pipe.rs +++ b/rt/tests/functional/pipe.rs @@ -1,33 +1,31 @@ //! Tests for the Unix pipe. 
-use std::io::{self, IoSlice}; +use std::io; use std::time::Duration; use heph::{actor, ActorRef}; -use heph_rt::pipe::{self, Receiver, Sender}; +use heph_rt::pipe::{self, Receiver}; use heph_rt::spawn::ActorOptions; use heph_rt::test::{join, join_many, try_spawn_local, PanicSupervisor}; -use heph_rt::{self as rt, Bound}; +use heph_rt::{self as rt}; const DATA: &[u8] = b"Hello world"; const DATAV: &[&[u8]] = &[b"Hello world!", b" ", b"From mars."]; -const DATAV_LEN: usize = DATAV[0].len() + DATAV[1].len() + DATAV[2].len(); #[test] fn smoke() { - async fn actor(mut ctx: actor::Context) -> io::Result<()> + async fn actor(ctx: actor::Context) -> io::Result<()> where RT: rt::Access, { - let (mut sender, mut receiver) = pipe::new(&mut ctx)?; + let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; - let n = sender.write(DATA).await?; + let (_, n) = sender.write(DATA).await?; assert_eq!(n, DATA.len()); drop(sender); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = receiver.read(&mut buf).await?; - assert_eq!(n, DATA.len()); + let buf = receiver.read(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); assert_eq!(buf, DATA); Ok(()) } @@ -44,13 +42,13 @@ fn write_all_read_n() { const DATA: &[u8] = &[213; 17 * 4096]; async fn writer( - mut ctx: actor::Context, + ctx: actor::Context, reader: ActorRef, ) -> io::Result<()> where RT: rt::Access, { - let (mut sender, receiver) = pipe::new(&mut ctx)?; + let (mut sender, receiver) = pipe::new(ctx.runtime_ref())?; reader.send(receiver).await.unwrap(); @@ -64,10 +62,11 @@ fn write_all_read_n() { RT: rt::Access, { let mut receiver = ctx.receive_next().await.unwrap(); - receiver.bind_to(&mut ctx)?; - let mut buf = Vec::with_capacity(DATA.len() + 1); - receiver.read_n(&mut buf, DATA.len()).await?; + let buf = receiver + .read_n(Vec::with_capacity(DATA.len() + 1), DATA.len()) + .await?; + assert_eq!(buf, DATA); Ok(()) } @@ -94,21 +93,17 @@ fn write_vectored_all_read_n_vectored() { const DATA_LEN: usize = DATA[0].len() + DATA[1].len() + DATA[2].len(); async fn writer( - mut ctx: actor::Context, + ctx: actor::Context, reader: ActorRef, ) -> io::Result<()> where RT: rt::Access, { - let (mut sender, receiver) = pipe::new(&mut ctx)?; + let (mut sender, receiver) = pipe::new(ctx.runtime_ref())?; reader.send(receiver).await.unwrap(); - let bufs = &mut [ - IoSlice::new(&DATA[0]), - IoSlice::new(&DATA[1]), - IoSlice::new(&DATA[2]), - ]; + let bufs = [DATA[0], DATA[1], DATA[2]]; sender.write_vectored_all(bufs).await?; drop(sender); Ok(()) @@ -119,14 +114,16 @@ fn write_vectored_all_read_n_vectored() { RT: rt::Access, { let mut receiver = ctx.receive_next().await.unwrap(); - receiver.bind_to(&mut ctx)?; - let mut bufs = &mut [ + let bufs = [ Vec::with_capacity(8 * 4096), Vec::with_capacity(6 * 4096), Vec::with_capacity((4 * 4096) + 1), ]; - receiver.read_n_vectored(&mut bufs, DATA_LEN).await?; + let [buf1, buf2, buf3] = receiver.read_n_vectored(bufs, DATA_LEN).await?; + debug_assert!(buf1 == DATA[0]); + debug_assert!(buf2 == DATA[1]); + debug_assert!(buf3 == DATA[2]); Ok(()) } @@ -148,32 +145,27 @@ fn write_vectored_all_read_n_vectored() { } #[test] -#[ignore] fn vectored_io() { - async fn actor(mut ctx: actor::Context) -> io::Result<()> + async fn actor(ctx: actor::Context) -> io::Result<()> where RT: rt::Access, { - let (mut sender, mut receiver) = pipe::new(&mut ctx)?; + let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; - let bufs = &mut [ - IoSlice::new(DATAV[0]), - IoSlice::new(DATAV[1]), - 
IoSlice::new(DATAV[2]), - ]; - let n = sender.write_vectored(bufs).await?; + let bufs = [DATAV[0], DATAV[1], DATAV[2]]; + let (_, n) = sender.write_vectored(bufs).await?; assert_eq!(n, DATA.len()); drop(sender); - let mut buf1 = Vec::with_capacity(DATAV[0].len()); - let mut buf2 = Vec::with_capacity(DATAV[1].len()); - let mut buf3 = Vec::with_capacity(DATAV[2].len() + 2); - let mut bufs = [&mut buf1, &mut buf2, &mut buf3]; - let n = receiver.read_vectored(&mut bufs).await?; - assert_eq!(n, DATAV_LEN); - assert_eq!(buf1, DATAV[0]); - assert_eq!(buf2, DATAV[1]); - assert_eq!(buf3, DATAV[2]); + let bufs = [ + Vec::with_capacity(DATAV[0].len()), + Vec::with_capacity(DATAV[1].len()), + Vec::with_capacity(DATAV[2].len() + 2), + ]; + let [buf1, buf2, buf3] = receiver.read_vectored(bufs).await?; + assert!(buf1 == DATAV[0]); + assert!(buf2 == DATAV[1]); + assert!(buf3 == DATAV[2]); Ok(()) } @@ -182,66 +174,3 @@ fn vectored_io() { let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); join(&actor_ref, Duration::from_secs(1)).unwrap(); } - -#[test] -fn actor_bound() { - async fn creator( - mut ctx: actor::Context, - write_ref: ActorRef, - reader_ref: ActorRef, - ) -> io::Result<()> - where - RT: rt::Access, - { - let (sender, receiver) = pipe::new(&mut ctx)?; - reader_ref.send(receiver).await.unwrap(); - write_ref.send(sender).await.unwrap(); - Ok(()) - } - - async fn writer(mut ctx: actor::Context) -> io::Result<()> - where - RT: rt::Access, - { - let mut sender = ctx.receive_next().await.unwrap(); - sender.bind_to(&mut ctx)?; - - sender.write_all(DATA).await - } - - async fn reader(mut ctx: actor::Context) -> io::Result<()> - where - RT: rt::Access, - { - let mut receiver = ctx.receive_next().await.unwrap(); - receiver.bind_to(&mut ctx)?; - - let mut buf = Vec::with_capacity(DATA.len() + 1); - receiver.read_n(&mut buf, DATA.len()).await?; - assert_eq!(buf, DATA); - Ok(()) - } - - #[allow(trivial_casts)] - let reader = reader as fn(_) -> _; - let reader_ref = try_spawn_local(PanicSupervisor, reader, (), ActorOptions::default()).unwrap(); - - #[allow(trivial_casts)] - let writer = writer as fn(_) -> _; - let writer_ref = try_spawn_local(PanicSupervisor, writer, (), ActorOptions::default()).unwrap(); - - #[allow(trivial_casts)] - let creator = creator as fn(_, _, _) -> _; - let creator_ref = try_spawn_local( - PanicSupervisor, - creator, - (writer_ref.clone(), reader_ref.clone()), - ActorOptions::default(), - ) - .unwrap(); - - // Can't use `join_many` due to the differening message types. 
- join(&creator_ref, Duration::from_secs(1)).unwrap(); - join(&writer_ref, Duration::from_secs(1)).unwrap(); - join(&reader_ref, Duration::from_secs(1)).unwrap(); -} From 9f038e62640ad0e62ec689ac08619bafc9178a4c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 21 Mar 2023 00:33:11 +0100 Subject: [PATCH 016/177] Fix incorrect token in registering shared I/O uring --- rt/src/worker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rt/src/worker.rs b/rt/src/worker.rs index d1ce38f06..cac509e40 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -299,7 +299,7 @@ impl Worker { let shared_ring_fd = shared_internals.ring_fd(); poll.registry().register( &mut SourceFd(&shared_ring_fd), - SHARED_POLL, + SHARED_RING, Interest::READABLE, )?; shared_internals.register_worker_poll(poll.registry(), SHARED_POLL)?; From ee750284c58488398633f739a7d87d8077cad09b Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 21 Mar 2023 21:11:07 +0100 Subject: [PATCH 017/177] Fix some docs Fix some incorrect references to types or methods. --- rt/src/io/buf.rs | 8 ++++---- rt/src/io/mod.rs | 2 +- rt/src/pipe.rs | 13 ++++++------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index aed8552cf..45f690309 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -53,9 +53,9 @@ pub unsafe trait BufMut: 'static { /// # Safety /// /// The caller must ensure that at least the first `n` bytes returned by - /// [`parts`] are initialised. + /// [`parts_mut`] are initialised. /// - /// [`parts`]: BufMut::parts + /// [`parts_mut`]: BufMut::parts_mut /// /// # Notes /// @@ -66,9 +66,9 @@ pub unsafe trait BufMut: 'static { /// [`TcpStream::recv_n`]: crate::net::TcpStream::recv_n unsafe fn update_length(&mut self, n: usize); - /// Returns the length of the buffer as returned by [`parts`]. + /// Returns the length of the buffer as returned by [`parts_mut`]. /// - /// [`parts`]: BufMut::parts + /// [`parts_mut`]: BufMut::parts_mut fn spare_capacity(&self) -> usize; /// Returns `true` if the buffer has spare capacity. diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index a49753105..c10edbde8 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -8,7 +8,7 @@ //! Finally this module contains a number of [`Future`] implementations that //! facilitate I/O operations. //! -//! [`Future`]: std::future:::Future +//! [`Future`]: std::future::Future // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 140b0e727..d49797d9a 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -161,7 +161,7 @@ impl Sender { /// Write the bytes in `buf` into the pipe. /// - /// Return the number of bytes written. This may we fewer then the length of + /// Return the number of bytes written. This may we fewer than the length of /// `buf`. To ensure that all bytes are written use [`Sender::write_all`]. pub fn write<'a, B: Buf>(&'a mut self, buf: B) -> Write<'a, B> { Write(self.fd.write(BufWrapper(buf)).extract()) @@ -177,7 +177,7 @@ impl Sender { /// Write the bytes in `bufs` intoto the pipe. /// - /// Return the number of bytes written. This may we fewer then the length of + /// Return the number of bytes written. This may we fewer than the length of /// `bufs`. To ensure that all bytes are written use /// [`Sender::write_vectored_all`]. pub fn write_vectored<'a, B: BufSlice, const N: usize>( @@ -238,13 +238,12 @@ impl Receiver { /// Read at least `n` bytes from the pipe, writing them into `buf`. 
/// - /// This returns a [`Future`] that receives at least `n` bytes from the - /// `Receiver` and writes them into buffer `B`, or returns - /// [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. + /// This returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes + /// could be read. pub fn read_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> ReadN<'a, B> { debug_assert!( buf.spare_capacity() >= n, - "called `Receiver::read_n` with a buffer smaller then `n`", + "called `Receiver::read_n` with a buffer smaller than `n`", ); ReadN(self.fd.read_n(BufWrapper(buf), n)) } @@ -265,7 +264,7 @@ impl Receiver { ) -> ReadNVectored<'a, B, N> { debug_assert!( bufs.total_spare_capacity() >= n, - "called `Receiver::read_n_vectored` with buffers smaller then `n`" + "called `Receiver::read_n_vectored` with buffers smaller than `n`" ); ReadNVectored(self.fd.read_n_vectored(BufWrapper(bufs), n)) } From 48123dcc1bdbf0ceb02b7dbb876a006f1b2d457a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 12:25:31 +0200 Subject: [PATCH 018/177] Update rustc version --- http/tests/functional.rs | 2 +- http/tests/functional/client.rs | 3 +- http/tests/functional/server.rs | 4 +-- rt/src/lib.rs | 1 - rt/src/shared/waker.rs | 2 +- rt/src/test.rs | 54 +++++++++++++++++-------------- rt/tests/functional.rs | 1 - rt/tests/functional/tcp/stream.rs | 46 +++++++++++++++++--------- 8 files changed, 65 insertions(+), 48 deletions(-) diff --git a/http/tests/functional.rs b/http/tests/functional.rs index cee416673..09640ad3a 100644 --- a/http/tests/functional.rs +++ b/http/tests/functional.rs @@ -1,6 +1,6 @@ //! Functional tests. -#![feature(async_stream, never_type, once_cell)] +#![feature(async_stream, never_type, const_weak_new)] use std::mem::size_of; diff --git a/http/tests/functional/client.rs b/http/tests/functional/client.rs index 95d5084ef..12a31438e 100644 --- a/http/tests/functional/client.rs +++ b/http/tests/functional/client.rs @@ -1415,8 +1415,7 @@ struct TestServer { impl TestServer { fn spawn() -> Arc { - static TEST_SERVER: LazyLock>> = - LazyLock::new(|| Mutex::new(Weak::new())); + static TEST_SERVER: Mutex> = Mutex::new(Weak::new()); let mut test_server = TEST_SERVER.lock().unwrap(); if let Some(test_server) = test_server.upgrade() { diff --git a/http/tests/functional/server.rs b/http/tests/functional/server.rs index 783c39a13..7f561f460 100644 --- a/http/tests/functional/server.rs +++ b/http/tests/functional/server.rs @@ -2,7 +2,6 @@ use std::borrow::Cow; use std::io::{self, Read, Write}; use std::net::{Shutdown, SocketAddr, TcpStream}; use std::str; -use std::sync::LazyLock; use std::sync::{Arc, Condvar, Mutex, Weak}; use std::thread::{self, sleep}; use std::time::{Duration, SystemTime}; @@ -550,8 +549,7 @@ struct TestServer { impl TestServer { fn spawn() -> Arc { - static TEST_SERVER: LazyLock>> = - LazyLock::new(|| Mutex::new(Weak::new())); + static TEST_SERVER: Mutex> = Mutex::new(Weak::new()); let mut test_server = TEST_SERVER.lock().unwrap(); if let Some(test_server) = test_server.upgrade() { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 26d521974..e36ae74a7 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -156,7 +156,6 @@ new_uninit, stmt_expr_attributes )] -#![cfg_attr(any(test, feature = "test"), feature(once_cell))] #![warn( anonymous_parameters, bare_trait_objects, diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs index ec27e05b3..fd59cd147 100644 --- a/rt/src/shared/waker.rs +++ b/rt/src/shared/waker.rs @@ -321,7 +321,7 @@ mod tests { let 
setup = RuntimeInternals::setup().unwrap(); Arc::new_cyclic(|shared_internals| { let waker_id = waker::init(shared_internals.clone()); - let worker_wakers = vec![&*test::NOOP_WAKER].into_boxed_slice(); + let worker_wakers = vec![test::noop_waker()].into_boxed_slice(); setup.complete(waker_id, worker_wakers, None) }) } diff --git a/rt/src/test.rs b/rt/src/test.rs index f1aea7412..99606c47d 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -51,8 +51,7 @@ use std::async_iter::AsyncIterator; use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::sync::LazyLock; +use std::sync::{Arc, OnceLock}; use std::task::{self, Poll}; use std::time::{Duration, Instant}; use std::{io, slice, thread}; @@ -79,22 +78,30 @@ pub use heph::test::*; pub(crate) const TEST_PID: ProcessId = ProcessId(0); -pub(crate) static NOOP_WAKER: LazyLock = LazyLock::new(|| { - let poll = mio::Poll::new().expect("failed to create `Poll` instance for test module"); - let waker = mio::Waker::new(poll.registry(), mio::Token(0)) - .expect("failed to create `Waker` instance for test module"); - ThreadWaker::new(waker) -}); - -static SHARED_INTERNAL: LazyLock> = LazyLock::new(|| { - let setup = shared::RuntimeInternals::setup() - .expect("failed to setup runtime internals for test module"); - Arc::new_cyclic(|shared_internals| { - let waker_id = waker::init(shared_internals.clone()); - let worker_wakers = vec![&*NOOP_WAKER].into_boxed_slice(); - setup.complete(waker_id, worker_wakers, None) +pub(crate) fn noop_waker() -> &'static ThreadWaker { + static NOOP_WAKER: OnceLock = OnceLock::new(); + NOOP_WAKER.get_or_init(|| { + let poll = mio::Poll::new().expect("failed to create `Poll` instance for test module"); + let waker = mio::Waker::new(poll.registry(), mio::Token(0)) + .expect("failed to create `Waker` instance for test module"); + ThreadWaker::new(waker) }) -}); +} + +fn shared_internals() -> Arc { + static SHARED_INTERNALS: OnceLock> = OnceLock::new(); + SHARED_INTERNALS + .get_or_init(|| { + let setup = shared::RuntimeInternals::setup() + .expect("failed to setup runtime internals for test module"); + Arc::new_cyclic(|shared_internals| { + let waker_id = waker::init(shared_internals.clone()); + let worker_wakers = vec![noop_waker()].into_boxed_slice(); + setup.complete(waker_id, worker_wakers, None) + }) + }) + .clone() +} /// Returns a reference to a fake local runtime. /// @@ -108,7 +115,7 @@ pub fn runtime() -> RuntimeRef { static TEST_RT: Worker = { let (_, receiver) = rt::channel::new() .expect("failed to create runtime channel for test module"); - Worker::new_test(SHARED_INTERNAL.clone(), receiver) + Worker::new_test(shared_internals(), receiver) .expect("failed to create local `Runtime` for test module") }; } @@ -122,7 +129,8 @@ type RtControl = rt::channel::Sender; /// Lazily start the *test* runtime on a new thread, returning the control /// channel. fn test_runtime() -> &'static RtControl { - static TEST_RT: LazyLock = LazyLock::new(|| { + static TEST_RT: OnceLock = OnceLock::new(); + TEST_RT.get_or_init(|| { let (sender, receiver) = rt::channel::new().expect("failed to create runtime channel for test module"); let _handle = thread::Builder::new() @@ -130,16 +138,14 @@ fn test_runtime() -> &'static RtControl { .spawn(move || { // NOTE: because we didn't indicate the runtime has started this // will never stop. 
- Worker::new_test(SHARED_INTERNAL.clone(), receiver) + Worker::new_test(shared_internals(), receiver) .expect("failed to create a runtime for test module") .run() .expect("failed to run test runtime"); }) .expect("failed to start thread for test runtime"); sender - }); - - &TEST_RT + }) } /// Run function `f` on the *test* runtime. @@ -404,7 +410,7 @@ where NA: NewActor, { let (manager, sender, receiver) = Manager::new_small_channel(); - let ctx = actor::Context::new(receiver, ThreadSafe::new(TEST_PID, SHARED_INTERNAL.clone())); + let ctx = actor::Context::new(receiver, ThreadSafe::new(TEST_PID, shared_internals())); let actor = new_actor.new(ctx, arg)?; Ok((actor, manager, ActorRef::local(sender))) } diff --git a/rt/tests/functional.rs b/rt/tests/functional.rs index e7688273d..0faf8f150 100644 --- a/rt/tests/functional.rs +++ b/rt/tests/functional.rs @@ -5,7 +5,6 @@ drain_filter, maybe_uninit_slice, never_type, - once_cell, write_all_vectored )] diff --git a/rt/tests/functional/tcp/stream.rs b/rt/tests/functional/tcp/stream.rs index a207463ed..8c2ed71c6 100644 --- a/rt/tests/functional/tcp/stream.rs +++ b/rt/tests/functional/tcp/stream.rs @@ -5,7 +5,7 @@ use std::fs::{self, File}; use std::io::{self, IoSlice, Read, Write}; use std::net::{self, Shutdown, SocketAddr}; use std::num::NonZeroUsize; -use std::sync::LazyLock; +use std::sync::OnceLock; use std::thread::sleep; use std::time::Duration; @@ -24,11 +24,15 @@ const DATA: &[u8] = b"Hello world"; const TEST_FILE0: &str = "./tests/data/hello_world"; const TEST_FILE1: &str = "./tests/data/lorem_ipsum"; -// Contents of the test files. -static EXPECTED0: LazyLock> = - LazyLock::new(|| fs::read(TEST_FILE0).expect("failed to read test file 0")); -static EXPECTED1: LazyLock> = - LazyLock::new(|| fs::read(TEST_FILE1).expect("failed to read test file 0")); +fn expected_data0() -> &'static [u8] { + static EXPECTED0: OnceLock> = OnceLock::new(); + EXPECTED0.get_or_init(|| fs::read(TEST_FILE0).expect("failed to read test file 0")) +} + +fn expected_data1() -> &'static [u8] { + static EXPECTED1: OnceLock> = OnceLock::new(); + EXPECTED1.get_or_init(|| fs::read(TEST_FILE1).expect("failed to read test file 1")) +} #[test] fn smoke() { @@ -763,14 +767,18 @@ fn send_file() { stream1.set_nonblocking(true).unwrap(); let mut expected0_offset = 0; - let expected1 = &EXPECTED1[..LENGTH]; + let expected1 = &expected_data1()[..LENGTH]; let mut expected1_offset = 0; let mut buf = vec![0; LENGTH + 1]; for _ in 0..20 { // NOTE: can't use `&&` as that short circuits. - let done0 = - send_file_check_actor(&mut stream0, &EXPECTED0, &mut expected0_offset, &mut buf); + let done0 = send_file_check_actor( + &mut stream0, + expected_data0(), + &mut expected0_offset, + &mut buf, + ); let done1 = send_file_check_actor(&mut stream1, &expected1, &mut expected1_offset, &mut buf); @@ -821,9 +829,9 @@ fn send_file_all() { let (mut stream1, _) = listener.accept().unwrap(); stream1.set_nonblocking(true).unwrap(); - let expected0 = &EXPECTED0; + let expected0 = expected_data0(); let mut expected0_offset = OFFSET; - let expected1 = &EXPECTED1[..OFFSET + LENGTH]; + let expected1 = &expected_data1()[..OFFSET + LENGTH]; let mut expected1_offset = OFFSET; let mut buf = vec![0; LENGTH + 1]; @@ -878,10 +886,18 @@ fn send_entire_file() { let mut buf = vec![0; 4096]; for _ in 0..20 { // NOTE: can't use `&&` as that short circuits. 
- let done0 = - send_file_check_actor(&mut stream0, &EXPECTED0, &mut expected0_offset, &mut buf); - let done1 = - send_file_check_actor(&mut stream1, &EXPECTED1, &mut expected1_offset, &mut buf); + let done0 = send_file_check_actor( + &mut stream0, + expected_data0(), + &mut expected0_offset, + &mut buf, + ); + let done1 = send_file_check_actor( + &mut stream1, + expected_data1(), + &mut expected1_offset, + &mut buf, + ); if done0 && done1 { break; From 3d231f2ebdddedd5a4ef4a77d3fb4667cf4e8772 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 15:06:23 +0200 Subject: [PATCH 019/177] Move (Un)Connected to net module From the net::udp module and reexport them. This allows us to use them in other modules as well, such as net::uds (Unix Domain Sockets). --- rt/src/net/mod.rs | 11 +++++++++++ rt/src/net/udp.rs | 10 +--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index bc1869b8a..677230647 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -56,6 +56,17 @@ pub mod udp; pub use tcp::{TcpListener, TcpServer, TcpStream}; #[doc(no_inline)] pub use udp::UdpSocket; + +/// The unconnected mode of an [`UdpSocket`]. +#[allow(missing_debug_implementations)] +#[allow(clippy::empty_enum)] +pub enum Unconnected {} + +/// The connected mode of an [`UdpSocket`]. +#[allow(missing_debug_implementations)] +#[allow(clippy::empty_enum)] +pub enum Connected {} + /// Convert a `socket2:::SockAddr` into a `std::net::SocketAddr`. #[allow(clippy::needless_pass_by_value)] fn convert_address(address: SockAddr) -> io::Result { diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 60a23a617..139f75aff 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -25,15 +25,7 @@ use crate::bytes::{Bytes, BytesVectored, MaybeUninitSlice}; use crate::net::convert_address; use crate::{self as rt, Bound}; -/// The unconnected mode of an [`UdpSocket`]. -#[allow(missing_debug_implementations)] -#[allow(clippy::empty_enum)] -pub enum Unconnected {} - -/// The connected mode of an [`UdpSocket`]. -#[allow(missing_debug_implementations)] -#[allow(clippy::empty_enum)] -pub enum Connected {} +pub use crate::net::{Connected, Unconnected}; /// A User Datagram Protocol (UDP) socket. /// From fde669f80bbaecbf42076aae0f03ab72ab6c8758 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 17:31:48 +0200 Subject: [PATCH 020/177] Add UnixDatagram Switches to use a forked version of socket2, the changes in which will have to be merged into socket v5 once we're ready to update to that. 
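For reference, a rough sketch of how the new type is meant to be used,
mirroring the added functional test (the actor context `ctx` and error
handling are assumed):

    // Assumes `ctx` is an actor::Context whose runtime implements rt::Access.
    const DATA: &[u8] = b"Hello world";
    let (mut s1, mut s2) = UnixDatagram::pair(ctx.runtime_ref())?;
    let (_, n) = s1.send(DATA).await?;
    assert_eq!(n, DATA.len());
    let buf = s2.recv(Vec::with_capacity(DATA.len() + 1)).await?;
    assert_eq!(buf, DATA);

As with the other a10-backed types, buffers are passed to the returned
futures by value and handed back on completion.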
--- rt/Cargo.toml | 3 +- rt/src/net/futures.rs | 54 ++++++++++ rt/src/net/mod.rs | 10 +- rt/src/net/uds/datagram.rs | 159 ++++++++++++++++++++++++++++ rt/src/net/uds/mod.rs | 56 ++++++++++ rt/tests/functional.rs | 1 + rt/tests/functional/uds/datagram.rs | 105 ++++++++++++++++++ rt/tests/functional/uds/mod.rs | 3 + 8 files changed, 388 insertions(+), 3 deletions(-) create mode 100644 rt/src/net/futures.rs create mode 100644 rt/src/net/uds/datagram.rs create mode 100644 rt/src/net/uds/mod.rs create mode 100644 rt/tests/functional/uds/datagram.rs create mode 100644 rt/tests/functional/uds/mod.rs diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 55c14f7be..f0bf0681b 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -25,7 +25,8 @@ crossbeam-channel = { version = "0.5.0", default-features = false, features = [" libc = { version = "0.2.96", default-features = false } mio = { version = "0.8.0", default-features = false, features = ["os-poll", "net"] } mio-signals = { version = "0.2.0", default-features = false } -socket2 = { version = "0.4.0", default-features = false, features = ["all"] } +# TODO: update to v0.5.0 +socket2 = { version = "0.4.0", default-features = false, features = ["all"], git = "https://github.com/Thomasdezeeuw/socket2", branch = "heph-v0.4" } [dev-dependencies] getrandom = { version = "0.2.2", default-features = false, features = ["std"] } diff --git a/rt/src/net/futures.rs b/rt/src/net/futures.rs new file mode 100644 index 000000000..4ff2869e2 --- /dev/null +++ b/rt/src/net/futures.rs @@ -0,0 +1,54 @@ +//! A10 I/O [`Future`] wrappers. + +#![allow(missing_debug_implementations)] + +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{self, Poll}; + +use a10::extract::Extractor; + +use crate::io::{Buf, BufMut, BufWrapper}; + +/// [`Future`] behind send implementations. +pub struct Send<'a, B>(pub(crate) Extractor>>); + +impl<'a, B: Buf> Future for Send<'a, B> { + type Output = io::Result<(B, usize)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, len)| (buf.0, len)) + } +} + +/// [`Future`] behind send_to implementations. +pub struct SendTo<'a, B, A>(pub(crate) Extractor, A>>); + +impl<'a, B: Buf, A: a10::net::SocketAddress> Future for SendTo<'a, B, A> { + type Output = io::Result<(B, usize)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, _, len)| (buf.0, len)) + } +} + +/// [`Future`] behind recv implementations. +pub struct Recv<'a, B>(pub(crate) a10::net::Recv<'a, BufWrapper>); + +impl<'a, B: BufMut> Future for Recv<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 677230647..019a2b154 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -49,20 +49,26 @@ use std::net::SocketAddr; use socket2::SockAddr; +mod futures; pub mod tcp; pub mod udp; +pub mod uds; #[doc(no_inline)] pub use tcp::{TcpListener, TcpServer, TcpStream}; #[doc(no_inline)] pub use udp::UdpSocket; +#[doc(no_inline)] +pub use uds::UnixDatagram; + +pub use futures::{Recv, Send, SendTo}; -/// The unconnected mode of an [`UdpSocket`]. 
+/// The unconnected mode of an [`UdpSocket`] or [`UnixDatagram`]. #[allow(missing_debug_implementations)] #[allow(clippy::empty_enum)] pub enum Unconnected {} -/// The connected mode of an [`UdpSocket`]. +/// The connected mode of an [`UdpSocket`] or [`UnixDatagram`]. #[allow(missing_debug_implementations)] #[allow(clippy::empty_enum)] pub enum Connected {} diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs new file mode 100644 index 000000000..0a5fbc1d0 --- /dev/null +++ b/rt/src/net/uds/datagram.rs @@ -0,0 +1,159 @@ +use std::io; +use std::marker::PhantomData; +use std::net::Shutdown; +use std::os::fd::{AsFd, IntoRawFd}; + +use a10::{AsyncFd, Extract}; +use log::warn; +use socket2::{Domain, SockRef, Type}; + +use crate as rt; +use crate::io::{Buf, BufMut, BufWrapper}; +use crate::net::uds::UnixAddr; +use crate::net::{Connected, Recv, Send, SendTo, Unconnected}; + +/// A Unix datagram socket. +#[derive(Debug)] +pub struct UnixDatagram { + fd: AsyncFd, + /// The mode in which the socket is in, this determines what methods are + /// available. + mode: PhantomData, +} + +impl UnixDatagram { + /// Creates a Unix datagram socket bound to `address`. + pub async fn bind(rt: &RT, address: &UnixAddr) -> io::Result> + where + RT: rt::Access, + { + let socket = UnixDatagram::unbound(rt).await?; + socket.with_ref(|socket| socket.bind(&address.inner))?; + Ok(socket) + } + + /// Creates a Unix Datagram socket which is not bound to any address. + pub async fn unbound(rt: &RT) -> io::Result> + where + RT: rt::Access, + { + let fd = a10::net::socket( + rt.submission_queue(), + Domain::UNIX.into(), + Type::DGRAM.cloexec().into(), + 0, + 0, + ) + .await?; + UnixDatagram::new(rt, fd) + } + + /// Creates an unnamed pair of connected sockets. + pub fn pair(rt: &RT) -> io::Result<(UnixDatagram, UnixDatagram)> + where + RT: rt::Access, + { + let (s1, s2) = socket2::Socket::pair(Domain::UNIX, Type::DGRAM.cloexec(), None)?; + let s1 = UnixDatagram::new(rt, unsafe { + // SAFETY: the call to `pair` above ensures the file descriptors are + // valid. + AsyncFd::new(s1.into_raw_fd(), rt.submission_queue()) + })?; + let s2 = UnixDatagram::new(rt, unsafe { + // SAFETY: Same as above. + AsyncFd::new(s2.into_raw_fd(), rt.submission_queue()) + })?; + Ok((s1, s2)) + } + + fn new(rt: &RT, fd: AsyncFd) -> io::Result> + where + RT: rt::Access, + { + let socket = UnixDatagram { + fd, + mode: PhantomData, + }; + + #[cfg(target_os = "linux")] + socket.with_ref(|socket| { + if let Some(cpu) = rt.cpu() { + if let Err(err) = socket.set_cpu_affinity(cpu) { + warn!("failed to set CPU affinity on UnixDatagram: {err}"); + } + } + Ok(()) + })?; + + Ok(socket) + } +} + +impl UnixDatagram { + /// Connects the socket by setting the default destination and limiting + /// packets that are received and send to the `remote` address. + pub async fn connect(self, remote: UnixAddr) -> io::Result> { + self.fd.connect(remote).await?; + Ok(UnixDatagram { + fd: self.fd, + mode: PhantomData, + }) + } + + /// Returns the socket address of the remote peer of this socket. + pub fn peer_addr(&mut self) -> io::Result { + self.with_ref(|socket| socket.peer_addr().map(|a| UnixAddr { inner: a })) + } + + /// Returns the socket address of the local half of this socket. + pub fn local_addr(&mut self) -> io::Result { + self.with_ref(|socket| socket.local_addr().map(|a| UnixAddr { inner: a })) + } + + /// Shuts down the read, write, or both halves of this connection. 
+ /// + /// This function will cause all pending and future I/O on the specified + /// portions to return immediately with an appropriate value (see the + /// documentation of [`Shutdown`]). + pub fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { + self.with_ref(|socket| socket.shutdown(how)) + } + + /// Get the value of the `SO_ERROR` option on this socket. + /// + /// This will retrieve the stored error in the underlying socket, clearing + /// the field in the process. This can be useful for checking errors between + /// calls. + pub fn take_error(&mut self) -> io::Result> { + self.with_ref(|socket| socket.take_error()) + } + + fn with_ref(&self, f: F) -> io::Result + where + F: FnOnce(SockRef<'_>) -> io::Result, + { + let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. + f(SockRef::from(&borrowed)) + } +} + +impl UnixDatagram { + // TODO: add `recv_from`, at the time of writing not supported in I/O uring. + + /// Send the bytes in `buf` to `address`. + pub fn send_to<'a, B: Buf>(&'a mut self, buf: B, address: UnixAddr) -> SendTo<'a, B, UnixAddr> { + SendTo(self.fd.sendto(BufWrapper(buf), address, 0).extract()) + } +} + +impl UnixDatagram { + /// Recv bytes from the socket, writing them into `buf`. + pub fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> Recv<'a, B> { + Recv(self.fd.recv(BufWrapper(buf), 0)) + } + + /// Send the bytes in `buf` to the socket's peer. + pub fn send<'a, B: Buf>(&'a mut self, buf: B) -> Send<'a, B> { + Send(self.fd.send(BufWrapper(buf), 0).extract()) + } +} diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs new file mode 100644 index 000000000..5bfa94171 --- /dev/null +++ b/rt/src/net/uds/mod.rs @@ -0,0 +1,56 @@ +//! Unix Domain Socket (UDS) or Inter-Process Communication (IPC) related types. +//! +//! Three main types are provided: +//! +//! * [`UnixListener`] listens for incoming Unix connections. +//! * [`UnixStream`] represents a Unix stream socket. +//! * [`UnixDatagram`] represents a Unix datagram socket. + +use std::mem::{size_of, MaybeUninit}; +use std::path::Path; +use std::{io, ptr}; + +use socket2::SockAddr; + +mod datagram; + +pub use datagram::UnixDatagram; + +/// Unix socket address. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct UnixAddr { + inner: SockAddr, +} + +impl UnixAddr { + /// Create a `UnixAddr` from `path`. + pub fn from_pathname
(path: P) -> io::Result + where + P: AsRef, + { + SockAddr::unix(path.as_ref()).map(|a| UnixAddr { inner: a }) + } +} + +/// **Not part of the API, do not use**. +#[doc(hidden)] +impl a10::net::SocketAddress for UnixAddr { + unsafe fn as_ptr(&self) -> (*const libc::sockaddr, libc::socklen_t) { + (self.inner.as_ptr(), self.inner.len()) + } + + unsafe fn as_mut_ptr(this: &mut MaybeUninit) -> (*mut libc::sockaddr, libc::socklen_t) { + ( + ptr::addr_of_mut!((*this.as_mut_ptr()).inner).cast(), + size_of::() as _, + ) + } + + unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { + debug_assert!(length >= size_of::() as _); + // SAFETY: caller must initialise the address. + let mut this = this.assume_init(); + this.inner.set_length(length); + this + } +} diff --git a/rt/tests/functional.rs b/rt/tests/functional.rs index 0faf8f150..4c5d0b0d0 100644 --- a/rt/tests/functional.rs +++ b/rt/tests/functional.rs @@ -30,4 +30,5 @@ mod functional { mod test; mod timer; mod udp; + mod uds; } diff --git a/rt/tests/functional/uds/datagram.rs b/rt/tests/functional/uds/datagram.rs new file mode 100644 index 000000000..adc24e32d --- /dev/null +++ b/rt/tests/functional/uds/datagram.rs @@ -0,0 +1,105 @@ +//! Tests for `UnixDatagram`. + +use std::io; +use std::net::Shutdown; +use std::time::Duration; + +use heph::actor; +use heph_rt::net::uds::{UnixAddr, UnixDatagram}; +use heph_rt::spawn::ActorOptions; +use heph_rt::test::{join, try_spawn_local, PanicSupervisor}; +use heph_rt::{self as rt}; + +use crate::util::temp_file; + +const DATA: &[u8] = b"Hello world"; +const DATA2: &[u8] = b"Hello mars"; + +#[test] +fn pair() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let (mut s1, mut s2) = UnixDatagram::pair(ctx.runtime_ref())?; + + // Addresses must point to each other. + let s1_local = s1.local_addr()?; + let s1_peer = s1.peer_addr()?; + let s2_local = s2.local_addr()?; + let s2_peer = s2.peer_addr()?; + assert_eq!(s1_local, s2_peer); + assert_eq!(s1_peer, s2_local); + + // Send to one arrives at the other. + let (_, n) = s1.send(DATA).await?; + assert_eq!(n, DATA.len()); + let mut buf = s2.recv(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); + assert_eq!(buf, DATA); + buf.clear(); + + // Same as above, but then in the other direction. + let (_, n) = s2.send(DATA2).await?; + assert_eq!(n, DATA2.len()); + let mut buf = s1.recv(buf).await?; + assert_eq!(buf.len(), DATA2.len()); + assert_eq!(buf, DATA2); + buf.clear(); + + // Shutdown. + s1.shutdown(Shutdown::Both)?; + s2.shutdown(Shutdown::Both)?; + + // No errors. + assert!(s1.take_error()?.is_none()); + assert!(s2.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} + +#[test] +fn bound() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let path1 = temp_file("uds.bound1"); + let path2 = temp_file("uds.bound2"); + let address1 = UnixAddr::from_pathname(path1)?; + let address2 = UnixAddr::from_pathname(path2)?; + let mut listener = UnixDatagram::bind(ctx.runtime_ref(), &address1).await?; + + // Addresses must point to each other. 
+ assert_eq!(listener.local_addr()?, address1); + assert!(listener.peer_addr().is_err()); + + let socket = UnixDatagram::bind(ctx.runtime_ref(), &address2).await?; + let mut socket = socket.connect(address1.clone()).await?; + assert_eq!(socket.local_addr()?, address2); + assert_eq!(socket.peer_addr()?, address1); + + let (_, n) = listener.send_to(DATA, address2).await?; + assert_eq!(n, DATA.len()); + let buf = socket.recv(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); + assert_eq!(buf, DATA); + + // No errors. + assert!(listener.take_error()?.is_none()); + assert!(socket.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} diff --git a/rt/tests/functional/uds/mod.rs b/rt/tests/functional/uds/mod.rs new file mode 100644 index 000000000..abdd34f0f --- /dev/null +++ b/rt/tests/functional/uds/mod.rs @@ -0,0 +1,3 @@ +//! Tests for the UDS types. + +mod datagram; From 1949531426b4a14f106235114749b57df9dba33f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 19:00:30 +0200 Subject: [PATCH 021/177] Manually implement fmt::Debug for UnixDatagram Since M (Connected or Unconnected) doesn't implement fmt::Debug. --- rt/src/net/uds/datagram.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 0a5fbc1d0..8f7acf32c 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -1,7 +1,7 @@ -use std::io; use std::marker::PhantomData; use std::net::Shutdown; use std::os::fd::{AsFd, IntoRawFd}; +use std::{fmt, io}; use a10::{AsyncFd, Extract}; use log::warn; @@ -13,7 +13,6 @@ use crate::net::uds::UnixAddr; use crate::net::{Connected, Recv, Send, SendTo, Unconnected}; /// A Unix datagram socket. -#[derive(Debug)] pub struct UnixDatagram { fd: AsyncFd, /// The mode in which the socket is in, this determines what methods are @@ -157,3 +156,9 @@ impl UnixDatagram { Send(self.fd.send(BufWrapper(buf), 0).extract()) } } + +impl fmt::Debug for UnixDatagram { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fd.fmt(f) + } +} From 1249d556830009c384d41c7e6616a33c70a84bbe Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 19:01:02 +0200 Subject: [PATCH 022/177] Reexport (Un)Connected from net::uds For easy usage with UnixDatagram. --- rt/src/net/uds/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index 5bfa94171..c9dec50de 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -14,6 +14,7 @@ use socket2::SockAddr; mod datagram; +pub use crate::net::{Connected, Unconnected}; pub use datagram::UnixDatagram; /// Unix socket address. From ff9806c091c8a4eb093f25de4d7ec93f0de3e8d1 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 19:01:31 +0200 Subject: [PATCH 023/177] Take ownership of address in UnixDatagram::bind In case io_uring even supports it, which needs ownership of the address. 
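The new call shape, as used in the updated test below (`path` and `ctx` are
placeholders):

    // The address is cloned because `bind` now takes it by value.
    let address = UnixAddr::from_pathname(path)?;
    let mut socket = UnixDatagram::bind(ctx.runtime_ref(), address.clone()).await?;
    assert_eq!(socket.local_addr()?, address);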
--- rt/src/net/uds/datagram.rs | 2 +- rt/tests/functional/uds/datagram.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 8f7acf32c..08d475758 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -22,7 +22,7 @@ pub struct UnixDatagram { impl UnixDatagram { /// Creates a Unix datagram socket bound to `address`. - pub async fn bind(rt: &RT, address: &UnixAddr) -> io::Result> + pub async fn bind(rt: &RT, address: UnixAddr) -> io::Result> where RT: rt::Access, { diff --git a/rt/tests/functional/uds/datagram.rs b/rt/tests/functional/uds/datagram.rs index adc24e32d..fe15fea2b 100644 --- a/rt/tests/functional/uds/datagram.rs +++ b/rt/tests/functional/uds/datagram.rs @@ -74,13 +74,13 @@ fn bound() { let path2 = temp_file("uds.bound2"); let address1 = UnixAddr::from_pathname(path1)?; let address2 = UnixAddr::from_pathname(path2)?; - let mut listener = UnixDatagram::bind(ctx.runtime_ref(), &address1).await?; + let mut listener = UnixDatagram::bind(ctx.runtime_ref(), address1.clone()).await?; // Addresses must point to each other. assert_eq!(listener.local_addr()?, address1); assert!(listener.peer_addr().is_err()); - let socket = UnixDatagram::bind(ctx.runtime_ref(), &address2).await?; + let socket = UnixDatagram::bind(ctx.runtime_ref(), address2.clone()).await?; let mut socket = socket.connect(address1.clone()).await?; assert_eq!(socket.local_addr()?, address2); assert_eq!(socket.peer_addr()?, address1); From af4d61a3a91649e022dc07a31db0e8a892d40792 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 6 Apr 2023 19:48:25 +0200 Subject: [PATCH 024/177] Use net::UnixDatagram in systemd module Instead of using mio::net::UnixDatagram. --- rt/src/systemd.rs | 115 ++++++++++------------------------------------ 1 file changed, 24 insertions(+), 91 deletions(-) diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index d2a9b80d9..2a7b370a9 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -14,26 +14,21 @@ use std::convert::TryFrom; use std::ffi::OsString; -use std::future::Future; use std::path::Path; -use std::pin::Pin; use std::str::FromStr; -use std::task::{self, Poll}; use std::time::Duration; use std::{env, io, process}; use heph::actor; use heph::messages::Terminate; use log::{as_debug, debug, warn}; -use mio::net::UnixDatagram; -use mio::Interest; -use socket2::SockRef; +use crate::net::uds::{Connected, UnixAddr, UnixDatagram}; use crate::timer::Interval; use crate::util::{either, next}; -use crate::{self as rt, Bound, Signal}; +use crate::{self as rt, Signal}; -/// Systemd notifier. +/// systemd notifier. /// /// This is only used by systemd if the service definition file has /// `Type=notify` set, see [`systemd.service(5)`]. Read [`sd_notify(3)`] for @@ -44,8 +39,7 @@ use crate::{self as rt, Bound, Signal}; /// [`sd_notify(3)`]: https://www.freedesktop.org/software/systemd/man/sd_notify.html #[derive(Debug)] pub struct Notify { - // TODO: replace with Heph version. - socket: UnixDatagram, + socket: UnixDatagram, watch_dog: Option, } @@ -61,7 +55,7 @@ impl Notify { /// Returns `None` if the environment `NOTIFY_SOCKET` variable is not set. 
/// /// [`systemd.service(5)`]: https://www.freedesktop.org/software/systemd/man/systemd.service.html#WatchdogSec= - pub fn new(ctx: &mut actor::Context) -> io::Result> + pub async fn new(rt: &RT) -> io::Result> where RT: rt::Access, { @@ -83,7 +77,7 @@ impl Notify { } let mut notifier = match socket_path { - Some(path) => Notify::connect(ctx, Path::new(&path))?, + Some(path) => Notify::connect(rt, Path::new(&path)).await?, None => return Ok(None), }; @@ -106,19 +100,15 @@ impl Notify { /// /// Also see [`Notify::new`] which creates a new `systemd::Notify` based on /// the environment variables set by systemd. - pub fn connect(ctx: &mut actor::Context, path: P) -> io::Result + pub async fn connect(rt: &RT, path: P) -> io::Result where RT: rt::Access, P: AsRef, { - let mut socket = UnixDatagram::unbound()?; - socket.connect(path)?; - ctx.runtime().register(&mut socket, Interest::WRITABLE)?; - if let Some(cpu) = ctx.runtime_ref().cpu() { - if let Err(err) = SockRef::from(&socket).set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on systemd::Notify: {err}"); - } - } + let socket = UnixDatagram::unbound(rt).await?; + let socket = socket + .connect(UnixAddr::from_pathname(path.as_ref())?) + .await?; Ok(Notify { socket, watch_dog: None, @@ -144,7 +134,7 @@ impl Notify { /// programs could pass completion percentages and failing programs could /// pass a human-readable error message. **Note that it must be limited to a /// single line.** - pub fn change_state<'a>(&'a self, state: State, status: Option<&str>) -> ChangeState<'a> { + pub async fn change_state(&mut self, state: State, status: Option<&str>) -> io::Result<()> { debug!(state = log::as_debug!(state), status = log::as_debug!(status); "updating state with service manager"); let state_line = match state { State::Ready => "READY=1\n", @@ -164,10 +154,8 @@ impl Notify { } None => String::from(state_line), }; - ChangeState { - notifier: self, - state_update, - } + _ = self.socket.send(state_update).await?; + Ok(()) } /// Inform the service manager of a change in the application status. @@ -180,26 +168,25 @@ impl Notify { /// /// If you also need to change the state of the application you can use /// [`Notify::change_state`]. - pub fn change_status<'a>(&'a self, status: &str) -> ChangeState<'a> { + pub async fn change_status(&mut self, status: &str) -> io::Result<()> { debug!(status = log::as_display!(status); "updating status with service manager"); let mut state_update = String::with_capacity(7 + status.len() + 1); state_update.push_str("STATUS="); state_update.push_str(status); replace_newline(&mut state_update[7..]); state_update.push('\n'); - ChangeState { - notifier: self, - state_update, - } + _ = self.socket.send(state_update).await?; + Ok(()) } /// Inform the service manager to update the watchdog timestamp. /// /// Send a keep-alive ping that services need to issue in regular intervals /// if `WatchdogSec=` is enabled for it. - pub fn ping_watchdog<'a>(&'a self) -> PingWatchdog<'a> { + pub async fn ping_watchdog(&mut self) -> io::Result<()> { debug!("pinging service manager watchdog"); - PingWatchdog { notifier: self } + _ = self.socket.send("WATCHDOG=1").await?; + Ok(()) } /// Inform the service manager that the service detected an internal error @@ -213,9 +200,10 @@ impl Notify { /// the watchdog behavior. 
/// /// [`systemd.service(5)`]: https://www.freedesktop.org/software/systemd/man/systemd.service.html - pub fn trigger_watchdog<'a>(&'a self) -> TriggerWatchdog<'a> { + pub async fn trigger_watchdog(&mut self) -> io::Result<()> { debug!("triggering service manager watchdog"); - TriggerWatchdog { notifier: self } + _ = self.socket.send("WATCHDOG=trigger").await?; + Ok(()) } } @@ -266,61 +254,6 @@ pub enum State { Stopping, } -/// The [`Future`] behind [`Notify::change_state`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct ChangeState<'a> { - notifier: &'a Notify, - state_update: String, -} - -impl<'a> Future for ChangeState<'a> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - try_io!(self.notifier.socket.send(self.state_update.as_bytes())).map_ok(|_| ()) - } -} - -/// The [`Future`] behind [`Notify::ping_watchdog`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PingWatchdog<'a> { - notifier: &'a Notify, -} - -impl<'a> Future for PingWatchdog<'a> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - try_io!(self.notifier.socket.send(b"WATCHDOG=1")).map_ok(|_| ()) - } -} - -/// The [`Future`] behind [`Notify::trigger_watchdog`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct TriggerWatchdog<'a> { - notifier: &'a Notify, -} - -impl<'a> Future for TriggerWatchdog<'a> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - try_io!(self.notifier.socket.send(b"WATCHDOG=trigger")).map_ok(|_| ()) - } -} - -impl Bound for Notify { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.socket, Interest::WRITABLE) - } -} - /// Actor that manages the communication to the service manager. /// /// It will set the application state (with the service manager) to ready when @@ -350,7 +283,7 @@ where H: FnMut() -> Result<(), E>, E: ToString, { - let notify = match Notify::new(&mut ctx)? { + let mut notify = match Notify::new(ctx.runtime_ref()).await? { Some(notify) => notify, None => { debug!("not started via systemd, not starting `systemd::watchdog`"); From 17f0a757f8a7408e255f30792e75341cb7df2379 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 7 Apr 2023 21:04:49 +0200 Subject: [PATCH 025/177] Expand UnixDatagram docs Explaining the use of Connected and Unconnected enums. --- rt/src/net/uds/datagram.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 08d475758..4fe6be694 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -13,6 +13,26 @@ use crate::net::uds::UnixAddr; use crate::net::{Connected, Recv, Send, SendTo, Unconnected}; /// A Unix datagram socket. +/// +/// To create a socket [`UnixDatagram::bind`] or [`UnixDatagram::unbound`] can +/// be used. The created socket will be in unconnected mode. A socket can be in +/// one of two modes: +/// +/// - [`Unconnected`] mode allows sending and receiving packets to and from all +/// sources. +/// - [`Connected`] mode only allows sending and receiving packets from/to a +/// single source. +/// +/// An unconnected socket can be [`connect`ed] to a specific address if needed, +/// changing the mode to [`Connected`] in the process. 
The remote address of an +/// already connected socket can be changed to a different address using the +/// same method. +/// +/// Both unconnected and connected sockets have three main operations send, +/// receive and peek, all these methods return a [`Future`]. +/// +/// [`connect`ed]: UnixDatagram::connect +/// [`Future`]: std::future::Future pub struct UnixDatagram { fd: AsyncFd, /// The mode in which the socket is in, this determines what methods are From 3d1ed4a68609c3146c3b03964645b8999daae610 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:28:48 +0200 Subject: [PATCH 026/177] Use io_uring for the UdpSocket --- rt/src/net/futures.rs | 100 ++++- rt/src/net/mod.rs | 122 +++++- rt/src/net/udp.rs | 826 +++++++------------------------------ rt/tests/functional/udp.rs | 224 +++------- 4 files changed, 432 insertions(+), 840 deletions(-) diff --git a/rt/src/net/futures.rs b/rt/src/net/futures.rs index 4ff2869e2..81137f655 100644 --- a/rt/src/net/futures.rs +++ b/rt/src/net/futures.rs @@ -9,9 +9,71 @@ use std::task::{self, Poll}; use a10::extract::Extractor; -use crate::io::{Buf, BufMut, BufWrapper}; +use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; -/// [`Future`] behind send implementations. +/// [`Future`] behind `recv` implementations. +pub struct Recv<'a, B>(pub(crate) a10::net::Recv<'a, BufWrapper>); + +impl<'a, B: BufMut> Future for Recv<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + +/// [`Future`] behind `recv_vectored` implementations. +pub struct RecvVectored<'a, B, const N: usize>( + pub(crate) a10::net::RecvVectored<'a, BufWrapper, N>, +); + +impl<'a, B: BufMutSlice, const N: usize> Future for RecvVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, _)| buf.0) + } +} + +/// [`Future`] behind `recv_from` implementations. +pub struct RecvFrom<'a, B, A>(pub(crate) a10::net::RecvFrom<'a, BufWrapper, A>); + +impl<'a, B: BufMut, A: a10::net::SocketAddress> Future for RecvFrom<'a, B, A> { + type Output = io::Result<(B, A)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, addr, _)| (buf.0, addr)) + } +} + +/// [`Future`] behind `recv_from_vectored` implementations. +pub struct RecvFromVectored<'a, B, A, const N: usize>( + pub(crate) a10::net::RecvFromVectored<'a, BufWrapper, A, N>, +); + +impl<'a, B: BufMutSlice, A: a10::net::SocketAddress, const N: usize> Future + for RecvFromVectored<'a, B, A, N> +{ + type Output = io::Result<(B, A)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, addr, _)| (buf.0, addr)) + } +} + +/// [`Future`] behind `send` implementations. pub struct Send<'a, B>(pub(crate) Extractor>>); impl<'a, B: Buf> Future for Send<'a, B> { @@ -25,7 +87,23 @@ impl<'a, B: Buf> Future for Send<'a, B> { } } -/// [`Future`] behind send_to implementations. +/// [`Future`] behind `send_vectored` implementations. 
+pub struct SendVectored<'a, B, const N: usize>( + pub(crate) Extractor, a10::net::NoAddress, N>>, +); + +impl<'a, B: BufSlice, const N: usize> Future for SendVectored<'a, B, N> { + type Output = io::Result<(B, usize)>; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|(buf, n)| (buf.0, n)) + } +} + +/// [`Future`] behind `send_to` implementations. pub struct SendTo<'a, B, A>(pub(crate) Extractor, A>>); impl<'a, B: Buf, A: a10::net::SocketAddress> Future for SendTo<'a, B, A> { @@ -35,20 +113,24 @@ impl<'a, B: Buf, A: a10::net::SocketAddress> Future for SendTo<'a, B, A> { // SAFETY: not moving the `Future`. unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } .poll(ctx) - .map_ok(|(buf, _, len)| (buf.0, len)) + .map_ok(|(buf, len)| (buf.0, len)) } } -/// [`Future`] behind recv implementations. -pub struct Recv<'a, B>(pub(crate) a10::net::Recv<'a, BufWrapper>); +/// [`Future`] behind `send_to_vectored` implementations. +pub struct SendToVectored<'a, B, A, const N: usize>( + pub(crate) Extractor, A, N>>, +); -impl<'a, B: BufMut> Future for Recv<'a, B> { - type Output = io::Result; +impl<'a, B: BufSlice, A: a10::net::SocketAddress, const N: usize> Future + for SendToVectored<'a, B, A, N> +{ + type Output = io::Result<(B, usize)>; fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { // SAFETY: not moving the `Future`. unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } .poll(ctx) - .map_ok(|buf| buf.0) + .map_ok(|(buf, n)| (buf.0, n)) } } diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 019a2b154..3656d79aa 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -47,8 +47,6 @@ use std::io; use std::net::SocketAddr; -use socket2::SockAddr; - mod futures; pub mod tcp; pub mod udp; @@ -61,7 +59,9 @@ pub use udp::UdpSocket; #[doc(no_inline)] pub use uds::UnixDatagram; -pub use futures::{Recv, Send, SendTo}; +pub use futures::{ + Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, SendVectored, +}; /// The unconnected mode of an [`UdpSocket`] or [`UnixDatagram`]. #[allow(missing_debug_implementations)] @@ -75,7 +75,7 @@ pub enum Connected {} /// Convert a `socket2:::SockAddr` into a `std::net::SocketAddr`. #[allow(clippy::needless_pass_by_value)] -fn convert_address(address: SockAddr) -> io::Result { +fn convert_address(address: socket2::SockAddr) -> io::Result { match address.as_socket() { Some(address) => Ok(address), None => Err(io::Error::new( @@ -84,3 +84,117 @@ fn convert_address(address: SockAddr) -> io::Result { )), } } + +mod address { + //! Public in private hack. + + use std::mem::{size_of, MaybeUninit}; + use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + use std::{fmt, ptr}; + + // TODO: merge this into socket2 in some form. 
+ #[derive(Copy, Clone)] + pub union SockAddr { + ip: libc::sockaddr, + ipv4: libc::sockaddr_in, + ipv6: libc::sockaddr_in6, + } + + impl From for SockAddr { + fn from(addr: SocketAddr) -> SockAddr { + match addr { + SocketAddr::V4(addr) => addr.into(), + SocketAddr::V6(addr) => addr.into(), + } + } + } + + impl From for SockAddr { + fn from(addr: SocketAddrV4) -> SockAddr { + SockAddr { + ipv4: libc::sockaddr_in { + sin_family: libc::AF_INET as libc::sa_family_t, + sin_port: addr.port().to_be(), + sin_addr: libc::in_addr { + s_addr: u32::from_ne_bytes(addr.ip().octets()), + }, + sin_zero: Default::default(), + }, + } + } + } + + impl From for SockAddr { + fn from(addr: SocketAddrV6) -> SockAddr { + SockAddr { + ipv6: libc::sockaddr_in6 { + sin6_family: libc::AF_INET6 as libc::sa_family_t, + sin6_port: addr.port().to_be(), + sin6_addr: libc::in6_addr { + s6_addr: addr.ip().octets(), + }, + sin6_flowinfo: addr.flowinfo(), + sin6_scope_id: addr.scope_id(), + }, + } + } + } + + impl From for SocketAddr { + fn from(addr: SockAddr) -> SocketAddr { + match unsafe { addr.ip.sa_family as _ } { + libc::AF_INET => { + let addr = unsafe { addr.ipv4 }; + let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); + let port = u16::from_be(addr.sin_port); + SocketAddr::V4(SocketAddrV4::new(ip, port)) + } + libc::AF_INET6 => { + let addr = unsafe { addr.ipv6 }; + let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); + let port = u16::from_be(addr.sin6_port); + SocketAddr::V6(SocketAddrV6::new( + ip, + port, + addr.sin6_flowinfo, + addr.sin6_scope_id, + )) + } + _ => unreachable!(), + } + } + } + + impl a10::net::SocketAddress for SockAddr { + unsafe fn as_ptr(&self) -> (*const libc::sockaddr, libc::socklen_t) { + match unsafe { self.ip.sa_family as _ } { + libc::AF_INET => self.ipv4.as_ptr(), + libc::AF_INET6 => self.ipv6.as_ptr(), + _ => unreachable!(), + } + } + + unsafe fn as_mut_ptr( + this: &mut MaybeUninit, + ) -> (*mut libc::sockaddr, libc::socklen_t) { + ( + ptr::addr_of_mut!(*this.as_mut_ptr()).cast(), + size_of::() as _, + ) + } + + unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { + debug_assert!(length >= size_of::() as _); + // SAFETY: caller must initialise the address. + this.assume_init() + } + } + + impl fmt::Debug for SockAddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + SocketAddr::from(*self).fmt(f) + } + } +} + +use address::SockAddr; diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 139f75aff..8dcaa92f5 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -2,28 +2,22 @@ //! //! See [`UdpSocket`]. -// TODO: a number of send/recv methods don't use Mio directly, this is fine on -// Unix but doesn't work on Windows (which we don't support). We need to fix -// that once Mio uses Socket2 and supports all the methods we need, Mio's -// tracking issue: https://github.com/tokio-rs/mio/issues/1381. 
- -use std::fmt; -use std::future::Future; -use std::io::{self, IoSlice}; use std::marker::PhantomData; use std::net::SocketAddr; -use std::pin::Pin; -use std::task::{self, Poll}; +use std::os::fd::AsFd; +use std::{fmt, io}; -use heph::actor; +use a10::{AsyncFd, Extract}; #[cfg(target_os = "linux")] use log::warn; -use mio::{net, Interest}; -use socket2::{SockAddr, SockRef}; +use socket2::{Domain, Protocol, SockRef, Type}; -use crate::bytes::{Bytes, BytesVectored, MaybeUninitSlice}; -use crate::net::convert_address; -use crate::{self as rt, Bound}; +use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; +use crate::net::{ + convert_address, Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, + SendVectored, SockAddr, +}; +use crate::{self as rt}; pub use crate::net::{Connected, Unconnected}; @@ -38,7 +32,7 @@ pub use crate::net::{Connected, Unconnected}; /// - [`Connected`] mode only allows sending and receiving packets from/to a /// single source. /// -/// An unconnected socket can be [connected] to a specific address if needed, +/// An unconnected socket can be [`connect`ed] to a specific address if needed, /// changing the mode to [`Connected`] in the process. The remote address of an /// already connected socket can be changed to a different address using the /// same method. @@ -46,7 +40,8 @@ pub use crate::net::{Connected, Unconnected}; /// Both unconnected and connected sockets have three main operations send, /// receive and peek, all these methods return a [`Future`]. /// -/// [connected]: UdpSocket::connect +/// [`connect`ed]: UdpSocket::connect +/// [`Future`]: std::future::Future /// /// # Examples /// @@ -91,15 +86,18 @@ pub use crate::net::{Connected, Unconnected}; /// /// Actor that will bind a UDP socket and waits for incoming packets and /// /// echos the message to standard out. /// async fn echo_server(mut ctx: actor::Context, local: SocketAddr) -> io::Result<()> { -/// let mut socket = UdpSocket::bind(&mut ctx, local)?; +/// let mut socket = UdpSocket::bind(ctx.runtime_ref(), local).await?; /// let mut buf = Vec::with_capacity(4096); /// loop { /// buf.clear(); /// let receive_msg = ctx.receive_next(); -/// let read = socket.recv_from(&mut buf); +/// let read = socket.recv_from(buf); /// let address = match either(read, receive_msg).await { /// // Received a packet. -/// Ok(Ok((_, address))) => address, +/// Ok(Ok((b, address))) => { +/// buf = b; // The buffer will now be filled with data. +/// address +/// }, /// // Read error. /// Ok(Err(err)) => return Err(err), /// // If we receive a terminate message we'll stop the actor. @@ -115,20 +113,18 @@ pub use crate::net::{Connected, Unconnected}; /// } /// /// /// The client that will send a message to the server. -/// async fn client(mut ctx: actor::Context, server_address: SocketAddr) -> io::Result<()> { +/// async fn client(ctx: actor::Context, server_address: SocketAddr) -> io::Result<()> { /// let local_address = "127.0.0.1:7001".parse().unwrap(); -/// let mut socket = UdpSocket::bind(&mut ctx, local_address) -/// .and_then(|socket| socket.connect(server_address))?; +/// let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await? +/// .connect(server_address).await?; /// -/// let msg = b"Hello world"; -/// let n = socket.send(&*msg).await?; +/// let (msg, n) = socket.send("Hello world").await?; /// assert_eq!(n, msg.len()); /// Ok(()) /// } /// ``` pub struct UdpSocket { - /// Underlying UDP socket, backed by Mio. 
- socket: net::UdpSocket, + fd: AsyncFd, /// The mode in which the socket is in, this determines what methods are /// available. mode: PhantomData, @@ -136,50 +132,60 @@ pub struct UdpSocket { impl UdpSocket { /// Create a UDP socket binding to the `local` address. - /// - /// # Notes - /// - /// The UDP socket is also [bound] to the actor that owns the - /// `actor::Context`, which means the actor will be run every time the - /// socket is ready to be read from or write to. - /// - /// [bound]: crate::Bound - pub fn bind( - ctx: &mut actor::Context, - local: SocketAddr, - ) -> io::Result> + pub async fn bind(rt: &RT, local: SocketAddr) -> io::Result> where RT: rt::Access, { - let mut socket = net::UdpSocket::bind(local)?; - ctx.runtime() - .register(&mut socket, Interest::READABLE | Interest::WRITABLE)?; - #[cfg(target_os = "linux")] - if let Some(cpu) = ctx.runtime_ref().cpu() { - if let Err(err) = SockRef::from(&socket).set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on UdpSocket: {err}"); - } - } - Ok(UdpSocket { - socket, + let fd = a10::net::socket( + rt.submission_queue(), + Domain::for_address(local).into(), + Type::DGRAM.cloexec().into(), + Protocol::UDP.into(), + 0, + ) + .await?; + + let socket = UdpSocket { + fd, mode: PhantomData, - }) + }; + + socket.with_ref(|socket| { + #[cfg(target_os = "linux")] + if let Some(cpu) = rt.cpu() { + if let Err(err) = socket.set_cpu_affinity(cpu) { + warn!("failed to set CPU affinity on UdpSocket: {err}"); + } + } + + socket.bind(&local.into())?; + + Ok(()) + })?; + + Ok(socket) } } impl UdpSocket { /// Connects the UDP socket by setting the default destination and limiting - /// packets that are read, written and peeked to the `remote` address. - pub fn connect(self, remote: SocketAddr) -> io::Result> { - self.socket.connect(remote).map(|()| UdpSocket { - socket: self.socket, + /// packets that are received, send and peeked to the `remote` address. + pub async fn connect(self, remote: SocketAddr) -> io::Result> { + self.fd.connect(SockAddr::from(remote)).await?; + Ok(UdpSocket { + fd: self.fd, mode: PhantomData, }) } + /// Returns the sockets peer address. + pub fn peer_addr(&mut self) -> io::Result { + self.with_ref(|socket| socket.peer_addr().and_then(convert_address)) + } + /// Returns the sockets local address. pub fn local_addr(&mut self) -> io::Result { - self.socket.local_addr() + self.with_ref(|socket| socket.local_addr().and_then(convert_address)) } /// Get the value of the `SO_ERROR` option on this socket. @@ -188,653 +194,131 @@ impl UdpSocket { /// the field in the process. This can be useful for checking errors between /// calls. pub fn take_error(&mut self) -> io::Result> { - self.socket.take_error() - } -} - -impl UdpSocket { - /// Attempt to send data to the given `target` address. - /// - /// If the buffer currently can't be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::send_to`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send_to(&mut self, buf: &[u8], target: SocketAddr) -> io::Result { - self.socket.send_to(buf, target) - } - - /// Sends data to the given `target` address. Returns a [`Future`] that on - /// success returns the number of bytes written (`io::Result`). 
- pub fn send_to<'a, 'b>(&'a mut self, buf: &'b [u8], target: SocketAddr) -> SendTo<'a, 'b> { - SendTo { - socket: self, - buf, - target, - } + self.with_ref(|socket| socket.take_error()) } - /// Attempt to send bytes in `bufs` to the peer. - /// - /// If no bytes can currently be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::send_to_vectored`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send_to_vectored( - &mut self, - bufs: &[IoSlice<'_>], - target: SocketAddr, - ) -> io::Result { - SockRef::from(&self.socket).send_to_vectored(bufs, &target.into()) - } - - /// Send the bytes in `bufs` to the peer. - /// - /// Returns the number of bytes written. This may be fewer then the length - /// of `bufs`. - pub fn send_to_vectored<'a, 'b>( - &'a mut self, - bufs: &'b mut [IoSlice<'b>], - target: SocketAddr, - ) -> SendToVectored<'a, 'b> { - SendToVectored { - socket: self, - bufs, - target: target.into(), - } - } - - /// Attempt to receive data from the socket, writing them into `buf`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv_from`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_recv_from(&mut self, mut buf: B) -> io::Result<(usize, SocketAddr)> + fn with_ref(&self, f: F) -> io::Result where - B: Bytes, + F: FnOnce(SockRef<'_>) -> io::Result, { - debug_assert!( - buf.has_spare_capacity(), - "called `UdpSocket::try_recv_from` with an empty buffer" - ); - SockRef::from(&self.socket) - .recv_from(buf.as_bytes()) - .and_then(|(read, address)| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - let address = convert_address(address)?; - Ok((read, address)) - }) - } - - /// Receives data from the socket. Returns a [`Future`] that on success - /// returns the number of bytes read and the address from whence the data - /// came (`io::Result<(usize, SocketAddr>`). - pub fn recv_from(&mut self, buf: B) -> RecvFrom<'_, B> - where - B: Bytes, - { - RecvFrom { socket: self, buf } - } - - /// Attempt to receive data from the socket, writing them into `bufs`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv_from`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_recv_from_vectored(&mut self, mut bufs: B) -> io::Result<(usize, SocketAddr)> - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `UdpSocket::try_recv_from` with empty buffers" - ); - let res = SockRef::from(&self.socket) - .recv_from_vectored(MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut())); - match res { - Ok((read, _, address)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - let address = convert_address(address)?; - Ok((read, address)) - } - Err(err) => Err(err), - } - } - - /// Receives data from the socket. Returns a [`Future`] that on success - /// returns the number of bytes read and the address from whence the data - /// came (`io::Result<(usize, SocketAddr>`). 
- pub fn recv_from_vectored(&mut self, bufs: B) -> RecvFromVectored<'_, B> - where - B: BytesVectored, - { - RecvFromVectored { socket: self, bufs } - } - - /// Attempt to peek data from the socket, writing them into `buf`. - /// - /// If no bytes can currently be peeked this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::peek_from`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_peek_from(&mut self, mut buf: B) -> io::Result<(usize, SocketAddr)> - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `UdpSocket::try_peek_from` with an empty buffer" - ); - SockRef::from(&self.socket) - .peek_from(buf.as_bytes()) - .and_then(|(read, address)| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - let address = convert_address(address)?; - Ok((read, address)) - }) - } - - /// Receives data from the socket, without removing it from the input queue. - /// Returns a [`Future`] that on success returns the number of bytes read - /// and the address from whence the data came (`io::Result<(usize, - /// SocketAddr>`). - pub fn peek_from(&mut self, buf: B) -> PeekFrom<'_, B> - where - B: Bytes, - { - PeekFrom { socket: self, buf } - } - - /// Attempt to peek data from the socket, writing them into `bufs`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv_vectored`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_peek_from_vectored(&mut self, mut bufs: B) -> io::Result<(usize, SocketAddr)> - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `UdpSocket::try_peek_from_vectored` with empty buffers" - ); - let res = SockRef::from(&self.socket).recv_from_vectored_with_flags( - MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut()), - libc::MSG_PEEK, - ); - match res { - Ok((read, _, address)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - let address = convert_address(address)?; - Ok((read, address)) - } - Err(err) => Err(err), - } - } - - /// Receives data from the socket, without removing it from the input queue. - /// Returns a [`Future`] that on success returns the number of bytes read - /// and the address from whence the data came (`io::Result<(usize, - /// SocketAddr>`). - pub fn peek_from_vectored(&mut self, bufs: B) -> PeekFromVectored<'_, B> - where - B: BytesVectored, - { - PeekFromVectored { socket: self, bufs } - } -} - -/// The [`Future`] behind [`UdpSocket::send_to`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendTo<'a, 'b> { - socket: &'a mut UdpSocket, - buf: &'b [u8], - target: SocketAddr, -} - -impl<'a, 'b> Future for SendTo<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - #[rustfmt::skip] - let SendTo { socket, buf, target } = Pin::into_inner(self); - try_io!(socket.try_send_to(buf, *target)) - } -} - -/// The [`Future`] behind [`UdpSocket::send_to_vectored`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendToVectored<'a, 'b> { - socket: &'a mut UdpSocket, - bufs: &'b mut [IoSlice<'b>], - target: SockAddr, -} - -impl<'a, 'b> Future for SendToVectored<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - #[rustfmt::skip] - let SendToVectored { socket, bufs, target } = Pin::into_inner(self); - try_io!(SockRef::from(&socket.socket).send_to_vectored(bufs, target)) - } -} - -/// The [`Future`] behind [`UdpSocket::recv_from`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvFrom<'a, B> { - socket: &'a mut UdpSocket, - buf: B, -} - -impl<'a, B> Future for RecvFrom<'a, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result<(usize, SocketAddr)>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvFrom { socket, buf } = Pin::into_inner(self); - try_io!(socket.try_recv_from(&mut *buf)) - } -} - -/// The [`Future`] behind [`UdpSocket::recv_from_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvFromVectored<'a, B> { - socket: &'a mut UdpSocket, - bufs: B, -} - -impl<'a, B> Future for RecvFromVectored<'a, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result<(usize, SocketAddr)>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvFromVectored { socket, bufs } = Pin::into_inner(self); - try_io!(socket.try_recv_from_vectored(&mut *bufs)) + let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. + f(SockRef::from(&borrowed)) } } -/// The [`Future`] behind [`UdpSocket::peek_from`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PeekFrom<'a, B> { - socket: &'a mut UdpSocket, - buf: B, -} - -impl<'a, B> Future for PeekFrom<'a, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result<(usize, SocketAddr)>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let PeekFrom { socket, buf } = Pin::into_inner(self); - try_io!(socket.try_peek_from(&mut *buf)) - } -} - -/// The [`Future`] behind [`UdpSocket::peek_from_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PeekFromVectored<'a, B> { - socket: &'a mut UdpSocket, - bufs: B, -} - -impl<'a, B> Future for PeekFromVectored<'a, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result<(usize, SocketAddr)>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let PeekFromVectored { socket, bufs } = Pin::into_inner(self); - try_io!(socket.try_peek_from_vectored(&mut *bufs)) +impl UdpSocket { + /// Receives data from the unconnceted socket. + pub async fn recv_from(&mut self, buf: B) -> io::Result<(B, SocketAddr)> { + RecvFrom::(self.fd.recvfrom(BufWrapper(buf), 0)) + .await + .map(|(buf, addr)| (buf, addr.into())) } -} -impl UdpSocket { - /// Attempt to send data to the peer. - /// - /// If the buffer currently can't be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::send_to`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send(&mut self, buf: &[u8]) -> io::Result { - self.socket.send(buf) + /// Receives data from the unconnected socket, using vectored I/O. 
+ pub async fn recv_from_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, SocketAddr)> { + RecvFromVectored::(self.fd.recvfrom_vectored(BufWrapper(bufs), 0)) + .await + .map(|(bufs, addr)| (bufs, addr.into())) } - /// Sends data on the socket to the connected socket. Returns a [`Future`] - /// that on success returns the number of bytes written - /// (`io::Result`). - pub fn send<'a, 'b>(&'a mut self, buf: &'b [u8]) -> Send<'a, 'b> { - Send { socket: self, buf } + /// Receives data from the unconnected socket, without removing it from the + /// input queue. + pub async fn peek_from(&mut self, buf: B) -> io::Result<(B, SocketAddr)> { + RecvFrom::(self.fd.recvfrom(BufWrapper(buf), libc::MSG_PEEK)) + .await + .map(|(buf, addr)| (buf, addr.into())) } - /// Attempt to send bytes in `bufs` to the peer. - /// - /// If no bytes can currently be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::send_vectored`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - SockRef::from(&self.socket).send_vectored(bufs) + /// Receives data from the unconnected socket, without removing it from the + /// input queue, using vectored I/O. + pub async fn peek_from_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, SocketAddr)> { + RecvFromVectored::( + self.fd.recvfrom_vectored(BufWrapper(bufs), libc::MSG_PEEK), + ) + .await + .map(|(buf, addr)| (buf, addr.into())) } - /// Send the bytes in `bufs` to the peer. - /// - /// Returns the number of bytes written. This may we fewer then the length - /// of `bufs`. - pub fn send_vectored<'a, 'b>( + /// Send the bytes in `buf` to `address`. + pub async fn send_to<'a, B: Buf>( &'a mut self, - bufs: &'b mut [IoSlice<'b>], - ) -> SendVectored<'a, 'b> { - SendVectored { socket: self, bufs } - } - - /// Attempt to receive data from the socket, writing them into `buf`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_recv(&mut self, mut buf: B) -> io::Result - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `UdpSocket::try_recv` with an empty buffer" - ); - SockRef::from(&self.socket) - .recv(buf.as_bytes()) - .map(|read| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - read - }) - } - - /// Receives data from the socket. Returns a [`Future`] that on success - /// returns the number of bytes read (`io::Result`). - pub fn recv(&mut self, buf: B) -> Recv<'_, B> - where - B: Bytes, - { - Recv { socket: self, buf } - } - - /// Attempt to receive data from the socket, writing them into `bufs`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv_vectored`]. 
- /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_recv_vectored(&mut self, mut bufs: B) -> io::Result - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `UdpSocket::try_recv_vectored` with empty buffers" - ); - let res = SockRef::from(&self.socket) - .recv_vectored(MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut())); - match res { - Ok((read, _)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - Ok(read) - } - Err(err) => Err(err), - } - } - - /// Receives data from the socket. Returns a [`Future`] that on success - /// returns the number of bytes read (`io::Result`). - pub fn recv_vectored(&mut self, bufs: B) -> RecvVectored<'_, B> - where - B: BytesVectored, - { - RecvVectored { socket: self, bufs } - } - - /// Attempt to peek data from the socket, writing them into `buf`. - /// - /// If no bytes can currently be peeked this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::peek`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_peek(&mut self, mut buf: B) -> io::Result - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `UdpSocket::try_peek` with an empty buffer" - ); - SockRef::from(&self.socket) - .peek(buf.as_bytes()) - .map(|read| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - read - }) - } - - /// Receives data from the socket, without removing it from the input queue. - /// Returns a [`Future`] that on success returns the number of bytes read - /// (`io::Result`). - pub fn peek(&mut self, buf: B) -> Peek<'_, B> - where - B: Bytes, - { - Peek { socket: self, buf } - } - - /// Attempt to peek data from the socket, writing them into `bufs`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`UdpSocket::recv_vectored`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_peek_vectored(&mut self, mut bufs: B) -> io::Result - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `UdpSocket::try_peek_vectored` with empty buffers" - ); - let res = SockRef::from(&self.socket).recv_vectored_with_flags( - MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut()), - libc::MSG_PEEK, - ); - match res { - Ok((read, _)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - Ok(read) - } - Err(err) => Err(err), - } - } - - /// Receives data from the socket, without removing it from the input queue. - /// Returns a [`Future`] that on success returns the number of bytes read - /// (`io::Result`). - pub fn peek_vectored(&mut self, bufs: B) -> PeekVectored<'_, B> - where - B: BytesVectored, - { - PeekVectored { socket: self, bufs } + buf: B, + address: SocketAddr, + ) -> io::Result<(B, usize)> { + SendTo( + self.fd + .sendto(BufWrapper(buf), SockAddr::from(address), 0) + .extract(), + ) + .await + } + + /// Send the bytes in `bufs` to `address`, using vectored I/O. + pub async fn send_to_vectored, const N: usize>( + &mut self, + bufs: B, + address: SocketAddr, + ) -> io::Result<(B, usize)> { + SendToVectored( + self.fd + .sendto_vectored(BufWrapper(bufs), SockAddr::from(address), 0) + .extract(), + ) + .await } } -/// The [`Future`] behind [`UdpSocket::send`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Send<'a, 'b> { - socket: &'a mut UdpSocket, - buf: &'b [u8], -} - -impl<'a, 'b> Future for Send<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Send { socket, buf } = Pin::into_inner(self); - try_io!(socket.try_send(buf)) +impl UdpSocket { + /// Receive bytes from the connected socket. + pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), 0)).await } -} - -/// The [`Future`] behind [`UdpSocket::send_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendVectored<'a, 'b> { - socket: &'a mut UdpSocket, - bufs: &'b mut [IoSlice<'b>], -} - -impl<'a, 'b> Future for SendVectored<'a, 'b> { - type Output = io::Result; - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendVectored { socket, bufs } = Pin::into_inner(self); - try_io!(socket.try_send_vectored(bufs)) + /// Receives data from the connected socket, using vectored I/O. + pub async fn recv_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await } -} - -/// The [`Future`] behind [`UdpSocket::recv`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Recv<'a, B> { - socket: &'a mut UdpSocket, - buf: B, -} - -impl<'a, B> Future for Recv<'a, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Recv { socket, buf } = Pin::into_inner(self); - try_io!(socket.try_recv(&mut *buf)) + /// Receive bytes from the connected socket, without removing it from the + /// input queue, writing them into `buf`. + pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await } -} -/// The [`Future`] behind [`UdpSocket::recv_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PeekVectored<'a, B> { - socket: &'a mut UdpSocket, - bufs: B, -} - -impl<'a, B> Future for PeekVectored<'a, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let PeekVectored { socket, bufs } = Pin::into_inner(self); - try_io!(socket.try_peek_vectored(&mut *bufs)) + /// Receive bytes from the connected socket, without removing it from the + /// input queue, using vectored I/O. + pub async fn peek_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } -} -/// The [`Future`] behind [`UdpSocket::peek`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Peek<'a, B> { - socket: &'a mut UdpSocket, - buf: B, -} - -impl<'a, B> Future for Peek<'a, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Peek { socket, buf } = Pin::into_inner(self); - try_io!(socket.try_peek(&mut *buf)) + /// Sends data on the socket to the connected socket. + pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + Send(self.fd.send(BufWrapper(buf), 0).extract()).await } -} - -/// The [`Future`] behind [`UdpSocket::recv_vectored`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvVectored<'a, B> { - socket: &'a mut UdpSocket, - bufs: B, -} - -impl<'a, B> Future for RecvVectored<'a, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvVectored { socket, bufs } = Pin::into_inner(self); - try_io!(socket.try_recv_vectored(&mut *bufs)) + /// Sends data on the socket to the connected socket, using vectored I/O. + pub async fn send_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, usize)> { + SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await } } impl fmt::Debug for UdpSocket { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.socket.fmt(f) - } -} - -impl Bound for UdpSocket { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.socket, Interest::READABLE | Interest::WRITABLE) + self.fd.fmt(f) } } diff --git a/rt/tests/functional/udp.rs b/rt/tests/functional/udp.rs index 1e8c02b51..5cc70180d 100644 --- a/rt/tests/functional/udp.rs +++ b/rt/tests/functional/udp.rs @@ -1,16 +1,14 @@ //! Tests related to `UdpSocket`. -use std::io::{self, IoSlice}; +use std::io; use std::net::SocketAddr; use std::time::Duration; use heph::actor::{self, Actor, NewActor}; -use heph::actor_ref::{ActorRef, RpcMessage}; -use heph::supervisor::NoSupervisor; -use heph_rt::net::udp::{UdpSocket, Unconnected}; +use heph_rt::net::udp::UdpSocket; use heph_rt::spawn::ActorOptions; use heph_rt::test::{join, try_spawn_local, PanicSupervisor}; -use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal}; +use heph_rt::ThreadLocal; use crate::util::{any_local_address, any_local_ipv6_address}; @@ -68,26 +66,23 @@ where } async fn unconnected_udp_actor( - mut ctx: actor::Context, + ctx: actor::Context, peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let mut socket = UdpSocket::bind(&mut ctx, local_address)?; + let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; assert_eq!(socket.local_addr().unwrap().ip(), local_address.ip()); - let bytes_written = socket.send_to(&DATA, peer_address).await?; + let (_, bytes_written) = socket.send_to(DATA, peer_address).await?; assert_eq!(bytes_written, DATA.len()); - let mut buf = Vec::with_capacity(DATA.len() + 2); - let (bytes_peeked, address) = socket.peek_from(&mut buf).await?; - assert_eq!(bytes_peeked, DATA.len()); - assert_eq!(&buf[..bytes_peeked], &*DATA); + let (mut buf, address) = socket.peek_from(Vec::with_capacity(DATA.len() + 2)).await?; + assert_eq!(buf, DATA); assert_eq!(address, peer_address); buf.clear(); - let (bytes_read, address) = socket.recv_from(&mut buf).await?; - assert_eq!(bytes_read, DATA.len()); - assert_eq!(&buf[..bytes_read], &*DATA); + let (buf, address) = socket.recv_from(buf).await?; + assert_eq!(buf, DATA); assert_eq!(address, peer_address); assert!(socket.take_error().unwrap().is_none()); @@ -96,26 +91,23 @@ async fn unconnected_udp_actor( } async fn connected_udp_actor( - mut ctx: actor::Context, + ctx: actor::Context, peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let socket = UdpSocket::bind(&mut ctx, local_address)?; - let mut socket = socket.connect(peer_address)?; + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let mut 
socket = socket.connect(peer_address).await?; assert_eq!(socket.local_addr().unwrap().ip(), local_address.ip()); - let bytes_written = socket.send(&DATA).await?; + let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); - let mut buf = Vec::with_capacity(DATA.len() + 2); - let bytes_peeked = socket.peek(&mut buf).await?; - assert_eq!(bytes_peeked, DATA.len()); - assert_eq!(&buf[..bytes_peeked], &*DATA); + let mut buf = socket.peek(Vec::with_capacity(DATA.len() + 2)).await?; + assert_eq!(buf, DATA); buf.clear(); - let bytes_read = socket.recv(&mut buf).await?; - assert_eq!(bytes_read, DATA.len()); - assert_eq!(&buf[..bytes_read], &*DATA); + let buf = socket.recv(buf).await?; + assert_eq!(buf, DATA); assert!(socket.take_error().unwrap().is_none()); @@ -164,23 +156,23 @@ fn test_reconnecting(local_address: SocketAddr) { } async fn reconnecting_actor( - mut ctx: actor::Context, + ctx: actor::Context, peer_address1: SocketAddr, peer_address2: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address1.ip(), 0); - let socket = UdpSocket::bind(&mut ctx, local_address)?; - let mut socket = socket.connect(peer_address1)?; + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let mut socket = socket.connect(peer_address1).await?; - let bytes_written = socket.send(&DATA).await?; + let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); - let mut socket = socket.connect(peer_address1)?; - let bytes_written = socket.send(&DATA).await?; + let mut socket = socket.connect(peer_address1).await?; + let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); - let mut socket = socket.connect(peer_address2)?; - let bytes_written = socket.send(&DATA).await?; + let mut socket = socket.connect(peer_address2).await?; + let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); assert!(socket.take_error().unwrap().is_none()); @@ -213,80 +205,68 @@ fn unconnected_vectored_io_ipv6() { } async fn unconnected_vectored_io_actor( - mut ctx: actor::Context, + ctx: actor::Context, peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let mut socket = UdpSocket::bind(&mut ctx, local_address)?; + let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; - let bufs = &mut [ - IoSlice::new(DATAV[0]), - IoSlice::new(DATAV[1]), - IoSlice::new(DATAV[2]), - ]; - let bytes_written = socket.send_to_vectored(bufs, peer_address).await?; + let bufs = [DATAV[0], DATAV[1], DATAV[2]]; + let (_, bytes_written) = socket.send_to_vectored(bufs, peer_address).await?; assert_eq!(bytes_written, DATAV_LEN); - let mut buf1 = Vec::with_capacity(DATAV[0].len()); - let mut buf2 = Vec::with_capacity(DATAV[1].len()); - let mut buf3 = Vec::with_capacity(DATAV[2].len() + 2); - let mut bufs = [&mut buf1, &mut buf2, &mut buf3]; - let (bytes_peeked, address) = socket.peek_from_vectored(&mut bufs).await?; - assert_eq!(bytes_peeked, DATAV_LEN); - assert_eq!(buf1, DATAV[0]); - assert_eq!(buf2, DATAV[1]); - assert_eq!(buf3, DATAV[2]); + let bufs = [ + Vec::with_capacity(DATAV[0].len()), + Vec::with_capacity(DATAV[1].len()), + Vec::with_capacity(DATAV[2].len() + 2), + ]; + let (mut bufs, address) = socket.peek_from_vectored(bufs).await?; + assert_eq!(bufs[0], DATAV[0]); + assert_eq!(bufs[1], DATAV[1]); + assert_eq!(bufs[2], DATAV[2]); assert_eq!(address, peer_address); - buf1.clear(); - buf2.clear(); - buf3.clear(); - 
let mut bufs = [&mut buf1, &mut buf2, &mut buf3]; - let (bytes_read, address) = socket.recv_from_vectored(&mut bufs).await?; - assert_eq!(bytes_read, DATAV_LEN); - assert_eq!(buf1, DATAV[0]); - assert_eq!(buf2, DATAV[1]); - assert_eq!(buf3, DATAV[2]); + for buf in bufs.iter_mut() { + buf.clear(); + } + let (bufs, address) = socket.recv_from_vectored(bufs).await?; + assert_eq!(bufs[0], DATAV[0]); + assert_eq!(bufs[1], DATAV[1]); + assert_eq!(bufs[2], DATAV[2]); assert_eq!(address, peer_address); Ok(()) } async fn connected_vectored_io_actor( - mut ctx: actor::Context, + ctx: actor::Context, peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let socket = UdpSocket::bind(&mut ctx, local_address)?; - let mut socket = socket.connect(peer_address)?; + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let mut socket = socket.connect(peer_address).await?; - let bufs = &mut [ - IoSlice::new(DATAV[0]), - IoSlice::new(DATAV[1]), - IoSlice::new(DATAV[2]), - ]; - let bytes_written = socket.send_vectored(bufs).await?; + let bufs = [DATAV[0], DATAV[1], DATAV[2]]; + let (_, bytes_written) = socket.send_vectored(bufs).await?; assert_eq!(bytes_written, DATAV_LEN); - let mut buf1 = Vec::with_capacity(DATAV[0].len()); - let mut buf2 = Vec::with_capacity(DATAV[1].len()); - let mut buf3 = Vec::with_capacity(DATAV[2].len() + 2); - let mut bufs = [&mut buf1, &mut buf2, &mut buf3]; - let bytes_peeked = socket.peek_vectored(&mut bufs).await?; - assert_eq!(bytes_peeked, DATAV_LEN); - assert_eq!(buf1, DATAV[0]); - assert_eq!(buf2, DATAV[1]); - assert_eq!(buf3, DATAV[2]); - - buf1.clear(); - buf2.clear(); - buf3.clear(); - let mut bufs = [&mut buf1, &mut buf2, &mut buf3]; - let bytes_read = socket.recv_vectored(&mut bufs).await?; - assert_eq!(bytes_read, DATAV_LEN); - assert_eq!(buf1, DATAV[0]); - assert_eq!(buf2, DATAV[1]); - assert_eq!(buf3, DATAV[2]); + let bufs = [ + Vec::with_capacity(DATAV[0].len()), + Vec::with_capacity(DATAV[1].len()), + Vec::with_capacity(DATAV[2].len() + 2), + ]; + let mut bufs = socket.peek_vectored(bufs).await?; + assert_eq!(bufs[0], DATAV[0]); + assert_eq!(bufs[1], DATAV[1]); + assert_eq!(bufs[2], DATAV[2]); + + for buf in bufs.iter_mut() { + buf.clear(); + } + let bufs = socket.recv_vectored(bufs).await?; + assert_eq!(bufs[0], DATAV[0]); + assert_eq!(bufs[1], DATAV[1]); + assert_eq!(bufs[2], DATAV[2]); Ok(()) } @@ -324,71 +304,3 @@ fn assert_read(mut got: &[u8], expected: &[&[u8]]) { got = g; } } - -#[test] -fn actor_bound() { - type Message = RpcMessage, ()>; - - async fn actor1(mut ctx: actor::Context, actor_ref: ActorRef) - where - RT: rt::Access, - { - let mut socket = UdpSocket::bind(&mut ctx, any_local_address()).unwrap(); - let peer_address = socket.local_addr().unwrap(); - let _ = actor_ref.rpc(socket).await.unwrap(); - - let mut socket = UdpSocket::bind(&mut ctx, any_local_address()).unwrap(); - socket.send_to(DATA, peer_address).await.unwrap(); - } - - async fn actor2(mut ctx: actor::Context) - where - RT: rt::Access, - { - let msg = ctx.receive_next().await.unwrap(); - let mut socket = msg.request; - socket.bind_to(&mut ctx).unwrap(); - msg.response.respond(()).unwrap(); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let (n, _) = socket.recv_from(&mut buf).await.unwrap(); - assert_eq!(buf, DATA); - assert_eq!(n, DATA.len()); - } - - fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { - // Spawn thread-local actors. 
- let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - Ok(()) - } - - let mut runtime = Runtime::setup().build().unwrap(); - runtime.run_on_workers(setup).unwrap(); - - // Spawn thread-safe actors. - let actor_ref = runtime.spawn( - NoSupervisor, - actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - runtime.start().unwrap(); -} From 33463ac4621a928bbb4df4bc1dfbdb9ddfe2c124 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:34:20 +0200 Subject: [PATCH 027/177] Expand UnixDatagram API Now matches the UdpSocket API. --- rt/src/net/uds/datagram.rs | 100 +++++++++++++++++++++++++++++++++---- 1 file changed, 89 insertions(+), 11 deletions(-) diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 4fe6be694..853adeccc 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -8,9 +8,12 @@ use log::warn; use socket2::{Domain, SockRef, Type}; use crate as rt; -use crate::io::{Buf, BufMut, BufWrapper}; +use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; use crate::net::uds::UnixAddr; -use crate::net::{Connected, Recv, Send, SendTo, Unconnected}; +use crate::net::{ + Connected, Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, + SendVectored, Unconnected, +}; /// A Unix datagram socket. /// @@ -157,23 +160,98 @@ impl UnixDatagram { } impl UnixDatagram { - // TODO: add `recv_from`, at the time of writing not supported in I/O uring. + /// Receives data from the unconnceted socket. + pub async fn recv_from(&mut self, buf: B) -> io::Result<(B, UnixAddr)> { + RecvFrom(self.fd.recvfrom(BufWrapper(buf), 0)).await + } + + /// Receives data from the unconnected socket, using vectored I/O. + pub async fn recv_from_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, UnixAddr)> { + RecvFromVectored(self.fd.recvfrom_vectored(BufWrapper(bufs), 0)).await + } + + /// Receives data from the unconnected socket, without removing it from the + /// input queue. + pub async fn peek_from(&mut self, buf: B) -> io::Result<(B, UnixAddr)> { + RecvFrom(self.fd.recvfrom(BufWrapper(buf), libc::MSG_PEEK)).await + } + + /// Receives data from the unconnected socket, without removing it from the + /// input queue, using vectored I/O. + pub async fn peek_from_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, UnixAddr)> { + RecvFromVectored(self.fd.recvfrom_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await + } /// Send the bytes in `buf` to `address`. - pub fn send_to<'a, B: Buf>(&'a mut self, buf: B, address: UnixAddr) -> SendTo<'a, B, UnixAddr> { - SendTo(self.fd.sendto(BufWrapper(buf), address, 0).extract()) + pub async fn send_to<'a, B: Buf>( + &'a mut self, + buf: B, + address: UnixAddr, + ) -> io::Result<(B, usize)> { + SendTo(self.fd.sendto(BufWrapper(buf), address, 0).extract()).await + } + + /// Send the bytes in `bufs` to `address`, using vectored I/O. + pub async fn send_to_vectored, const N: usize>( + &mut self, + bufs: B, + address: UnixAddr, + ) -> io::Result<(B, usize)> { + SendToVectored( + self.fd + .sendto_vectored(BufWrapper(bufs), address, 0) + .extract(), + ) + .await } } impl UnixDatagram { - /// Recv bytes from the socket, writing them into `buf`. 
- pub fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> Recv<'a, B> { - Recv(self.fd.recv(BufWrapper(buf), 0)) + /// Receive bytes from the connected socket. + pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), 0)).await + } + + /// Receives data from the connected socket, using vectored I/O. + pub async fn recv_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await + } + + /// Receive bytes from the connected socket, without removing it from the + /// input queue, writing them into `buf`. + pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await + } + + /// Receive bytes from the connected socket, without removing it from the + /// input queue, using vectored I/O. + pub async fn peek_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await + } + + /// Sends data on the socket to the connected socket. + pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + Send(self.fd.send(BufWrapper(buf), 0).extract()).await } - /// Send the bytes in `buf` to the socket's peer. - pub fn send<'a, B: Buf>(&'a mut self, buf: B) -> Send<'a, B> { - Send(self.fd.send(BufWrapper(buf), 0).extract()) + /// Sends data on the socket to the connected socket, using vectored I/O. + pub async fn send_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, usize)> { + SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await } } From e5983595580fcd3dba62cdc58c3e1e324dc5d6f0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:35:08 +0200 Subject: [PATCH 028/177] Make Future types in net module private Instead we'll just expose async functions. --- rt/src/net/futures.rs | 16 ++++++++-------- rt/src/net/mod.rs | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rt/src/net/futures.rs b/rt/src/net/futures.rs index 81137f655..2729ec46d 100644 --- a/rt/src/net/futures.rs +++ b/rt/src/net/futures.rs @@ -12,7 +12,7 @@ use a10::extract::Extractor; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; /// [`Future`] behind `recv` implementations. -pub struct Recv<'a, B>(pub(crate) a10::net::Recv<'a, BufWrapper>); +pub(crate) struct Recv<'a, B>(pub(crate) a10::net::Recv<'a, BufWrapper>); impl<'a, B: BufMut> Future for Recv<'a, B> { type Output = io::Result; @@ -26,7 +26,7 @@ impl<'a, B: BufMut> Future for Recv<'a, B> { } /// [`Future`] behind `recv_vectored` implementations. -pub struct RecvVectored<'a, B, const N: usize>( +pub(crate) struct RecvVectored<'a, B, const N: usize>( pub(crate) a10::net::RecvVectored<'a, BufWrapper, N>, ); @@ -42,7 +42,7 @@ impl<'a, B: BufMutSlice, const N: usize> Future for RecvVectored<'a, B, N> { } /// [`Future`] behind `recv_from` implementations. -pub struct RecvFrom<'a, B, A>(pub(crate) a10::net::RecvFrom<'a, BufWrapper, A>); +pub(crate) struct RecvFrom<'a, B, A>(pub(crate) a10::net::RecvFrom<'a, BufWrapper, A>); impl<'a, B: BufMut, A: a10::net::SocketAddress> Future for RecvFrom<'a, B, A> { type Output = io::Result<(B, A)>; @@ -56,7 +56,7 @@ impl<'a, B: BufMut, A: a10::net::SocketAddress> Future for RecvFrom<'a, B, A> { } /// [`Future`] behind `recv_from_vectored` implementations. 
-pub struct RecvFromVectored<'a, B, A, const N: usize>( +pub(crate) struct RecvFromVectored<'a, B, A, const N: usize>( pub(crate) a10::net::RecvFromVectored<'a, BufWrapper, A, N>, ); @@ -74,7 +74,7 @@ impl<'a, B: BufMutSlice, A: a10::net::SocketAddress, const N: usize> Future } /// [`Future`] behind `send` implementations. -pub struct Send<'a, B>(pub(crate) Extractor>>); +pub(crate) struct Send<'a, B>(pub(crate) Extractor>>); impl<'a, B: Buf> Future for Send<'a, B> { type Output = io::Result<(B, usize)>; @@ -88,7 +88,7 @@ impl<'a, B: Buf> Future for Send<'a, B> { } /// [`Future`] behind `send_vectored` implementations. -pub struct SendVectored<'a, B, const N: usize>( +pub(crate) struct SendVectored<'a, B, const N: usize>( pub(crate) Extractor, a10::net::NoAddress, N>>, ); @@ -104,7 +104,7 @@ impl<'a, B: BufSlice, const N: usize> Future for SendVectored<'a, B, N> { } /// [`Future`] behind `send_to` implementations. -pub struct SendTo<'a, B, A>(pub(crate) Extractor, A>>); +pub(crate) struct SendTo<'a, B, A>(pub(crate) Extractor, A>>); impl<'a, B: Buf, A: a10::net::SocketAddress> Future for SendTo<'a, B, A> { type Output = io::Result<(B, usize)>; @@ -118,7 +118,7 @@ impl<'a, B: Buf, A: a10::net::SocketAddress> Future for SendTo<'a, B, A> { } /// [`Future`] behind `send_to_vectored` implementations. -pub struct SendToVectored<'a, B, A, const N: usize>( +pub(crate) struct SendToVectored<'a, B, A, const N: usize>( pub(crate) Extractor, A, N>>, ); diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 3656d79aa..1079c6c60 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -59,7 +59,7 @@ pub use udp::UdpSocket; #[doc(no_inline)] pub use uds::UnixDatagram; -pub use futures::{ +pub(crate) use futures::{ Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, SendVectored, }; From cfac2ba15cef07f059acf3522a05724e53e69ad9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:38:27 +0200 Subject: [PATCH 029/177] Use async function in pipe module Instead of Future types. --- rt/src/pipe.rs | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index d49797d9a..fefc5db70 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -163,16 +163,16 @@ impl Sender { /// /// Return the number of bytes written. This may we fewer than the length of /// `buf`. To ensure that all bytes are written use [`Sender::write_all`]. - pub fn write<'a, B: Buf>(&'a mut self, buf: B) -> Write<'a, B> { - Write(self.fd.write(BufWrapper(buf)).extract()) + pub async fn write<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + Write(self.fd.write(BufWrapper(buf)).extract()).await } /// Write the all bytes in `buf` into the pipe. /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn write_all<'a, B: Buf>(&'a mut self, buf: B) -> WriteAll<'a, B> { - WriteAll(self.fd.write_all(BufWrapper(buf)).extract()) + pub async fn write_all<'a, B: Buf>(&'a mut self, buf: B) -> io::Result { + WriteAll(self.fd.write_all(BufWrapper(buf)).extract()).await } /// Write the bytes in `bufs` intoto the pipe. @@ -180,22 +180,22 @@ impl Sender { /// Return the number of bytes written. This may we fewer than the length of /// `bufs`. To ensure that all bytes are written use /// [`Sender::write_vectored_all`]. 
- pub fn write_vectored<'a, B: BufSlice, const N: usize>( + pub async fn write_vectored<'a, B: BufSlice, const N: usize>( &'a mut self, bufs: B, - ) -> WriteVectored<'a, B, N> { - WriteVectored(self.fd.write_vectored(BufWrapper(bufs)).extract()) + ) -> io::Result<(B, usize)> { + WriteVectored(self.fd.write_vectored(BufWrapper(bufs)).extract()).await } /// Write the all bytes in `bufs` into the pipe. /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn write_vectored_all<'a, B: BufSlice, const N: usize>( + pub async fn write_vectored_all<'a, B: BufSlice, const N: usize>( &'a mut self, bufs: B, - ) -> WriteAllVectored<'a, B, N> { - WriteAllVectored(self.fd.write_all_vectored(BufWrapper(bufs)).extract()) + ) -> io::Result { + WriteAllVectored(self.fd.write_all_vectored(BufWrapper(bufs)).extract()).await } } @@ -232,40 +232,40 @@ impl Receiver { } /// Read bytes from the pipe, writing them into `buf`. - pub fn read<'a, B: BufMut>(&'a mut self, buf: B) -> Read<'a, B> { - Read(self.fd.read(BufWrapper(buf))) + pub async fn read<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Read(self.fd.read(BufWrapper(buf))).await } /// Read at least `n` bytes from the pipe, writing them into `buf`. /// /// This returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes /// could be read. - pub fn read_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> ReadN<'a, B> { + pub async fn read_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> io::Result { debug_assert!( buf.spare_capacity() >= n, "called `Receiver::read_n` with a buffer smaller than `n`", ); - ReadN(self.fd.read_n(BufWrapper(buf), n)) + ReadN(self.fd.read_n(BufWrapper(buf), n)).await } /// Read bytes from the pipe, writing them into `bufs`. - pub fn read_vectored<'a, B: BufMutSlice, const N: usize>( + pub async fn read_vectored<'a, B: BufMutSlice, const N: usize>( &'a mut self, bufs: B, - ) -> ReadVectored<'a, B, N> { - ReadVectored(self.fd.read_vectored(BufWrapper(bufs))) + ) -> io::Result { + ReadVectored(self.fd.read_vectored(BufWrapper(bufs))).await } /// Read at least `n` bytes from the pipe, writing them into `bufs`. - pub fn read_n_vectored<'a, B: BufMutSlice, const N: usize>( + pub async fn read_n_vectored<'a, B: BufMutSlice, const N: usize>( &'a mut self, bufs: B, n: usize, - ) -> ReadNVectored<'a, B, N> { + ) -> io::Result { debug_assert!( bufs.total_spare_capacity() >= n, "called `Receiver::read_n_vectored` with buffers smaller than `n`" ); - ReadNVectored(self.fd.read_n_vectored(BufWrapper(bufs), n)) + ReadNVectored(self.fd.read_n_vectored(BufWrapper(bufs), n)).await } } From 4b9b1bc090786e02ec88e1d0ec40d15a0240fe1e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:38:47 +0200 Subject: [PATCH 030/177] Make io Future types private --- rt/src/io/futures.rs | 16 ++++++++-------- rt/src/io/mod.rs | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rt/src/io/futures.rs b/rt/src/io/futures.rs index f567ab7e4..a9a289175 100644 --- a/rt/src/io/futures.rs +++ b/rt/src/io/futures.rs @@ -12,7 +12,7 @@ use a10::extract::Extractor; use crate::io::buf::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; /// [`Future`] behind write implementations. 
-pub struct Write<'a, B>(pub(crate) Extractor>>); +pub(crate) struct Write<'a, B>(pub(crate) Extractor>>); impl<'a, B: Buf> Future for Write<'a, B> { type Output = io::Result<(B, usize)>; @@ -26,7 +26,7 @@ impl<'a, B: Buf> Future for Write<'a, B> { } /// [`Future`] behind write all implementations. -pub struct WriteAll<'a, B>(pub(crate) Extractor>>); +pub(crate) struct WriteAll<'a, B>(pub(crate) Extractor>>); impl<'a, B: Buf> Future for WriteAll<'a, B> { type Output = io::Result; @@ -40,7 +40,7 @@ impl<'a, B: Buf> Future for WriteAll<'a, B> { } /// [`Future`] behind write vectored implementations. -pub struct WriteVectored<'a, B, const N: usize>( +pub(crate) struct WriteVectored<'a, B, const N: usize>( pub(crate) Extractor, N>>, ); @@ -56,7 +56,7 @@ impl<'a, B: BufSlice, const N: usize> Future for WriteVectored<'a, B, N> { } /// [`Future`] behind write all vectored implementations. -pub struct WriteAllVectored<'a, B, const N: usize>( +pub(crate) struct WriteAllVectored<'a, B, const N: usize>( pub(crate) Extractor, N>>, ); @@ -72,7 +72,7 @@ impl<'a, B: BufSlice, const N: usize> Future for WriteAllVectored<'a, B, N> { } /// [`Future`] behind read implementations. -pub struct Read<'a, B>(pub(crate) a10::io::Read<'a, BufWrapper>); +pub(crate) struct Read<'a, B>(pub(crate) a10::io::Read<'a, BufWrapper>); impl<'a, B: BufMut> Future for Read<'a, B> { type Output = io::Result; @@ -86,7 +86,7 @@ impl<'a, B: BufMut> Future for Read<'a, B> { } /// [`Future`] behind read vectored implementations. -pub struct ReadVectored<'a, B, const N: usize>( +pub(crate) struct ReadVectored<'a, B, const N: usize>( pub(crate) a10::io::ReadVectored<'a, BufWrapper, N>, ); @@ -102,7 +102,7 @@ impl<'a, B: BufMutSlice, const N: usize> Future for ReadVectored<'a, B, N> { } /// [`Future`] behind read `n` implementations. -pub struct ReadN<'a, B>(pub(crate) a10::io::ReadN<'a, BufWrapper>); +pub(crate) struct ReadN<'a, B>(pub(crate) a10::io::ReadN<'a, BufWrapper>); impl<'a, B: BufMut> Future for ReadN<'a, B> { type Output = io::Result; @@ -116,7 +116,7 @@ impl<'a, B: BufMut> Future for ReadN<'a, B> { } /// [`Future`] behind read `n` vectored implementations. -pub struct ReadNVectored<'a, B, const N: usize>( +pub(crate) struct ReadNVectored<'a, B, const N: usize>( pub(crate) a10::io::ReadNVectored<'a, BufWrapper, N>, ); diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index c10edbde8..6122860e3 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -18,6 +18,6 @@ pub(crate) use buf::BufWrapper; pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; mod futures; -pub use futures::{ +pub(crate) use futures::{ Read, ReadN, ReadNVectored, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored, }; From b527d4a1479fcfa64c0663701a5cf0ba6f03f7df Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 13:41:40 +0200 Subject: [PATCH 031/177] Remove notes about actor::Bound trait in pipe module No longer relevant. --- rt/src/pipe.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index fefc5db70..b64a00cf7 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -10,14 +10,6 @@ //! //! [spawning another process]: std::process::Command //! -//! # Notes -//! -//! Both the [`Sender`] and [`Receiver`] types are [bound] to an actor. See the -//! [`Bound`] trait for more information. -//! -//! [bound]: crate::Bound -//! [`Bound`]: crate::Bound -//! //! # Examples //! //! Creating a new Unix pipe. 
From a60fe08cb62bc19eb5e51ad42b71c795bf003e6a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 14:22:52 +0200 Subject: [PATCH 032/177] Make net::SockAddr private Instead of using the public in private hack, now that the net Future types are also private. --- rt/src/net/mod.rs | 173 ++++++++++++++++++++++------------------------ 1 file changed, 81 insertions(+), 92 deletions(-) diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 1079c6c60..cfa989b1c 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -44,8 +44,9 @@ //! [bound]: crate::Bound //! [`Bound`]: crate::Bound -use std::io; -use std::net::SocketAddr; +use std::mem::{size_of, MaybeUninit}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::{fmt, io, ptr}; mod futures; pub mod tcp; @@ -85,116 +86,104 @@ fn convert_address(address: socket2::SockAddr) -> io::Result { } } -mod address { - //! Public in private hack. - - use std::mem::{size_of, MaybeUninit}; - use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; - use std::{fmt, ptr}; - - // TODO: merge this into socket2 in some form. - #[derive(Copy, Clone)] - pub union SockAddr { - ip: libc::sockaddr, - ipv4: libc::sockaddr_in, - ipv6: libc::sockaddr_in6, - } +// TODO: merge this into socket2 in some form. +#[derive(Copy, Clone)] +pub(crate) union SockAddr { + ip: libc::sockaddr, + ipv4: libc::sockaddr_in, + ipv6: libc::sockaddr_in6, +} - impl From for SockAddr { - fn from(addr: SocketAddr) -> SockAddr { - match addr { - SocketAddr::V4(addr) => addr.into(), - SocketAddr::V6(addr) => addr.into(), - } +impl From for SockAddr { + fn from(addr: SocketAddr) -> SockAddr { + match addr { + SocketAddr::V4(addr) => addr.into(), + SocketAddr::V6(addr) => addr.into(), } } +} - impl From for SockAddr { - fn from(addr: SocketAddrV4) -> SockAddr { - SockAddr { - ipv4: libc::sockaddr_in { - sin_family: libc::AF_INET as libc::sa_family_t, - sin_port: addr.port().to_be(), - sin_addr: libc::in_addr { - s_addr: u32::from_ne_bytes(addr.ip().octets()), - }, - sin_zero: Default::default(), +impl From for SockAddr { + fn from(addr: SocketAddrV4) -> SockAddr { + SockAddr { + ipv4: libc::sockaddr_in { + sin_family: libc::AF_INET as libc::sa_family_t, + sin_port: addr.port().to_be(), + sin_addr: libc::in_addr { + s_addr: u32::from_ne_bytes(addr.ip().octets()), }, - } + sin_zero: Default::default(), + }, } } +} - impl From for SockAddr { - fn from(addr: SocketAddrV6) -> SockAddr { - SockAddr { - ipv6: libc::sockaddr_in6 { - sin6_family: libc::AF_INET6 as libc::sa_family_t, - sin6_port: addr.port().to_be(), - sin6_addr: libc::in6_addr { - s6_addr: addr.ip().octets(), - }, - sin6_flowinfo: addr.flowinfo(), - sin6_scope_id: addr.scope_id(), +impl From for SockAddr { + fn from(addr: SocketAddrV6) -> SockAddr { + SockAddr { + ipv6: libc::sockaddr_in6 { + sin6_family: libc::AF_INET6 as libc::sa_family_t, + sin6_port: addr.port().to_be(), + sin6_addr: libc::in6_addr { + s6_addr: addr.ip().octets(), }, - } + sin6_flowinfo: addr.flowinfo(), + sin6_scope_id: addr.scope_id(), + }, } } +} - impl From for SocketAddr { - fn from(addr: SockAddr) -> SocketAddr { - match unsafe { addr.ip.sa_family as _ } { - libc::AF_INET => { - let addr = unsafe { addr.ipv4 }; - let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); - let port = u16::from_be(addr.sin_port); - SocketAddr::V4(SocketAddrV4::new(ip, port)) - } - libc::AF_INET6 => { - let addr = unsafe { addr.ipv6 }; - let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); - let port = 
u16::from_be(addr.sin6_port); - SocketAddr::V6(SocketAddrV6::new( - ip, - port, - addr.sin6_flowinfo, - addr.sin6_scope_id, - )) - } - _ => unreachable!(), +impl From for SocketAddr { + fn from(addr: SockAddr) -> SocketAddr { + match unsafe { addr.ip.sa_family as _ } { + libc::AF_INET => { + let addr = unsafe { addr.ipv4 }; + let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes()); + let port = u16::from_be(addr.sin_port); + SocketAddr::V4(SocketAddrV4::new(ip, port)) } - } - } - - impl a10::net::SocketAddress for SockAddr { - unsafe fn as_ptr(&self) -> (*const libc::sockaddr, libc::socklen_t) { - match unsafe { self.ip.sa_family as _ } { - libc::AF_INET => self.ipv4.as_ptr(), - libc::AF_INET6 => self.ipv6.as_ptr(), - _ => unreachable!(), + libc::AF_INET6 => { + let addr = unsafe { addr.ipv6 }; + let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr); + let port = u16::from_be(addr.sin6_port); + SocketAddr::V6(SocketAddrV6::new( + ip, + port, + addr.sin6_flowinfo, + addr.sin6_scope_id, + )) } + _ => unreachable!(), } + } +} - unsafe fn as_mut_ptr( - this: &mut MaybeUninit, - ) -> (*mut libc::sockaddr, libc::socklen_t) { - ( - ptr::addr_of_mut!(*this.as_mut_ptr()).cast(), - size_of::() as _, - ) +impl a10::net::SocketAddress for SockAddr { + unsafe fn as_ptr(&self) -> (*const libc::sockaddr, libc::socklen_t) { + match unsafe { self.ip.sa_family as _ } { + libc::AF_INET => self.ipv4.as_ptr(), + libc::AF_INET6 => self.ipv6.as_ptr(), + _ => unreachable!(), } + } - unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { - debug_assert!(length >= size_of::() as _); - // SAFETY: caller must initialise the address. - this.assume_init() - } + unsafe fn as_mut_ptr(this: &mut MaybeUninit) -> (*mut libc::sockaddr, libc::socklen_t) { + ( + ptr::addr_of_mut!(*this.as_mut_ptr()).cast(), + size_of::() as _, + ) } - impl fmt::Debug for SockAddr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - SocketAddr::from(*self).fmt(f) - } + unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { + debug_assert!(length >= size_of::() as _); + // SAFETY: caller must initialise the address. + this.assume_init() } } -use address::SockAddr; +impl fmt::Debug for SockAddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + SocketAddr::from(*self).fmt(f) + } +} From 56cc737cf061570af3d1f65052e7ba25dea30548 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 15:15:13 +0200 Subject: [PATCH 033/177] Use io_uring for TcpListener --- rt/src/net/tcp/listener.rs | 187 +++++++++++++--------------- rt/tests/functional/tcp/listener.rs | 178 ++++---------------------- rt/tests/functional/tcp/stream.rs | 16 ++- rt/tests/regression/issue_145.rs | 4 +- 4 files changed, 128 insertions(+), 257 deletions(-) diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index f60011701..f72b04aca 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -1,17 +1,20 @@ //! Module with [`TcpListener`] and related types. 
use std::async_iter::AsyncIterator; -use std::future::Future; -use std::io; +use std::mem::ManuallyDrop; use std::net::SocketAddr; +use std::os::fd::{AsFd, AsRawFd, FromRawFd}; use std::pin::Pin; use std::task::{self, Poll}; +use std::{fmt, io}; +use a10::AsyncFd; use heph::actor; -use mio::{net, Interest}; +use mio::Interest; +use socket2::{Domain, Protocol, SockRef, Type}; -use crate::net::TcpStream; -use crate::{self as rt, Bound}; +use crate::net::{convert_address, SockAddr, TcpStream}; +use crate::{self as rt}; /// A TCP socket listener. /// @@ -77,7 +80,7 @@ use crate::{self as rt, Bound}; /// /// async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. -/// let mut listener = TcpListener::bind(&mut ctx, address)?; +/// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// /// // Accept a connection. /// let (unbound_stream, peer_address) = listener.accept().await?; @@ -143,17 +146,18 @@ use crate::{self as rt, Bound}; /// /// async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. -/// let mut listener = TcpListener::bind(&mut ctx, address)?; +/// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// let mut incoming = listener.incoming(); /// loop { -/// let (unbound_stream, peer_address) = match next(&mut incoming).await { -/// Some(Ok((unbound_stream, peer_address))) => (unbound_stream, peer_address), +/// let unbound_stream = match next(&mut incoming).await { +/// Some(Ok(unbound_stream)) => unbound_stream, /// Some(Err(err)) => return Err(err), /// None => return Ok(()), /// }; /// -/// info!("accepted connection from: {peer_address}"); /// let mut stream = unbound_stream.bind_to(&mut ctx)?; +/// let peer_address = stream.peer_addr()?; +/// info!("accepted connection from: {peer_address}"); /// /// // Next we write the IP address to the connection. /// let ip = peer_address.to_string(); @@ -162,92 +166,80 @@ use crate::{self as rt, Bound}; /// } /// } /// ``` -#[derive(Debug)] pub struct TcpListener { - /// The underlying TCP listener, backed by Mio. - socket: net::TcpListener, + fd: AsyncFd, } impl TcpListener { /// Creates a new `TcpListener` which will be bound to the specified /// `address`. - /// - /// # Notes - /// - /// The listener is also [bound] to the actor that owns the - /// `actor::Context`, which means the actor will be run every time the - /// listener has a connection ready to be accepted. - /// - /// [bound]: crate::Bound - pub fn bind( - ctx: &mut actor::Context, - address: SocketAddr, - ) -> io::Result + pub async fn bind(rt: &RT, address: SocketAddr) -> io::Result where RT: rt::Access, { - let mut socket = net::TcpListener::bind(address)?; - ctx.runtime().register(&mut socket, Interest::READABLE)?; - Ok(TcpListener { socket }) + let fd = a10::net::socket( + rt.submission_queue(), + Domain::for_address(address).into(), + Type::STREAM.cloexec().into(), + Protocol::TCP.into(), + 0, + ) + .await?; + + let socket = TcpListener { fd }; + + socket.with_ref(|socket| { + #[cfg(target_os = "linux")] + if let Some(cpu) = rt.cpu() { + if let Err(err) = socket.set_cpu_affinity(cpu) { + log::warn!("failed to set CPU affinity on UdpSocket: {err}"); + } + } + + socket.bind(&address.into())?; + socket.listen(1024)?; + + Ok(()) + })?; + + Ok(socket) } /// Returns the local socket address of this listener. 
     pub fn local_addr(&mut self) -> io::Result<SocketAddr> {
-        self.socket.local_addr()
+        self.with_ref(|socket| socket.local_addr().and_then(convert_address))
     }
 
     /// Sets the value for the `IP_TTL` option on this socket.
     pub fn set_ttl(&mut self, ttl: u32) -> io::Result<()> {
-        self.socket.set_ttl(ttl)
+        self.with_ref(|socket| socket.set_ttl(ttl))
     }
 
     /// Gets the value of the `IP_TTL` option for this socket.
     pub fn ttl(&mut self) -> io::Result<u32> {
-        self.socket.ttl()
+        self.with_ref(|socket| socket.ttl())
     }
 
-    /// Attempts to accept a new incoming [`TcpStream`].
-    ///
-    /// If an accepted TCP stream is returned, the remote address of the peer is
-    /// returned along with it.
-    ///
-    /// If no streams are currently queued this will return an error with the
-    /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use
-    /// [`TcpListener::accept`].
+    /// Accept a new incoming [`TcpStream`].
     ///
-    /// See the [`TcpListener`] documentation for an example.
-    ///
-    /// [kind]: io::Error::kind
-    /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock
-    pub fn try_accept(&mut self) -> io::Result<(UnboundTcpStream, SocketAddr)> {
-        self.socket.accept().map(|(socket, address)| {
-            (
-                UnboundTcpStream {
-                    stream: TcpStream { socket },
-                },
-                address,
-            )
-        })
+    /// Returns the TCP stream and the remote address of the peer. See the
+    /// [`TcpListener`] documentation for an example.
+    pub async fn accept(&mut self) -> io::Result<(UnboundTcpStream, SocketAddr)> {
+        self.fd
+            .accept::<SockAddr>()
+            .await
+            .map(|(fd, addr)| (UnboundTcpStream::from_async_fd(fd), addr.into()))
     }
 
-    /// Accepts a new incoming [`TcpStream`].
+    /// Returns a stream of incoming [`TcpStream`]s.
     ///
-    /// If an accepted TCP stream is returned, the remote address of the peer is
-    /// returned along with it.
+    /// Note that unlike [`accept`] this doesn't return the address because it
+    /// uses io_uring's multishot accept (making it faster than calling `accept`
+    /// in a loop). See the [`TcpListener`] documentation for an example.
    ///
-    /// See the [`TcpListener`] documentation for an example.
-    pub fn accept(&mut self) -> Accept<'_> {
-        Accept {
-            listener: Some(self),
-        }
-    }
-
-    /// Returns a stream that iterates over the [`TcpStream`]s being received on
-    /// this listener.
-    ///
-    /// See the [`TcpListener`] documentation for an example.
+    /// [`accept`]: TcpListener::accept
     pub fn incoming(&mut self) -> Incoming<'_> {
-        Incoming { listener: self }
+        Incoming(self.fd.multishot_accept())
    }
 
     /// Get the value of the `SO_ERROR` option on this socket.
@@ -256,7 +248,15 @@ impl TcpListener {
     /// the field in the process. This can be useful for checking errors between
     /// calls.
     pub fn take_error(&mut self) -> io::Result<Option<io::Error>> {
-        self.socket.take_error()
+        self.with_ref(|socket| socket.take_error())
+    }
+
+    fn with_ref<F, T>(&self, f: F) -> io::Result<T>
+    where
+        F: FnOnce(SockRef<'_>) -> io::Result<T>,
+    {
+        let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5.
+        f(SockRef::from(&borrowed))
     }
 }
 
@@ -284,26 +284,17 @@ impl UnboundTcpStream {
             )
             .map(|()| self.stream)
     }
-}
 
-/// The [`Future`] behind [`TcpListener::accept`].
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Accept<'a> { - listener: Option<&'a mut TcpListener>, -} - -impl<'a> Future for Accept<'a> { - type Output = io::Result<(UnboundTcpStream, SocketAddr)>; - - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - match self.listener { - Some(ref mut listener) => try_io!(listener.try_accept()).map(|res| { - // Only remove the listener if we return a stream. - self.listener = None; - res - }), - None => panic!("polled Accept after it return Poll::Ready"), + fn from_async_fd(fd: AsyncFd) -> UnboundTcpStream { + UnboundTcpStream { + stream: TcpStream { + // SAFETY: the put `fd` in a `ManuallyDrop` to ensure we don't + // close it, so we're free to create a `TcpStream` from the fd. + socket: unsafe { + let fd = ManuallyDrop::new(fd); + FromRawFd::from_raw_fd(fd.as_fd().as_raw_fd()) + }, + }, } } } @@ -311,23 +302,21 @@ impl<'a> Future for Accept<'a> { /// The [`AsyncIterator`] behind [`TcpListener::incoming`]. #[derive(Debug)] #[must_use = "AsyncIterators do nothing unless polled"] -pub struct Incoming<'a> { - listener: &'a mut TcpListener, -} +pub struct Incoming<'a>(a10::net::MultishotAccept<'a>); impl<'a> AsyncIterator for Incoming<'a> { - type Item = io::Result<(UnboundTcpStream, SocketAddr)>; + type Item = io::Result; - fn poll_next(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { - try_io!(self.listener.try_accept()).map(Some) + fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll_next(ctx) + .map_ok(UnboundTcpStream::from_async_fd) } } -impl Bound for TcpListener { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.socket, Interest::READABLE) +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fd.fmt(f) } } diff --git a/rt/tests/functional/tcp/listener.rs b/rt/tests/functional/tcp/listener.rs index 021af0cfa..3d518029f 100644 --- a/rt/tests/functional/tcp/listener.rs +++ b/rt/tests/functional/tcp/listener.rs @@ -1,65 +1,62 @@ -use std::io; use std::net::SocketAddr; -use std::pin::Pin; -use std::task::Poll; use std::time::Duration; use heph::supervisor::NoSupervisor; use heph::{actor, ActorRef}; use heph_rt::net::{TcpListener, TcpStream}; use heph_rt::spawn::ActorOptions; -use heph_rt::test::{init_local_actor, join_many, poll_actor, try_spawn_local}; +use heph_rt::test::{join, join_many, try_spawn_local}; use heph_rt::util::next; -use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal}; +use heph_rt::{self as rt, ThreadLocal}; -use crate::util::{any_local_address, any_local_ipv6_address, pending_once}; +use crate::util::{any_local_address, any_local_ipv6_address}; #[test] fn local_addr() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let address = "127.0.0.1:12345".parse().unwrap(); - let mut listener = TcpListener::bind(&mut ctx, address).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); assert_eq!(listener.local_addr().unwrap(), address); drop(listener); let address = "[::1]:12345".parse().unwrap(); - let mut listener = TcpListener::bind(&mut ctx, address).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); assert_eq!(listener.local_addr().unwrap(), address); } let 
actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + let actor_ref = try_spawn_local(NoSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); } #[test] fn local_addr_port_zero() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let address = any_local_address(); - let mut listener = TcpListener::bind(&mut ctx, address).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let got = listener.local_addr().unwrap(); assert_eq!(got.ip(), address.ip()); assert!(got.port() != 0); drop(listener); let address = any_local_ipv6_address(); - let mut listener = TcpListener::bind(&mut ctx, address).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let got = listener.local_addr().unwrap(); assert_eq!(got.ip(), address.ip()); assert!(got.port() != 0); } let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + let actor_ref = try_spawn_local(NoSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); } #[test] fn ttl() { - async fn actor(mut ctx: actor::Context) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + async fn actor(ctx: actor::Context) { + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let initial = listener.ttl().unwrap(); let expected = initial + 10; @@ -68,9 +65,8 @@ fn ttl() { } let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + let actor_ref = try_spawn_local(NoSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); } const DATA: &[u8] = b"Hello world"; @@ -89,55 +85,15 @@ where assert_eq!(n, DATA.len()); } -#[test] -fn try_accept() { - async fn listener_actor( - mut ctx: actor::Context, - actor_ref: ActorRef, - ) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); - - let address = listener.local_addr().unwrap(); - actor_ref.send(address).await.unwrap(); - - assert_eq!( - listener.try_accept().unwrap_err().kind(), - io::ErrorKind::WouldBlock - ); - - let mut stream = loop { - pending_once().await; - if let Ok((stream, remote_address)) = listener.try_accept() { - assert!(remote_address.ip().is_loopback()); - break stream.bind_to(&mut ctx).unwrap(); - } - }; - - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); - assert_eq!(buf, DATA); - } - - let stream_actor = stream_actor as fn(_) -> _; - let stream_ref = - try_spawn_local(NoSupervisor, stream_actor, (), ActorOptions::default()).unwrap(); - - let listener_actor = listener_actor as fn(_, _) -> _; - let s_ref = stream_ref.clone(); - let listener_ref = - try_spawn_local(NoSupervisor, listener_actor, s_ref, ActorOptions::default()).unwrap(); - - join_many(&[stream_ref, listener_ref], Duration::from_secs(1)).unwrap(); -} - #[test] fn accept() { async fn listener_actor( mut ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = 
TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); @@ -170,15 +126,16 @@ fn incoming() { mut ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); let mut incoming = listener.incoming(); - let (stream, remote_address) = next(&mut incoming).await.unwrap().unwrap(); + let stream = next(&mut incoming).await.unwrap().unwrap(); let mut stream = stream.bind_to(&mut ctx).unwrap(); - assert!(remote_address.ip().is_loopback()); let mut buf = Vec::with_capacity(DATA.len() + 1); let n = stream.recv(&mut buf).await.unwrap(); @@ -197,86 +154,3 @@ fn incoming() { join_many(&[stream_ref, listener_ref], Duration::from_secs(1)).unwrap(); } - -#[test] -fn actor_bound() { - async fn listener_actor1(mut ctx: actor::Context, actor_ref: ActorRef) - where - RT: rt::Access, - { - let listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); - actor_ref.send(listener).await.unwrap(); - } - - async fn listener_actor2( - mut ctx: actor::Context, - actor_ref: ActorRef, - ) where - RT: rt::Access, - { - let mut listener = ctx.receive_next().await.unwrap(); - listener.bind_to(&mut ctx).unwrap(); - - let address = listener.local_addr().unwrap(); - actor_ref.send(address).await.unwrap(); - - let (stream, remote_address) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); - assert!(remote_address.ip().is_loopback()); - - stream.bind_to(&mut ctx).unwrap(); - - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); - assert_eq!(buf, DATA); - } - - fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { - // Spawn thread-local actors. - let stream_ref = runtime_ref.spawn_local( - NoSupervisor, - stream_actor as fn(_) -> _, - (), - ActorOptions::default(), - ); - let listener_ref = runtime_ref.spawn_local( - NoSupervisor, - listener_actor2 as fn(_, _) -> _, - stream_ref, - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - listener_actor1 as fn(_, _) -> _, - listener_ref, - ActorOptions::default(), - ); - Ok(()) - } - - let mut runtime = Runtime::setup().build().unwrap(); - runtime.run_on_workers(setup).unwrap(); - - // Spawn thread-safe actors. 
- let stream_ref = runtime.spawn( - NoSupervisor, - stream_actor as fn(_) -> _, - (), - ActorOptions::default(), - ); - let listener_ref = runtime.spawn( - NoSupervisor, - listener_actor2 as fn(_, _) -> _, - stream_ref, - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - listener_actor1 as fn(_, _) -> _, - listener_ref, - ActorOptions::default(), - ); - - runtime.start().unwrap(); -} diff --git a/rt/tests/functional/tcp/stream.rs b/rt/tests/functional/tcp/stream.rs index 8c2ed71c6..e368ce15f 100644 --- a/rt/tests/functional/tcp/stream.rs +++ b/rt/tests/functional/tcp/stream.rs @@ -992,7 +992,9 @@ fn shutdown_read() { mut ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); @@ -1043,7 +1045,9 @@ fn shutdown_write() { mut ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); @@ -1096,7 +1100,9 @@ fn shutdown_both() { mut ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); @@ -1147,7 +1153,9 @@ fn actor_bound() { where RT: rt::Access, { - let mut listener = TcpListener::bind(&mut ctx, any_local_address()).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + .await + .unwrap(); let peer_address = listener.local_addr().unwrap(); let stream = TcpStream::connect(&mut ctx, peer_address) diff --git a/rt/tests/regression/issue_145.rs b/rt/tests/regression/issue_145.rs index a341b458b..789c00d1e 100644 --- a/rt/tests/regression/issue_145.rs +++ b/rt/tests/regression/issue_145.rs @@ -123,10 +123,10 @@ fn issue_145_tcp_listener() { runtime.start().unwrap(); } -async fn listener_actor(mut ctx: actor::Context) -> Result<(), !> { +async fn listener_actor(ctx: actor::Context) -> Result<(), !> { let address = "127.0.0.1:0".parse().unwrap(); // NOTE: this should not fail. - let mut listener = TcpListener::bind(&mut ctx, address).unwrap(); + let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let addr = listener.local_addr().unwrap(); assert!(addr.port() != 0); Ok(()) From bd3c31e4376684261ae7f0ed777104f99e3b3d89 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 15:15:30 +0200 Subject: [PATCH 034/177] Reregister A10 ring with mio::Poll after event Otherwise we don't seem to always get more notifications from epoll. 
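
For reference, the re-registration pattern applied in both the coordinator and
the worker looks roughly like the following. This is a simplified sketch, not
the exact code: the helper name `poll_ring` and the `token` parameter are
illustrative, and the real call sites use their own token constants and error
handling.

    use std::io;
    use std::os::fd::{AsFd, AsRawFd};
    use std::time::Duration;

    use mio::unix::SourceFd;
    use mio::{Interest, Registry, Token};

    /// Process any completed io_uring operations without blocking, then
    /// re-register the ring's fd so epoll reports it as readable again once
    /// new completions arrive.
    fn poll_ring(ring: &mut a10::Ring, registry: &Registry, token: Token) -> io::Result<()> {
        ring.poll(Some(Duration::ZERO))?;
        registry.reregister(
            &mut SourceFd(&ring.as_fd().as_raw_fd()),
            token,
            Interest::READABLE,
        )
    }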
--- rt/src/coordinator.rs | 15 ++++++++++++++- rt/src/worker.rs | 24 ++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index 5f2fd0c70..8d5521348 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -19,7 +19,7 @@ use std::env::consts::ARCH; use std::os::fd::{AsFd, AsRawFd}; use std::os::unix::process::parent_id; use std::sync::Arc; -use std::time::Instant; +use std::time::{Duration, Instant}; use std::{fmt, io, process}; use heph::actor_ref::{ActorGroup, Delivery}; @@ -152,6 +152,19 @@ impl Coordinator { self.log_metrics(&workers, &sync_workers, &signal_refs, &mut trace_log); } } + RING => { + self.ring + .poll(Some(Duration::ZERO)) + .map_err(|err| rt::Error::coordinator(Error::Polling(err)))?; + self.poll + .registry() + .reregister( + &mut SourceFd(&self.ring.as_fd().as_raw_fd()), + RING, + Interest::READABLE, + ) + .map_err(|err| rt::Error::coordinator(Error::Polling(err)))?; + } token if token.0 < SYNC_WORKER_ID_START => { let timing = trace::start(&trace_log); handle_worker_event(&mut workers, event)?; diff --git a/rt/src/worker.rs b/rt/src/worker.rs index cac509e40..b5e16a700 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -233,7 +233,7 @@ impl Worker { poll.registry() .register( &mut SourceFd(&shared_ring_fd), - SHARED_POLL, + SHARED_RING, Interest::READABLE, ) .map_err(Error::Init)?; @@ -498,6 +498,16 @@ impl Worker { .borrow_mut() .poll(Some(Duration::ZERO)) .map_err(Error::Polling)?; + self.internals + .poll + .borrow() + .registry() + .reregister( + &mut SourceFd(&self.internals.ring.borrow().as_fd().as_raw_fd()), + RING, + Interest::READABLE, + ) + .map_err(Error::Polling)?; } if check_shared_ring { @@ -505,6 +515,16 @@ impl Worker { .shared .try_poll_ring() .map_err(Error::Polling)?; + self.internals + .poll + .borrow() + .registry() + .reregister( + &mut SourceFd(&self.internals.shared.ring_fd()), + SHARED_RING, + Interest::READABLE, + ) + .map_err(Error::Polling)?; } trace::finish_rt( @@ -694,7 +714,7 @@ impl Worker { ); match res { Ok(()) => Ok(()), - // The I/O uring will interrupt us. + // The io_uring will interrupt us. Err(ref err) if err.kind() == io::ErrorKind::Interrupted => Ok(()), Err(err) => Err(err), } From c4c54613b220e34e4f4af4e6e9226d762d11c8ad Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 16:37:54 +0200 Subject: [PATCH 035/177] Set CPU affinity in UnboundTcpStream --- rt/src/net/tcp/listener.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index f72b04aca..a35ca9e9c 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -277,12 +277,20 @@ impl UnboundTcpStream { where RT: rt::Access, { - ctx.runtime() + let mut stream = ctx + .runtime() .register( &mut self.stream.socket, Interest::READABLE | Interest::WRITABLE, ) - .map(|()| self.stream) + .map(|()| self.stream)?; + #[cfg(target_os = "linux")] + if let Some(cpu) = ctx.runtime_ref().cpu() { + if let Err(err) = stream.set_cpu_affinity(cpu) { + log::warn!("failed to set CPU affinity on TcpStream: {err}"); + } + } + Ok(stream) } fn from_async_fd(fd: AsyncFd) -> UnboundTcpStream { From d13ce5f9fe9ae903ce87065d4bb965eff607a4fa Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 20:49:07 +0200 Subject: [PATCH 036/177] Rewrite TcpServer as async function And using io_uring's multishot accept. 
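
With this change there is no longer a dedicated `TcpServer` type; instead
`tcp::server::setup` returns a `NewActor` implementation that is spawned like
any other actor and that spawns a new actor per accepted connection. A rough
sketch of the intended use, mirroring the updated examples below (supervisors
and error handling elided):

    // `conn_actor` now takes the accepted `TcpStream` directly; the peer
    // address is available through `TcpStream::peer_addr`, as the multishot
    // accept operation does not report it.
    let actor = conn_actor as fn(_, _) -> _;
    let address = "127.0.0.1:7890".parse().unwrap();
    let server = tcp::server::setup(address, conn_supervisor, actor, ActorOptions::default())?;
    let options = ActorOptions::default().with_priority(Priority::LOW);
    let server_ref = runtime_ref.spawn_local(ServerSupervisor, server, (), options);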
--- rt/examples/2_my_ip.rs | 49 +- rt/examples/9_systemd.rs | 17 +- rt/examples/redis.rs | 27 +- rt/src/lib.rs | 13 +- rt/src/net/mod.rs | 4 +- rt/src/net/tcp/listener.rs | 24 +- rt/src/net/tcp/mod.rs | 5 +- rt/src/net/tcp/server.rs | 885 ++++++++++++++---------------- rt/tests/functional/pipe.rs | 3 +- rt/tests/functional/runtime.rs | 30 +- rt/tests/functional/tcp/server.rs | 51 +- rt/tests/regression/issue_145.rs | 30 +- rt/tests/util/mod.rs | 34 +- 13 files changed, 563 insertions(+), 609 deletions(-) diff --git a/rt/examples/2_my_ip.rs b/rt/examples/2_my_ip.rs index bc97b3337..a49af52e6 100644 --- a/rt/examples/2_my_ip.rs +++ b/rt/examples/2_my_ip.rs @@ -1,11 +1,10 @@ #![feature(never_type)] use std::io; -use std::net::SocketAddr; use heph::actor::{self, Actor, NewActor}; use heph::supervisor::{Supervisor, SupervisorStrategy}; -use heph_rt::net::{tcp, TcpServer, TcpStream}; +use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::options::{ActorOptions, Priority}; use heph_rt::{self as rt, Runtime, ThreadLocal}; use log::{error, info}; @@ -23,9 +22,9 @@ fn main() -> Result<(), rt::Error> { // done by `conn_supervisor` in this example. And as each actor will need to // be added to the runtime it needs the `ActorOptions` to do that, we'll use // the defaults options here. - let actor = conn_actor as fn(_, _, _) -> _; + let actor = conn_actor as fn(_, _) -> _; let address = "127.0.0.1:7890".parse().unwrap(); - let server = TcpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) + let server = tcp::server::setup(address, conn_supervisor, actor, ActorOptions::default()) .map_err(rt::Error::setup)?; // Just like in examples 1 and 2 we'll create our runtime and run our setup @@ -33,14 +32,14 @@ fn main() -> Result<(), rt::Error> { // CPU core using `use_all_cores`. let mut runtime = Runtime::setup().use_all_cores().build()?; runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { - // As the TCP listener is just another actor we need to spawn it - // like any other actor. And again actors needs supervision, thus we - // provide `ServerSupervisor` as supervisor. + // As the TCP server is just another actor we need to spawn it like any + // other actor. And again actors needs supervision, thus we provide + // `ServerSupervisor` as supervisor. // We'll give our server a low priority to prioritise handling of - // ongoing requests over accepting new requests possibly overloading - // the system. + // ongoing requests over accepting new requests possibly overloading the + // system. let options = ActorOptions::default().with_priority(Priority::LOW); - let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; + let server_ref = runtime_ref.spawn_local(ServerSupervisor, server, (), options); // The server can handle the interrupt, terminate and quit signals, // so it will perform a clean shutdown for us. @@ -57,33 +56,28 @@ struct ServerSupervisor; impl Supervisor for ServerSupervisor where - NA: NewActor, + NA: NewActor, NA::Actor: Actor>, { fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { - use tcp::server::Error::*; match err { // When we hit an error accepting a connection we'll drop the old // listener and create a new one. - Accept(err) => { + tcp::server::Error::Accept(err) => { error!("error accepting new connection: {err}"); SupervisorStrategy::Restart(()) } // Async function never return an error creating a new actor. 
- NewActor(_) => unreachable!(), + tcp::server::Error::NewActor(err) => err, } } - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - // If we can't create a new listener we'll stop. - error!("error restarting the TCP server: {err}"); - SupervisorStrategy::Stop + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { + err } - fn second_restart_error(&mut self, err: io::Error) { - // This shouldn't ever be called as we don't restart the actor a second - // time (see `decide_on_restart_error`), but just in case. - error!("error restarting the actor a second time: {err}"); + fn second_restart_error(&mut self, err: !) { + err } } @@ -91,7 +85,7 @@ where /// /// Since we can't create a new TCP connection all this supervisor does is log /// the error and signal to stop the actor. -fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { +fn conn_supervisor(err: io::Error) -> SupervisorStrategy { error!("error handling connection: {err}"); SupervisorStrategy::Stop } @@ -100,12 +94,9 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr) /// /// This actor will not receive any message and thus uses `!` (the never type) /// as message type. -async fn conn_actor( - _: actor::Context, - mut stream: TcpStream, - address: SocketAddr, -) -> io::Result<()> { - info!("accepted connection: address={address}"); +async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { + let address = stream.peer_addr()?; + info!(address = log::as_display!(address); "accepted connection"); // This will allocate a new string which isn't the most efficient way to do // this, but it's the easiest so we'll keep this for sake of example. diff --git a/rt/examples/9_systemd.rs b/rt/examples/9_systemd.rs index 6ea8e2ebf..f11a3a090 100644 --- a/rt/examples/9_systemd.rs +++ b/rt/examples/9_systemd.rs @@ -1,11 +1,11 @@ #![feature(never_type)] -use std::net::{Ipv4Addr, SocketAddr}; +use std::net::Ipv4Addr; use std::{env, io}; use heph::supervisor::StopSupervisor; use heph::{actor, restart_supervisor}; -use heph_rt::net::{TcpServer, TcpStream}; +use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::options::{ActorOptions, Priority}; use heph_rt::{self as rt, Runtime, ThreadLocal}; use log::info; @@ -20,8 +20,8 @@ fn main() -> Result<(), rt::Error> { }; let address = (Ipv4Addr::LOCALHOST, port).into(); let supervisor = StopSupervisor::for_actor("connection actor"); - let actor = conn_actor as fn(_, _, _) -> _; - let server = TcpServer::setup(address, supervisor, actor, ActorOptions::default()) + let actor = conn_actor as fn(_, _) -> _; + let server = tcp::server::setup(address, supervisor, actor, ActorOptions::default()) .map_err(rt::Error::setup)?; let mut runtime = Runtime::setup() @@ -43,7 +43,7 @@ fn main() -> Result<(), rt::Error> { runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { let supervisor = ServerSupervisor::new(); let options = ActorOptions::default().with_priority(Priority::LOW); - let server_ref = runtime_ref.try_spawn_local(supervisor, server, (), options)?; + let server_ref = runtime_ref.spawn_local(supervisor, server, (), options); runtime_ref.receive_signals(server_ref.try_map()); Ok(()) })?; @@ -54,11 +54,8 @@ fn main() -> Result<(), rt::Error> { restart_supervisor!(ServerSupervisor, "TCP server actor", ()); -async fn conn_actor( - _: actor::Context, - mut stream: TcpStream, - address: SocketAddr, -) -> io::Result<()> { +async fn conn_actor(_: actor::Context, mut 
stream: TcpStream) -> io::Result<()> { + let address = stream.peer_addr()?; info!("accepted connection: address={address}"); let ip = address.ip().to_string(); stream.send_all(ip.as_bytes()).await diff --git a/rt/examples/redis.rs b/rt/examples/redis.rs index 427788775..45cc61937 100644 --- a/rt/examples/redis.rs +++ b/rt/examples/redis.rs @@ -7,13 +7,12 @@ use std::collections::HashMap; use std::io::{self, IoSlice, Write}; -use std::net::SocketAddr; use std::sync::{Arc, RwLock}; use std::time::Duration; use heph::actor::{self, Actor, NewActor}; use heph::supervisor::{Supervisor, SupervisorStrategy}; -use heph_rt::net::{tcp, TcpServer, TcpStream}; +use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::options::{ActorOptions, Priority}; use heph_rt::timer::Deadline; use heph_rt::{self as rt, Runtime}; @@ -42,16 +41,15 @@ fn main() -> Result<(), rt::Error> { std_logger::Config::logfmt().init(); let values = Arc::new(RwLock::new(HashMap::new())); - let actor = (conn_actor as fn(_, _, _, _) -> _) - .map_arg(move |(stream, address)| (stream, address, values.clone())); + let actor = (conn_actor as fn(_, _, _) -> _).map_arg(move |stream| (stream, values.clone())); let address = "127.0.0.1:6379".parse().unwrap(); - let server = TcpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) + let server = tcp::server::setup(address, conn_supervisor, actor, ActorOptions::default()) .map_err(rt::Error::setup)?; let mut runtime = Runtime::setup().use_all_cores().build()?; runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { let options = ActorOptions::default().with_priority(Priority::LOW); - let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; + let server_ref = runtime_ref.spawn_local(ServerSupervisor, server, (), options); runtime_ref.receive_signals(server_ref.try_map()); Ok(()) @@ -65,7 +63,7 @@ struct ServerSupervisor; impl Supervisor for ServerSupervisor where - NA: NewActor, + NA: NewActor, NA::Actor: Actor>, { fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { @@ -79,17 +77,16 @@ where } } - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - error!("error restarting the TCP server: {err}"); - SupervisorStrategy::Stop + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { + err } - fn second_restart_error(&mut self, err: io::Error) { - error!("error restarting the actor a second time: {err}"); + fn second_restart_error(&mut self, err: !) 
{ + err } } -fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { +fn conn_supervisor(err: io::Error) -> SupervisorStrategy { error!("error handling connection: {err}"); SupervisorStrategy::Stop } @@ -97,13 +94,13 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr) async fn conn_actor( mut ctx: actor::Context, mut stream: TcpStream, - address: SocketAddr, values: Arc, Arc<[u8]>>>>, ) -> io::Result<()> where RT: rt::Access + Clone, { - info!("accepted connection: address={address}"); + let address = stream.peer_addr()?; + info!(address = log::as_display!(address); "accepted connection"); let mut buffer = Vec::with_capacity(1024); let err = loop { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index e36ae74a7..963f59fcc 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -154,7 +154,8 @@ maybe_uninit_uninit_array, never_type, new_uninit, - stmt_expr_attributes + stmt_expr_attributes, + type_alias_impl_trait )] #![warn( anonymous_parameters, @@ -255,7 +256,6 @@ pub mod trace; pub mod util; mod worker; -use access::PrivateAccess; use process::ProcessId; #[doc(no_inline)] @@ -432,12 +432,9 @@ impl Runtime { /// Run the function `f` on all worker threads. /// - /// This can be used to spawn thread-local actors, e.g. [`TcpServer`], or to - /// initialise thread-local data on each worker thread ensuring that it's - /// properly initialised without impacting the performance of the first - /// request(s). - /// - /// [`TcpServer`]: crate::net::TcpServer + /// This can be used to spawn thread-local actors, or to initialise + /// thread-local data on each worker thread ensuring that it's properly + /// initialised without impacting the performance of the first request(s). pub fn run_on_workers(&mut self, f: F) -> Result<(), Error> where F: FnOnce(RuntimeRef) -> Result<(), E> + Send + Clone + 'static, diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index cfa989b1c..7235f22f4 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -12,7 +12,7 @@ //! [Transmission Control Protocol]: crate::net::tcp //! [TCP stream]: crate::net::TcpStream //! [TCP listening socket]: crate::net::TcpListener -//! [TCP server]: crate::net::TcpServer +//! [TCP server]: crate::net::tcp::server //! [User Datagram Protocol]: crate::net::udp //! //! 
# I/O with Heph's socket @@ -54,7 +54,7 @@ pub mod udp; pub mod uds; #[doc(no_inline)] -pub use tcp::{TcpListener, TcpServer, TcpStream}; +pub use tcp::{TcpListener, TcpStream}; #[doc(no_inline)] pub use udp::UdpSocket; #[doc(no_inline)] diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index a35ca9e9c..ad9206dd1 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -11,7 +11,7 @@ use std::{fmt, io}; use a10::AsyncFd; use heph::actor; use mio::Interest; -use socket2::{Domain, Protocol, SockRef, Type}; +use socket2::{Domain, Protocol, SockRef, Socket, Type}; use crate::net::{convert_address, SockAddr, TcpStream}; use crate::{self as rt}; @@ -176,6 +176,18 @@ impl TcpListener { pub async fn bind(rt: &RT, address: SocketAddr) -> io::Result where RT: rt::Access, + { + TcpListener::bind_setup(rt, address, |_| Ok(())).await + } + + pub(crate) async fn bind_setup( + rt: &RT, + address: SocketAddr, + setup: F, + ) -> io::Result + where + RT: rt::Access, + F: FnOnce(&Socket) -> io::Result<()>, { let fd = a10::net::socket( rt.submission_queue(), @@ -196,6 +208,7 @@ impl TcpListener { } } + setup(&socket)?; socket.bind(&address.into())?; socket.listen(1024)?; @@ -242,6 +255,11 @@ impl TcpListener { Incoming(self.fd.multishot_accept()) } + /// Temp function used by `TcpListener`. + pub(crate) fn incoming2(&mut self) -> a10::net::MultishotAccept<'_> { + self.fd.multishot_accept() + } + /// Get the value of the `SO_ERROR` option on this socket. /// /// This will retrieve the stored error in the underlying socket, clearing @@ -251,7 +269,7 @@ impl TcpListener { self.with_ref(|socket| socket.take_error()) } - fn with_ref(&self, f: F) -> io::Result + pub(crate) fn with_ref(&self, f: F) -> io::Result where F: FnOnce(SockRef<'_>) -> io::Result, { @@ -293,7 +311,7 @@ impl UnboundTcpStream { Ok(stream) } - fn from_async_fd(fd: AsyncFd) -> UnboundTcpStream { + pub(crate) fn from_async_fd(fd: AsyncFd) -> UnboundTcpStream { UnboundTcpStream { stream: TcpStream { // SAFETY: the put `fd` in a `ManuallyDrop` to ensure we don't diff --git a/rt/src/net/tcp/mod.rs b/rt/src/net/tcp/mod.rs index 39006c460..e0fe5220c 100644 --- a/rt/src/net/tcp/mod.rs +++ b/rt/src/net/tcp/mod.rs @@ -4,9 +4,10 @@ //! //! * [`TcpListener`] listens for incoming connections. //! * [`TcpStream`] represents a single TCP connection. -//! * [`TcpServer`] is an [`Actor`] that listens for incoming connections and +//! * [TCP server] is an [`Actor`] that listens for incoming connections and //! starts a new actor for each. //! +//! [TCP server]: crate::net::tcp::server //! [`Actor`]: heph::actor::Actor pub mod listener; @@ -16,6 +17,4 @@ pub mod stream; #[doc(no_inline)] pub use listener::TcpListener; #[doc(no_inline)] -pub use server::TcpServer; -#[doc(no_inline)] pub use stream::TcpStream; diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index f36364eb4..728bbcbdd 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -1,31 +1,373 @@ -//! Module with [`TcpServer`] and related types. +//! TCP server actor. +//! +//! The TCP server is an actor that starts a new actor for each accepted TCP +//! connection. This actor can start as a thread-local or thread-safe actor. +//! When using the thread-local variant one actor runs per worker thread which +//! spawns thread-local actors to handle the [`TcpStream`]s. See the first +//! example below on how to run this actor as a thread-local actor. +//! +//! This actor can also run as thread-safe actor in which case it also spawns +//! 
thread-safe actors. Note however that using thread-*local* version is +//! recommended. The third example below shows how to run the actor as +//! thread-safe actor. +//! +//! # Graceful shutdown +//! +//! Graceful shutdown is done by sending it a [`Terminate`] message, see below +//! for an example. The TCP server can also handle (shutdown) process signals, +//! see "Example 2 my ip" (in the examples directory of the source code) for an +//! example of that. +//! +//! # Examples +//! +//! The following example is a TCP server that writes "Hello World" to the +//! connection, using the server as a thread-local actor. +//! +//! ``` +//! #![feature(never_type)] +//! +//! use std::io; +//! +//! # use heph::messages::Terminate; +//! use heph::actor::{self, NewActor}; +//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph_rt::net::{tcp, TcpStream}; +//! use heph_rt::spawn::ActorOptions; +//! use heph_rt::spawn::options::Priority; +//! use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; +//! use log::error; +//! +//! fn main() -> Result<(), rt::Error> { +//! // Create and start the Heph runtime. +//! let mut runtime = Runtime::new()?; +//! runtime.run_on_workers(setup)?; +//! runtime.start() +//! } +//! +//! /// In this setup function we'll spawn the TCP server. +//! fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { +//! // The address to listen on. +//! let address = "127.0.0.1:7890".parse().unwrap(); +//! // Create our TCP server. +//! let new_actor = conn_actor as fn(_, _) -> _; +//! // Wait for the `TcpStream` to become ready before running the actor. +//! let options = ActorOptions::default().mark_ready(false); +//! let server = tcp::server::setup(address, conn_supervisor, new_actor, options)?; +//! +//! // We advice to give the TCP server a low priority to prioritise +//! // handling of ongoing requests over accepting new requests possibly +//! // overloading the system. +//! let options = ActorOptions::default().with_priority(Priority::LOW); +//! # let actor_ref = +//! runtime_ref.spawn_local(ServerSupervisor, server, (), options); +//! # actor_ref.try_send(Terminate).unwrap(); +//! +//! Ok(()) +//! } +//! +//! /// Our supervisor for the TCP server. +//! #[derive(Copy, Clone, Debug)] +//! struct ServerSupervisor; +//! +//! impl Supervisor> for ServerSupervisor +//! where +//! // Trait bounds needed by `tcp::server::setup`. +//! S: Supervisor + Clone + 'static, +//! NA: NewActor + Clone + 'static, +//! { +//! fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { +//! match err { +//! // When we hit an error accepting a connection we'll drop the old +//! // server and create a new one. +//! tcp::server::Error::Accept(err) => { +//! error!("error accepting new connection: {err}"); +//! SupervisorStrategy::Restart(()) +//! } +//! // Async function never return an error creating a new actor. +//! tcp::server::Error::NewActor(_) => unreachable!(), +//! } +//! } +//! +//! fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { +//! err +//! } +//! +//! fn second_restart_error(&mut self, err: !) { +//! err +//! } +//! } +//! +//! /// `conn_actor`'s supervisor. +//! fn conn_supervisor(err: io::Error) -> SupervisorStrategy { +//! error!("error handling connection: {err}"); +//! SupervisorStrategy::Stop +//! } +//! +//! /// The actor responsible for a single TCP stream. +//! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! stream.send_all(b"Hello World").await +//! } +//! ``` +//! +//! 
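To exercise the server above, a client only needs the connect and receive calls introduced earlier in this series. A rough sketch, with the `actor::Context<(), ThreadLocal>` generics assumed rather than taken from the diff:

```
use std::io;
use std::net::SocketAddr;

use heph::actor;
use heph_rt::net::TcpStream;
use heph_rt::ThreadLocal;

/// Connect to the "Hello World" server and check the greeting. `recv_n` takes
/// the buffer by value and returns it once at least `n` bytes were received.
async fn client(ctx: actor::Context<(), ThreadLocal>, address: SocketAddr) -> io::Result<()> {
    let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?;
    let buf = stream.recv_n(Vec::with_capacity(64), 11).await?;
    assert_eq!(buf, b"Hello World");
    Ok(())
}
```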
The following example shows how the actor can gracefully be shutdown by +//! sending it a [`Terminate`] message. +//! +//! ``` +//! #![feature(never_type)] +//! +//! use std::io; +//! +//! use heph::messages::Terminate; +//! use heph::actor::{self, NewActor}; +//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph_rt::net::{tcp, TcpStream}; +//! use heph_rt::spawn::options::{ActorOptions, Priority}; +//! use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; +//! use log::error; +//! +//! fn main() -> Result<(), rt::Error> { +//! let mut runtime = Runtime::new()?; +//! runtime.run_on_workers(setup)?; +//! runtime.start() +//! } +//! +//! fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { +//! // This uses the same supervisors as in the previous example, not shown here. +//! +//! // Adding the TCP server is the same as in the example above. +//! let new_actor = conn_actor as fn(_, _) -> _; +//! let address = "127.0.0.1:7890".parse().unwrap(); +//! let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; +//! let options = ActorOptions::default().with_priority(Priority::LOW); +//! let server_ref = runtime_ref.spawn_local(ServerSupervisor, server, (), options); +//! +//! // Because the server is just another actor we can send it messages. Here +//! // we'll send it a terminate message so it will gracefully shutdown. +//! server_ref.try_send(Terminate).unwrap(); +//! +//! Ok(()) +//! } +//! +//! # /// # Our supervisor for the TCP server. +//! # #[derive(Copy, Clone, Debug)] +//! # struct ServerSupervisor; +//! # +//! # impl Supervisor> for ServerSupervisor +//! # where +//! # S: Supervisor + Clone + 'static, +//! # NA: NewActor + Clone + 'static, +//! # { +//! # fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { +//! # match err { +//! # tcp::server::Error::Accept(err) => { +//! # error!("error accepting new connection: {err}"); +//! # SupervisorStrategy::Restart(()) +//! # } +//! # tcp::server::Error::NewActor(_) => unreachable!(), +//! # } +//! # } +//! # +//! # fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { +//! # err +//! # } +//! # +//! # fn second_restart_error(&mut self, err: !) { +//! # err +//! # } +//! # } +//! # +//! # /// # `conn_actor`'s supervisor. +//! # fn conn_supervisor(err: io::Error) -> SupervisorStrategy { +//! # error!("error handling connection: {err}"); +//! # SupervisorStrategy::Stop +//! # } +//! # +//! /// The actor responsible for a single TCP stream. +//! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! stream.send_all(b"Hello World").await +//! } +//! ``` +//! +//! This example is similar to the first example, but runs the TCP server actor +//! as thread-safe actor. *It's recommended to run the server as thread-local +//! actor!* This is just an example show its possible. +//! +//! ``` +//! #![feature(never_type)] +//! +//! use std::io; +//! +//! use heph::actor::{self, NewActor}; +//! # use heph::messages::Terminate; +//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph_rt::net::{tcp, TcpStream}; +//! use heph_rt::spawn::options::{ActorOptions, Priority}; +//! use heph_rt::{self as rt, Runtime, ThreadSafe}; +//! use log::error; +//! +//! fn main() -> Result<(), rt::Error> { +//! let mut runtime = Runtime::new()?; +//! +//! // The address to listen on. +//! let address = "127.0.0.1:7890".parse().unwrap(); +//! // Create our TCP server. We'll use the default actor options. +//! 
let new_actor = conn_actor as fn(_, _) -> _; +//! let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default()) +//! .map_err(rt::Error::setup)?; +//! +//! let options = ActorOptions::default().with_priority(Priority::LOW); +//! # let actor_ref = +//! runtime.try_spawn(ServerSupervisor, server, (), options) +//! .map_err(rt::Error::setup)?; +//! # actor_ref.try_send(Terminate).unwrap(); +//! +//! runtime.start() +//! } +//! +//! /// Our supervisor for the TCP server. +//! #[derive(Copy, Clone, Debug)] +//! struct ServerSupervisor; +//! +//! impl Supervisor> for ServerSupervisor +//! where +//! // Trait bounds needed by `tcp::server::setup` using a thread-safe actor. +//! S: Supervisor + Send + Sync + Clone + 'static, +//! NA: NewActor + Send + Sync + Clone + 'static, +//! NA::Actor: Send + Sync + 'static, +//! NA::Message: Send, +//! { +//! fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { +//! match err { +//! // When we hit an error accepting a connection we'll drop the old +//! // server and create a new one. +//! tcp::server::Error::Accept(err) => { +//! error!("error accepting new connection: {err}"); +//! SupervisorStrategy::Restart(()) +//! } +//! // Async function never return an error creating a new actor. +//! tcp::server::Error::NewActor(_) => unreachable!(), +//! } +//! } +//! +//! fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { +//! err +//! } +//! +//! fn second_restart_error(&mut self, err: !) { +//! err +//! } +//! } +//! +//! /// `conn_actor`'s supervisor. +//! fn conn_supervisor(err: io::Error) -> SupervisorStrategy { +//! error!("error handling connection: {err}"); +//! SupervisorStrategy::Stop +//! } +//! +//! /// The actor responsible for a single TCP stream. +//! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! stream.send_all(b"Hello World").await +//! } use std::convert::TryFrom; +use std::future::Future; use std::net::SocketAddr; -use std::os::unix::io::{FromRawFd, IntoRawFd}; -use std::pin::Pin; use std::sync::Arc; -use std::task::{self, Poll}; use std::{fmt, io}; -use heph::actor::{self, Actor, NewActor}; +use heph::actor::{self, NewActor, NoMessages}; use heph::messages::Terminate; use heph::supervisor::Supervisor; -#[cfg(target_os = "linux")] -use log::warn; -use log::{as_display, debug}; -use mio::net::TcpListener; -use mio::Interest; +use log::{debug, trace}; use socket2::{Domain, Protocol, Socket, Type}; -use crate::net::TcpStream; +use crate::net::tcp::listener::UnboundTcpStream; +use crate::net::{TcpListener, TcpStream}; use crate::spawn::{ActorOptions, AddActorError, PrivateSpawn, Spawn}; -use crate::{self as rt, PrivateAccess, Signal}; +use crate::util::{either, next}; +use crate::{self as rt, Signal}; -/// A intermediate structure that implements [`NewActor`], creating -/// [`TcpServer`]. +/// Create a new [server setup]. /// -/// See [`TcpServer::setup`] to create this and [`TcpServer`] for examples. +/// Arguments: +/// * `address`: the address to listen on. +/// * `supervisor`: the [`Supervisor`] used to supervise each started actor, +/// * `new_actor`: the [`NewActor`] implementation to start each actor, and +/// * `options`: the actor options used to spawn the new actors. +/// +/// See the [module documentation] for examples. 
+/// +/// [server setup]: Setup +/// [module documentation]: crate::net::tcp::server +pub fn setup( + mut address: SocketAddr, + supervisor: S, + new_actor: NA, + options: ActorOptions, +) -> io::Result> +where + S: Supervisor + Clone + 'static, + NA: NewActor + Clone + 'static, +{ + // We create a listener which don't actually use. However it gives a + // nicer user-experience to get an error up-front rather than $n errors + // later, where $n is the number of cpu cores when spawning a new server + // on each worker thread. + bind_listener(address).and_then(|socket| { + // Using a port of 0 means the OS can select one for us. However + // we still consistently want to use the same port instead of + // binding to a number of random ports. + if address.port() == 0 { + // NOTE: we just created the socket above so we know it's either + // IPv4 or IPv6, meaning this `unwrap` never fails. + address = socket.local_addr()?.as_socket().unwrap(); + } + + Ok(Setup { + inner: Arc::new(SetupInner { + _socket: socket, + address, + supervisor, + new_actor, + options, + }), + }) + }) +} + +/// Create a new TCP listener bound to `address`, but **not** listening using +/// blocking I/O. +fn bind_listener(address: SocketAddr) -> io::Result { + let socket = Socket::new( + Domain::for_address(address), + Type::STREAM, + Some(Protocol::TCP), + )?; + + set_listener_options(&socket)?; + + // Bind the socket and start listening if required. + socket.bind(&address.into())?; + + Ok(socket) +} + +/// Set the desired socket options on `socket`. +fn set_listener_options(socket: &Socket) -> io::Result<()> { + // Allow the other worker threads and processes to reuse the address and + // port we're binding to. This allow reload the process without dropping + // clients. + socket.set_reuse_address(true)?; + socket.set_reuse_port(true)?; + Ok(()) +} + +/// A intermediate structure that implements [`NewActor`], creating an actor +/// that spawn a new actor for each incoming TCP connection. +/// +/// See [`setup`] to create this and the [module documentation] for examples. +/// +/// [module documentation]: crate::net::tcp::server #[derive(Debug)] pub struct Setup { /// All fields are in an `Arc` to allow `Setup` to cheaply be cloned and @@ -58,62 +400,31 @@ impl Setup { impl NewActor for Setup where S: Supervisor + Clone + 'static, - NA: NewActor + Clone + 'static, + NA: NewActor + Clone + 'static, NA::RuntimeAccess: rt::Access + Spawn, { type Message = Message; type Argument = (); - type Actor = TcpServer; - type Error = io::Error; + type Actor = impl Future>>; + type Error = !; type RuntimeAccess = NA::RuntimeAccess; fn new( &mut self, - mut ctx: actor::Context, + ctx: actor::Context, _: Self::Argument, ) -> Result { let this = &*self.inner; - let socket = new_listener(this.address, 1024)?; - let mut listener = unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) }; - ctx.runtime().register(&mut listener, Interest::READABLE)?; - Ok(TcpServer { + Ok(tcp_server( ctx, - set_waker: false, - listener, - supervisor: this.supervisor.clone(), - new_actor: this.new_actor.clone(), - options: this.options.clone(), - }) + this.address, + this.supervisor.clone(), + this.new_actor.clone(), + this.options.clone(), + )) } } -fn new_listener(address: SocketAddr, backlog: libc::c_int) -> io::Result { - // Create a new non-blocking socket. 
- let domain = Domain::for_address(address); - let ty = Type::STREAM; - #[cfg(any(target_os = "freebsd", target_os = "linux"))] - let ty = ty.nonblocking(); - let protocol = Protocol::TCP; - let socket = Socket::new(domain, ty, Some(protocol))?; - // For OSs that don't support `SOCK_NONBLOCK`. - #[cfg(not(any(target_os = "freebsd", target_os = "linux")))] - socket.set_nonblocking(true)?; - - // Allow the other worker threads and processes to reuse the address and - // port we're binding to. This allow reload the process without dropping - // clients. - socket.set_reuse_address(true)?; - socket.set_reuse_port(true)?; // TODO: use `SO_REUSEPORT_LB` on FreeBSD. - - // Bind the socket and start listening if required. - socket.bind(&address.into())?; - if backlog != 0 { - socket.listen(backlog)?; - } - - Ok(socket) -} - impl Clone for Setup { fn clone(&self) -> Setup { Setup { @@ -122,438 +433,58 @@ impl Clone for Setup { } } -/// An actor that starts a new actor for each accepted TCP connection. -/// -/// This actor can start as a thread-local or thread-safe actor. When using the -/// thread-local variant one actor runs per worker thread which spawns -/// thread-local actors to handle the [`TcpStream`]s. See the first example -/// below on how to run this `TcpServer` as a thread-local actor. -/// -/// This actor can also run as thread-safe actor in which case it also spawns -/// thread-safe actors. Note however that using thread-*local* version is -/// recommended. The third example below shows how to run the `TcpServer` as -/// thread-safe actor. -/// -/// # Graceful shutdown -/// -/// Graceful shutdown is done by sending it a [`Terminate`] message, see below -/// for an example. The TCP server can also handle (shutdown) process signals, -/// see "Example 2 my ip" (in the examples directory of the source code) for an -/// example of that. -/// -/// # Examples -/// -/// The following example is a TCP server that writes "Hello World" to the -/// connection, using the server as a thread-local actor. -/// -/// ``` -/// #![feature(never_type)] -/// -/// use std::io; -/// use std::net::SocketAddr; -/// -/// # use heph::messages::Terminate; -/// use heph::actor::{self, NewActor}; -/// use heph::supervisor::{Supervisor, SupervisorStrategy}; -/// use heph_rt::net::tcp::{server, TcpServer, TcpStream}; -/// use heph_rt::spawn::ActorOptions; -/// use heph_rt::spawn::options::Priority; -/// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; -/// use log::error; -/// -/// fn main() -> Result<(), rt::Error> { -/// // Create and start the Heph runtime. -/// let mut runtime = Runtime::new()?; -/// runtime.run_on_workers(setup)?; -/// runtime.start() -/// } -/// -/// /// In this setup function we'll spawn the TCP server. -/// fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { -/// // The address to listen on. -/// let address = "127.0.0.1:7890".parse().unwrap(); -/// // Create our TCP server. -/// let new_actor = conn_actor as fn(_, _, _) -> _; -/// // Wait for the `TcpStream` to become ready before running the actor. -/// let options = ActorOptions::default().mark_ready(false); -/// let server = TcpServer::setup(address, conn_supervisor, new_actor, options)?; -/// -/// // We advice to give the TCP server a low priority to prioritise -/// // handling of ongoing requests over accepting new requests possibly -/// // overloading the system. 
-/// let options = ActorOptions::default().with_priority(Priority::LOW); -/// # let actor_ref = -/// runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; -/// # actor_ref.try_send(Terminate).unwrap(); -/// -/// Ok(()) -/// } -/// -/// /// Our supervisor for the TCP server. -/// #[derive(Copy, Clone, Debug)] -/// struct ServerSupervisor; -/// -/// impl Supervisor> for ServerSupervisor -/// where -/// // Trait bounds needed by `server::Setup`. -/// S: Supervisor + Clone + 'static, -/// NA: NewActor + Clone + 'static, -/// { -/// fn decide(&mut self, err: server::Error) -> SupervisorStrategy<()> { -/// use server::Error::*; -/// match err { -/// // When we hit an error accepting a connection we'll drop the old -/// // server and create a new one. -/// Accept(err) => { -/// error!("error accepting new connection: {err}"); -/// SupervisorStrategy::Restart(()) -/// } -/// // Async function never return an error creating a new actor. -/// NewActor(_) => unreachable!(), -/// } -/// } -/// -/// fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { -/// // If we can't create a new server we'll stop. -/// error!("error restarting the TCP server: {err}"); -/// SupervisorStrategy::Stop -/// } -/// -/// fn second_restart_error(&mut self, _: io::Error) { -/// // We don't restart a second time, so this will never be called. -/// unreachable!(); -/// } -/// } -/// -/// /// `conn_actor`'s supervisor. -/// fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { -/// error!("error handling connection: {err}"); -/// SupervisorStrategy::Stop -/// } -/// -/// /// The actor responsible for a single TCP stream. -/// async fn conn_actor(_: actor::Context, mut stream: TcpStream, address: SocketAddr) -> io::Result<()> { -/// # drop(address); // Silence dead code warnings. -/// stream.send_all(b"Hello World").await -/// } -/// ``` -/// -/// The following example shows how the actor can gracefully be shutdown by -/// sending it a [`Terminate`] message. -/// -/// ``` -/// #![feature(never_type)] -/// -/// use std::io; -/// use std::net::SocketAddr; -/// -/// use heph::messages::Terminate; -/// use heph::actor::{self, NewActor}; -/// use heph::supervisor::{Supervisor, SupervisorStrategy}; -/// # use heph_rt::net::tcp; -/// use heph_rt::net::{TcpServer, TcpStream}; -/// use heph_rt::spawn::options::{ActorOptions, Priority}; -/// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; -/// use log::error; -/// -/// fn main() -> Result<(), rt::Error> { -/// let mut runtime = Runtime::new()?; -/// runtime.run_on_workers(setup)?; -/// runtime.start() -/// } -/// -/// fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { -/// // This uses the same supervisors as in the previous example, not shown here. -/// -/// // Adding the TCP server is the same as in the example above. -/// let new_actor = conn_actor as fn(_, _, _) -> _; -/// let address = "127.0.0.1:7890".parse().unwrap(); -/// let server = TcpServer::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; -/// let options = ActorOptions::default().with_priority(Priority::LOW); -/// let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; -/// -/// // Because the server is just another actor we can send it messages. Here -/// // we'll send it a terminate message so it will gracefully shutdown. -/// server_ref.try_send(Terminate).unwrap(); -/// -/// Ok(()) -/// } -/// -/// # /// # Our supervisor for the TCP server. 
-/// # #[derive(Copy, Clone, Debug)] -/// # struct ServerSupervisor; -/// # -/// # impl Supervisor> for ServerSupervisor -/// # where -/// # S: Supervisor + Clone + 'static, -/// # NA: NewActor + Clone + 'static, -/// # { -/// # fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { -/// # use tcp::server::Error::*; -/// # match err { -/// # Accept(err) => { -/// # error!("error accepting new connection: {err}"); -/// # SupervisorStrategy::Restart(()) -/// # } -/// # NewActor(_) => unreachable!(), -/// # } -/// # } -/// # -/// # fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { -/// # error!("error restarting the TCP server: {err}"); -/// # SupervisorStrategy::Stop -/// # } -/// # -/// # fn second_restart_error(&mut self, _: io::Error) { -/// # // We don't restart a second time, so this will never be called. -/// # unreachable!(); -/// # } -/// # } -/// # -/// # /// # `conn_actor`'s supervisor. -/// # fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { -/// # error!("error handling connection: {err}"); -/// # SupervisorStrategy::Stop -/// # } -/// # -/// /// The actor responsible for a single TCP stream. -/// async fn conn_actor(_: actor::Context, mut stream: TcpStream, address: SocketAddr) -> io::Result<()> { -/// # drop(address); // Silence dead code warnings. -/// stream.send_all(b"Hello World").await -/// } -/// ``` -/// -/// This example is similar to the first example, but runs the `TcpServer` actor -/// as thread-safe actor. *It's recommended to run the server as thread-local -/// actor!* This is just an example show its possible. -/// -/// ``` -/// #![feature(never_type)] -/// -/// use std::io; -/// use std::net::SocketAddr; -/// -/// use heph::actor::{self, NewActor}; -/// # use heph::messages::Terminate; -/// use heph::supervisor::{Supervisor, SupervisorStrategy}; -/// use heph_rt::net::tcp::{server, TcpServer, TcpStream}; -/// use heph_rt::spawn::options::{ActorOptions, Priority}; -/// use heph_rt::{self as rt, Runtime, ThreadSafe}; -/// use log::error; -/// -/// fn main() -> Result<(), rt::Error> { -/// let mut runtime = Runtime::new()?; -/// -/// // The address to listen on. -/// let address = "127.0.0.1:7890".parse().unwrap(); -/// // Create our TCP server. We'll use the default actor options. -/// let new_actor = conn_actor as fn(_, _, _) -> _; -/// let server = TcpServer::setup(address, conn_supervisor, new_actor, ActorOptions::default()) -/// .map_err(rt::Error::setup)?; -/// -/// let options = ActorOptions::default().with_priority(Priority::LOW); -/// # let actor_ref = -/// runtime.try_spawn(ServerSupervisor, server, (), options) -/// .map_err(rt::Error::setup)?; -/// # actor_ref.try_send(Terminate).unwrap(); -/// -/// runtime.start() -/// } -/// -/// /// Our supervisor for the TCP server. -/// #[derive(Copy, Clone, Debug)] -/// struct ServerSupervisor; -/// -/// impl Supervisor> for ServerSupervisor -/// where -/// // Trait bounds needed by `server::Setup` using a thread-safe actor. -/// S: Supervisor + Send + Sync + Clone + 'static, -/// NA: NewActor + Send + Sync + Clone + 'static, -/// NA::Actor: Send + Sync + 'static, -/// NA::Message: Send, -/// { -/// fn decide(&mut self, err: server::Error) -> SupervisorStrategy<()> { -/// use server::Error::*; -/// match err { -/// // When we hit an error accepting a connection we'll drop the old -/// // server and create a new one. 
-/// Accept(err) => { -/// error!("error accepting new connection: {err}"); -/// SupervisorStrategy::Restart(()) -/// } -/// // Async function never return an error creating a new actor. -/// NewActor(_) => unreachable!(), -/// } -/// } -/// -/// fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { -/// // If we can't create a new server we'll stop. -/// error!("error restarting the TCP server: {err}"); -/// SupervisorStrategy::Stop -/// } -/// -/// fn second_restart_error(&mut self, _: io::Error) { -/// // We don't restart a second time, so this will never be called. -/// unreachable!(); -/// } -/// } -/// -/// /// `conn_actor`'s supervisor. -/// fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { -/// error!("error handling connection: {err}"); -/// SupervisorStrategy::Stop -/// } -/// -/// /// The actor responsible for a single TCP stream. -/// async fn conn_actor(_: actor::Context, mut stream: TcpStream, address: SocketAddr) -> io::Result<()> { -/// # drop(address); // Silence dead code warnings. -/// stream.send_all(b"Hello World").await -/// } -#[derive(Debug)] -pub struct TcpServer { - /// Actor context in which this actor is running. - ctx: actor::Context, - /// Whether or not we set the waker for the inbox. - set_waker: bool, - /// The underlying TCP listener, backed by Mio. - listener: TcpListener, - /// Supervisor for all actors created by `NewActor`. +async fn tcp_server( + mut ctx: actor::Context, + local: SocketAddr, supervisor: S, - /// `NewActor` used to create an actor for each connection. new_actor: NA, - /// Options used to spawn the actor. options: ActorOptions, -} - -impl TcpServer +) -> Result<(), Error> where S: Supervisor + Clone + 'static, - NA: NewActor + Clone + 'static, -{ - /// Create a new [server setup]. - /// - /// Arguments: - /// * `address`: the address to listen on. - /// * `supervisor`: the [`Supervisor`] used to supervise each started actor, - /// * `new_actor`: the [`NewActor`] implementation to start each actor, - /// and - /// * `options`: the actor options used to spawn the new actors. - /// - /// [server setup]: Setup - pub fn setup( - mut address: SocketAddr, - supervisor: S, - new_actor: NA, - options: ActorOptions, - ) -> io::Result> { - // We create a listener which don't actually use. However it gives a - // nicer user-experience to get an error up-front rather than $n errors - // later, where $n is the number of cpu cores when spawning a new server - // on each worker thread. - // - // Also note that we use a backlog of `0`, which causes `new_listener` - // to never call `listen(2)` on the socket. - new_listener(address, 0).and_then(|socket| { - // Using a port of 0 means the OS can select one for us. However - // we still consistently want to use the same port instead of - // binding to a number of random ports. - if address.port() == 0 { - // NOTE: we just created the socket above so we know it's either - // IPv4 or IPv6, meaning this `unwrap` never fails. 
- address = socket.local_addr()?.as_socket().unwrap(); - } - - Ok(Setup { - inner: Arc::new(SetupInner { - _socket: socket, - address, - supervisor, - new_actor, - options, - }), - }) - }) - } -} - -impl Actor for TcpServer -where - S: Supervisor + Clone + 'static, - NA: NewActor + Clone + 'static, + NA: NewActor + Clone + 'static, NA::RuntimeAccess: rt::Access + Spawn, { - type Error = Error; - - fn try_poll( - self: Pin<&mut Self>, - ctx: &mut task::Context<'_>, - ) -> Poll> { - // Safety: This is safe because only the `actor::Context` and - // `set_waker` are mutably borrowed and both are `Unpin`. - let this = unsafe { Pin::into_inner_unchecked(self) }; - - if !this.set_waker { - // Set the waker of the inbox to ensure we get run when we receive a - // message. - this.ctx.register_inbox_waker(ctx.waker()); - this.set_waker = true; - } + let mut listener = TcpListener::bind_setup(ctx.runtime_ref(), local, set_listener_options) + .await + .map_err(Error::Accept)?; + trace!(address = log::as_display!(local); "TCP server listening"); - // See if we need to shutdown. - // - // We don't return immediately here because we're using `SO_REUSEPORT`, - // which on most OSes causes each listener (file descriptor) to have - // there own accept queue. This means that connections in *ours* would - // be dropped if we would close the file descriptor immediately. So we - // first accept all pending connections and start actors for them. Note - // however that there is still a race condition between our last call to - // `accept` and the time the file descriptor is actually closed, - // currently we can't avoid this. - let should_stop = this.ctx.try_receive_next().is_ok(); + let mut accept = listener.incoming2(); + let mut receive = ctx.receive_next(); + loop { + match either(next(&mut accept), &mut receive).await { + Ok(Some(Ok(fd))) => { + let stream = UnboundTcpStream::from_async_fd(fd); + trace!("TCP server accepted connection"); - loop { - let (mut stream, addr) = match this.listener.accept() { - Ok(ok) => ok, - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, // Try again. - Err(err) => return Poll::Ready(Err(Error::Accept(err))), - }; - debug!(remote_address = as_display!(addr); "TcpServer accepted connection"); - - let setup_actor = move |ctx: &mut actor::Context| { - ctx.runtime() - .register(&mut stream, Interest::READABLE | Interest::WRITABLE)?; - #[allow(unused_mut)] - let mut stream = TcpStream { socket: stream }; - #[cfg(target_os = "linux")] - if let Some(cpu) = ctx.runtime_ref().cpu() { - if let Err(err) = stream.set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on TcpStream: {err}"); - } - } - Ok((stream, addr)) - }; - let res = this.ctx.try_spawn_setup( - this.supervisor.clone(), - this.new_actor.clone(), - setup_actor, - this.options.clone(), - ); - if let Err(err) = res { - return Poll::Ready(Err(err.into())); + drop(receive); // Can't double borrow `ctx`. 
+ _ = ctx.try_spawn_setup( + supervisor.clone(), + new_actor.clone(), + |ctx| stream.bind_to(ctx), + options.clone(), + )?; + receive = ctx.receive_next(); + } + Ok(Some(Err(err))) => return Err(Error::Accept(err)), + Ok(None) => { + debug!("no more connections to accept in TCP server, stopping"); + return Ok(()); + } + Err(Ok(_)) => { + debug!("TCP server received shutdown message, stopping"); + return Ok(()); + } + Err(Err(NoMessages)) => { + debug!("All actor references to TCP server dropped, stopping"); + return Ok(()); } - } - - if should_stop { - debug!("TCP server received shutdown message, stopping"); - Poll::Ready(Ok(())) - } else { - Poll::Pending } } } -/// The message type used by [`TcpServer`]. +/// The message type used by TCP server actor. /// /// The message implements [`From`]`<`[`Terminate`]`>` and /// [`TryFrom`]`<`[`Signal`]`>` for the message, allowing for graceful shutdown. @@ -582,7 +513,7 @@ impl TryFrom for Message { } } -/// Error returned by the [`TcpServer`] actor. +/// Error returned by the TCP server actor. #[derive(Debug)] pub enum Error { /// Error accepting TCP stream. @@ -606,8 +537,8 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use Error::*; match self { - Accept(ref err) => write!(f, "error accepting TCP stream: {err}"), - NewActor(ref err) => write!(f, "error creating new actor: {err}"), + Accept(err) => write!(f, "error accepting TCP stream: {err}"), + NewActor(err) => write!(f, "error creating new actor: {err}"), } } } diff --git a/rt/tests/functional/pipe.rs b/rt/tests/functional/pipe.rs index 98d6b9417..db128169a 100644 --- a/rt/tests/functional/pipe.rs +++ b/rt/tests/functional/pipe.rs @@ -11,6 +11,7 @@ use heph_rt::{self as rt}; const DATA: &[u8] = b"Hello world"; const DATAV: &[&[u8]] = &[b"Hello world!", b" ", b"From mars."]; +const DATAV_LEN: usize = DATAV[0].len() + DATAV[1].len() + DATAV[2].len(); #[test] fn smoke() { @@ -154,7 +155,7 @@ fn vectored_io() { let bufs = [DATAV[0], DATAV[1], DATAV[2]]; let (_, n) = sender.write_vectored(bufs).await?; - assert_eq!(n, DATA.len()); + assert_eq!(n, DATAV_LEN); drop(sender); let bufs = [ diff --git a/rt/tests/functional/runtime.rs b/rt/tests/functional/runtime.rs index 23ec56fd7..576af2682 100644 --- a/rt/tests/functional/runtime.rs +++ b/rt/tests/functional/runtime.rs @@ -39,11 +39,12 @@ fn auto_cpu_affinity() { use heph::messages::Terminate; use heph::supervisor::{Supervisor, SupervisorStrategy}; use heph::{actor, ActorRef, NewActor}; - use heph_rt::net::tcp::server; - use heph_rt::net::{TcpServer, TcpStream}; + use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::ActorOptions; use heph_rt::{RuntimeRef, ThreadLocal}; + use crate::util::tcp_connect; + fn cpu_affinity(stream: &TcpStream) -> io::Result { // TODO: do this better. 
let socket = @@ -54,9 +55,9 @@ fn auto_cpu_affinity() { async fn stream_actor( mut ctx: actor::Context, address: SocketAddr, - server_ref: ActorRef, + server_ref: ActorRef, ) -> io::Result<()> { - let stream = TcpStream::connect(&mut ctx, address)?.await?; + let stream = tcp_connect(&mut ctx, address).await?; let cpu = cpu_affinity(&stream).unwrap(); assert_eq!(cpu, 0); @@ -68,7 +69,6 @@ fn auto_cpu_affinity() { async fn accepted_stream_actor( _: actor::Context, stream: TcpStream, - _: SocketAddr, ) -> io::Result<()> { let cpu = cpu_affinity(&stream)?; assert_eq!(cpu, 0); @@ -94,23 +94,23 @@ fn auto_cpu_affinity() { #[derive(Copy, Clone, Debug)] struct ServerSupervisor; - impl Supervisor> for ServerSupervisor + impl Supervisor> for ServerSupervisor where S: Supervisor + Clone + 'static, - NA: NewActor + NA: NewActor + Clone + 'static, { - fn decide(&mut self, err: server::Error) -> SupervisorStrategy<()> { + fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { panic!("unexpected error accept stream: {err}"); } - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - panic!("unexpected restarting server: {err}"); + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { + err } - fn second_restart_error(&mut self, err: io::Error) { - panic!("unexpected restarting server: {err}"); + fn second_restart_error(&mut self, err: !) { + err } } @@ -119,8 +119,8 @@ fn auto_cpu_affinity() { check_thread_affinity(0)?; let address = "127.0.0.1:0".parse().unwrap(); - let accepted_stream_actor = accepted_stream_actor as fn(_, TcpStream, SocketAddr) -> _; - let server = TcpServer::setup( + let accepted_stream_actor = accepted_stream_actor as fn(_, TcpStream) -> _; + let server = tcp::server::setup( address, |err: io::Error| panic!("unexpected error: {err}"), accepted_stream_actor, @@ -128,7 +128,7 @@ fn auto_cpu_affinity() { )?; let address = server.local_addr(); let server_ref = - runtime_ref.try_spawn_local(ServerSupervisor, server, (), ActorOptions::default())?; + runtime_ref.spawn_local(ServerSupervisor, server, (), ActorOptions::default()); let stream_actor = stream_actor as fn(_, _, _) -> _; let args = (address, server_ref); diff --git a/rt/tests/functional/tcp/server.rs b/rt/tests/functional/tcp/server.rs index c01b9ec3e..0468fc11b 100644 --- a/rt/tests/functional/tcp/server.rs +++ b/rt/tests/functional/tcp/server.rs @@ -9,28 +9,27 @@ use heph::actor::{self, Actor, NewActor}; use heph::messages::Terminate; use heph::supervisor::{NoSupervisor, Supervisor, SupervisorStrategy}; use heph::ActorRef; -use heph_rt::net::tcp::server; -use heph_rt::net::{TcpServer, TcpStream}; +use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::ActorOptions; use heph_rt::test::{join_many, try_spawn_local, PanicSupervisor}; use heph_rt::{self as rt, Runtime, Signal, ThreadLocal}; -use crate::util::any_local_address; +use crate::util::{any_local_address, tcp_connect}; #[test] fn message_from_terminate() { - let _msg = server::Message::from(Terminate); + let _msg = tcp::server::Message::from(Terminate); } #[test] fn message_from_process_signal() { let signals = &[Signal::Interrupt, Signal::Terminate, Signal::Quit]; for signal in signals { - assert!(server::Message::try_from(*signal).is_ok()); + assert!(tcp::server::Message::try_from(*signal).is_ok()); } } -async fn actor(_: actor::Context, mut stream: TcpStream, _: SocketAddr) +async fn actor(_: actor::Context, mut stream: TcpStream) where RT: rt::Access, { @@ -45,14 +44,11 @@ const DATA: &[u8] = b"Hello 
world"; async fn stream_actor( mut ctx: actor::Context, address: SocketAddr, - actor_ref: ActorRef, + actor_ref: ActorRef, ) where - RT: rt::Access, + RT: rt::Access + Clone, { - let mut stream = TcpStream::connect(&mut ctx, address) - .unwrap() - .await - .unwrap(); + let mut stream = tcp_connect(&mut ctx, address).await.unwrap(); let n = stream.send(DATA).await.unwrap(); assert_eq!(n, DATA.len()); @@ -63,21 +59,21 @@ async fn stream_actor( #[test] fn smoke() { - let server = TcpServer::setup( + let server = tcp::server::setup( any_local_address(), |err| panic!("unexpect error: {err}"), - actor as fn(_, _, _) -> _, + actor as fn(_, _) -> _, ActorOptions::default(), ) .unwrap(); let server_address = server.local_addr(); - // `TcpServer` should be able to be created outside the setup function and + // TCP server should be able to be created outside the setup function and // used in it. - let local_server = TcpServer::setup( + let local_server = tcp::server::setup( any_local_address(), |err| panic!("unexpect error: {err}"), - actor as fn(_, _, _) -> _, + actor as fn(_, _) -> _, ActorOptions::default(), ) .unwrap(); @@ -115,8 +111,8 @@ fn smoke() { #[test] fn zero_port() { - let actor = actor as fn(actor::Context, _, _) -> _; - let server = TcpServer::setup( + let actor = actor as fn(actor::Context, _) -> _; + let server = tcp::server::setup( any_local_address(), |err| panic!("unexpect error: {err}"), actor, @@ -154,7 +150,7 @@ fn new_actor_error() { // error here. impl Actor for ServerWrapper where - A: Actor>, + A: Actor>, { type Error = !; @@ -169,10 +165,10 @@ fn new_actor_error() { ); match res { Poll::Ready(Ok(())) => Poll::Ready(Ok(())), - Poll::Ready(Err(server::Error::Accept(err))) => { + Poll::Ready(Err(tcp::server::Error::Accept(err))) => { panic!("unexpected accept error: {err}") } - Poll::Ready(Err(server::Error::NewActor(()))) => Poll::Ready(Ok(())), + Poll::Ready(Err(tcp::server::Error::NewActor(()))) => Poll::Ready(Ok(())), Poll::Pending => Poll::Pending, } } @@ -197,7 +193,7 @@ fn new_actor_error() { RT: rt::Access, { type Message = !; - type Argument = (TcpStream, SocketAddr); + type Argument = TcpStream; type Actor = ActorErrorGenerator; type Error = (); type RuntimeAccess = RT; @@ -242,7 +238,7 @@ fn new_actor_error() { fn second_restart_error(&mut self, _: NA::Error) {} } - let server = TcpServer::setup( + let server = tcp::server::setup( any_local_address(), ErrorSupervisor, NewActorErrorGenerator(PhantomData), @@ -255,12 +251,9 @@ fn new_actor_error() { async fn stream_actor(mut ctx: actor::Context, address: SocketAddr) where - RT: rt::Access, + RT: rt::Access + Clone, { - let stream = TcpStream::connect(&mut ctx, address) - .unwrap() - .await - .unwrap(); + let stream = tcp_connect(&mut ctx, address).await.unwrap(); // Just need to create the connection. drop(stream); diff --git a/rt/tests/regression/issue_145.rs b/rt/tests/regression/issue_145.rs index 789c00d1e..bd3e0f11d 100644 --- a/rt/tests/regression/issue_145.rs +++ b/rt/tests/regression/issue_145.rs @@ -1,7 +1,7 @@ -//! The `TcpListener` and `TcpServer` should bind to port 0, using the same port +//! The `TcpListener` and TCP server should bind to port 0, using the same port //! on each worker thread. 
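A short sketch of the port-0 behaviour this regression test relies on (the `actor::Context` generics and the stand-alone `main` are assumptions; the module documentation in `rt/src/net/tcp/server.rs` has complete set-ups): `tcp::server::setup` binds a socket up front, so with port 0 the OS-chosen port is fixed once, `local_addr` reports it, and every worker that spawns a clone of the returned `Setup` accepts on that same port through `SO_REUSEPORT`.

```
#![feature(never_type)]

use std::io;

use heph::actor;
use heph::supervisor::SupervisorStrategy;
use heph_rt::net::{tcp, TcpStream};
use heph_rt::spawn::ActorOptions;
use heph_rt::ThreadLocal;
use log::error;

fn conn_supervisor(err: io::Error) -> SupervisorStrategy<TcpStream> {
    error!("error handling connection: {err}");
    SupervisorStrategy::Stop
}

async fn conn_actor(_: actor::Context<!, ThreadLocal>, mut stream: TcpStream) -> io::Result<()> {
    stream.send_all("Hello World").await?;
    Ok(())
}

fn main() -> io::Result<()> {
    // Port 0: the OS picks a free port when `setup` binds its up-front socket.
    let address = "127.0.0.1:0".parse().unwrap();
    let new_actor = conn_actor as fn(_, _) -> _;
    let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?;
    // The concrete port, shared by every worker thread that spawns `server`.
    println!("listening on {}", server.local_addr());
    Ok(())
}
```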
-use std::io::{self, Read}; +use std::io::Read; use std::net::SocketAddr; use std::sync::{Arc, Mutex}; use std::thread; @@ -10,7 +10,7 @@ use std::time::Duration; use heph::messages::Terminate; use heph::supervisor::{NoSupervisor, Supervisor, SupervisorStrategy}; use heph::{actor, Actor, ActorRef, NewActor}; -use heph_rt::net::{tcp, TcpListener, TcpServer, TcpStream}; +use heph_rt::net::{tcp, TcpListener, TcpStream}; use heph_rt::spawn::ActorOptions; use heph_rt::{Runtime, RuntimeRef, ThreadLocal}; @@ -24,18 +24,17 @@ fn issue_145_tcp_server() { let servers = Arc::new(Mutex::new(Vec::new())); let addr2 = addresses.clone(); let srv2 = servers.clone(); - let conn_actor = (conn_actor as fn(_, _, _, _, _) -> _) - .map_arg(move |(stream, address)| (stream, address, addr2.clone(), srv2.clone())); + let conn_actor = (conn_actor as fn(_, _, _, _) -> _) + .map_arg(move |stream| (stream, addr2.clone(), srv2.clone())); let address = "127.0.0.1:0".parse().unwrap(); let server = - TcpServer::setup(address, NoSupervisor, conn_actor, ActorOptions::default()).unwrap(); + tcp::server::setup(address, NoSupervisor, conn_actor, ActorOptions::default()).unwrap(); let expected_address = server.local_addr(); runtime .run_on_workers::<_, !>(move |mut runtime_ref| { - let srv_ref = runtime_ref - .try_spawn_local(ServerSupervisor, server, (), ActorOptions::default()) - .unwrap(); + let srv_ref = + runtime_ref.spawn_local(ServerSupervisor, server, (), ActorOptions::default()); // NOTE: this is not safe or supported. DO NOT USE THIS. let r = unsafe { std::mem::transmute_copy::(&runtime_ref) }; servers.lock().unwrap().push((r, srv_ref)); @@ -71,32 +70,33 @@ struct ServerSupervisor; impl Supervisor for ServerSupervisor where - L: NewActor, + L: NewActor, A: Actor>, { fn decide(&mut self, _: tcp::server::Error) -> SupervisorStrategy<()> { SupervisorStrategy::Stop } - fn decide_on_restart_error(&mut self, _: io::Error) -> SupervisorStrategy<()> { - SupervisorStrategy::Stop + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { + err } - fn second_restart_error(&mut self, _: io::Error) {} + fn second_restart_error(&mut self, err: !) { + err + } } #[allow(clippy::type_complexity)] // `servers` is too complex. async fn conn_actor( mut ctx: actor::Context, mut stream: TcpStream, - _: SocketAddr, addresses: Arc>>, servers: Arc)>>>, ) -> Result<(), !> { let mut addresses = addresses.lock().unwrap(); addresses.push(stream.local_addr().unwrap()); - // Shutdown the `TcpServer` that started us to ensure the next request goes + // Shutdown the TCP server that started us to ensure the next request goes // to a different server. // NOTE: this is not safe or supported. DO NOT USE THIS. let r = unsafe { std::mem::transmute_copy::(&*ctx.runtime()) }; diff --git a/rt/tests/util/mod.rs b/rt/tests/util/mod.rs index ec05f7524..a13721990 100644 --- a/rt/tests/util/mod.rs +++ b/rt/tests/util/mod.rs @@ -2,7 +2,6 @@ use std::async_iter::AsyncIterator; use std::env::temp_dir; -use std::fmt; use std::fs::{create_dir_all, remove_dir_all}; use std::future::Future; use std::mem::size_of; @@ -11,6 +10,13 @@ use std::path::PathBuf; use std::pin::Pin; use std::sync::Once; use std::task::{self, Poll}; +use std::time::Duration; +use std::{fmt, io}; + +use heph::actor; +use heph_rt as rt; +use heph_rt::net::TcpStream; +use heph_rt::timer::Timer; macro_rules! 
limited_loop { ($($arg: tt)*) => {{ @@ -125,11 +131,12 @@ pub struct PendingOnce(bool); impl Future for PendingOnce { type Output = (); - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { if self.0 { Poll::Ready(()) } else { self.0 = true; + ctx.waker().wake_by_ref(); Poll::Pending } } @@ -181,3 +188,26 @@ where .map(|out| out.map(|out| (out, this.count))) } } + +/// Because creating the listening socket asynchronously it's possible we're run +/// before the listener is setup. So try a couple of times. +pub async fn tcp_connect( + ctx: &mut actor::Context, + address: SocketAddr, +) -> io::Result +where + RT: rt::Access + Clone, +{ + let mut i = 10; + loop { + match TcpStream::connect(ctx, address).unwrap().await { + Ok(stream) => break Ok(stream), + Err(_) if i >= 1 => { + Timer::after(ctx, Duration::from_millis(1)).await; + i -= 1; + continue; + } + Err(err) => break Err(err), + } + } +} From 784557d7efd65337626231ee6078d23ee7fcf4d7 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 9 Apr 2023 21:45:49 +0200 Subject: [PATCH 037/177] Implement Buf for Arc<[u8]> --- rt/src/io/buf.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 45f690309..090c41ca2 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -1,6 +1,7 @@ //! Buffers. use std::mem::MaybeUninit; +use std::sync::Arc; /// Trait that defines the behaviour of buffers used in reading, which requires /// mutable access. @@ -256,6 +257,16 @@ unsafe impl Buf for String { } } +// SAFETY: `Arc` manages the allocation of the bytes, so as long as it's +// alive, so is the slice of bytes. When the `Vec`tor is leaked the allocation +// will also be leaked. +unsafe impl Buf for Arc<[u8]> { + unsafe fn parts(&self) -> (*const u8, usize) { + let slice: &[u8] = &*self; + (slice.as_ptr().cast(), slice.len()) + } +} + // SAFETY: because the reference has a `'static` lifetime we know the bytes // can't be deallocated, so it's safe to implement `Buf`. unsafe impl Buf for &'static [u8] { From bf818e47940801a91cc94939ca9f36af0dae7084 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 10 Apr 2023 15:36:38 +0200 Subject: [PATCH 038/177] Change TcpStream to use io_uring --- rt/examples/2_my_ip.rs | 3 +- rt/examples/9_systemd.rs | 3 +- rt/examples/redis.rs | 24 +- rt/src/bytes.rs | 10 - rt/src/lib.rs | 17 - rt/src/net/futures.rs | 60 ++ rt/src/net/mod.rs | 32 +- rt/src/net/tcp/listener.rs | 120 ++-- rt/src/net/tcp/server.rs | 44 +- rt/src/net/tcp/stream.rs | 860 +++++----------------------- rt/src/net/udp.rs | 4 +- rt/tests/functional/tcp/listener.rs | 25 +- rt/tests/functional/tcp/server.rs | 7 +- rt/tests/functional/tcp/stream.rs | 501 ++++++---------- rt/tests/util/mod.rs | 2 +- 15 files changed, 451 insertions(+), 1261 deletions(-) diff --git a/rt/examples/2_my_ip.rs b/rt/examples/2_my_ip.rs index a49af52e6..17b184ff5 100644 --- a/rt/examples/2_my_ip.rs +++ b/rt/examples/2_my_ip.rs @@ -103,5 +103,6 @@ async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> let ip = address.ip().to_string(); // Next we'll write the IP address to the connection. 
- stream.send_all(ip.as_bytes()).await + stream.send_all(ip).await?; + Ok(()) } diff --git a/rt/examples/9_systemd.rs b/rt/examples/9_systemd.rs index f11a3a090..ee24fae21 100644 --- a/rt/examples/9_systemd.rs +++ b/rt/examples/9_systemd.rs @@ -58,5 +58,6 @@ async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> let address = stream.peer_addr()?; info!("accepted connection: address={address}"); let ip = address.ip().to_string(); - stream.send_all(ip.as_bytes()).await + stream.send_all(ip).await?; + Ok(()) } diff --git a/rt/examples/redis.rs b/rt/examples/redis.rs index 45cc61937..69f1f0d98 100644 --- a/rt/examples/redis.rs +++ b/rt/examples/redis.rs @@ -6,7 +6,7 @@ #![feature(never_type)] use std::collections::HashMap; -use std::io::{self, IoSlice, Write}; +use std::io::{self, Write}; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -105,8 +105,8 @@ where let err = loop { buffer.clear(); - let n = Deadline::after(&mut ctx, TIMEOUT, stream.recv(&mut buffer)).await?; - if n == 0 { + buffer = Deadline::after(&mut ctx, TIMEOUT, stream.recv(buffer)).await?; + if buffer.is_empty() { return Ok(()); } let buf = &buffer[..]; @@ -154,14 +154,11 @@ where buffer.clear(); if let Some(value) = value { write!(&mut buffer, "${}\r\n", value.len()).unwrap(); - let mut bufs = [ - IoSlice::new(&buffer), - IoSlice::new(&*value), - IoSlice::new(b"\r\n"), - ]; - stream.send_vectored_all(&mut bufs).await?; + let bufs = (buffer, value, "\r\n"); + let bufs = stream.send_vectored_all(bufs).await?; + buffer = bufs.0; } else { - stream.send_all(NIL.as_bytes()).await?; + stream.send_all(NIL).await?; } } "SET" => { @@ -193,14 +190,17 @@ where } stream.send_all(OK.as_bytes()).await?; } - "COMMAND" => stream.send_all(COMMANDS.as_bytes()).await?, + "COMMAND" => { + stream.send_all(COMMANDS).await?; + } _ => break ERR_UNIMPLEMENTED, } } _ => break ERR_UNIMPLEMENTED, } }; - stream.send_all(err.as_bytes()).await + stream.send_all(err).await?; + Ok(()) } /// Parse an integer from `buf` including `\r\n`. diff --git a/rt/src/bytes.rs b/rt/src/bytes.rs index 1dd975aec..168dd8a64 100644 --- a/rt/src/bytes.rs +++ b/rt/src/bytes.rs @@ -199,16 +199,6 @@ impl<'a> MaybeUninitSlice<'a> { socket2::MaybeUninitSlice::new(slice::from_raw_parts_mut(self.0.as_mut_ptr(), limit)) }; } - - /// Returns `bufs` as [`socket2::MaybeUninitSlice`]. - #[allow(clippy::wrong_self_convention)] - pub(crate) fn as_socket2<'b>( - bufs: &'b mut [MaybeUninitSlice<'a>], - ) -> &'b mut [socket2::MaybeUninitSlice<'a>] { - // Safety: this is safe because `MaybeUninitSlice` has the - // `repr(transparent)` attribute. - unsafe { &mut *(bufs as *mut _ as *mut _) } - } } impl<'a> Deref for MaybeUninitSlice<'a> { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 963f59fcc..6fec10df6 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -186,23 +186,6 @@ compile_error!("Heph currently only supports Linux."); #[cfg(not(target_pointer_width = "64"))] compile_error!("Heph currently only supports 64 bit architectures."); -/// A macro to try an I/O function. -/// -/// Note that this is used in the net and pipe modules and has to be defined -/// before use. -macro_rules! try_io { - ($op: expr) => { - loop { - match $op { - Ok(ok) => break Poll::Ready(Ok(ok)), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - }; -} - /// Helper macro to execute a system call that returns an `io::Result`. macro_rules! 
syscall { ($fn: ident ( $($arg: expr),* $(,)? ) ) => {{ diff --git a/rt/src/net/futures.rs b/rt/src/net/futures.rs index 2729ec46d..12da32b01 100644 --- a/rt/src/net/futures.rs +++ b/rt/src/net/futures.rs @@ -25,6 +25,20 @@ impl<'a, B: BufMut> Future for Recv<'a, B> { } } +/// [`Future`] behind `recv_n` implementations. +pub(crate) struct RecvN<'a, B>(pub(crate) a10::net::RecvN<'a, BufWrapper>); + +impl<'a, B: BufMut> Future for RecvN<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind `recv_vectored` implementations. pub(crate) struct RecvVectored<'a, B, const N: usize>( pub(crate) a10::net::RecvVectored<'a, BufWrapper, N>, @@ -41,6 +55,22 @@ impl<'a, B: BufMutSlice, const N: usize> Future for RecvVectored<'a, B, N> { } } +/// [`Future`] behind `recv_n_vectored` implementations. +pub(crate) struct RecvNVectored<'a, B, const N: usize>( + pub(crate) a10::net::RecvNVectored<'a, BufWrapper, N>, +); + +impl<'a, B: BufMutSlice, const N: usize> Future for RecvNVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind `recv_from` implementations. pub(crate) struct RecvFrom<'a, B, A>(pub(crate) a10::net::RecvFrom<'a, BufWrapper, A>); @@ -87,6 +117,20 @@ impl<'a, B: Buf> Future for Send<'a, B> { } } +/// [`Future`] behind `send_all` implementations. +pub(crate) struct SendAll<'a, B>(pub(crate) Extractor>>); + +impl<'a, B: Buf> Future for SendAll<'a, B> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind `send_vectored` implementations. pub(crate) struct SendVectored<'a, B, const N: usize>( pub(crate) Extractor, a10::net::NoAddress, N>>, @@ -103,6 +147,22 @@ impl<'a, B: BufSlice, const N: usize> Future for SendVectored<'a, B, N> { } } +/// [`Future`] behind `send_all_vectored` implementations. +pub(crate) struct SendAllVectored<'a, B, const N: usize>( + pub(crate) Extractor, N>>, +); + +impl<'a, B: BufSlice, const N: usize> Future for SendAllVectored<'a, B, N> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll(ctx) + .map_ok(|buf| buf.0) + } +} + /// [`Future`] behind `send_to` implementations. pub(crate) struct SendTo<'a, B, A>(pub(crate) Extractor, A>>); diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 7235f22f4..356236ec6 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -14,35 +14,6 @@ //! [TCP listening socket]: crate::net::TcpListener //! [TCP server]: crate::net::tcp::server //! [User Datagram Protocol]: crate::net::udp -//! -//! # I/O with Heph's socket -//! -//! The different socket types provide two or three variants of most I/O -//! functions. The `try_*` funtions, which makes the system calls once. For -//! example [`TcpStream::try_send`] calls `send(2)` once, not handling any -//! errors (including [`WouldBlock`] errors!). -//! -//! 
In addition they provide a [`Future`] function which handles would block -//! errors. For `TcpStream::try_send` the future version is [`TcpStream::send`], -//! i.e. without the `try_` prefix. -//! -//! Finally for a lot of function a convenience version is provided that handle -//! various cases. For example with sending you might want to ensure all bytes -//! are send, for this you can use [`TcpStream::send_all`]. But also see -//! functions such as [`TcpStream::recv_n`]; which receives at least `n` bytes, -//! or [`TcpStream::send_entire_file`]; which sends an entire file using the -//! `sendfile(2)` system call. -//! -//! [`WouldBlock`]: io::ErrorKind::WouldBlock -//! [`Future`]: std::future::Future -//! -//! # Notes -//! -//! All types in the `net` module are [bound] to an actor. See the [`Bound`] -//! trait for more information. -//! -//! [bound]: crate::Bound -//! [`Bound`]: crate::Bound use std::mem::{size_of, MaybeUninit}; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; @@ -61,7 +32,8 @@ pub use udp::UdpSocket; pub use uds::UnixDatagram; pub(crate) use futures::{ - Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, SendVectored, + Recv, RecvFrom, RecvFromVectored, RecvN, RecvNVectored, RecvVectored, Send, SendAll, + SendAllVectored, SendTo, SendToVectored, SendVectored, }; /// The unconnected mode of an [`UdpSocket`] or [`UnixDatagram`]. diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index ad9206dd1..71a974c27 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -1,16 +1,13 @@ //! Module with [`TcpListener`] and related types. use std::async_iter::AsyncIterator; -use std::mem::ManuallyDrop; use std::net::SocketAddr; -use std::os::fd::{AsFd, AsRawFd, FromRawFd}; +use std::os::fd::AsFd; use std::pin::Pin; use std::task::{self, Poll}; use std::{fmt, io}; use a10::AsyncFd; -use heph::actor; -use mio::Interest; use socket2::{Domain, Protocol, SockRef, Socket, Type}; use crate::net::{convert_address, SockAddr, TcpStream}; @@ -63,11 +60,11 @@ use crate::{self as rt}; /// Ok(()) /// } /// # -/// # async fn client(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let mut stream = TcpStream::connect(&mut ctx, address)?.await?; +/// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { +/// # let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// # let local_address = stream.local_addr()?.to_string(); -/// # let mut buf = Vec::with_capacity(local_address.len() + 1); -/// # stream.recv_n(&mut buf, local_address.len()).await?; +/// # let buf = Vec::with_capacity(local_address.len() + 1); +/// # let buf = stream.recv_n(buf, local_address.len()).await?; /// # assert_eq!(buf, local_address.as_bytes()); /// # Ok(()) /// # } @@ -78,20 +75,18 @@ use crate::{self as rt}; /// SupervisorStrategy::Stop /// } /// -/// async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { +/// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. /// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// /// // Accept a connection. -/// let (unbound_stream, peer_address) = listener.accept().await?; +/// let (mut stream, peer_address) = listener.accept().await?; /// info!("accepted connection from: {peer_address}"); /// -/// // Next we need to bind the stream to this actor. 
-/// let mut stream = unbound_stream.bind_to(&mut ctx)?; -/// /// // Next we write the IP address to the connection. /// let ip = peer_address.to_string(); -/// stream.send_all(ip.as_bytes()).await +/// stream.send_all(ip).await?; +/// Ok(()) /// } /// ``` /// @@ -129,11 +124,11 @@ use crate::{self as rt}; /// Ok(()) /// } /// # -/// # async fn client(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let mut stream = TcpStream::connect(&mut ctx, address)?.await?; +/// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { +/// # let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// # let local_address = stream.local_addr()?.to_string(); -/// # let mut buf = Vec::with_capacity(local_address.len() + 1); -/// # stream.recv_n(&mut buf, local_address.len()).await?; +/// # let buf = Vec::with_capacity(local_address.len() + 1); +/// # let buf = stream.recv_n(buf, local_address.len()).await?; /// # assert_eq!(buf, local_address.as_bytes()); /// # Ok(()) /// # } @@ -144,24 +139,27 @@ use crate::{self as rt}; /// SupervisorStrategy::Stop /// } /// -/// async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { +/// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. /// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// let mut incoming = listener.incoming(); /// loop { -/// let unbound_stream = match next(&mut incoming).await { -/// Some(Ok(unbound_stream)) => unbound_stream, +/// let mut stream = match next(&mut incoming).await { +/// Some(Ok(stream)) => stream, /// Some(Err(err)) => return Err(err), /// None => return Ok(()), /// }; /// -/// let mut stream = unbound_stream.bind_to(&mut ctx)?; +/// // Optionally set the CPU affinity as that's not done automatically +/// // (in case the stream is send to another thread). +/// stream.set_auto_cpu_affinity(ctx.runtime_ref()); +/// /// let peer_address = stream.peer_addr()?; /// info!("accepted connection from: {peer_address}"); /// /// // Next we write the IP address to the connection. /// let ip = peer_address.to_string(); -/// stream.send_all(ip.as_bytes()).await?; +/// stream.send_all(ip).await?; /// # return Ok(()); /// } /// } @@ -237,29 +235,34 @@ impl TcpListener { /// /// Returns the TCP stream and the remote address of the peer. See the /// [`TcpListener`] documentation for an example. - pub async fn accept(&mut self) -> io::Result<(UnboundTcpStream, SocketAddr)> { + /// + /// # Notes + /// + /// The CPU affinity is **not** set on the returned TCP stream. To set that + /// use [`TcpStream::set_auto_cpu_affinity`]. + pub async fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { self.fd .accept::() .await - .map(|(fd, addr)| (UnboundTcpStream::from_async_fd(fd), addr.into())) + .map(|(fd, addr)| (TcpStream { fd }, addr.into())) } /// Returns a stream of incoming [`TcpStream`]s. /// - /// Note that unlike [`accept`] this doesn't return the address because uses - /// io_uring's multishot accept (making it faster then calling `accept` in a - /// loop). See the [`TcpListener`] documentation for an example. + /// Note that unlike [`accept`] this doesn't return the address because it + /// uses io_uring's multishot accept (making it faster then calling `accept` + /// in a loop). See the [`TcpListener`] documentation for an example. /// /// [`accept`]: TcpListener::accept + /// + /// # Notes + /// + /// The CPU affinity is **not** set on the returned TCP stream. 
To set that + /// use [`TcpStream::set_auto_cpu_affinity`]. pub fn incoming(&mut self) -> Incoming<'_> { Incoming(self.fd.multishot_accept()) } - /// Temp function used by `TcpListener`. - pub(crate) fn incoming2(&mut self) -> a10::net::MultishotAccept<'_> { - self.fd.multishot_accept() - } - /// Get the value of the `SO_ERROR` option on this socket. /// /// This will retrieve the stored error in the underlying socket, clearing @@ -278,66 +281,19 @@ impl TcpListener { } } -/// An unbound [`TcpStream`]. -/// -/// The stream first has to be bound to an actor (using [`bind_to`]), before it -/// can be used. -/// -/// [`bind_to`]: UnboundTcpStream::bind_to -#[derive(Debug)] -pub struct UnboundTcpStream { - stream: TcpStream, -} - -impl UnboundTcpStream { - /// Bind this TCP stream to the actor's `ctx`, allowing it to be used. - pub fn bind_to(mut self, ctx: &mut actor::Context) -> io::Result - where - RT: rt::Access, - { - let mut stream = ctx - .runtime() - .register( - &mut self.stream.socket, - Interest::READABLE | Interest::WRITABLE, - ) - .map(|()| self.stream)?; - #[cfg(target_os = "linux")] - if let Some(cpu) = ctx.runtime_ref().cpu() { - if let Err(err) = stream.set_cpu_affinity(cpu) { - log::warn!("failed to set CPU affinity on TcpStream: {err}"); - } - } - Ok(stream) - } - - pub(crate) fn from_async_fd(fd: AsyncFd) -> UnboundTcpStream { - UnboundTcpStream { - stream: TcpStream { - // SAFETY: the put `fd` in a `ManuallyDrop` to ensure we don't - // close it, so we're free to create a `TcpStream` from the fd. - socket: unsafe { - let fd = ManuallyDrop::new(fd); - FromRawFd::from_raw_fd(fd.as_fd().as_raw_fd()) - }, - }, - } - } -} - /// The [`AsyncIterator`] behind [`TcpListener::incoming`]. #[derive(Debug)] #[must_use = "AsyncIterators do nothing unless polled"] pub struct Incoming<'a>(a10::net::MultishotAccept<'a>); impl<'a> AsyncIterator for Incoming<'a> { - type Item = io::Result; + type Item = io::Result; fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { // SAFETY: not moving the `Future`. unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } .poll_next(ctx) - .map_ok(UnboundTcpStream::from_async_fd) + .map_ok(|fd| TcpStream { fd }) } } diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 728bbcbdd..d778c3d51 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -105,7 +105,8 @@ //! //! /// The actor responsible for a single TCP stream. //! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { -//! stream.send_all(b"Hello World").await +//! stream.send_all("Hello World").await?; +//! Ok(()) //! } //! ``` //! @@ -184,7 +185,8 @@ //! # //! /// The actor responsible for a single TCP stream. //! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { -//! stream.send_all(b"Hello World").await +//! stream.send_all("Hello World").await?; +//! Ok(()) //! } //! ``` //! @@ -266,7 +268,8 @@ //! //! /// The actor responsible for a single TCP stream. //! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { -//! stream.send_all(b"Hello World").await +//! stream.send_all("Hello World").await?; +//! Ok(()) //! 
} use std::convert::TryFrom; @@ -281,9 +284,8 @@ use heph::supervisor::Supervisor; use log::{debug, trace}; use socket2::{Domain, Protocol, Socket, Type}; -use crate::net::tcp::listener::UnboundTcpStream; use crate::net::{TcpListener, TcpStream}; -use crate::spawn::{ActorOptions, AddActorError, PrivateSpawn, Spawn}; +use crate::spawn::{ActorOptions, Spawn}; use crate::util::{either, next}; use crate::{self as rt, Signal}; @@ -450,21 +452,22 @@ where .map_err(Error::Accept)?; trace!(address = log::as_display!(local); "TCP server listening"); - let mut accept = listener.incoming2(); + let mut accept = listener.incoming(); let mut receive = ctx.receive_next(); loop { match either(next(&mut accept), &mut receive).await { - Ok(Some(Ok(fd))) => { - let stream = UnboundTcpStream::from_async_fd(fd); + Ok(Some(Ok(mut stream))) => { trace!("TCP server accepted connection"); - drop(receive); // Can't double borrow `ctx`. - _ = ctx.try_spawn_setup( - supervisor.clone(), - new_actor.clone(), - |ctx| stream.bind_to(ctx), - options.clone(), - )?; + stream.set_auto_cpu_affinity(ctx.runtime_ref()); + _ = ctx + .try_spawn( + supervisor.clone(), + new_actor.clone(), + stream, + options.clone(), + ) + .map_err(Error::NewActor)?; receive = ctx.receive_next(); } Ok(Some(Err(err))) => return Err(Error::Accept(err)), @@ -522,17 +525,6 @@ pub enum Error { NewActor(E), } -// Not part of the public API. -#[doc(hidden)] -impl From> for Error { - fn from(err: AddActorError) -> Error { - match err { - AddActorError::NewActor(err) => Error::NewActor(err), - AddActorError::ArgFn(err) => Error::Accept(err), - } - } -} - impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use Error::*; diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index 1c854d278..e28f3fe52 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -1,26 +1,18 @@ //! Module with [`TcpStream`] and related types. -// TODO: a number of send/recv methods don't use Mio directly, this is fine on -// Unix but doesn't work on Windows (which we don't support). We need to fix -// that once Mio uses Socket2 and supports all the methods we need, Mio's -// tracking issue: https://github.com/tokio-rs/mio/issues/1381. - -use std::future::Future; -use std::io::{self, IoSlice}; +use std::io; use std::net::{Shutdown, SocketAddr}; -use std::num::NonZeroUsize; -use std::pin::Pin; -use std::task::{self, Poll}; - -#[cfg(target_os = "linux")] -use log::warn; -use mio::{net, Interest}; +use std::os::fd::AsFd; -use heph::actor; -use socket2::SockRef; +use a10::{AsyncFd, Extract}; +use socket2::{Domain, Protocol, SockRef, Type}; -use crate::bytes::{Bytes, BytesVectored, MaybeUninitSlice}; -use crate::{self as rt, Bound}; +use crate as rt; +use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; +use crate::net::{ + convert_address, Recv, RecvN, RecvNVectored, RecvVectored, Send, SendAll, SendAllVectored, + SendVectored, SockAddr, +}; /// A non-blocking TCP stream between a local socket and a remote socket. 
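[Editor's note: the listener and server changes above converge on one flow: `accept` now hands back a ready-to-use `TcpStream`, CPU affinity becomes an explicit opt-in, and I/O moves to owned buffers. A sketch assembling that flow from the doc examples in this patch; it assumes the nightly `never_type` feature, as those examples do, and trims supervision and error handling:

```rust
use std::io;
use std::net::SocketAddr;

use heph::actor;
use heph_rt::net::TcpListener;
use heph_rt::ThreadLocal;

async fn actor(ctx: actor::Context<!, ThreadLocal>, address: SocketAddr) -> io::Result<()> {
    let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?;

    // `accept` returns a usable `TcpStream`; no `bind_to` step any more.
    let (mut stream, peer) = listener.accept().await?;
    // CPU affinity is not set automatically on accepted streams (see the
    // notes above), so opt in explicitly.
    stream.set_auto_cpu_affinity(ctx.runtime_ref());

    // Owned-buffer receive: the buffer is moved in and handed back filled.
    let buf = stream.recv(Vec::with_capacity(4096)).await?;
    stream.send_all(format!("{peer} sent {} bytes", buf.len())).await?;
    Ok(())
}
```
]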
/// @@ -37,220 +29,148 @@ use crate::{self as rt, Bound}; /// use heph_rt::net::TcpStream; /// use heph_rt::ThreadLocal; /// -/// async fn actor(mut ctx: actor::Context) -> io::Result<()> { +/// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); -/// let mut stream = TcpStream::connect(&mut ctx, address)?.await?; -/// stream.send_all(b"Hello world!").await +/// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; +/// stream.send_all("Hello world!").await?; +/// Ok(()) /// } -/// # /// # drop(actor); // Silent dead code warnings. /// ``` #[derive(Debug)] pub struct TcpStream { - /// Underlying TCP connection, backed by Mio. - pub(in crate::net) socket: net::TcpStream, + pub(in crate::net) fd: AsyncFd, } impl TcpStream { /// Create a new TCP stream and issues a non-blocking connect to the /// specified `address`. + pub async fn connect(rt: &RT, address: SocketAddr) -> io::Result + where + RT: rt::Access, + { + let fd = a10::net::socket( + rt.submission_queue(), + Domain::for_address(address).into(), + Type::STREAM.cloexec().into(), + Protocol::TCP.into(), + 0, + ) + .await?; + let mut socket = TcpStream { fd }; + socket.set_auto_cpu_affinity(rt); + socket.fd.connect(SockAddr::from(address)).await?; + Ok(socket) + } + + /// Automatically set the CPU affinity based on the runtime access `rt`. + /// + /// For non-Linux OSs this is a no-op. If `rt` is not local this is also a + /// no-op. /// /// # Notes /// - /// The stream is also [bound] to the actor that owns the `actor::Context`, - /// which means the actor will be run every time the stream is ready to read - /// or write. + /// This is already called when the `TcpStream` is created using + /// [`TcpStream::connect`], this is mostly useful when accepting a + /// connection from [`TcpListener`]. /// - /// [bound]: crate::Bound - pub fn connect( - ctx: &mut actor::Context, - address: SocketAddr, - ) -> io::Result + /// [`TcpListener`]: crate::net::tcp::TcpListener + pub fn set_auto_cpu_affinity(&mut self, rt: &RT) where RT: rt::Access, { - let mut socket = net::TcpStream::connect(address)?; - ctx.runtime() - .register(&mut socket, Interest::READABLE | Interest::WRITABLE)?; - Ok(Connect { - socket: Some(socket), - #[cfg(target_os = "linux")] - cpu_affinity: ctx.runtime_ref().cpu(), - }) + #[cfg(target_os = "linux")] + if let Some(cpu) = rt.cpu() { + if let Err(err) = self.set_cpu_affinity(cpu) { + log::warn!("failed to set CPU affinity on TcpStream: {err}"); + } + } + } + + /// Set the CPU affinity to `cpu`. + /// + /// On Linux this uses `SO_INCOMING_CPU`. + #[cfg(target_os = "linux")] + pub(crate) fn set_cpu_affinity(&mut self, cpu: usize) -> io::Result<()> { + self.with_ref(|socket| socket.set_cpu_affinity(cpu)) } /// Returns the socket address of the remote peer of this TCP connection. pub fn peer_addr(&mut self) -> io::Result { - self.socket.peer_addr() + self.with_ref(|socket| socket.peer_addr().and_then(convert_address)) } /// Returns the socket address of the local half of this TCP connection. pub fn local_addr(&mut self) -> io::Result { - self.socket.local_addr() - } - - /// Set the CPU affinity to `cpu`. - /// - /// On Linux this uses `SO_INCOMING_CPU`. - #[cfg(target_os = "linux")] - pub(crate) fn set_cpu_affinity(&mut self, cpu: usize) -> io::Result<()> { - SockRef::from(&self.socket).set_cpu_affinity(cpu) + self.with_ref(|socket| socket.local_addr().and_then(convert_address)) } /// Sets the value for the `IP_TTL` option on this socket. 
pub fn set_ttl(&mut self, ttl: u32) -> io::Result<()> { - self.socket.set_ttl(ttl) + self.with_ref(|socket| socket.set_ttl(ttl)) } /// Gets the value of the `IP_TTL` option for this socket. pub fn ttl(&mut self) -> io::Result { - self.socket.ttl() + self.with_ref(|socket| socket.ttl()) } /// Sets the value of the `TCP_NODELAY` option on this socket. pub fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { - self.socket.set_nodelay(nodelay) + self.with_ref(|socket| socket.set_nodelay(nodelay)) } /// Gets the value of the `TCP_NODELAY` option on this socket. pub fn nodelay(&mut self) -> io::Result { - self.socket.nodelay() + self.with_ref(|socket| socket.nodelay()) } /// Returns `true` if `SO_KEEPALIVE` is set. pub fn keepalive(&self) -> io::Result { - let socket = SockRef::from(&self.socket); - socket.keepalive() + self.with_ref(|socket| socket.keepalive()) } /// Enables or disables `SO_KEEPALIVE`. pub fn set_keepalive(&self, enable: bool) -> io::Result<()> { - let socket = SockRef::from(&self.socket); - socket.set_keepalive(enable) - } - - /// Attempt to send bytes in `buf` to the peer. - /// - /// If no bytes can currently be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`TcpStream::send`] or [`TcpStream::send_all`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send(&mut self, buf: &[u8]) -> io::Result { - SockRef::from(&self.socket).send(buf) + self.with_ref(|socket| socket.set_keepalive(enable)) } /// Send the bytes in `buf` to the peer. /// /// Return the number of bytes written. This may we fewer then the length of /// `buf`. To ensure that all bytes are written use [`TcpStream::send_all`]. - pub fn send<'a, 'b>(&'a mut self, buf: &'b [u8]) -> Send<'a, 'b> { - Send { stream: self, buf } + pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + Send(self.fd.send(BufWrapper(buf), 0).extract()).await } /// Send the all bytes in `buf` to the peer. /// /// If this fails to send all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn send_all<'a, 'b>(&'a mut self, buf: &'b [u8]) -> SendAll<'a, 'b> { - SendAll { stream: self, buf } - } - - /// Attempt to send bytes in `bufs` to the peer. - /// - /// If no bytes can currently be send this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`TcpStream::send_vectored`] or [`TcpStream::send_vectored_all`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_send_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result { - SockRef::from(&self.socket).send_vectored(bufs) + pub async fn send_all<'a, B: Buf>(&'a mut self, buf: B) -> io::Result { + SendAll(self.fd.send_all(BufWrapper(buf)).extract()).await } - /// Send the bytes in `bufs` to the peer. - /// - /// Return the number of bytes written. This may we fewer then the length of - /// `bufs`. To ensure that all bytes are written use - /// [`TcpStream::send_vectored_all`]. - pub fn send_vectored<'a, 'b>( - &'a mut self, - bufs: &'b mut [IoSlice<'b>], - ) -> SendVectored<'a, 'b> { - SendVectored { stream: self, bufs } + /// Sends data on the socket to the connected socket, using vectored I/O. 
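[Editor's note: the new `send` and `send_all` above return the buffer to the caller, via a10's `Extract`, instead of borrowing a slice. A small usage sketch assuming exactly the signatures in this hunk, `io::Result<(B, usize)>` for `send` and `io::Result<B>` for `send_all`; it is illustrative only and sends the greeting twice just to show both return shapes:

```rust
use std::io;

use heph_rt::net::TcpStream;

const GREETING: &[u8] = b"hello";

async fn greet(stream: &mut TcpStream, who: String) -> io::Result<()> {
    // `send` hands the buffer back together with the number of bytes
    // written, which may be fewer than the buffer length.
    let (_buf, n) = stream.send(GREETING).await?;
    assert!(n <= GREETING.len());

    // `send_all` keeps writing until the whole buffer is sent (or fails with
    // `WriteZero`) and returns just the buffer, so it can be reused.
    let _who = stream.send_all(who).await?;
    Ok(())
}
```
]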
+ pub async fn send_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result<(B, usize)> { + SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await } /// Send the all bytes in `bufs` to the peer. /// /// If this fails to send all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub fn send_vectored_all<'a, 'b>( - &'a mut self, - bufs: &'b mut [IoSlice<'b>], - ) -> SendVectoredAll<'a, 'b> { - SendVectoredAll { stream: self, bufs } - } - - /// Attempt to receive message(s) from the stream, writing them into `buf`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`TcpStream::recv`] or [`TcpStream::recv_n`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - /// - /// # Examples - /// - /// ``` - /// #![feature(never_type)] - /// - /// use std::io; - /// - /// use heph::actor; - /// use heph_rt::net::TcpStream; - /// use heph_rt::ThreadLocal; - /// - /// async fn actor(mut ctx: actor::Context) -> io::Result<()> { - /// let address = "127.0.0.1:12345".parse().unwrap(); - /// let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - /// - /// let mut buf = Vec::with_capacity(4 * 1024); // 4 KB. - /// match stream.try_recv(&mut buf) { - /// Ok(n) => println!("read {n} bytes: {buf:?}"), - /// Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - /// println!("no bytes can't be read at this time"); - /// }, - /// Err(ref err) if err.kind() == io::ErrorKind::Interrupted => { - /// println!("read got interrupted"); - /// }, - /// Err(err) => return Err(err), - /// } - /// - /// Ok(()) - /// } - /// # - /// # drop(actor); // Silent dead code warnings. - /// ``` - pub fn try_recv(&mut self, mut buf: B) -> io::Result - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `TcpStream::try_recv with an empty buffer" - ); - SockRef::from(&self.socket) - .recv(buf.as_bytes()) - .map(|read| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - read - }) + pub async fn send_vectored_all, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + SendAllVectored(self.fd.send_all_vectored(BufWrapper(bufs)).extract()).await } - /// Receive messages from the stream, writing them into `buf`. + /// Receive messages from the stream. /// /// # Examples /// @@ -263,31 +183,26 @@ impl TcpStream { /// use heph_rt::net::TcpStream; /// use heph_rt::ThreadLocal; /// - /// async fn actor(mut ctx: actor::Context) -> io::Result<()> { + /// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); - /// let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + /// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// - /// let mut buf = Vec::with_capacity(4 * 1024); // 4 KB. - /// let n = stream.recv(&mut buf).await?; - /// println!("read {n} bytes: {buf:?}"); + /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. + /// let buf = stream.recv(buf).await?; + /// println!("read {} bytes: {buf:?}", buf.len()); /// /// Ok(()) /// } /// # /// # drop(actor); // Silent dead code warnings. 
/// ``` - pub fn recv<'a, B>(&'a mut self, buf: B) -> Recv<'a, B> - where - B: Bytes, - { - Recv { stream: self, buf } + pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), 0)).await } - /// Receive at least `n` bytes from the stream, writing them into `buf`. + /// Receive at least `n` bytes from the stream. /// - /// This returns a [`Future`] that receives at least `n` bytes from a - /// `TcpStream` and writes them into buffer `B`, or returns - /// [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. + /// This returns [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. /// /// # Examples /// @@ -300,173 +215,69 @@ impl TcpStream { /// use heph_rt::net::TcpStream; /// use heph_rt::ThreadLocal; /// - /// async fn actor(mut ctx: actor::Context) -> io::Result<()> { + /// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); - /// let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + /// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// - /// let mut buf = Vec::with_capacity(4 * 1024); // 4 KB. + /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. /// // NOTE: this will return an error if the peer sends less than 1 KB /// // of data before shutting down or closing the connection. /// let n = 1024; - /// stream.recv_n(&mut buf, n).await?; - /// println!("read {n} bytes: {buf:?}"); + /// let buf = stream.recv_n(buf, n).await?; + /// println!("read {} bytes: {buf:?}", buf.len()); /// /// Ok(()) /// } /// # /// # drop(actor); // Silent dead code warnings. /// ``` - pub fn recv_n<'a, B>(&'a mut self, buf: B, n: usize) -> RecvN<'a, B> - where - B: Bytes, - { + pub async fn recv_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> io::Result { debug_assert!( buf.spare_capacity() >= n, "called `TcpStream::recv_n` with a buffer smaller then `n`" ); - RecvN { - stream: self, - buf, - left: n, - } - } - - /// Attempt to receive message(s) from the stream, writing them into `bufs`. - /// - /// If no bytes can currently be received this will return an error with the - /// [kind] set to [`ErrorKind::WouldBlock`]. Most users should prefer to use - /// [`TcpStream::recv_vectored`] or [`TcpStream::recv_n_vectored`]. - /// - /// [kind]: io::Error::kind - /// [`ErrorKind::WouldBlock`]: io::ErrorKind::WouldBlock - pub fn try_recv_vectored(&mut self, mut bufs: B) -> io::Result - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `TcpStream::try_recv_vectored` with empty buffers" - ); - let res = SockRef::from(&self.socket) - .recv_vectored(MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut())); - match res { - Ok((read, _)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - Ok(read) - } - Err(err) => Err(err), - } + RecvN(self.fd.recv_n(BufWrapper(buf), n)).await } - /// Receive messages from the stream, writing them into `bufs`. - pub fn recv_vectored(&mut self, bufs: B) -> RecvVectored<'_, B> - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `TcpStream::recv_vectored` with empty buffers" - ); - RecvVectored { stream: self, bufs } + /// Receive messages from the stream, using vectored I/O. + pub async fn recv_vectored, const N: usize>( + &mut self, + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await } - /// Receive at least `n` bytes from the stream, writing them into `bufs`. 
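[Editor's note: per the documentation above, `recv_n` fills the supplied buffer with at least `n` bytes, possibly more, and fails with `UnexpectedEof` if the peer closes the connection first; it also debug-asserts that the buffer has at least `n` bytes of spare capacity. A hypothetical helper showing how a caller might map that error, assuming the `recv_n` signature shown in this hunk:

```rust
use std::io;

use heph_rt::net::TcpStream;

/// Receive at least `HEADER` bytes, treating a short read as a protocol
/// error. Illustrative only; not part of this patch.
async fn read_header(stream: &mut TcpStream) -> io::Result<Vec<u8>> {
    const HEADER: usize = 16;
    match stream.recv_n(Vec::with_capacity(64), HEADER).await {
        // At least `HEADER` bytes were received, possibly more.
        Ok(buf) => Ok(buf),
        // The peer closed the connection before a full header arrived.
        Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "incomplete header",
        )),
        Err(err) => Err(err),
    }
}
```
]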
- pub fn recv_n_vectored(&mut self, bufs: B, n: usize) -> RecvNVectored<'_, B> - where - B: BytesVectored, - { + /// Receive at least `n` bytes from the stream, using vectored I/O. + /// + /// This returns [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. + pub async fn recv_n_vectored, const N: usize>( + &mut self, + bufs: B, + n: usize, + ) -> io::Result { debug_assert!( - bufs.spare_capacity() >= n, + bufs.total_spare_capacity() >= n, "called `TcpStream::recv_n_vectored` with a buffer smaller then `n`" ); - RecvNVectored { - stream: self, - bufs, - left: n, - } + RecvNVectored(self.fd.recv_n_vectored(BufWrapper(bufs), n)).await } - /// Attempt to receive messages from the stream, writing them into `buf`, - /// without removing that data from the queue. On success, returns the - /// number of bytes peeked. - pub fn try_peek(&mut self, mut buf: B) -> io::Result - where - B: Bytes, - { - debug_assert!( - buf.has_spare_capacity(), - "called `TcpStream::try_peek with an empty buffer" - ); - SockRef::from(&self.socket) - .peek(buf.as_bytes()) - .map(|read| { - // Safety: just read the bytes. - unsafe { buf.update_length(read) } - read - }) + /// Receive messages from the stream, without removing that data from the + /// queue. + pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await } - /// Receive messages from the stream, writing them into `buf`, without - /// removing that data from the queue. On success, returns the number of - /// bytes peeked. - pub fn peek<'a, B>(&'a mut self, buf: B) -> Peek<'a, B> - where - B: Bytes, - { - Peek { stream: self, buf } - } - - /// Attempt to receive messages from the stream using vectored I/O, writing - /// them into `bufs`, without removing that data from the queue. On success, - /// returns the number of bytes peeked. - pub fn try_peek_vectored(&mut self, mut bufs: B) -> io::Result - where - B: BytesVectored, - { - debug_assert!( - bufs.has_spare_capacity(), - "called `TcpStream::try_peek_vectored` with empty buffers" - ); - let res = SockRef::from(&self.socket).recv_vectored_with_flags( - MaybeUninitSlice::as_socket2(bufs.as_bufs().as_mut()), - libc::MSG_PEEK, - ); - match res { - Ok((read, _)) => { - // Safety: just read the bytes. - unsafe { bufs.update_lengths(read) } - Ok(read) - } - Err(err) => Err(err), - } - } - - /// Receive messages from the stream using vectored I/O, writing them into - /// `bufs`, without removing that data from the queue. On success, returns - /// the number of bytes peeked. - pub fn peek_vectored(&mut self, bufs: B) -> PeekVectored<'_, B> - where - B: BytesVectored, - { - PeekVectored { stream: self, bufs } - } - - /// Attempt to make a `sendfile(2)` system call. - /// - /// See [`TcpStream::send_file`] for more information. - pub fn try_send_file( + /// Receive messages from the stream, without removing it from the input + /// queue, using vectored I/O. + pub async fn peek_vectored, const N: usize>( &mut self, - file: &F, - offset: usize, - length: Option, - ) -> io::Result - where - F: FileSend, - { - SockRef::from(&self.socket).sendfile(file, offset, length) + bufs: B, + ) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } + /* TODO: add `sendfile(2)` wrappers io_uring at the time of writing doesn't support this. /// Send the `file` out this stream. 
/// /// What kind of files are support depends on the OS and is determined by @@ -525,6 +336,7 @@ impl TcpStream { { self.send_file_all(file, 0, None) } + */ /// Shuts down the read, write, or both halves of this connection. /// @@ -532,7 +344,7 @@ impl TcpStream { /// portions to return immediately with an appropriate value (see the /// documentation of [`Shutdown`]). pub fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { - self.socket.shutdown(how) + self.with_ref(|socket| socket.shutdown(how)) } /// Get the value of the `SO_ERROR` option on this socket. @@ -541,422 +353,14 @@ impl TcpStream { /// the field in the process. This can be useful for checking errors between /// calls. pub fn take_error(&mut self) -> io::Result> { - self.socket.take_error() - } -} - -/// The [`Future`] behind [`TcpStream::connect`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Connect { - socket: Option, - #[cfg(target_os = "linux")] - cpu_affinity: Option, -} - -impl Future for Connect { - type Output = io::Result; - - #[track_caller] - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - // This relates directly Mio and `kqueue(2)` and `epoll(2)`. To do a - // non-blocking TCP connect properly we need to a couple of things. - // - // 1. Setup a socket and call `connect(2)`. Mio does this for us. - // However it doesn't mean the socket is connected, as we can't - // determine that without blocking. - // 2. To determine if a socket is connected we need to wait for a - // `kqueue(2)`/`epoll(2)` event (we get scheduled once we do). But - // that doesn't tell us whether or not the socket is connected. To - // determine if the socket is connected we need to use `getpeername` - // (`TcpStream::peer_addr`). But before checking if we're connected - // we need to check for a connection error, by checking `SO_ERROR` - // (`TcpStream::take_error`) to not lose that information. - // However if we get an event (and thus get scheduled) and - // `getpeername` fails with `ENOTCONN` it doesn't actually mean the - // socket will never connect properly. So we loop (by returned - // `Poll::Pending`) until either `SO_ERROR` is set or the socket is - // connected. - // - // Sources: - // * https://cr.yp.to/docs/connect.html - // * https://stackoverflow.com/questions/17769964/linux-sockets-non-blocking-connect - match self.socket.take() { - Some(socket) => { - // If we hit an error while connecting return that error. - if let Ok(Some(err)) | Err(err) = socket.take_error() { - return Poll::Ready(Err(err)); - } - - // If we can get a peer address it means the stream is - // connected. - match socket.peer_addr() { - Ok(..) => { - #[allow(unused_mut)] - let mut stream = TcpStream { socket }; - #[cfg(target_os = "linux")] - if let Some(cpu) = self.cpu_affinity { - if let Err(err) = stream.set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on TcpStream: {err}"); - } - } - Poll::Ready(Ok(stream)) - } - // `NotConnected` (`ENOTCONN`) means the socket not yet - // connected, but still working on it. `ECONNREFUSED` will - // be reported if it fails. - Err(err) - if err.kind() == io::ErrorKind::NotConnected - || err.raw_os_error() == Some(libc::EINPROGRESS) => - { - // Socket is not (yet) connected but haven't hit an - // error either. So we return `Pending` and wait for - // another event. 
- self.socket = Some(socket); - Poll::Pending - } - Err(err) => Poll::Ready(Err(err)), - } - } - None => panic!("polled `tcp::stream::Connect` after completion"), - } - } -} - -/// The [`Future`] behind [`TcpStream::send`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Send<'a, 'b> { - stream: &'a mut TcpStream, - buf: &'b [u8], -} - -impl<'a, 'b> Future for Send<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Send { stream, buf } = Pin::into_inner(self); - try_io!(stream.try_send(buf)) - } -} - -/// The [`Future`] behind [`TcpStream::send_all`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendAll<'a, 'b> { - stream: &'a mut TcpStream, - buf: &'b [u8], -} - -impl<'a, 'b> Future for SendAll<'a, 'b> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendAll { stream, buf } = Pin::into_inner(self); - loop { - match stream.try_send(buf) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) if buf.len() <= n => return Poll::Ready(Ok(())), - Ok(n) => { - *buf = &buf[n..]; - // Try to send some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// The [`Future`] behind [`TcpStream::send_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendVectored<'a, 'b> { - stream: &'a mut TcpStream, - bufs: &'b mut [IoSlice<'b>], -} - -impl<'a, 'b> Future for SendVectored<'a, 'b> { - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendVectored { stream, bufs } = Pin::into_inner(self); - try_io!(stream.try_send_vectored(bufs)) - } -} - -/// The [`Future`] behind [`TcpStream::send_vectored_all`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendVectoredAll<'a, 'b> { - stream: &'a mut TcpStream, - bufs: &'b mut [IoSlice<'b>], -} - -impl<'a, 'b> Future for SendVectoredAll<'a, 'b> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendVectoredAll { stream, bufs } = Pin::into_inner(self); - while !bufs.is_empty() { - match stream.try_send_vectored(bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => IoSlice::advance_slices(bufs, n), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - Poll::Ready(Ok(())) + self.with_ref(|socket| socket.take_error()) } -} - -/// The [`Future`] behind [`TcpStream::recv`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Recv<'b, B> { - stream: &'b mut TcpStream, - buf: B, -} - -impl<'b, B> Future for Recv<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Recv { stream, buf } = Pin::into_inner(self); - try_io!(stream.try_recv(&mut *buf)) - } -} - -/// The [`Future`] behind [`TcpStream::peek`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Peek<'b, B> { - stream: &'b mut TcpStream, - buf: B, -} - -impl<'b, B> Future for Peek<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Peek { stream, buf } = Pin::into_inner(self); - try_io!(stream.try_peek(&mut *buf)) - } -} -/// The [`Future`] behind [`TcpStream::recv_n`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvN<'b, B> { - stream: &'b mut TcpStream, - buf: B, - left: usize, -} - -impl<'b, B> Future for RecvN<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvN { stream, buf, left } = Pin::into_inner(self); - loop { - match stream.try_recv(&mut *buf) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())), - Ok(n) if n >= *left => return Poll::Ready(Ok(())), - Ok(n) => { - *left -= n; - // Try to read some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// The [`Future`] behind [`TcpStream::recv_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvVectored<'b, B> { - stream: &'b mut TcpStream, - bufs: B, -} - -impl<'b, B> Future for RecvVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvVectored { stream, bufs } = Pin::into_inner(self); - try_io!(stream.try_recv_vectored(&mut *bufs)) - } -} - -/// The [`Future`] behind [`TcpStream::recv_n_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvNVectored<'b, B> { - stream: &'b mut TcpStream, - bufs: B, - left: usize, -} - -impl<'b, B> Future for RecvNVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvNVectored { stream, bufs, left } = Pin::into_inner(self); - loop { - match stream.try_recv_vectored(&mut *bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())), - Ok(n) if n >= *left => return Poll::Ready(Ok(())), - Ok(n) => { - *left -= n; - // Try to read some more bytes. - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// The [`Future`] behind [`TcpStream::peek_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct PeekVectored<'b, B> { - stream: &'b mut TcpStream, - bufs: B, -} - -impl<'b, B> Future for PeekVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let PeekVectored { stream, bufs } = Pin::into_inner(self); - try_io!(stream.try_peek_vectored(&mut *bufs)) - } -} - -/// The [`Future`] behind [`TcpStream::send_file`]. 
-#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendFile<'a, 'f, F> { - stream: &'a mut TcpStream, - file: &'f F, - offset: usize, - length: Option, -} - -impl<'a, 'f, F> Future for SendFile<'a, 'f, F> -where - F: FileSend, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - #[rustfmt::skip] - let SendFile { stream, file, offset, length } = Pin::into_inner(self); - try_io!(stream.try_send_file(*file, *offset, *length)) - } -} - -/// The [`Future`] behind [`TcpStream::send_file_all`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct SendFileAll<'a, 'f, F> { - stream: &'a mut TcpStream, - file: &'f F, - /// Starting and ending offsets into `file`. - /// If `start >= end` all bytes are send. - start: usize, - end: Option, -} - -impl<'a, 'f, F> Future for SendFileAll<'a, 'f, F> -where - F: FileSend, -{ - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - #[rustfmt::skip] - let SendFileAll { stream, file, start, end } = Pin::into_inner(self); - loop { - let length = end.and_then(|end| NonZeroUsize::new(end.get() - *start)); - match stream.try_send_file(*file, *start, length) { - // If zero bytes are send it means the entire file was send. - Ok(0) => break Poll::Ready(Ok(())), - Ok(n) => { - *start += n; - match end { - Some(end) if *start >= end.get() => break Poll::Ready(Ok(())), - Some(_) | None => { - // If we haven't send all bytes yet, or if we don't - // know when to stop (e.g. in case we want to send - // the entire file) we must try to send more - // bytes because we use edge triggers. - continue; - } - } - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, // Try again. - Err(err) => break Poll::Ready(Err(err)), - } - } - } -} - -/// Trait that determines which types are safe to use in -/// [`TcpStream::try_send_file`], [`TcpStream::send_file`] and -/// [`TcpStream::send_file_all`]. -pub trait FileSend: PrivateFileSend {} - -use private::PrivateFileSend; - -mod private { - use std::fs::File; - use std::os::unix::io::AsRawFd; - - /// Private version of [`FileSend`]. - /// - /// [`FileSend`]: super::FileSend - pub trait PrivateFileSend: AsRawFd {} - - impl super::FileSend for File {} - - impl PrivateFileSend for File {} -} - -impl Bound for TcpStream { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - ctx.runtime() - .reregister(&mut self.socket, Interest::READABLE | Interest::WRITABLE) + fn with_ref(&self, f: F) -> io::Result + where + F: FnOnce(SockRef<'_>) -> io::Result, + { + let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. 
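[Editor's note: the `with_ref` helper here funnels every socket-option call through a borrowed `socket2::SockRef`, so the a10 `AsyncFd` keeps ownership of the descriptor and nothing is closed early. The same idiom works against any type exposing its fd; a standalone sketch using a std `TcpStream` rather than Heph code:

```rust
use std::io;
use std::net::TcpStream;

use socket2::SockRef;

fn tune(stream: &TcpStream) -> io::Result<()> {
    // `SockRef` only borrows the file descriptor; dropping it does not close
    // the socket, which is why creating one per option call is cheap and safe.
    let socket = SockRef::from(stream);
    socket.set_nodelay(true)?;
    socket.set_keepalive(true)?;
    Ok(())
}
```
]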
+ f(SockRef::from(&borrowed)) } } diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 8dcaa92f5..61116e7c9 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -8,8 +8,6 @@ use std::os::fd::AsFd; use std::{fmt, io}; use a10::{AsyncFd, Extract}; -#[cfg(target_os = "linux")] -use log::warn; use socket2::{Domain, Protocol, SockRef, Type}; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; @@ -154,7 +152,7 @@ impl UdpSocket { #[cfg(target_os = "linux")] if let Some(cpu) = rt.cpu() { if let Err(err) = socket.set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on UdpSocket: {err}"); + log::warn!("failed to set CPU affinity on UdpSocket: {err}"); } } diff --git a/rt/tests/functional/tcp/listener.rs b/rt/tests/functional/tcp/listener.rs index 3d518029f..c2645cf7b 100644 --- a/rt/tests/functional/tcp/listener.rs +++ b/rt/tests/functional/tcp/listener.rs @@ -76,19 +76,18 @@ where RT: rt::Access, { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(&mut ctx, address) - .unwrap() + let mut stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); - let n = stream.send(DATA).await.unwrap(); + let (_, n) = stream.send(DATA).await.unwrap(); assert_eq!(n, DATA.len()); } #[test] fn accept() { async fn listener_actor( - mut ctx: actor::Context, + ctx: actor::Context, actor_ref: ActorRef, ) { let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) @@ -98,13 +97,11 @@ fn accept() { let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (stream, remote_address) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); + let (mut stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(DATA.len() + 1); + let buf = stream.recv(buf).await.unwrap(); assert_eq!(buf, DATA); } @@ -123,7 +120,7 @@ fn accept() { #[test] fn incoming() { async fn listener_actor( - mut ctx: actor::Context, + ctx: actor::Context, actor_ref: ActorRef, ) { let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) @@ -134,12 +131,10 @@ fn incoming() { actor_ref.send(address).await.unwrap(); let mut incoming = listener.incoming(); - let stream = next(&mut incoming).await.unwrap().unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); + let mut stream = next(&mut incoming).await.unwrap().unwrap(); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(DATA.len() + 1); + let buf = stream.recv(buf).await.unwrap(); assert_eq!(buf, DATA); } diff --git a/rt/tests/functional/tcp/server.rs b/rt/tests/functional/tcp/server.rs index 0468fc11b..e8bc88413 100644 --- a/rt/tests/functional/tcp/server.rs +++ b/rt/tests/functional/tcp/server.rs @@ -33,9 +33,8 @@ async fn actor(_: actor::Context, mut stream: TcpStream) where RT: rt::Access, { - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(DATA.len() + 1); + let buf = stream.recv(buf).await.unwrap(); assert_eq!(buf, DATA); } @@ -50,7 +49,7 @@ async fn stream_actor( { let mut stream = tcp_connect(&mut ctx, address).await.unwrap(); - let n = stream.send(DATA).await.unwrap(); + let (_, n) = 
stream.send(DATA).await.unwrap(); assert_eq!(n, DATA.len()); // Send a message to stop the listener. diff --git a/rt/tests/functional/tcp/stream.rs b/rt/tests/functional/tcp/stream.rs index e368ce15f..9908389c5 100644 --- a/rt/tests/functional/tcp/stream.rs +++ b/rt/tests/functional/tcp/stream.rs @@ -1,25 +1,23 @@ //! Tests for `TcpStream`. use std::cmp::min; -use std::fs::{self, File}; use std::io::{self, IoSlice, Read, Write}; use std::net::{self, Shutdown, SocketAddr}; -use std::num::NonZeroUsize; -use std::sync::OnceLock; -use std::thread::sleep; use std::time::Duration; use heph::actor; -use heph::actor_ref::{ActorRef, RpcMessage}; +use heph::actor_ref::ActorRef; use heph::supervisor::NoSupervisor; use heph_rt::net::{TcpListener, TcpStream}; use heph_rt::spawn::ActorOptions; use heph_rt::test::{join, join_many, try_spawn_local, PanicSupervisor}; -use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal}; +use heph_rt::ThreadLocal; use crate::util::{any_local_address, refused_address}; const DATA: &[u8] = b"Hello world"; + +/* TODO: add back once we add back `sendfile(2)` support. // Test files used in testing `send_file`. const TEST_FILE0: &str = "./tests/data/hello_world"; const TEST_FILE1: &str = "./tests/data/lorem_ipsum"; @@ -33,6 +31,7 @@ fn expected_data1() -> &'static [u8] { static EXPECTED1: OnceLock> = OnceLock::new(); EXPECTED1.get_or_init(|| fs::read(TEST_FILE1).expect("failed to read test file 1")) } +*/ #[test] fn smoke() { @@ -40,7 +39,7 @@ fn smoke() { mut ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; assert_eq!(stream.peer_addr().unwrap(), address); let local_address = stream.local_addr().unwrap(); @@ -86,8 +85,8 @@ fn smoke() { #[test] fn connect() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; drop(stream); Ok(()) } @@ -114,9 +113,9 @@ fn connect() { ignore = "Fails on the CI; running locally on FreeBSD works, not sure what the problem is" )] fn connect_connection_refused() { - async fn actor(mut ctx: actor::Context) -> io::Result<()> { - let connect = match TcpStream::connect(&mut ctx, refused_address()) { - Ok(connect) => connect, + async fn actor(ctx: actor::Context) -> io::Result<()> { + match TcpStream::connect(ctx.runtime_ref(), refused_address()).await { + Ok(..) => panic!("unexpected success"), Err(err) => { assert_eq!( err.kind(), @@ -125,16 +124,7 @@ fn connect_connection_refused() { ); return Ok(()); } - }; - match connect.await { - Ok(..) 
=> panic!("unexpected success"), - Err(err) => assert_eq!( - err.kind(), - io::ErrorKind::ConnectionRefused, - "unexpected error: {err:?}", - ), } - Ok(()) } let actor = actor as fn(_) -> _; @@ -142,80 +132,19 @@ fn connect_connection_refused() { join(&actor_ref, Duration::from_secs(1)).unwrap(); } -#[test] -fn try_recv() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf = Vec::with_capacity(128); - match stream.try_recv(&mut buf) { - Ok(n) => panic!("unexpected bytes: {buf:?} ({n})"), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {} - Err(err) => return Err(err), - } - - limited_loop! { - match stream.try_recv(&mut buf) { - Ok(n) => { - assert_eq!(n, DATA.len()); - assert_eq!(&*buf, DATA); - break; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - sleep(Duration::from_millis(1)); - continue; - } - Err(err) => return Err(err), - } - } - - // The stream is dropped, so we should read 0. - buf.clear(); - limited_loop! { - match stream.try_recv(&mut buf) { - Ok(n) => { - assert_eq!(n, 0); - break; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - sleep(Duration::from_millis(1)); - continue; - }, - Err(err) => return Err(err), - } - } - - Ok(()) - } - - let listener = net::TcpListener::bind(any_local_address()).unwrap(); - let address = listener.local_addr().unwrap(); - - let actor = actor as fn(_, _) -> _; - let actor_ref = - try_spawn_local(PanicSupervisor, actor, address, ActorOptions::default()).unwrap(); - - let (mut stream, _) = listener.accept().unwrap(); - stream.write_all(&DATA).unwrap(); - drop(stream); - - join(&actor_ref, Duration::from_secs(1)).unwrap(); -} - #[test] fn recv() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf = Vec::with_capacity(128); + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let n = stream.recv(&mut buf).await?; - assert_eq!(n, DATA.len()); - assert_eq!(&*buf, DATA); + let buf = Vec::with_capacity(128); + let mut buf = stream.recv(buf).await?; + assert_eq!(buf, DATA); // The stream is dropped so next we should read 0. buf.clear(); - assert_eq!(stream.recv(&mut buf).await?, 0); + let buf = stream.recv(buf).await?; + assert!(buf.is_empty()); Ok(()) } @@ -236,19 +165,19 @@ fn recv() { #[test] fn recv_n_read_exact_amount() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf = Vec::with_capacity(128); - stream.recv_n(&mut buf, DATA.len()).await?; + let buf = Vec::with_capacity(128); + let mut buf = stream.recv_n(buf, DATA.len()).await?; assert_eq!(buf.len(), DATA.len()); - assert_eq!(&*buf, DATA); + assert_eq!(buf, DATA); // The stream is dropped so next we should read 0, which should cause an // `UnexpectedEof` error. 
buf.clear(); - match stream.recv_n(&mut buf, 10).await { - Ok(()) => panic!("unexpected recv: {buf:?}"), + match stream.recv_n(buf, 10).await { + Ok(buf) => panic!("unexpected recv: {buf:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -270,23 +199,22 @@ fn recv_n_read_exact_amount() { #[test] fn recv_n_read_more_bytes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf = Vec::with_capacity(128); + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let buf = Vec::with_capacity(128); let want_n = DATA.len() - 2; - stream.recv_n(&mut buf, want_n).await?; + let mut buf = stream.recv_n(buf, want_n).await?; // We should still receive all data, not limiting ourselves to `want_n` // bytes. assert_eq!(buf.len(), DATA.len()); - assert_eq!(&*buf, DATA); + assert_eq!(buf, DATA); // The stream is dropped so next we should read 0, which should cause an // `UnexpectedEof` error. buf.clear(); - match stream.recv_n(&mut buf, 10).await { - Ok(()) => panic!("unexpected recv: {buf:?}"), + match stream.recv_n(buf, 10).await { + Ok(buf) => panic!("unexpected recv: {buf:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -308,14 +236,13 @@ fn recv_n_read_more_bytes() { #[test] fn recv_n_less_bytes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf = Vec::with_capacity(128); + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let buf = Vec::with_capacity(128); let want_n = 2 * DATA.len(); - match stream.recv_n(&mut buf, want_n).await { - Ok(()) => panic!("unexpected recv: {buf:?}"), + match stream.recv_n(buf, want_n).await { + Ok(buf) => panic!("unexpected recv: {buf:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -337,11 +264,11 @@ fn recv_n_less_bytes() { #[test] fn recv_n_from_multiple_writes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf = Vec::with_capacity(128); - stream.recv_n(&mut buf, 3 * DATA.len()).await?; + let buf = Vec::with_capacity(128); + let buf = stream.recv_n(buf, 3 * DATA.len()).await?; assert_eq!(&buf[..DATA.len()], DATA); assert_eq!(&buf[DATA.len()..2 * DATA.len()], DATA); assert_eq!(&buf[2 * DATA.len()..], DATA); @@ -366,10 +293,10 @@ fn recv_n_from_multiple_writes() { #[test] fn send() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let n = stream.send(&DATA).await?; + let (_, n) = stream.send(DATA).await?; assert_eq!(n, DATA.len()); Ok(()) @@ -397,9 +324,10 @@ fn send() { fn send_all() { // A lot of data to get at least two write calls. 
const DATA: &[u8] = &[213; 40 * 1024]; - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - stream.send_all(DATA).await + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + stream.send_all(DATA).await?; + Ok(()) } let listener = net::TcpListener::bind(any_local_address()).unwrap(); @@ -428,16 +356,11 @@ fn send_all() { #[test] fn send_vectored() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let bufs = &mut [ - IoSlice::new(DATA), - IoSlice::new(DATA), - IoSlice::new(DATA), - IoSlice::new(DATA), - ]; - let n = stream.send_vectored(bufs).await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + + let bufs = [DATA, DATA, DATA, DATA]; + let (_, n) = stream.send_vectored(bufs).await?; assert_eq!(n, 4 * DATA.len()); Ok(()) @@ -468,10 +391,11 @@ fn send_vectored_all() { // A lot of data to get at least two write calls. const DATA1: &[u8] = &[213; 40 * 1023]; const DATA2: &[u8] = &[155; 30 * 1024]; - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - let bufs = &mut [IoSlice::new(DATA1), IoSlice::new(DATA2)]; - stream.send_vectored_all(bufs).await + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let bufs = [DATA1, DATA2]; + stream.send_vectored_all(bufs).await?; + Ok(()) } let listener = net::TcpListener::bind(any_local_address()).unwrap(); @@ -513,18 +437,18 @@ fn send_vectored_all() { #[test] fn recv_vectored() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf1 = Vec::with_capacity(2 * DATA.len()); - let mut buf2 = Vec::with_capacity(2 * DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2]; - let n = stream.recv_vectored(bufs).await?; - assert_eq!(n, 4 * DATA.len()); - assert_eq!(&buf1[..DATA.len()], DATA); - assert_eq!(&buf1[DATA.len()..], DATA); - assert_eq!(&buf2[..DATA.len()], DATA); - assert_eq!(&buf2[DATA.len()..], DATA); + let bufs = [ + Vec::with_capacity(2 * DATA.len()), + Vec::with_capacity(2 * DATA.len() + 1), + ]; + let bufs = stream.recv_vectored(bufs).await?; + assert_eq!(&bufs[0][..DATA.len()], DATA); + assert_eq!(&bufs[0][DATA.len()..], DATA); + assert_eq!(&bufs[1][..DATA.len()], DATA); + assert_eq!(&bufs[1][DATA.len()..], DATA); Ok(()) } @@ -553,23 +477,24 @@ fn recv_vectored() { #[test] fn recv_n_vectored_exact_amount() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf1 = Vec::with_capacity(DATA.len()); - let mut buf2 = Vec::with_capacity(DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2]; - stream.recv_n_vectored(bufs, 2 * DATA.len()).await?; 
- assert_eq!(buf1, DATA); - assert_eq!(buf2, DATA); + let bufs = [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA.len() + 1), + ]; + let mut bufs = stream.recv_n_vectored(bufs, 2 * DATA.len()).await?; + assert_eq!(bufs[0], DATA); + assert_eq!(bufs[1], DATA); // The stream is dropped so next we should read 0, which should cause an // `UnexpectedEof` error. - buf1.clear(); - buf2.clear(); - let bufs = [&mut buf1, &mut buf2]; + for buf in bufs.iter_mut() { + buf.clear() + } match stream.recv_n_vectored(bufs, 10).await { - Ok(()) => panic!("unexpected recv: {buf1:?}"), + Ok(bufs) => panic!("unexpected recv: {bufs:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -592,23 +517,24 @@ fn recv_n_vectored_exact_amount() { #[test] fn recv_n_vectored_more_bytes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf1 = Vec::with_capacity(DATA.len()); - let mut buf2 = Vec::with_capacity(DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2]; - stream.recv_n_vectored(bufs, (2 * DATA.len()) - 3).await?; - assert_eq!(buf1, DATA); - assert_eq!(buf2, DATA); + let bufs = [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA.len() + 1), + ]; + let mut bufs = stream.recv_n_vectored(bufs, (2 * DATA.len()) - 3).await?; + assert_eq!(bufs[0], DATA); + assert_eq!(bufs[1], DATA); // The stream is dropped so next we should read 0, which should cause an // `UnexpectedEof` error. - buf1.clear(); - buf2.clear(); - let bufs = [&mut buf1, &mut buf2]; + for buf in bufs.iter_mut() { + buf.clear() + } match stream.recv_n_vectored(bufs, 10).await { - Ok(()) => panic!("unexpected recv: {buf1:?}"), + Ok(bufs) => panic!("unexpected recv: {bufs:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -631,14 +557,15 @@ fn recv_n_vectored_more_bytes() { #[test] fn recv_n_vectored_less_bytes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf1 = Vec::with_capacity(DATA.len()); - let mut buf2 = Vec::with_capacity(DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2]; + let bufs = [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA.len() + 1), + ]; match stream.recv_n_vectored(bufs, 2 * DATA.len()).await { - Ok(()) => panic!("unexpected recv: {buf1:?}"), + Ok(bufs) => panic!("unexpected recv: {bufs:?}"), Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => Ok(()), Err(err) => Err(err), } @@ -660,17 +587,18 @@ fn recv_n_vectored_less_bytes() { #[test] fn recv_n_vectored_from_multiple_writes() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf1 = Vec::with_capacity(DATA.len()); - let mut buf2 = Vec::with_capacity(DATA.len()); - let mut buf3 = Vec::with_capacity(DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2, &mut buf3]; - stream.recv_n_vectored(bufs, 3 * DATA.len()).await?; - assert_eq!(buf1, DATA); - assert_eq!(buf2, DATA); - assert_eq!(buf3, DATA); + async fn 
actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + + let bufs = [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA.len() + 1), + ]; + let bufs = stream.recv_n_vectored(bufs, 3 * DATA.len()).await?; + assert_eq!(bufs[0], DATA); + assert_eq!(bufs[1], DATA); + assert_eq!(bufs[2], DATA); Ok(()) } @@ -693,24 +621,22 @@ fn recv_n_vectored_from_multiple_writes() { #[test] fn peek() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; - - let mut buf = Vec::with_capacity(128); + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let n = stream.peek(&mut buf).await?; - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(128); + let mut buf = stream.peek(buf).await?; assert_eq!(&*buf, DATA); // We peeked the data above so we should receive the same data again. buf.clear(); - let n = stream.recv(&mut buf).await?; - assert_eq!(n, DATA.len()); - assert_eq!(&*buf, DATA); + let mut buf = stream.recv(buf).await?; + assert_eq!(buf, DATA); // The stream is dropped so next we should read 0. buf.clear(); - assert_eq!(stream.recv(&mut buf).await?, 0); + let buf = stream.recv(buf).await?; + assert!(buf.is_empty()); Ok(()) } @@ -729,20 +655,21 @@ fn peek() { join(&actor_ref, Duration::from_secs(1)).unwrap(); } +/* TODO: add back `sendfile(2)` support. #[test] fn send_file() { // Should be able to send this many bytes in a single call. const LENGTH: usize = 128; async fn actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, path: &'static str, ) -> io::Result<()> { let file = File::open(path)?; let metadata = file.metadata()?; let length = min(metadata.len(), LENGTH as u64) as usize; - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let n = stream .send_file(&file, 0, NonZeroUsize::new(length)) .await?; @@ -799,14 +726,14 @@ fn send_file_all() { const LENGTH: usize = 1 << 14; // 16kb. 
async fn actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, path: &'static str, ) -> io::Result<()> { let file = File::open(path)?; let metadata = file.metadata()?; let length = min(metadata.len(), LENGTH as u64) as usize; - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; stream .send_file_all(&file, OFFSET, NonZeroUsize::new(length)) .await?; @@ -855,12 +782,12 @@ fn send_file_all() { #[test] fn send_entire_file() { async fn actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, path: &'static str, ) -> io::Result<()> { let file = File::open(path)?; - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; stream.send_entire_file(&file).await } @@ -932,32 +859,32 @@ fn send_file_check_actor( *offset == expected.len() } +*/ #[test] fn peek_vectored() { - async fn actor(mut ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(&mut ctx, address)?.await?; + async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { + let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; - let mut buf1 = Vec::with_capacity(2 * DATA.len()); - let mut buf2 = Vec::with_capacity(2 * DATA.len() + 1); - let bufs = [&mut buf1, &mut buf2]; - let n = stream.peek_vectored(bufs).await?; - assert_eq!(n, 4 * DATA.len()); - assert_eq!(&buf1[..DATA.len()], DATA); - assert_eq!(&buf1[DATA.len()..], DATA); - assert_eq!(&buf2[..DATA.len()], DATA); - assert_eq!(&buf2[DATA.len()..], DATA); + let bufs = [ + Vec::with_capacity(2 * DATA.len()), + Vec::with_capacity(2 * DATA.len() + 1), + ]; + let mut bufs = stream.peek_vectored(bufs).await?; + assert_eq!(&bufs[0][..DATA.len()], DATA); + assert_eq!(&bufs[0][DATA.len()..], DATA); + assert_eq!(&bufs[1][..DATA.len()], DATA); + assert_eq!(&bufs[1][DATA.len()..], DATA); // We should receive the same data again after peeking. - buf1.clear(); - buf2.clear(); - let bufs = [&mut buf1, &mut buf2]; - let n = stream.recv_vectored(bufs).await?; - assert_eq!(n, 4 * DATA.len()); - assert_eq!(&buf1[..DATA.len()], DATA); - assert_eq!(&buf1[DATA.len()..], DATA); - assert_eq!(&buf2[..DATA.len()], DATA); - assert_eq!(&buf2[DATA.len()..], DATA); + for buf in bufs.iter_mut() { + buf.clear() + } + let bufs = stream.recv_vectored(bufs).await?; + assert_eq!(&bufs[0][..DATA.len()], DATA); + assert_eq!(&bufs[0][DATA.len()..], DATA); + assert_eq!(&bufs[1][..DATA.len()], DATA); + assert_eq!(&bufs[1][DATA.len()..], DATA); Ok(()) } @@ -989,7 +916,7 @@ fn peek_vectored() { #[test] fn shutdown_read() { async fn listener_actor( - mut ctx: actor::Context, + ctx: actor::Context, actor_ref: ActorRef, ) { let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) @@ -999,30 +926,26 @@ fn shutdown_read() { let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (stream, remote_address) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); + let (mut stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); // Shutting down the reading side of the peer should return 0 bytes // here. 
- let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(DATA.len() + 1); + let buf = stream.recv(buf).await.unwrap(); assert_eq!(buf, DATA); } async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(&mut ctx, address) - .unwrap() + let mut stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); stream.shutdown(Shutdown::Read).unwrap(); - let mut buf = Vec::with_capacity(2); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, 0); + let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); + assert!(buf.is_empty()); stream.send_all(DATA).await.unwrap(); } @@ -1042,7 +965,7 @@ fn shutdown_read() { #[test] fn shutdown_write() { async fn listener_actor( - mut ctx: actor::Context, + ctx: actor::Context, actor_ref: ActorRef, ) { let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) @@ -1052,22 +975,19 @@ fn shutdown_write() { let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (stream, remote_address) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); + let (mut stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); // Shutting down the writing side of the peer should return EOF here. - let mut buf = Vec::with_capacity(2); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, 0); + let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); + assert!(buf.is_empty()); stream.send_all(DATA).await.unwrap(); } async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(&mut ctx, address) - .unwrap() + let mut stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); @@ -1076,9 +996,8 @@ fn shutdown_write() { let err = stream.send(DATA).await.unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::BrokenPipe); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, DATA.len()); + let buf = Vec::with_capacity(DATA.len() + 1); + let buf = stream.recv(buf).await.unwrap(); assert_eq!(buf, DATA); } @@ -1097,7 +1016,7 @@ fn shutdown_write() { #[test] fn shutdown_both() { async fn listener_actor( - mut ctx: actor::Context, + ctx: actor::Context, actor_ref: ActorRef, ) { let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) @@ -1107,19 +1026,16 @@ fn shutdown_both() { let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (stream, remote_address) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); + let (mut stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); - let mut buf = Vec::with_capacity(2); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, 0); + let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); + assert!(buf.is_empty()); } async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(&mut ctx, address) - .unwrap() + let mut stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); @@ -1128,9 +1044,8 @@ fn shutdown_both() { let err = stream.send(DATA).await.unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::BrokenPipe); - let 
mut buf = Vec::with_capacity(2); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(n, 0); + let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); + assert!(buf.is_empty()); } let stream_actor = stream_actor as fn(_) -> _; @@ -1144,79 +1059,3 @@ fn shutdown_both() { join_many(&[stream_ref, listener_ref], Duration::from_secs(1)).unwrap(); } - -#[test] -fn actor_bound() { - type Message = RpcMessage; - - async fn actor1(mut ctx: actor::Context, actor_ref: ActorRef) - where - RT: rt::Access, - { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) - .await - .unwrap(); - let peer_address = listener.local_addr().unwrap(); - - let stream = TcpStream::connect(&mut ctx, peer_address) - .unwrap() - .await - .unwrap(); - let _ = actor_ref.rpc(stream).await.unwrap(); - - let (stream, _) = listener.accept().await.unwrap(); - let mut stream = stream.bind_to(&mut ctx).unwrap(); - stream.send_all(DATA).await.unwrap(); - } - - async fn actor2(mut ctx: actor::Context) - where - RT: rt::Access, - { - let msg = ctx.receive_next().await.unwrap(); - let mut stream = msg.request; - stream.bind_to(&mut ctx).unwrap(); - msg.response.respond(()).unwrap(); - let mut buf = Vec::with_capacity(DATA.len() + 1); - let n = stream.recv(&mut buf).await.unwrap(); - assert_eq!(buf, DATA); - assert_eq!(n, DATA.len()); - } - - fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { - // Spawn thread-local actors. - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - Ok(()) - } - - let mut runtime = Runtime::setup().build().unwrap(); - runtime.run_on_workers(setup).unwrap(); - - // Spawn thread-safe actors. 
- let actor_ref = runtime.spawn( - NoSupervisor, - actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - runtime.start().unwrap(); -} diff --git a/rt/tests/util/mod.rs b/rt/tests/util/mod.rs index a13721990..98802804c 100644 --- a/rt/tests/util/mod.rs +++ b/rt/tests/util/mod.rs @@ -200,7 +200,7 @@ where { let mut i = 10; loop { - match TcpStream::connect(ctx, address).unwrap().await { + match TcpStream::connect(ctx.runtime_ref(), address).await { Ok(stream) => break Ok(stream), Err(_) if i >= 1 => { Timer::after(ctx, Duration::from_millis(1)).await; From e2b922ca47f3f8d9fda7ea43078e44f568123833 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 12:57:03 +0200 Subject: [PATCH 039/177] Update to socket2 v0.5 --- rt/Cargo.toml | 3 +-- rt/src/lib.rs | 2 +- rt/src/net/tcp/listener.rs | 4 +--- rt/src/net/tcp/stream.rs | 4 +--- rt/src/net/udp.rs | 4 +--- rt/src/net/uds/datagram.rs | 5 ++--- rt/tests/functional/runtime.rs | 2 +- 7 files changed, 8 insertions(+), 16 deletions(-) diff --git a/rt/Cargo.toml b/rt/Cargo.toml index f0bf0681b..133ec482b 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -25,8 +25,7 @@ crossbeam-channel = { version = "0.5.0", default-features = false, features = [" libc = { version = "0.2.96", default-features = false } mio = { version = "0.8.0", default-features = false, features = ["os-poll", "net"] } mio-signals = { version = "0.2.0", default-features = false } -# TODO: update to v0.5.0 -socket2 = { version = "0.4.0", default-features = false, features = ["all"], git = "https://github.com/Thomasdezeeuw/socket2", branch = "heph-v0.4" } +socket2 = { version = "0.5.2", default-features = false, features = ["all"] } [dev-dependencies] getrandom = { version = "0.2.2", default-features = false, features = ["std"] } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 6fec10df6..b8f740395 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -181,7 +181,7 @@ // the test feature. #![doc(cfg_hide(any(test, feature = "test")))] -#[cfg(not(any(target_os = "linux")))] +#[cfg(not(target_os = "linux"))] compile_error!("Heph currently only supports Linux."); #[cfg(not(target_pointer_width = "64"))] compile_error!("Heph currently only supports 64 bit architectures."); diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 71a974c27..7d4ef0388 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -2,7 +2,6 @@ use std::async_iter::AsyncIterator; use std::net::SocketAddr; -use std::os::fd::AsFd; use std::pin::Pin; use std::task::{self, Poll}; use std::{fmt, io}; @@ -276,8 +275,7 @@ impl TcpListener { where F: FnOnce(SockRef<'_>) -> io::Result, { - let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. - f(SockRef::from(&borrowed)) + f(SockRef::from(&self.fd)) } } diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index e28f3fe52..51f983ca3 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -2,7 +2,6 @@ use std::io; use std::net::{Shutdown, SocketAddr}; -use std::os::fd::AsFd; use a10::{AsyncFd, Extract}; use socket2::{Domain, Protocol, SockRef, Type}; @@ -360,7 +359,6 @@ impl TcpStream { where F: FnOnce(SockRef<'_>) -> io::Result, { - let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. 
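With socket2 0.5 a `SockRef` can be borrowed directly from the `a10::AsyncFd`, which is what lets the temporary `as_fd()` binding above go away. A rough sketch of that conversion outside of Heph (the helper function and its name are invented for illustration; `nodelay` stands in for whichever socket option the caller needs):

    use std::io;
    use std::os::fd::AsFd;

    use socket2::SockRef;

    // Borrow a `SockRef` from any type implementing `AsFd` and read a socket
    // option through it, mirroring the `with_ref` helpers on the listener,
    // stream, UDP and Unix datagram types in this series.
    fn is_nodelay<F: AsFd>(fd: &F) -> io::Result<bool> {
        SockRef::from(fd).nodelay()
    }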
- f(SockRef::from(&borrowed)) + f(SockRef::from(&self.fd)) } } diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 61116e7c9..2ece4d0d4 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -4,7 +4,6 @@ use std::marker::PhantomData; use std::net::SocketAddr; -use std::os::fd::AsFd; use std::{fmt, io}; use a10::{AsyncFd, Extract}; @@ -199,8 +198,7 @@ impl UdpSocket { where F: FnOnce(SockRef<'_>) -> io::Result, { - let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. - f(SockRef::from(&borrowed)) + f(SockRef::from(&self.fd)) } } diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 853adeccc..6bd02a652 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -1,6 +1,6 @@ use std::marker::PhantomData; use std::net::Shutdown; -use std::os::fd::{AsFd, IntoRawFd}; +use std::os::fd::IntoRawFd; use std::{fmt, io}; use a10::{AsyncFd, Extract}; @@ -154,8 +154,7 @@ impl UnixDatagram { where F: FnOnce(SockRef<'_>) -> io::Result, { - let borrowed = self.fd.as_fd(); // TODO: remove this once we update to socket2 v0.5. - f(SockRef::from(&borrowed)) + f(SockRef::from(&self.fd)) } } diff --git a/rt/tests/functional/runtime.rs b/rt/tests/functional/runtime.rs index 576af2682..ac151d513 100644 --- a/rt/tests/functional/runtime.rs +++ b/rt/tests/functional/runtime.rs @@ -48,7 +48,7 @@ fn auto_cpu_affinity() { fn cpu_affinity(stream: &TcpStream) -> io::Result { // TODO: do this better. let socket = - SockRef::from(unsafe { &*(stream as *const TcpStream as *const mio::net::TcpStream) }); + SockRef::from(unsafe { &*(stream as *const TcpStream as *const a10::AsyncFd) }); socket.cpu_affinity() } From a7a1a409fac16514c3b5519d7c233d7f2d40a2c9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:05:56 +0200 Subject: [PATCH 040/177] Ignore the clipy::future_not_send lint It triggers for any async function using a generic paramters that doesn't have a Send trait bound, which is not something we want to require everywhere. --- Makefile.include | 2 ++ http/src/client.rs | 2 -- http/src/server.rs | 3 --- remote/src/net_relay/tcp.rs | 3 --- remote/src/net_relay/udp.rs | 3 --- rt/src/systemd.rs | 1 - 6 files changed, 2 insertions(+), 12 deletions(-) diff --git a/Makefile.include b/Makefile.include index b4d02833d..10c55f068 100644 --- a/Makefile.include +++ b/Makefile.include @@ -40,6 +40,7 @@ $(TARGETS): # Reasons to allow lints: # `cargo-common-metadata`: for `benches` and `tools`. # `equatable-if-let`: bad lint. +# `future-not-send`: we don't want to require all generic parameters to be `Send`. # `manual-let-else`: not really a fan of this. # `match-bool`, `single-match-else`: often less lines of code and I find that # use `match` generally strictly better then `if`s. @@ -70,6 +71,7 @@ clippy: --allow clippy::cargo-common-metadata \ --allow clippy::enum-glob-use \ --allow clippy::equatable-if-let \ + --allow clippy::future-not-send \ --allow clippy::manual-let-else \ --allow clippy::match-bool \ --allow clippy::missing-const-for-fn \ diff --git a/http/src/client.rs b/http/src/client.rs index 2aa57b08b..8d4265077 100644 --- a/http/src/client.rs +++ b/http/src/client.rs @@ -67,7 +67,6 @@ impl Client { /// /// If the server doesn't respond this return an [`io::Error`] with /// [`io::ErrorKind::UnexpectedEof`]. - #[allow(clippy::future_not_send)] // TODO. 
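The reasoning in the commit message above is easiest to see in a standalone sketch (invented for illustration, not code from this patch): an `async fn` that keeps a generic parameter without a `Send` bound alive across an `.await` yields a future that cannot be proven `Send`, so `clippy::future_not_send` warns even when the future is never sent across threads.

    // `RT` has no `Send` bound, so the returned future is only `Send` when
    // `RT` happens to be, and Clippy's `future_not_send` lint fires.
    pub async fn ping<RT>(rt: RT) -> RT {
        // `rt` is live across this await point and therefore stored in the
        // future's state.
        std::future::ready(()).await;
        rt
    }

Requiring `RT: Send` on every such function is exactly what the Makefile comment rejects, hence the crate-wide `--allow` replacing the per-function `#[allow(clippy::future_not_send)]` attributes removed in this patch.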
pub async fn request<'c, 'b, B>( &'c mut self, method: Method, @@ -99,7 +98,6 @@ impl Client { /// Sets the following headers if not present in `Headers`: /// * User-Agent and /// * Content-Length and/or Transfer-Encoding based on the `body`. - #[allow(clippy::future_not_send)] // TODO. pub async fn send_request<'b, B>( &mut self, method: Method, diff --git a/http/src/server.rs b/http/src/server.rs index b5f425512..0384ba0e0 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -652,7 +652,6 @@ impl Connection { /// /// See the notes for [`Connection::send_response`], they apply to this /// function also. - #[allow(clippy::future_not_send)] // TODO. pub async fn respond<'b, B>( &mut self, status: StatusCode, @@ -671,7 +670,6 @@ impl Connection { /// Respond to the last parsed request with `response`. /// /// See [`Connection::respond`] for more documentation. - #[allow(clippy::future_not_send)] // TODO. pub async fn respond_with<'b, B>(&mut self, response: Response) -> io::Result<()> where B: crate::Body<'b>, @@ -703,7 +701,6 @@ impl Connection { /// /// [`expects_body()`]: Method::expects_body /// [`includes_body()`]: StatusCode::includes_body - #[allow(clippy::future_not_send)] // TODO. pub async fn send_response<'b, B>( &mut self, request_method: Method, diff --git a/remote/src/net_relay/tcp.rs b/remote/src/net_relay/tcp.rs index 1e128a6ae..e4c3bd61c 100644 --- a/remote/src/net_relay/tcp.rs +++ b/remote/src/net_relay/tcp.rs @@ -57,7 +57,6 @@ impl TryFrom for RelayMessage { /// It receives `Out`going messages from it's inbox and sends them to a remote /// actor using TCP. Any `In`coming message on the same socket will be routed /// using the `R`outer. -#[allow(clippy::future_not_send)] pub(crate) async fn remote_relay( mut ctx: actor::Context, RT>, remote_address: SocketAddr, @@ -91,7 +90,6 @@ where } /// Send a `msg` to the remote actor, using `stream`. -#[allow(clippy::future_not_send)] async fn send_message( stream: &mut TcpStream, buf: &mut Vec, @@ -117,7 +115,6 @@ where /// Routes all messages in `buf` using `router`. /// /// Returns an error if the message can't be routed or can't be deserialised. -#[allow(clippy::future_not_send)] async fn route_messages( router: &mut R, buf: &mut Vec, diff --git a/remote/src/net_relay/udp.rs b/remote/src/net_relay/udp.rs index 1b6f77ecc..e3b65448e 100644 --- a/remote/src/net_relay/udp.rs +++ b/remote/src/net_relay/udp.rs @@ -83,7 +83,6 @@ impl TryFrom for UdpRelayMessage { /// It receives `Out`going messages from it's inbox and sends them to a remote /// actor using UDP. Any `In`coming message on the same socket will be routed /// using the `R`outer. -#[allow(clippy::future_not_send)] pub(crate) async fn remote_relay( mut ctx: actor::Context, RT>, local_address: SocketAddr, @@ -119,7 +118,6 @@ where } /// Send a `msg` to a remote actor at `target` address, using `socket`. -#[allow(clippy::future_not_send)] async fn send_message( socket: &mut UdpSocket, buf: &mut Vec, @@ -162,7 +160,6 @@ where /// /// Returns an error if the message can't be routed. Errors from deserialising /// the message in `buf` are only logged using `warn!`. -#[allow(clippy::future_not_send)] async fn route_message(router: &mut R, buf: &[u8], source: SocketAddr) -> io::Result<()> where S: Serde, diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 2a7b370a9..67bddeaa1 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -273,7 +273,6 @@ pub enum State { /// /// If no systemd watchdog is active (i.e. 
`WatchdogSec` is not set in the /// systemd service configuration) this will not call `health_check`. -#[allow(clippy::future_not_send)] pub async fn watchdog( mut ctx: actor::Context, mut health_check: H, From 4d3aa5ed4bf97a4cd2195161ea3af413acb2afd4 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:14:06 +0200 Subject: [PATCH 041/177] Fix Clippy lints --- rt/src/io/buf.rs | 8 +++++--- rt/src/net/mod.rs | 7 ++++++- rt/src/net/tcp/listener.rs | 1 + rt/src/net/uds/mod.rs | 3 ++- rt/src/pipe.rs | 9 +++------ 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 090c41ca2..c4d0b8729 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -262,7 +262,7 @@ unsafe impl Buf for String { // will also be leaked. unsafe impl Buf for Arc<[u8]> { unsafe fn parts(&self) -> (*const u8, usize) { - let slice: &[u8] = &*self; + let slice: &[u8] = self; (slice.as_ptr().cast(), slice.len()) } } @@ -450,6 +450,7 @@ mod private { pub(crate) struct BufWrapper(pub(crate) B); unsafe impl a10::io::BufMut for BufWrapper { + #[allow(clippy::cast_possible_truncation)] unsafe fn parts_mut(&mut self) -> (*mut u8, u32) { let (ptr, size) = self.0.parts_mut(); (ptr, size as u32) @@ -479,6 +480,7 @@ unsafe impl BufMut for BufWrapper { } unsafe impl a10::io::Buf for BufWrapper { + #[allow(clippy::cast_possible_truncation)] unsafe fn parts(&self) -> (*const u8, u32) { let (ptr, size) = self.0.parts(); (ptr, size as u32) @@ -497,7 +499,7 @@ unsafe impl, const N: usize> a10::io::BufMutSlice for BufWr } unsafe fn set_init(&mut self, n: usize) { - self.0.update_length(n) + self.0.update_length(n); } } @@ -517,7 +519,7 @@ unsafe impl, const N: usize> private::BufMutSlice for BufWr } unsafe fn update_length(&mut self, n: usize) { - self.0.update_length(n) + self.0.update_length(n); } } diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 356236ec6..23d9dc7ca 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -76,6 +76,7 @@ impl From for SockAddr { } impl From for SockAddr { + #[allow(clippy::cast_possible_truncation)] fn from(addr: SocketAddrV4) -> SockAddr { SockAddr { ipv4: libc::sockaddr_in { @@ -91,6 +92,7 @@ impl From for SockAddr { } impl From for SockAddr { + #[allow(clippy::cast_possible_truncation)] fn from(addr: SocketAddrV6) -> SockAddr { SockAddr { ipv6: libc::sockaddr_in6 { @@ -107,6 +109,7 @@ impl From for SockAddr { } impl From for SocketAddr { + #[allow(clippy::cast_lossless)] fn from(addr: SockAddr) -> SocketAddr { match unsafe { addr.ip.sa_family as _ } { libc::AF_INET => { @@ -132,6 +135,7 @@ impl From for SocketAddr { } impl a10::net::SocketAddress for SockAddr { + #[allow(clippy::cast_lossless)] unsafe fn as_ptr(&self) -> (*const libc::sockaddr, libc::socklen_t) { match unsafe { self.ip.sa_family as _ } { libc::AF_INET => self.ipv4.as_ptr(), @@ -140,6 +144,7 @@ impl a10::net::SocketAddress for SockAddr { } } + #[allow(clippy::cast_possible_truncation)] unsafe fn as_mut_ptr(this: &mut MaybeUninit) -> (*mut libc::sockaddr, libc::socklen_t) { ( ptr::addr_of_mut!(*this.as_mut_ptr()).cast(), @@ -148,7 +153,7 @@ impl a10::net::SocketAddress for SockAddr { } unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { - debug_assert!(length >= size_of::() as _); + debug_assert!(length as usize >= size_of::()); // SAFETY: caller must initialise the address. 
this.assume_init() } diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 7d4ef0388..77b382181 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -258,6 +258,7 @@ impl TcpListener { /// /// The CPU affinity is **not** set on the returned TCP stream. To set that /// use [`TcpStream::set_auto_cpu_affinity`]. + #[allow(clippy::doc_markdown)] // For "io_uring". pub fn incoming(&mut self) -> Incoming<'_> { Incoming(self.fd.multishot_accept()) } diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index c9dec50de..d613aaf46 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -40,6 +40,7 @@ impl a10::net::SocketAddress for UnixAddr { (self.inner.as_ptr(), self.inner.len()) } + #[allow(clippy::cast_possible_truncation)] unsafe fn as_mut_ptr(this: &mut MaybeUninit) -> (*mut libc::sockaddr, libc::socklen_t) { ( ptr::addr_of_mut!((*this.as_mut_ptr()).inner).cast(), @@ -48,7 +49,7 @@ impl a10::net::SocketAddress for UnixAddr { } unsafe fn init(this: MaybeUninit, length: libc::socklen_t) -> Self { - debug_assert!(length >= size_of::() as _); + debug_assert!(length as usize >= size_of::()); // SAFETY: caller must initialise the address. let mut this = this.assume_init(); this.inner.set_length(length); diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index b64a00cf7..b7997bf07 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -145,9 +145,8 @@ impl Sender { where RT: rt::Access, { - let sq = rt.submission_queue(); // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), sq.clone()) }; + let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), rt.submission_queue()) }; Ok(Sender { fd }) } @@ -206,9 +205,8 @@ impl Receiver { where RT: rt::Access, { - let sq = rt.submission_queue(); // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), sq.clone()) }; + let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } @@ -217,9 +215,8 @@ impl Receiver { where RT: rt::Access, { - let sq = rt.submission_queue(); // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), sq.clone()) }; + let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } From 73c3b07646c0ee6b2dc18b83448e902909f91e5d Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:15:16 +0200 Subject: [PATCH 042/177] Add Safety sections to private buf traits Fixes Clippy lints. --- rt/src/io/buf.rs | 8 ++++++++ rt/src/net/uds/mod.rs | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index c4d0b8729..aa1369b90 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -409,6 +409,10 @@ mod private { /// Private version of [`BufMutSlice`]. /// /// [`BufMutSlice`]: crate::io::BufMutSlice + /// + /// # Safety + /// + /// See the [`a10::io::BufMutSlice`] trait. pub unsafe trait BufMutSlice: 'static { /// Returns the writable buffers as `iovec` structures. /// @@ -435,6 +439,10 @@ mod private { /// Private version of [`BufSlice`]. /// /// [`BufSlice`]: crate::io::BufSlice + /// + /// # Safety + /// + /// See the [`a10::io::BufSlice`] trait. pub unsafe trait BufSlice: 'static { /// Returns the reabable buffer as `iovec` structures. 
/// diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index d613aaf46..42a783a7c 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -2,8 +2,8 @@ //! //! Three main types are provided: //! -//! * [`UnixListener`] listens for incoming Unix connections. -//! * [`UnixStream`] represents a Unix stream socket. +//! * `UnixListener` listens for incoming Unix connections. +//! * `UnixStream` represents a Unix stream socket. //! * [`UnixDatagram`] represents a Unix datagram socket. use std::mem::{size_of, MaybeUninit}; From aa9bb51ea0e3537a0139093fa1e82d7c6097312c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:17:27 +0200 Subject: [PATCH 043/177] Remove Cirrus CI setup We don't support FreeBSD any more. --- .cirrus.yml | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 .cirrus.yml diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 006c2f5e1..000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,18 +0,0 @@ -freebsd_instance: - image: freebsd-12-2-release-amd64 -env: - RUST_BACKTRACE: full -task: - name: FreeBSD - timeout_in: 15m - cargo_cache: - folder: $HOME/.cargo/registry - setup_script: - - pkg install -y curl - - curl https://sh.rustup.rs -sSf --output rustup.sh - - sh rustup.sh -y --profile minimal --default-toolchain nightly - test_script: - - . $HOME/.cargo/env - - cargo test --all-features - before_cache_script: - - rm -rf $HOME/.cargo/registry/index From 6ddd11b882e0b26b06a720587ae17efad81ada93 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:17:59 +0200 Subject: [PATCH 044/177] Remove macOS from GitHub Actions No longer supported. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 47d1b0445..cf45a8172 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest] steps: - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@nightly From b99f06a16436da31533d25f95ffe9726f5ee1d02 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 13:22:18 +0200 Subject: [PATCH 045/177] Update NewActor::map_arg example It still used the old TcpServer. --- src/actor/mod.rs | 45 +++++++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/src/actor/mod.rs b/src/actor/mod.rs index b8c47b020..bdbcb4aad 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -234,17 +234,16 @@ pub trait NewActor { /// /// # Examples /// - /// Using `TcpServer` (from the Heph-rt crate) requires a `NewActor` that - /// accepts `(TcpStream, SocketAddr)` as arguments, but we need to pass the - /// actor additional arguments. + /// Using TCP server (from the Heph-rt crate) requires a `NewActor` that + /// accepts `TcpStream` as arguments, but we need to pass the actor + /// additional arguments. /// /// ``` /// # #![feature(never_type)] /// use std::io; - /// use std::net::SocketAddr; /// use heph::actor::{self, NewActor}; /// # use heph::messages::Terminate; - /// use heph_rt::net::{TcpServer, TcpStream}; + /// use heph_rt::net::{tcp, TcpStream}; /// # use heph_rt::net::tcp::server; /// use heph_rt::spawn::ActorOptions; /// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; @@ -258,7 +257,7 @@ pub trait NewActor { /// runtime.start() /// } /// - /// /// In this setup function we'll spawn the `TcpServer` actor. 
+ /// /// In this setup function we'll spawn the TCP server actor. /// fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { /// // Prepare for humans' expansion to Mars. /// let greet_mars = true; @@ -266,15 +265,15 @@ pub trait NewActor { /// // We convert our actor that accepts three arguments into an actor /// // that accept two arguments and gets `greet_mars` passed to it as /// // third argument. - /// let new_actor = (conn_actor as fn(_, _, _, _) -> _) - /// .map_arg(move |(stream, address)| (stream, address, greet_mars)); + /// let new_actor = (conn_actor as fn(_, _, _) -> _) + /// .map_arg(move |stream| (stream, greet_mars)); /// /// // For more information about the remainder of this example see - /// // `TcpServer` in the heph-rt crate. + /// // the `net::tcp::server` module in the heph-rt crate. /// let address = "127.0.0.1:7890".parse().unwrap(); - /// let server = TcpServer::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; + /// let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; /// # let actor_ref = - /// runtime_ref.try_spawn_local(ServerSupervisor, server, (), ActorOptions::default())?; + /// runtime_ref.spawn_local(ServerSupervisor, server, (), ActorOptions::default()); /// # actor_ref.try_send(Terminate).unwrap(); /// Ok(()) /// } @@ -285,7 +284,7 @@ pub trait NewActor { /// # impl Supervisor> for ServerSupervisor /// # where /// # S: Supervisor + Clone + 'static, - /// # NA: NewActor + Clone + 'static, + /// # NA: NewActor + Clone + 'static, /// # { /// # fn decide(&mut self, err: server::Error) -> SupervisorStrategy<()> { /// # use server::Error::*; @@ -298,36 +297,34 @@ pub trait NewActor { /// # } /// # } /// # - /// # fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - /// # error!("error restarting the TCP server: {err}"); - /// # SupervisorStrategy::Stop + /// # fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { + /// # err /// # } /// # - /// # fn second_restart_error(&mut self, _: io::Error) { - /// # // We don't restart a second time, so this will never be called. - /// # unreachable!(); + /// # fn second_restart_error(&mut self, err: !) { + /// # err /// # } /// # } /// # - /// # fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { + /// # fn conn_supervisor(err: io::Error) -> SupervisorStrategy { /// # error!("error handling connection: {err}"); /// # SupervisorStrategy::Stop /// # } /// # /// // Actor that handles a connection. /// async fn conn_actor( - /// _: actor::Context, + /// ctx: actor::Context, /// mut stream: TcpStream, - /// address: SocketAddr, /// greet_mars: bool /// ) -> io::Result<()> { - /// # drop(address); // Silence dead code warnings. + /// # drop(ctx); // Silence dead code warnings. /// if greet_mars { /// // In case this example ever reaches Mars. - /// stream.send_all(b"Hello Mars").await + /// stream.send_all("Hello Mars").await?; /// } else { - /// stream.send_all(b"Hello World").await + /// stream.send_all("Hello World").await?; /// } + /// Ok(()) /// } /// ``` fn map_arg(self, f: F) -> ArgMap From fc0fa4b7a1b4f4701a96f55c6e1d7494342ce4f0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 14:12:44 +0200 Subject: [PATCH 046/177] Change method to be by reference on I/O types Because we need ownership of the buffer for io_uring it's harder to cancel I/O operations than with epoll. 
This means that we've effectively take ownership for the duration for, e.g., a receive call, preventing us from sending on the socket concurrently. --- rt/examples/2_my_ip.rs | 2 +- rt/examples/9_systemd.rs | 2 +- rt/examples/redis.rs | 2 +- rt/src/net/tcp/listener.rs | 24 ++++----- rt/src/net/tcp/server.rs | 10 ++-- rt/src/net/tcp/stream.rs | 60 ++++++++++----------- rt/src/net/udp.rs | 44 ++++++---------- rt/src/net/uds/datagram.rs | 42 ++++++--------- rt/src/pipe.rs | 31 +++++------ rt/src/systemd.rs | 10 ++-- rt/tests/functional/pipe.rs | 12 ++--- rt/tests/functional/tcp/listener.rs | 20 +++---- rt/tests/functional/tcp/server.rs | 4 +- rt/tests/functional/tcp/stream.rs | 82 ++++++++++++++--------------- rt/tests/functional/udp.rs | 14 ++--- rt/tests/functional/uds/datagram.rs | 6 +-- rt/tests/regression/issue_145.rs | 4 +- 17 files changed, 170 insertions(+), 199 deletions(-) diff --git a/rt/examples/2_my_ip.rs b/rt/examples/2_my_ip.rs index 17b184ff5..3a89589fa 100644 --- a/rt/examples/2_my_ip.rs +++ b/rt/examples/2_my_ip.rs @@ -94,7 +94,7 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy { /// /// This actor will not receive any message and thus uses `!` (the never type) /// as message type. -async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { let address = stream.peer_addr()?; info!(address = log::as_display!(address); "accepted connection"); diff --git a/rt/examples/9_systemd.rs b/rt/examples/9_systemd.rs index ee24fae21..a98df83a1 100644 --- a/rt/examples/9_systemd.rs +++ b/rt/examples/9_systemd.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), rt::Error> { restart_supervisor!(ServerSupervisor, "TCP server actor", ()); -async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { let address = stream.peer_addr()?; info!("accepted connection: address={address}"); let ip = address.ip().to_string(); diff --git a/rt/examples/redis.rs b/rt/examples/redis.rs index 69f1f0d98..455f1ef86 100644 --- a/rt/examples/redis.rs +++ b/rt/examples/redis.rs @@ -93,7 +93,7 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy { async fn conn_actor( mut ctx: actor::Context, - mut stream: TcpStream, + stream: TcpStream, values: Arc, Arc<[u8]>>>>, ) -> io::Result<()> where diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 77b382181..4dbff270c 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -60,7 +60,7 @@ use crate::{self as rt}; /// } /// # /// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; +/// # let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// # let local_address = stream.local_addr()?.to_string(); /// # let buf = Vec::with_capacity(local_address.len() + 1); /// # let buf = stream.recv_n(buf, local_address.len()).await?; @@ -76,10 +76,10 @@ use crate::{self as rt}; /// /// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. -/// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; +/// let listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// /// // Accept a connection. 
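A compact way to read the new API, pieced together from the doc examples in this diff rather than taken from any one of them: the I/O methods borrow the socket as `&self`, take ownership of the buffer for the duration of the operation, and hand the buffer back once the kernel has finished with it.

    use std::io;

    use heph_rt::net::TcpStream;

    // Sketch of the buffer-ownership calling convention: `send_all` consumes
    // its payload and `recv` consumes the spare-capacity buffer, each
    // returning the buffer on completion, so neither call needs `&mut self`.
    async fn greet_then_read(stream: &TcpStream) -> io::Result<Vec<u8>> {
        stream.send_all("Hello world!").await?;
        let buf = Vec::with_capacity(4 * 1024); // 4 KB.
        stream.recv(buf).await
    }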
-/// let (mut stream, peer_address) = listener.accept().await?; +/// let (stream, peer_address) = listener.accept().await?; /// info!("accepted connection from: {peer_address}"); /// /// // Next we write the IP address to the connection. @@ -124,7 +124,7 @@ use crate::{self as rt}; /// } /// # /// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; +/// # let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// # let local_address = stream.local_addr()?.to_string(); /// # let buf = Vec::with_capacity(local_address.len() + 1); /// # let buf = stream.recv_n(buf, local_address.len()).await?; @@ -140,10 +140,10 @@ use crate::{self as rt}; /// /// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. -/// let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await?; +/// let listener = TcpListener::bind(ctx.runtime_ref(), address).await?; /// let mut incoming = listener.incoming(); /// loop { -/// let mut stream = match next(&mut incoming).await { +/// let stream = match next(&mut incoming).await { /// Some(Ok(stream)) => stream, /// Some(Err(err)) => return Err(err), /// None => return Ok(()), @@ -216,17 +216,17 @@ impl TcpListener { } /// Returns the local socket address of this listener. - pub fn local_addr(&mut self) -> io::Result { + pub fn local_addr(&self) -> io::Result { self.with_ref(|socket| socket.local_addr().and_then(convert_address)) } /// Sets the value for the `IP_TTL` option on this socket. - pub fn set_ttl(&mut self, ttl: u32) -> io::Result<()> { + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.with_ref(|socket| socket.set_ttl(ttl)) } /// Gets the value of the `IP_TTL` option for this socket. - pub fn ttl(&mut self) -> io::Result { + pub fn ttl(&self) -> io::Result { self.with_ref(|socket| socket.ttl()) } @@ -239,7 +239,7 @@ impl TcpListener { /// /// The CPU affinity is **not** set on the returned TCP stream. To set that /// use [`TcpStream::set_auto_cpu_affinity`]. - pub async fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { + pub async fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { self.fd .accept::() .await @@ -259,7 +259,7 @@ impl TcpListener { /// The CPU affinity is **not** set on the returned TCP stream. To set that /// use [`TcpStream::set_auto_cpu_affinity`]. #[allow(clippy::doc_markdown)] // For "io_uring". - pub fn incoming(&mut self) -> Incoming<'_> { + pub fn incoming(&self) -> Incoming<'_> { Incoming(self.fd.multishot_accept()) } @@ -268,7 +268,7 @@ impl TcpListener { /// This will retrieve the stored error in the underlying socket, clearing /// the field in the process. This can be useful for checking errors between /// calls. - pub fn take_error(&mut self) -> io::Result> { + pub fn take_error(&self) -> io::Result> { self.with_ref(|socket| socket.take_error()) } diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index d778c3d51..4253e9943 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -104,7 +104,7 @@ //! } //! //! /// The actor responsible for a single TCP stream. -//! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { //! stream.send_all("Hello World").await?; //! Ok(()) //! } @@ -184,7 +184,7 @@ //! # } //! # //! /// The actor responsible for a single TCP stream. -//! 
async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { //! stream.send_all("Hello World").await?; //! Ok(()) //! } @@ -267,7 +267,7 @@ //! } //! //! /// The actor responsible for a single TCP stream. -//! async fn conn_actor(_: actor::Context, mut stream: TcpStream) -> io::Result<()> { +//! async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { //! stream.send_all("Hello World").await?; //! Ok(()) //! } @@ -447,7 +447,7 @@ where NA: NewActor + Clone + 'static, NA::RuntimeAccess: rt::Access + Spawn, { - let mut listener = TcpListener::bind_setup(ctx.runtime_ref(), local, set_listener_options) + let listener = TcpListener::bind_setup(ctx.runtime_ref(), local, set_listener_options) .await .map_err(Error::Accept)?; trace!(address = log::as_display!(local); "TCP server listening"); @@ -456,7 +456,7 @@ where let mut receive = ctx.receive_next(); loop { match either(next(&mut accept), &mut receive).await { - Ok(Some(Ok(mut stream))) => { + Ok(Some(Ok(stream))) => { trace!("TCP server accepted connection"); drop(receive); // Can't double borrow `ctx`. stream.set_auto_cpu_affinity(ctx.runtime_ref()); diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index 51f983ca3..54104c978 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -30,7 +30,7 @@ use crate::net::{ /// /// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); -/// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; +/// let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// stream.send_all("Hello world!").await?; /// Ok(()) /// } @@ -56,7 +56,7 @@ impl TcpStream { 0, ) .await?; - let mut socket = TcpStream { fd }; + let socket = TcpStream { fd }; socket.set_auto_cpu_affinity(rt); socket.fd.connect(SockAddr::from(address)).await?; Ok(socket) @@ -74,7 +74,7 @@ impl TcpStream { /// connection from [`TcpListener`]. /// /// [`TcpListener`]: crate::net::tcp::TcpListener - pub fn set_auto_cpu_affinity(&mut self, rt: &RT) + pub fn set_auto_cpu_affinity(&self, rt: &RT) where RT: rt::Access, { @@ -90,37 +90,37 @@ impl TcpStream { /// /// On Linux this uses `SO_INCOMING_CPU`. #[cfg(target_os = "linux")] - pub(crate) fn set_cpu_affinity(&mut self, cpu: usize) -> io::Result<()> { + pub(crate) fn set_cpu_affinity(&self, cpu: usize) -> io::Result<()> { self.with_ref(|socket| socket.set_cpu_affinity(cpu)) } /// Returns the socket address of the remote peer of this TCP connection. - pub fn peer_addr(&mut self) -> io::Result { + pub fn peer_addr(&self) -> io::Result { self.with_ref(|socket| socket.peer_addr().and_then(convert_address)) } /// Returns the socket address of the local half of this TCP connection. - pub fn local_addr(&mut self) -> io::Result { + pub fn local_addr(&self) -> io::Result { self.with_ref(|socket| socket.local_addr().and_then(convert_address)) } /// Sets the value for the `IP_TTL` option on this socket. - pub fn set_ttl(&mut self, ttl: u32) -> io::Result<()> { + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.with_ref(|socket| socket.set_ttl(ttl)) } /// Gets the value of the `IP_TTL` option for this socket. - pub fn ttl(&mut self) -> io::Result { + pub fn ttl(&self) -> io::Result { self.with_ref(|socket| socket.ttl()) } /// Sets the value of the `TCP_NODELAY` option on this socket. 
- pub fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.with_ref(|socket| socket.set_nodelay(nodelay)) } /// Gets the value of the `TCP_NODELAY` option on this socket. - pub fn nodelay(&mut self) -> io::Result { + pub fn nodelay(&self) -> io::Result { self.with_ref(|socket| socket.nodelay()) } @@ -138,7 +138,7 @@ impl TcpStream { /// /// Return the number of bytes written. This may we fewer then the length of /// `buf`. To ensure that all bytes are written use [`TcpStream::send_all`]. - pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + pub async fn send(&self, buf: B) -> io::Result<(B, usize)> { Send(self.fd.send(BufWrapper(buf), 0).extract()).await } @@ -146,13 +146,13 @@ impl TcpStream { /// /// If this fails to send all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub async fn send_all<'a, B: Buf>(&'a mut self, buf: B) -> io::Result { + pub async fn send_all(&self, buf: B) -> io::Result { SendAll(self.fd.send_all(BufWrapper(buf)).extract()).await } /// Sends data on the socket to the connected socket, using vectored I/O. pub async fn send_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, usize)> { SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await @@ -163,7 +163,7 @@ impl TcpStream { /// If this fails to send all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. pub async fn send_vectored_all, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result { SendAllVectored(self.fd.send_all_vectored(BufWrapper(bufs)).extract()).await @@ -184,7 +184,7 @@ impl TcpStream { /// /// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); - /// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + /// let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. /// let buf = stream.recv(buf).await?; @@ -195,7 +195,7 @@ impl TcpStream { /// # /// # drop(actor); // Silent dead code warnings. /// ``` - pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn recv(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), 0)).await } @@ -216,7 +216,7 @@ impl TcpStream { /// /// async fn actor(ctx: actor::Context) -> io::Result<()> { /// let address = "127.0.0.1:12345".parse().unwrap(); - /// let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + /// let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; /// /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. /// // NOTE: this will return an error if the peer sends less than 1 KB @@ -230,7 +230,7 @@ impl TcpStream { /// # /// # drop(actor); // Silent dead code warnings. /// ``` - pub async fn recv_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> io::Result { + pub async fn recv_n(&self, buf: B, n: usize) -> io::Result { debug_assert!( buf.spare_capacity() >= n, "called `TcpStream::recv_n` with a buffer smaller then `n`" @@ -239,10 +239,7 @@ impl TcpStream { } /// Receive messages from the stream, using vectored I/O. 
- pub async fn recv_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn recv_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await } @@ -250,7 +247,7 @@ impl TcpStream { /// /// This returns [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. pub async fn recv_n_vectored, const N: usize>( - &mut self, + &self, bufs: B, n: usize, ) -> io::Result { @@ -263,16 +260,13 @@ impl TcpStream { /// Receive messages from the stream, without removing that data from the /// queue. - pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn peek(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await } /// Receive messages from the stream, without removing it from the input /// queue, using vectored I/O. - pub async fn peek_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn peek_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } @@ -289,7 +283,7 @@ impl TcpStream { /// Users might want to use [`TcpStream::send_file_all`] to ensure all the /// specified bytes (between `offset` and `length`) are send. pub fn send_file<'a, 'f, F>( - &'a mut self, + &'a self, file: &'f F, offset: usize, length: Option, @@ -310,7 +304,7 @@ impl TcpStream { /// Users who want to send the entire file might want to use the /// [`TcpStream::send_entire_file`] method. pub fn send_file_all<'a, 'f, F>( - &'a mut self, + &'a self, file: &'f F, offset: usize, length: Option, @@ -329,7 +323,7 @@ impl TcpStream { /// Convenience method to send the entire `file`. /// /// See [`TcpStream::send_file`] for more information. - pub fn send_entire_file<'a, 'f, F>(&'a mut self, file: &'f F) -> SendFileAll<'a, 'f, F> + pub fn send_entire_file<'a, 'f, F>(&'a self, file: &'f F) -> SendFileAll<'a, 'f, F> where F: FileSend, { @@ -342,7 +336,7 @@ impl TcpStream { /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of [`Shutdown`]). - pub fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { self.with_ref(|socket| socket.shutdown(how)) } @@ -351,7 +345,7 @@ impl TcpStream { /// This will retrieve the stored error in the underlying socket, clearing /// the field in the process. This can be useful for checking errors between /// calls. - pub fn take_error(&mut self) -> io::Result> { + pub fn take_error(&self) -> io::Result> { self.with_ref(|socket| socket.take_error()) } diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 2ece4d0d4..9442a70f4 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -83,7 +83,7 @@ pub use crate::net::{Connected, Unconnected}; /// /// Actor that will bind a UDP socket and waits for incoming packets and /// /// echos the message to standard out. /// async fn echo_server(mut ctx: actor::Context, local: SocketAddr) -> io::Result<()> { -/// let mut socket = UdpSocket::bind(ctx.runtime_ref(), local).await?; +/// let socket = UdpSocket::bind(ctx.runtime_ref(), local).await?; /// let mut buf = Vec::with_capacity(4096); /// loop { /// buf.clear(); @@ -112,7 +112,7 @@ pub use crate::net::{Connected, Unconnected}; /// /// The client that will send a message to the server. 
/// async fn client(ctx: actor::Context, server_address: SocketAddr) -> io::Result<()> { /// let local_address = "127.0.0.1:7001".parse().unwrap(); -/// let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await? +/// let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await? /// .connect(server_address).await?; /// /// let (msg, n) = socket.send("Hello world").await?; @@ -176,12 +176,12 @@ impl UdpSocket { } /// Returns the sockets peer address. - pub fn peer_addr(&mut self) -> io::Result { + pub fn peer_addr(&self) -> io::Result { self.with_ref(|socket| socket.peer_addr().and_then(convert_address)) } /// Returns the sockets local address. - pub fn local_addr(&mut self) -> io::Result { + pub fn local_addr(&self) -> io::Result { self.with_ref(|socket| socket.local_addr().and_then(convert_address)) } @@ -190,7 +190,7 @@ impl UdpSocket { /// This will retrieve the stored error in the underlying socket, clearing /// the field in the process. This can be useful for checking errors between /// calls. - pub fn take_error(&mut self) -> io::Result> { + pub fn take_error(&self) -> io::Result> { self.with_ref(|socket| socket.take_error()) } @@ -204,7 +204,7 @@ impl UdpSocket { impl UdpSocket { /// Receives data from the unconnceted socket. - pub async fn recv_from(&mut self, buf: B) -> io::Result<(B, SocketAddr)> { + pub async fn recv_from(&self, buf: B) -> io::Result<(B, SocketAddr)> { RecvFrom::(self.fd.recvfrom(BufWrapper(buf), 0)) .await .map(|(buf, addr)| (buf, addr.into())) @@ -212,7 +212,7 @@ impl UdpSocket { /// Receives data from the unconnected socket, using vectored I/O. pub async fn recv_from_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, SocketAddr)> { RecvFromVectored::(self.fd.recvfrom_vectored(BufWrapper(bufs), 0)) @@ -222,7 +222,7 @@ impl UdpSocket { /// Receives data from the unconnected socket, without removing it from the /// input queue. - pub async fn peek_from(&mut self, buf: B) -> io::Result<(B, SocketAddr)> { + pub async fn peek_from(&self, buf: B) -> io::Result<(B, SocketAddr)> { RecvFrom::(self.fd.recvfrom(BufWrapper(buf), libc::MSG_PEEK)) .await .map(|(buf, addr)| (buf, addr.into())) @@ -231,7 +231,7 @@ impl UdpSocket { /// Receives data from the unconnected socket, without removing it from the /// input queue, using vectored I/O. pub async fn peek_from_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, SocketAddr)> { RecvFromVectored::( @@ -242,11 +242,7 @@ impl UdpSocket { } /// Send the bytes in `buf` to `address`. - pub async fn send_to<'a, B: Buf>( - &'a mut self, - buf: B, - address: SocketAddr, - ) -> io::Result<(B, usize)> { + pub async fn send_to(&self, buf: B, address: SocketAddr) -> io::Result<(B, usize)> { SendTo( self.fd .sendto(BufWrapper(buf), SockAddr::from(address), 0) @@ -257,7 +253,7 @@ impl UdpSocket { /// Send the bytes in `bufs` to `address`, using vectored I/O. pub async fn send_to_vectored, const N: usize>( - &mut self, + &self, bufs: B, address: SocketAddr, ) -> io::Result<(B, usize)> { @@ -272,41 +268,35 @@ impl UdpSocket { impl UdpSocket { /// Receive bytes from the connected socket. - pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn recv(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), 0)).await } /// Receives data from the connected socket, using vectored I/O. 
- pub async fn recv_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn recv_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await } /// Receive bytes from the connected socket, without removing it from the /// input queue, writing them into `buf`. - pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn peek(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await } /// Receive bytes from the connected socket, without removing it from the /// input queue, using vectored I/O. - pub async fn peek_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn peek_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } /// Sends data on the socket to the connected socket. - pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + pub async fn send(&self, buf: B) -> io::Result<(B, usize)> { Send(self.fd.send(BufWrapper(buf), 0).extract()).await } /// Sends data on the socket to the connected socket, using vectored I/O. pub async fn send_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, usize)> { SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 6bd02a652..f8c08558a 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -123,12 +123,12 @@ impl UnixDatagram { } /// Returns the socket address of the remote peer of this socket. - pub fn peer_addr(&mut self) -> io::Result { + pub fn peer_addr(&self) -> io::Result { self.with_ref(|socket| socket.peer_addr().map(|a| UnixAddr { inner: a })) } /// Returns the socket address of the local half of this socket. - pub fn local_addr(&mut self) -> io::Result { + pub fn local_addr(&self) -> io::Result { self.with_ref(|socket| socket.local_addr().map(|a| UnixAddr { inner: a })) } @@ -137,7 +137,7 @@ impl UnixDatagram { /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of [`Shutdown`]). - pub fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { self.with_ref(|socket| socket.shutdown(how)) } @@ -146,7 +146,7 @@ impl UnixDatagram { /// This will retrieve the stored error in the underlying socket, clearing /// the field in the process. This can be useful for checking errors between /// calls. - pub fn take_error(&mut self) -> io::Result> { + pub fn take_error(&self) -> io::Result> { self.with_ref(|socket| socket.take_error()) } @@ -160,13 +160,13 @@ impl UnixDatagram { impl UnixDatagram { /// Receives data from the unconnceted socket. - pub async fn recv_from(&mut self, buf: B) -> io::Result<(B, UnixAddr)> { + pub async fn recv_from(&self, buf: B) -> io::Result<(B, UnixAddr)> { RecvFrom(self.fd.recvfrom(BufWrapper(buf), 0)).await } /// Receives data from the unconnected socket, using vectored I/O. pub async fn recv_from_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, UnixAddr)> { RecvFromVectored(self.fd.recvfrom_vectored(BufWrapper(bufs), 0)).await @@ -174,31 +174,27 @@ impl UnixDatagram { /// Receives data from the unconnected socket, without removing it from the /// input queue. 
- pub async fn peek_from(&mut self, buf: B) -> io::Result<(B, UnixAddr)> { + pub async fn peek_from(&self, buf: B) -> io::Result<(B, UnixAddr)> { RecvFrom(self.fd.recvfrom(BufWrapper(buf), libc::MSG_PEEK)).await } /// Receives data from the unconnected socket, without removing it from the /// input queue, using vectored I/O. pub async fn peek_from_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, UnixAddr)> { RecvFromVectored(self.fd.recvfrom_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } /// Send the bytes in `buf` to `address`. - pub async fn send_to<'a, B: Buf>( - &'a mut self, - buf: B, - address: UnixAddr, - ) -> io::Result<(B, usize)> { + pub async fn send_to(&self, buf: B, address: UnixAddr) -> io::Result<(B, usize)> { SendTo(self.fd.sendto(BufWrapper(buf), address, 0).extract()).await } /// Send the bytes in `bufs` to `address`, using vectored I/O. pub async fn send_to_vectored, const N: usize>( - &mut self, + &self, bufs: B, address: UnixAddr, ) -> io::Result<(B, usize)> { @@ -213,41 +209,35 @@ impl UnixDatagram { impl UnixDatagram { /// Receive bytes from the connected socket. - pub async fn recv<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn recv(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), 0)).await } /// Receives data from the connected socket, using vectored I/O. - pub async fn recv_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn recv_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await } /// Receive bytes from the connected socket, without removing it from the /// input queue, writing them into `buf`. - pub async fn peek<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn peek(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await } /// Receive bytes from the connected socket, without removing it from the /// input queue, using vectored I/O. - pub async fn peek_vectored, const N: usize>( - &mut self, - bufs: B, - ) -> io::Result { + pub async fn peek_vectored, const N: usize>(&self, bufs: B) -> io::Result { RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await } /// Sends data on the socket to the connected socket. - pub async fn send<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + pub async fn send(&self, buf: B) -> io::Result<(B, usize)> { Send(self.fd.send(BufWrapper(buf), 0).extract()).await } /// Sends data on the socket to the connected socket, using vectored I/O. pub async fn send_vectored, const N: usize>( - &mut self, + &self, bufs: B, ) -> io::Result<(B, usize)> { SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index b7997bf07..861482db5 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -26,7 +26,7 @@ //! async fn process_handler(ctx: actor::Context) -> io::Result<()> //! where RT: rt::Access, //! { -//! let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; +//! let (sender, receiver) = pipe::new(ctx.runtime_ref())?; //! //! // Write some data. //! sender.write_all(DATA).await?; @@ -72,8 +72,8 @@ //! .spawn()?; //! //! // Create our process standard in and out. -//! let mut stdin = pipe::Sender::from_child_stdin(ctx.runtime_ref(), process.stdin.take().unwrap())?; -//! let mut stdout = pipe::Receiver::from_child_stdout(ctx.runtime_ref(), process.stdout.take().unwrap())?; +//! 
let stdin = pipe::Sender::from_child_stdin(ctx.runtime_ref(), process.stdin.take().unwrap())?; +//! let stdout = pipe::Receiver::from_child_stdout(ctx.runtime_ref(), process.stdout.take().unwrap())?; //! //! // Write some data. //! stdin.write_all(DATA).await?; @@ -154,7 +154,7 @@ impl Sender { /// /// Return the number of bytes written. This may we fewer than the length of /// `buf`. To ensure that all bytes are written use [`Sender::write_all`]. - pub async fn write<'a, B: Buf>(&'a mut self, buf: B) -> io::Result<(B, usize)> { + pub async fn write(&self, buf: B) -> io::Result<(B, usize)> { Write(self.fd.write(BufWrapper(buf)).extract()).await } @@ -162,7 +162,7 @@ impl Sender { /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub async fn write_all<'a, B: Buf>(&'a mut self, buf: B) -> io::Result { + pub async fn write_all(&self, buf: B) -> io::Result { WriteAll(self.fd.write_all(BufWrapper(buf)).extract()).await } @@ -171,8 +171,8 @@ impl Sender { /// Return the number of bytes written. This may we fewer than the length of /// `bufs`. To ensure that all bytes are written use /// [`Sender::write_vectored_all`]. - pub async fn write_vectored<'a, B: BufSlice, const N: usize>( - &'a mut self, + pub async fn write_vectored, const N: usize>( + &self, bufs: B, ) -> io::Result<(B, usize)> { WriteVectored(self.fd.write_vectored(BufWrapper(bufs)).extract()).await @@ -182,8 +182,8 @@ impl Sender { /// /// If this fails to write all bytes (this happens if a write returns /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. - pub async fn write_vectored_all<'a, B: BufSlice, const N: usize>( - &'a mut self, + pub async fn write_vectored_all, const N: usize>( + &self, bufs: B, ) -> io::Result { WriteAllVectored(self.fd.write_all_vectored(BufWrapper(bufs)).extract()).await @@ -221,7 +221,7 @@ impl Receiver { } /// Read bytes from the pipe, writing them into `buf`. - pub async fn read<'a, B: BufMut>(&'a mut self, buf: B) -> io::Result { + pub async fn read(&self, buf: B) -> io::Result { Read(self.fd.read(BufWrapper(buf))).await } @@ -229,7 +229,7 @@ impl Receiver { /// /// This returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes /// could be read. - pub async fn read_n<'a, B: BufMut>(&'a mut self, buf: B, n: usize) -> io::Result { + pub async fn read_n(&self, buf: B, n: usize) -> io::Result { debug_assert!( buf.spare_capacity() >= n, "called `Receiver::read_n` with a buffer smaller than `n`", @@ -238,16 +238,13 @@ impl Receiver { } /// Read bytes from the pipe, writing them into `bufs`. - pub async fn read_vectored<'a, B: BufMutSlice, const N: usize>( - &'a mut self, - bufs: B, - ) -> io::Result { + pub async fn read_vectored, const N: usize>(&self, bufs: B) -> io::Result { ReadVectored(self.fd.read_vectored(BufWrapper(bufs))).await } /// Read at least `n` bytes from the pipe, writing them into `bufs`. - pub async fn read_n_vectored<'a, B: BufMutSlice, const N: usize>( - &'a mut self, + pub async fn read_n_vectored, const N: usize>( + &self, bufs: B, n: usize, ) -> io::Result { diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 67bddeaa1..2608138ae 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -134,7 +134,7 @@ impl Notify { /// programs could pass completion percentages and failing programs could /// pass a human-readable error message. 
**Note that it must be limited to a /// single line.** - pub async fn change_state(&mut self, state: State, status: Option<&str>) -> io::Result<()> { + pub async fn change_state(&self, state: State, status: Option<&str>) -> io::Result<()> { debug!(state = log::as_debug!(state), status = log::as_debug!(status); "updating state with service manager"); let state_line = match state { State::Ready => "READY=1\n", @@ -168,7 +168,7 @@ impl Notify { /// /// If you also need to change the state of the application you can use /// [`Notify::change_state`]. - pub async fn change_status(&mut self, status: &str) -> io::Result<()> { + pub async fn change_status(&self, status: &str) -> io::Result<()> { debug!(status = log::as_display!(status); "updating status with service manager"); let mut state_update = String::with_capacity(7 + status.len() + 1); state_update.push_str("STATUS="); @@ -183,7 +183,7 @@ impl Notify { /// /// Send a keep-alive ping that services need to issue in regular intervals /// if `WatchdogSec=` is enabled for it. - pub async fn ping_watchdog(&mut self) -> io::Result<()> { + pub async fn ping_watchdog(&self) -> io::Result<()> { debug!("pinging service manager watchdog"); _ = self.socket.send("WATCHDOG=1").await?; Ok(()) @@ -200,7 +200,7 @@ impl Notify { /// the watchdog behavior. /// /// [`systemd.service(5)`]: https://www.freedesktop.org/software/systemd/man/systemd.service.html - pub async fn trigger_watchdog(&mut self) -> io::Result<()> { + pub async fn trigger_watchdog(&self) -> io::Result<()> { debug!("triggering service manager watchdog"); _ = self.socket.send("WATCHDOG=trigger").await?; Ok(()) @@ -282,7 +282,7 @@ where H: FnMut() -> Result<(), E>, E: ToString, { - let mut notify = match Notify::new(ctx.runtime_ref()).await? { + let notify = match Notify::new(ctx.runtime_ref()).await? 
{ Some(notify) => notify, None => { debug!("not started via systemd, not starting `systemd::watchdog`"); diff --git a/rt/tests/functional/pipe.rs b/rt/tests/functional/pipe.rs index db128169a..89cb85c44 100644 --- a/rt/tests/functional/pipe.rs +++ b/rt/tests/functional/pipe.rs @@ -19,7 +19,7 @@ fn smoke() { where RT: rt::Access, { - let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; + let (sender, receiver) = pipe::new(ctx.runtime_ref())?; let (_, n) = sender.write(DATA).await?; assert_eq!(n, DATA.len()); @@ -49,7 +49,7 @@ fn write_all_read_n() { where RT: rt::Access, { - let (mut sender, receiver) = pipe::new(ctx.runtime_ref())?; + let (sender, receiver) = pipe::new(ctx.runtime_ref())?; reader.send(receiver).await.unwrap(); @@ -62,7 +62,7 @@ fn write_all_read_n() { where RT: rt::Access, { - let mut receiver = ctx.receive_next().await.unwrap(); + let receiver = ctx.receive_next().await.unwrap(); let buf = receiver .read_n(Vec::with_capacity(DATA.len() + 1), DATA.len()) @@ -100,7 +100,7 @@ fn write_vectored_all_read_n_vectored() { where RT: rt::Access, { - let (mut sender, receiver) = pipe::new(ctx.runtime_ref())?; + let (sender, receiver) = pipe::new(ctx.runtime_ref())?; reader.send(receiver).await.unwrap(); @@ -114,7 +114,7 @@ fn write_vectored_all_read_n_vectored() { where RT: rt::Access, { - let mut receiver = ctx.receive_next().await.unwrap(); + let receiver = ctx.receive_next().await.unwrap(); let bufs = [ Vec::with_capacity(8 * 4096), @@ -151,7 +151,7 @@ fn vectored_io() { where RT: rt::Access, { - let (mut sender, mut receiver) = pipe::new(ctx.runtime_ref())?; + let (sender, receiver) = pipe::new(ctx.runtime_ref())?; let bufs = [DATAV[0], DATAV[1], DATAV[2]]; let (_, n) = sender.write_vectored(bufs).await?; diff --git a/rt/tests/functional/tcp/listener.rs b/rt/tests/functional/tcp/listener.rs index c2645cf7b..df1fbadbb 100644 --- a/rt/tests/functional/tcp/listener.rs +++ b/rt/tests/functional/tcp/listener.rs @@ -15,12 +15,12 @@ use crate::util::{any_local_address, any_local_ipv6_address}; fn local_addr() { async fn actor(ctx: actor::Context) { let address = "127.0.0.1:12345".parse().unwrap(); - let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); + let listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); assert_eq!(listener.local_addr().unwrap(), address); drop(listener); let address = "[::1]:12345".parse().unwrap(); - let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); + let listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); assert_eq!(listener.local_addr().unwrap(), address); } @@ -33,14 +33,14 @@ fn local_addr() { fn local_addr_port_zero() { async fn actor(ctx: actor::Context) { let address = any_local_address(); - let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); + let listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let got = listener.local_addr().unwrap(); assert_eq!(got.ip(), address.ip()); assert!(got.port() != 0); drop(listener); let address = any_local_ipv6_address(); - let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); + let listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let got = listener.local_addr().unwrap(); assert_eq!(got.ip(), address.ip()); assert!(got.port() != 0); @@ -54,7 +54,7 @@ fn local_addr_port_zero() { #[test] fn ttl() { async fn actor(ctx: actor::Context) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + 
let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); @@ -76,7 +76,7 @@ where RT: rt::Access, { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(ctx.runtime_ref(), address) + let stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); @@ -90,14 +90,14 @@ fn accept() { ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (mut stream, remote_address) = listener.accept().await.unwrap(); + let (stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); let buf = Vec::with_capacity(DATA.len() + 1); @@ -123,7 +123,7 @@ fn incoming() { ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); @@ -131,7 +131,7 @@ fn incoming() { actor_ref.send(address).await.unwrap(); let mut incoming = listener.incoming(); - let mut stream = next(&mut incoming).await.unwrap().unwrap(); + let stream = next(&mut incoming).await.unwrap().unwrap(); let buf = Vec::with_capacity(DATA.len() + 1); let buf = stream.recv(buf).await.unwrap(); diff --git a/rt/tests/functional/tcp/server.rs b/rt/tests/functional/tcp/server.rs index e8bc88413..df7c76d4b 100644 --- a/rt/tests/functional/tcp/server.rs +++ b/rt/tests/functional/tcp/server.rs @@ -29,7 +29,7 @@ fn message_from_process_signal() { } } -async fn actor(_: actor::Context, mut stream: TcpStream) +async fn actor(_: actor::Context, stream: TcpStream) where RT: rt::Access, { @@ -47,7 +47,7 @@ async fn stream_actor( ) where RT: rt::Access + Clone, { - let mut stream = tcp_connect(&mut ctx, address).await.unwrap(); + let stream = tcp_connect(&mut ctx, address).await.unwrap(); let (_, n) = stream.send(DATA).await.unwrap(); assert_eq!(n, DATA.len()); diff --git a/rt/tests/functional/tcp/stream.rs b/rt/tests/functional/tcp/stream.rs index 9908389c5..9cdfd72d7 100644 --- a/rt/tests/functional/tcp/stream.rs +++ b/rt/tests/functional/tcp/stream.rs @@ -39,7 +39,7 @@ fn smoke() { mut ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; assert_eq!(stream.peer_addr().unwrap(), address); let local_address = stream.local_addr().unwrap(); @@ -135,7 +135,7 @@ fn connect_connection_refused() { #[test] fn recv() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let mut buf = stream.recv(buf).await?; @@ -166,7 +166,7 @@ fn recv() { #[test] fn recv_n_read_exact_amount() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let mut buf = stream.recv_n(buf, DATA.len()).await?; @@ -200,7 +200,7 @@ fn recv_n_read_exact_amount() { #[test] fn recv_n_read_more_bytes() { async fn actor(ctx: 
actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let want_n = DATA.len() - 2; @@ -237,7 +237,7 @@ fn recv_n_read_more_bytes() { #[test] fn recv_n_less_bytes() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let want_n = 2 * DATA.len(); @@ -265,7 +265,7 @@ fn recv_n_less_bytes() { #[test] fn recv_n_from_multiple_writes() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let buf = stream.recv_n(buf, 3 * DATA.len()).await?; @@ -294,7 +294,7 @@ fn recv_n_from_multiple_writes() { #[test] fn send() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let (_, n) = stream.send(DATA).await?; assert_eq!(n, DATA.len()); @@ -325,7 +325,7 @@ fn send_all() { // A lot of data to get at least two write calls. const DATA: &[u8] = &[213; 40 * 1024]; async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; stream.send_all(DATA).await?; Ok(()) } @@ -357,7 +357,7 @@ fn send_all() { #[test] fn send_vectored() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [DATA, DATA, DATA, DATA]; let (_, n) = stream.send_vectored(bufs).await?; @@ -392,7 +392,7 @@ fn send_vectored_all() { const DATA1: &[u8] = &[213; 40 * 1023]; const DATA2: &[u8] = &[155; 30 * 1024]; async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [DATA1, DATA2]; stream.send_vectored_all(bufs).await?; Ok(()) @@ -438,7 +438,7 @@ fn send_vectored_all() { #[test] fn recv_vectored() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(2 * DATA.len()), @@ -478,7 +478,7 @@ fn recv_vectored() { #[test] fn recv_n_vectored_exact_amount() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(DATA.len()), @@ -518,7 +518,7 @@ fn recv_n_vectored_exact_amount() { #[test] fn recv_n_vectored_more_bytes() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = 
TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(DATA.len()), @@ -558,7 +558,7 @@ fn recv_n_vectored_more_bytes() { #[test] fn recv_n_vectored_less_bytes() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(DATA.len()), @@ -588,7 +588,7 @@ fn recv_n_vectored_less_bytes() { #[test] fn recv_n_vectored_from_multiple_writes() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(DATA.len()), @@ -622,7 +622,7 @@ fn recv_n_vectored_from_multiple_writes() { #[test] fn peek() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let buf = Vec::with_capacity(128); let mut buf = stream.peek(buf).await?; @@ -669,7 +669,7 @@ fn send_file() { let file = File::open(path)?; let metadata = file.metadata()?; let length = min(metadata.len(), LENGTH as u64) as usize; - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let n = stream .send_file(&file, 0, NonZeroUsize::new(length)) .await?; @@ -684,13 +684,13 @@ fn send_file() { let args = (address, TEST_FILE0); let actor_ref1 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream0, _) = listener.accept().unwrap(); + let (stream0, _) = listener.accept().unwrap(); stream0.set_nonblocking(true).unwrap(); let args = (address, TEST_FILE1); let actor_ref2 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream1, _) = listener.accept().unwrap(); + let (stream1, _) = listener.accept().unwrap(); stream1.set_nonblocking(true).unwrap(); let mut expected0_offset = 0; @@ -701,13 +701,13 @@ fn send_file() { for _ in 0..20 { // NOTE: can't use `&&` as that short circuits. 
let done0 = send_file_check_actor( - &mut stream0, + &stream0, expected_data0(), &mut expected0_offset, &mut buf, ); let done1 = - send_file_check_actor(&mut stream1, &expected1, &mut expected1_offset, &mut buf); + send_file_check_actor(&stream1, &expected1, &mut expected1_offset, &mut buf); if done0 && done1 { break; @@ -733,7 +733,7 @@ fn send_file_all() { let file = File::open(path)?; let metadata = file.metadata()?; let length = min(metadata.len(), LENGTH as u64) as usize; - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; stream .send_file_all(&file, OFFSET, NonZeroUsize::new(length)) .await?; @@ -747,13 +747,13 @@ fn send_file_all() { let args = (address, TEST_FILE0); let actor_ref1 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream0, _) = listener.accept().unwrap(); + let (stream0, _) = listener.accept().unwrap(); stream0.set_nonblocking(true).unwrap(); let args = (address, TEST_FILE1); let actor_ref2 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream1, _) = listener.accept().unwrap(); + let (stream1, _) = listener.accept().unwrap(); stream1.set_nonblocking(true).unwrap(); let expected0 = expected_data0(); @@ -765,9 +765,9 @@ fn send_file_all() { for _ in 0..20 { // NOTE: can't use `&&` as that short circuits. let done0 = - send_file_check_actor(&mut stream0, &expected0, &mut expected0_offset, &mut buf); + send_file_check_actor(&stream0, &expected0, &mut expected0_offset, &mut buf); let done1 = - send_file_check_actor(&mut stream1, &expected1, &mut expected1_offset, &mut buf); + send_file_check_actor(&stream1, &expected1, &mut expected1_offset, &mut buf); if done0 && done1 { break; @@ -787,7 +787,7 @@ fn send_entire_file() { path: &'static str, ) -> io::Result<()> { let file = File::open(path)?; - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; stream.send_entire_file(&file).await } @@ -798,13 +798,13 @@ fn send_entire_file() { let args = (address, TEST_FILE0); let actor_ref1 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream0, _) = listener.accept().unwrap(); + let (stream0, _) = listener.accept().unwrap(); stream0.set_nonblocking(true).unwrap(); let args = (address, TEST_FILE1); let actor_ref2 = try_spawn_local(PanicSupervisor, actor, args, ActorOptions::default()).unwrap(); - let (mut stream1, _) = listener.accept().unwrap(); + let (stream1, _) = listener.accept().unwrap(); stream1.set_nonblocking(true).unwrap(); let mut expected0_offset = 0; @@ -814,13 +814,13 @@ fn send_entire_file() { for _ in 0..20 { // NOTE: can't use `&&` as that short circuits. 
let done0 = send_file_check_actor( - &mut stream0, + &stream0, expected_data0(), &mut expected0_offset, &mut buf, ); let done1 = send_file_check_actor( - &mut stream1, + &stream1, expected_data1(), &mut expected1_offset, &mut buf, @@ -864,7 +864,7 @@ fn send_file_check_actor( #[test] fn peek_vectored() { async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { - let mut stream = TcpStream::connect(ctx.runtime_ref(), address).await?; + let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; let bufs = [ Vec::with_capacity(2 * DATA.len()), @@ -919,14 +919,14 @@ fn shutdown_read() { ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (mut stream, remote_address) = listener.accept().await.unwrap(); + let (stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); // Shutting down the reading side of the peer should return 0 bytes @@ -938,7 +938,7 @@ fn shutdown_read() { async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(ctx.runtime_ref(), address) + let stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); @@ -968,14 +968,14 @@ fn shutdown_write() { ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (mut stream, remote_address) = listener.accept().await.unwrap(); + let (stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); // Shutting down the writing side of the peer should return EOF here. 
@@ -987,7 +987,7 @@ fn shutdown_write() { async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(ctx.runtime_ref(), address) + let stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); @@ -1019,14 +1019,14 @@ fn shutdown_both() { ctx: actor::Context, actor_ref: ActorRef, ) { - let mut listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) + let listener = TcpListener::bind(ctx.runtime_ref(), any_local_address()) .await .unwrap(); let address = listener.local_addr().unwrap(); actor_ref.send(address).await.unwrap(); - let (mut stream, remote_address) = listener.accept().await.unwrap(); + let (stream, remote_address) = listener.accept().await.unwrap(); assert!(remote_address.ip().is_loopback()); let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); @@ -1035,7 +1035,7 @@ fn shutdown_both() { async fn stream_actor(mut ctx: actor::Context) { let address = ctx.receive_next().await.unwrap(); - let mut stream = TcpStream::connect(ctx.runtime_ref(), address) + let stream = TcpStream::connect(ctx.runtime_ref(), address) .await .unwrap(); diff --git a/rt/tests/functional/udp.rs b/rt/tests/functional/udp.rs index 5cc70180d..b3bab93a0 100644 --- a/rt/tests/functional/udp.rs +++ b/rt/tests/functional/udp.rs @@ -70,7 +70,7 @@ async fn unconnected_udp_actor( peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; assert_eq!(socket.local_addr().unwrap().ip(), local_address.ip()); let (_, bytes_written) = socket.send_to(DATA, peer_address).await?; @@ -96,7 +96,7 @@ async fn connected_udp_actor( ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; - let mut socket = socket.connect(peer_address).await?; + let socket = socket.connect(peer_address).await?; assert_eq!(socket.local_addr().unwrap().ip(), local_address.ip()); let (_, bytes_written) = socket.send(DATA).await?; @@ -162,16 +162,16 @@ async fn reconnecting_actor( ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address1.ip(), 0); let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; - let mut socket = socket.connect(peer_address1).await?; + let socket = socket.connect(peer_address1).await?; let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); - let mut socket = socket.connect(peer_address1).await?; + let socket = socket.connect(peer_address1).await?; let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); - let mut socket = socket.connect(peer_address2).await?; + let socket = socket.connect(peer_address2).await?; let (_, bytes_written) = socket.send(DATA).await?; assert_eq!(bytes_written, DATA.len()); @@ -209,7 +209,7 @@ async fn unconnected_vectored_io_actor( peer_address: SocketAddr, ) -> io::Result<()> { let local_address = SocketAddr::new(peer_address.ip(), 0); - let mut socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; let bufs = [DATAV[0], DATAV[1], DATAV[2]]; let (_, bytes_written) = socket.send_to_vectored(bufs, peer_address).await?; @@ -244,7 +244,7 @@ async fn connected_vectored_io_actor( ) -> io::Result<()> { let local_address = 
SocketAddr::new(peer_address.ip(), 0); let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; - let mut socket = socket.connect(peer_address).await?; + let socket = socket.connect(peer_address).await?; let bufs = [DATAV[0], DATAV[1], DATAV[2]]; let (_, bytes_written) = socket.send_vectored(bufs).await?; diff --git a/rt/tests/functional/uds/datagram.rs b/rt/tests/functional/uds/datagram.rs index fe15fea2b..ac9a043fe 100644 --- a/rt/tests/functional/uds/datagram.rs +++ b/rt/tests/functional/uds/datagram.rs @@ -21,7 +21,7 @@ fn pair() { where RT: rt::Access, { - let (mut s1, mut s2) = UnixDatagram::pair(ctx.runtime_ref())?; + let (s1, s2) = UnixDatagram::pair(ctx.runtime_ref())?; // Addresses must point to each other. let s1_local = s1.local_addr()?; @@ -74,14 +74,14 @@ fn bound() { let path2 = temp_file("uds.bound2"); let address1 = UnixAddr::from_pathname(path1)?; let address2 = UnixAddr::from_pathname(path2)?; - let mut listener = UnixDatagram::bind(ctx.runtime_ref(), address1.clone()).await?; + let listener = UnixDatagram::bind(ctx.runtime_ref(), address1.clone()).await?; // Addresses must point to each other. assert_eq!(listener.local_addr()?, address1); assert!(listener.peer_addr().is_err()); let socket = UnixDatagram::bind(ctx.runtime_ref(), address2.clone()).await?; - let mut socket = socket.connect(address1.clone()).await?; + let socket = socket.connect(address1.clone()).await?; assert_eq!(socket.local_addr()?, address2); assert_eq!(socket.peer_addr()?, address1); diff --git a/rt/tests/regression/issue_145.rs b/rt/tests/regression/issue_145.rs index bd3e0f11d..063cff93b 100644 --- a/rt/tests/regression/issue_145.rs +++ b/rt/tests/regression/issue_145.rs @@ -89,7 +89,7 @@ where #[allow(clippy::type_complexity)] // `servers` is too complex. async fn conn_actor( mut ctx: actor::Context, - mut stream: TcpStream, + stream: TcpStream, addresses: Arc>>, servers: Arc)>>>, ) -> Result<(), !> { @@ -126,7 +126,7 @@ fn issue_145_tcp_listener() { async fn listener_actor(ctx: actor::Context) -> Result<(), !> { let address = "127.0.0.1:0".parse().unwrap(); // NOTE: this should not fail. 
- let mut listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); + let listener = TcpListener::bind(ctx.runtime_ref(), address).await.unwrap(); let addr = listener.local_addr().unwrap(); assert!(addr.port() != 0); Ok(()) From 57d7110ea57a70ddb67c8da0c9ade141af8819e4 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 14:22:40 +0200 Subject: [PATCH 047/177] Update Heph-RT to v0.5 --- Cargo.toml | 2 +- remote/Cargo.toml | 2 +- rt/Cargo.toml | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 14ae8c69e..4098d03a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ getrandom = { version = "0.2.2", default-features = false, features = [" [dev-dependencies] getrandom = { version = "0.2.2", default-features = false, features = ["std"] } -heph-rt = { version = "0.4.0", path = "./rt", default-features = false, features = ["test"] } +heph-rt = { version = "0.5.0", path = "./rt", default-features = false, features = ["test"] } std-logger = { version = "0.5.0", default-features = false, features = ["log-panic", "nightly"] } [[test]] diff --git a/remote/Cargo.toml b/remote/Cargo.toml index 7bbb1ba03..8ae3e2b5e 100644 --- a/remote/Cargo.toml +++ b/remote/Cargo.toml @@ -10,7 +10,7 @@ json = ["serde_json"] [dependencies] heph = { version = "0.4.0", path = "../", default-features = false } -heph-rt = { version = "0.4.0", path = "../rt", default-features = false } +heph-rt = { version = "0.5.0", path = "../rt", default-features = false } log = { version = "0.4.14", default-features = false } serde = { version = "1.0.130", default-features = false } getrandom = { version = "0.2.3", default-features = false } diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 133ec482b..f5a30d3f4 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "heph-rt" description = "Heph-rt is a speciailised runtime for Heph's actor." -version = "0.4.1" +version = "0.5.0" +publish = false # In development. authors = ["Thomas de Zeeuw "] license = "MIT" documentation = "https://docs.rs/heph-rt" From b822eed869b5c466c4d38c80d6f80d5f8a89da3c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 14:33:55 +0200 Subject: [PATCH 048/177] Update remote crate to new heph_rt::net API --- remote/src/net_relay/tcp.rs | 27 +++++++++++++-------- remote/src/net_relay/udp.rs | 48 +++++++++++++++++++++---------------- 2 files changed, 45 insertions(+), 30 deletions(-) diff --git a/remote/src/net_relay/tcp.rs b/remote/src/net_relay/tcp.rs index e4c3bd61c..8107a1770 100644 --- a/remote/src/net_relay/tcp.rs +++ b/remote/src/net_relay/tcp.rs @@ -2,6 +2,7 @@ use std::io; use std::net::SocketAddr; +use std::pin::pin; use heph::actor::{self, NoMessages}; use heph_rt as rt; @@ -69,20 +70,26 @@ where RT: rt::Access, R: Route, { - let mut stream = TcpStream::connect(&mut ctx, remote_address)?.await?; + let stream = TcpStream::connect(ctx.runtime_ref(), remote_address).await?; stream.set_nodelay(true)?; - let mut buf = Vec::with_capacity(INITIAL_BUF_SIZE); + let mut uuid_gen = UuidGenerator::new(); + let mut send_buf = Vec::with_capacity(INITIAL_BUF_SIZE); + let mut recv_data = pin!(stream.recv(Vec::with_capacity(INITIAL_BUF_SIZE))); loop { - match either(ctx.receive_next(), stream.recv(&mut buf)).await { + match either(ctx.receive_next(), recv_data.as_mut()).await { // Received an outgoing message we want to relay to a remote actor. 
Ok(Ok(RelayMessage::Relay(msg))) => { - send_message::(&mut stream, &mut buf, &mut uuid_gen, &msg).await?; + send_buf = send_message::(&stream, send_buf, &mut uuid_gen, &msg).await?; + send_buf.clear(); } Ok(Ok(RelayMessage::Terminate) | Err(NoMessages)) => return Ok(()), // Received some incoming data. - Err(Ok(_)) => route_messages::(&mut router, &mut buf, remote_address).await?, + Err(Ok(mut buf)) => { + route_messages::(&mut router, &mut buf, remote_address).await?; + recv_data.set(stream.recv(buf)); + } // Error receiving data. Err(Err(err)) => return Err(err), } @@ -91,11 +98,11 @@ where /// Send a `msg` to the remote actor, using `stream`. async fn send_message( - stream: &mut TcpStream, - buf: &mut Vec, + stream: &TcpStream, + mut buf: Vec, uuid_gen: &mut UuidGenerator, msg: &M, -) -> io::Result<()> +) -> io::Result> where S: Serde, M: Serialize, @@ -103,10 +110,10 @@ where // Serialise the message to our buffer first. let uuid = uuid_gen.next(); let msg = Message { uuid, msg }; - if let Err(err) = S::to_buf(&mut *buf, &msg) { + if let Err(err) = S::to_buf(&mut buf, &msg) { warn!("error serialising message: {err}"); // Don't want to stop the actor for this. - return Ok(()); + return Ok(buf); } stream.send_all(buf).await diff --git a/remote/src/net_relay/udp.rs b/remote/src/net_relay/udp.rs index e3b65448e..19635ab52 100644 --- a/remote/src/net_relay/udp.rs +++ b/remote/src/net_relay/udp.rs @@ -3,6 +3,7 @@ use std::convert::TryFrom; use std::io; use std::net::SocketAddr; +use std::pin::pin; use heph::actor::{self, NoMessages}; use heph::messages::Terminate; @@ -17,6 +18,7 @@ use crate::net_relay::uuid::UuidGenerator; use crate::net_relay::{Message, Route, Serde}; const MAX_PACKET_SIZE: usize = 1 << 16; // ~65kb. +const INITIAL_SEND_BUF_SIZE: usize = 1 << 12; // 4kb. /// Message type used for network relays using UDP. /// @@ -95,22 +97,29 @@ where RT: rt::Access, R: Route, { - let mut socket = UdpSocket::bind(&mut ctx, local_address)?; - let mut buf = Vec::with_capacity(MAX_PACKET_SIZE); + let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await?; + let mut uuid_gen = UuidGenerator::new(); + let mut send_buf = Vec::with_capacity(INITIAL_SEND_BUF_SIZE); + let mut recv_data = pin!(socket.recv_from(Vec::with_capacity(MAX_PACKET_SIZE))); loop { - buf.clear(); - match either(ctx.receive_next(), socket.recv_from(&mut buf)).await { + match either(ctx.receive_next(), recv_data.as_mut()).await { // Received an outgoing message we want to relay to a remote // actor. Ok(Ok(UdpRelayMessage::Relay { message, target })) => { - send_message::(&mut socket, &mut buf, &mut uuid_gen, target, &message) - .await?; + send_buf = + send_message::(&socket, send_buf, &mut uuid_gen, target, &message) + .await?; + send_buf.clear(); } Ok(Ok(UdpRelayMessage::Terminate) | Err(NoMessages)) => return Ok(()), // Received an incoming packet. - Err(Ok((_, source))) => route_message::(&mut router, &buf, source).await?, + Err(Ok((mut buf, source))) => { + route_message::(&mut router, &buf, source).await?; + buf.clear(); + recv_data.set(socket.recv_from(buf)); + } // Error receiving a packet. Err(Err(err)) => return Err(err), } @@ -119,12 +128,12 @@ where /// Send a `msg` to a remote actor at `target` address, using `socket`. 
async fn send_message( - socket: &mut UdpSocket, - buf: &mut Vec, + socket: &UdpSocket, + mut buf: Vec, uuid_gen: &mut UuidGenerator, target: SocketAddr, msg: &M, -) -> io::Result<()> +) -> io::Result> where S: Serde, M: Serialize, @@ -132,10 +141,10 @@ where // Serialise the message to our buffer first. let uuid = uuid_gen.next(); let msg = Message { uuid, msg }; - if let Err(err) = S::to_buf(&mut *buf, &msg) { + if let Err(err) = S::to_buf(&mut buf, &msg) { warn!("error serialising message (for {target}): {err}"); // Don't want to stop the actor for this. - return Ok(()); + return Ok(buf); } // Then send the buffer as a single packet. @@ -145,15 +154,14 @@ where "message too large (for {target}): (serialised) message size {len}, max is {MAX_PACKET_SIZE}", ); // Don't want to stop the actor for this. - return Ok(()); + return Ok(buf); + } + let (buf, bytes_send) = socket.send_to(buf, target).await?; + if bytes_send == buf.len() { + Ok(buf) + } else { + Err(io::ErrorKind::WriteZero.into()) } - socket.send_to(buf, target).await.and_then(|bytes_send| { - if bytes_send == buf.len() { - Ok(()) - } else { - Err(io::ErrorKind::WriteZero.into()) - } - }) } /// Routes a message in `buf` using `router`. From 4ee68c2437b7f6f040083368d92afb1adca8073e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 14:44:23 +0200 Subject: [PATCH 049/177] Implement Spawn::spawn using try_spawn Instead of try_spawn_setup, in a first step to remove the setup method. --- rt/src/spawn/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rt/src/spawn/mod.rs b/rt/src/spawn/mod.rs index 13e0617ec..2a01fa1c8 100644 --- a/rt/src/spawn/mod.rs +++ b/rt/src/spawn/mod.rs @@ -123,8 +123,10 @@ pub trait Spawn: PrivateSpawn { S: Supervisor, NA: NewActor, { - self.try_spawn_setup(supervisor, new_actor, |_| Ok(arg), options) - .unwrap_or_else(|_: AddActorError| unreachable!()) + match self.try_spawn(supervisor, new_actor, arg, options) { + Ok(actor_ref) => actor_ref, + Err(err) => err, + } } } From 36c20fe1d1ad016dc7c4277da32fba25fb596c96 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 15:02:32 +0200 Subject: [PATCH 050/177] Remove PrivateSpawn trait We don't need the setup any longer. Allows the Spawn to implemented outside of the crate. 
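To illustrate why the supertrait blocked that: the old setup followed the
usual trait-sealing pattern, roughly like the toy sketch below (the names
mirror the old types, but generics, methods and bounds are omitted here).

    // Minimal sketch of the old sealing pattern, not the actual definitions.
    mod private {
        // Only reachable inside the crate, so downstream crates can never
        // name this trait, let alone implement it.
        pub trait PrivateSpawn {}
    }

    // Because of the `private::PrivateSpawn` supertrait bound, no type
    // outside the crate could implement `Spawn`.
    pub trait Spawn: private::PrivateSpawn {}

With the supertrait gone, implementing `Spawn` only requires providing
`try_spawn` (`spawn` has a default implementation), so runtime wrappers in
other crates can implement it for their own types.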
--- rt/src/access.rs | 83 +++++++++++++------------------------------- rt/src/lib.rs | 64 +++++++++++----------------------- rt/src/shared/mod.rs | 14 ++++---- rt/src/spawn/mod.rs | 70 +++++-------------------------------- 4 files changed, 59 insertions(+), 172 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index 749745b02..8253196f6 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -41,7 +41,7 @@ use heph::supervisor::Supervisor; use mio::{event, Interest}; use crate::process::ProcessId; -use crate::spawn::{ActorOptions, AddActorError, FutureOptions, PrivateSpawn, Spawn}; +use crate::spawn::{ActorOptions, FutureOptions, Spawn}; use crate::trace::{self, Trace}; use crate::{shared, RuntimeRef}; @@ -231,26 +231,18 @@ where NA: NewActor + 'static, NA::Actor: 'static, { -} - -impl PrivateSpawn for ThreadLocal -where - S: Supervisor + 'static, - NA: NewActor + 'static, - NA::Actor: 'static, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { - self.rt - .try_spawn_setup(supervisor, new_actor, arg_fn, options) + Spawn::try_spawn(&mut self.rt, supervisor, new_actor, arg, options) } } @@ -261,27 +253,18 @@ where NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, { -} - -impl PrivateSpawn for ThreadLocal -where - S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + Send + std::marker::Sync + 'static, - NA::Actor: Send + std::marker::Sync + 'static, - NA::Message: Send, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { - self.rt - .try_spawn_setup(supervisor, new_actor, arg_fn, options) + self.rt.try_spawn(supervisor, new_actor, arg, options) } } @@ -396,26 +379,18 @@ where NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, { -} - -impl PrivateSpawn for ThreadSafe -where - S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + Send + std::marker::Sync + 'static, - NA::Actor: Send + std::marker::Sync + 'static, - NA::Message: Send, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { - self.rt.spawn_setup(supervisor, new_actor, arg_fn, options) + self.rt.try_spawn(supervisor, new_actor, arg, options) } } @@ -486,26 +461,18 @@ where NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, { -} - -impl PrivateSpawn for Sync -where - S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + Send + std::marker::Sync + 'static, - NA::Actor: Send + std::marker::Sync + 'static, - NA::Message: Send, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { - self.rt.spawn_setup(supervisor, new_actor, arg_fn, options) + self.rt.try_spawn(supervisor, new_actor, arg, options) } } diff --git 
a/rt/src/lib.rs b/rt/src/lib.rs index b8f740395..35f3f9906 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -249,7 +249,7 @@ pub use signal::Signal; use coordinator::Coordinator; use local::waker::MAX_THREADS; -use spawn::{ActorOptions, AddActorError, FutureOptions, PrivateSpawn, Spawn, SyncActorOptions}; +use spawn::{ActorOptions, FutureOptions, Spawn, SyncActorOptions}; use sync_worker::SyncWorker; const SYNC_WORKER_ID_START: usize = 10000; @@ -468,28 +468,20 @@ where NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, { -} - -impl PrivateSpawn for Runtime -where - S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + Send + std::marker::Sync + 'static, - NA::Actor: Send + std::marker::Sync + 'static, - NA::Message: Send, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { self.coordinator .shared_internals() - .spawn_setup(supervisor, new_actor, arg_fn, options) + .try_spawn(supervisor, new_actor, arg, options) } } @@ -726,23 +718,16 @@ where NA: NewActor + 'static, NA::Actor: 'static, { -} - -impl PrivateSpawn for RuntimeRef -where - S: Supervisor + 'static, - NA: NewActor + 'static, - NA::Actor: 'static, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, mut new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { // Setup adding a new process to the scheduler. let mut scheduler = self.internals.scheduler.borrow_mut(); @@ -754,10 +739,9 @@ where // Create our actor context and our actor with it. let (manager, sender, receiver) = inbox::Manager::new_small_channel(); let actor_ref = ActorRef::local(sender); - let mut ctx = actor::Context::new(receiver, ThreadLocal::new(pid, self.clone())); + let ctx = actor::Context::new(receiver, ThreadLocal::new(pid, self.clone())); // Create our actor argument, running any setup required by the caller. - let arg = arg_fn(&mut ctx).map_err(AddActorError::ArgFn)?; - let actor = new_actor.new(ctx, arg).map_err(AddActorError::NewActor)?; + let actor = new_actor.new(ctx, arg)?; // Add the actor to the scheduler. 
actor_entry.add( @@ -780,28 +764,20 @@ where NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, { -} - -impl PrivateSpawn for RuntimeRef -where - S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + Send + std::marker::Sync + 'static, - NA::Actor: Send + std::marker::Sync + 'static, - NA::Message: Send, -{ - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where - ArgFn: FnOnce(&mut actor::Context) -> Result, + S: Supervisor, + NA: NewActor, { self.internals .shared - .spawn_setup(supervisor, new_actor, arg_fn, options) + .try_spawn(supervisor, new_actor, arg, options) } } diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 9b6b75b59..344e10c6e 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -17,7 +17,7 @@ use log::{debug, error, trace}; use mio::unix::SourceFd; use mio::{event, Events, Interest, Poll, Registry, Token}; -use crate::spawn::{ActorOptions, AddActorError, FutureOptions}; +use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; use crate::{trace, ProcessId, ThreadSafe}; @@ -248,17 +248,16 @@ impl RuntimeInternals { } #[allow(clippy::needless_pass_by_value)] // For `ActorOptions`. - pub(crate) fn spawn_setup( + pub(crate) fn try_spawn( self: &Arc, supervisor: S, mut new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where S: Supervisor + Send + Sync + 'static, NA: NewActor + Sync + Send + 'static, - ArgFn: FnOnce(&mut actor::Context) -> Result, NA::Actor: Send + Sync + 'static, NA::Message: Send, { @@ -271,9 +270,8 @@ impl RuntimeInternals { // Create our actor context and our actor with it. let (manager, sender, receiver) = inbox::Manager::new_small_channel(); let actor_ref = ActorRef::local(sender); - let mut ctx = actor::Context::new(receiver, ThreadSafe::new(pid, self.clone())); - let arg = arg_fn(&mut ctx).map_err(AddActorError::ArgFn)?; - let actor = new_actor.new(ctx, arg).map_err(AddActorError::NewActor)?; + let ctx = actor::Context::new(receiver, ThreadSafe::new(pid, self.clone())); + let actor = new_actor.new(ctx, arg)?; // Add the actor to the scheduler. actor_entry.add( diff --git a/rt/src/spawn/mod.rs b/rt/src/spawn/mod.rs index 2a01fa1c8..b24674552 100644 --- a/rt/src/spawn/mod.rs +++ b/rt/src/spawn/mod.rs @@ -65,13 +65,11 @@ use heph::{actor, ActorRef, NewActor}; pub mod options; -pub(crate) use private::{AddActorError, PrivateSpawn}; - #[doc(no_inline)] pub use options::{ActorOptions, FutureOptions, SyncActorOptions}; /// The `Spawn` trait defines how new actors are added to the runtime. -pub trait Spawn: PrivateSpawn { +pub trait Spawn { /// Attempts to spawn an actor. /// /// Arguments: @@ -97,14 +95,7 @@ pub trait Spawn: PrivateSpawn { ) -> Result, NA::Error> where S: Supervisor, - NA: NewActor, - { - self.try_spawn_setup(supervisor, new_actor, |_| Ok(arg), options) - .map_err(|err| match err { - AddActorError::NewActor(err) => err, - AddActorError::<_, !>::ArgFn(_) => unreachable!(), - }) - } + NA: NewActor; /// Spawn an actor. /// @@ -130,67 +121,22 @@ pub trait Spawn: PrivateSpawn { } } -mod private { - //! Module with private types. - - use heph::actor::{self, NewActor}; - use heph::actor_ref::ActorRef; - use heph::supervisor::Supervisor; - - use crate::spawn::ActorOptions; - - /// Private version of the [`Spawn`] trait. 
- /// - /// [`Spawn`]: super::Spawn - pub trait PrivateSpawn { - /// Spawn an actor that needs to be initialised. - /// - /// See the public [`Spawn`] trait for documentation on the arguments. - /// - /// [`Spawn`]: super::Spawn - #[allow(clippy::type_complexity)] // Not part of the public API, so it's OK. - fn try_spawn_setup( - &mut self, - supervisor: S, - new_actor: NA, - arg_fn: ArgFn, - options: ActorOptions, - ) -> Result, AddActorError> - where - S: Supervisor, - NA: NewActor, - ArgFn: FnOnce(&mut actor::Context) -> Result; - } - - /// Error returned by spawning a actor. - #[derive(Debug)] - pub enum AddActorError { - /// Calling `NewActor::new` actor resulted in an error. - NewActor(NewActorE), - /// Calling the argument function resulted in an error. - ArgFn(ArgFnE), - } -} - -impl Spawn for actor::Context where RT: Spawn {} - -impl PrivateSpawn for actor::Context +impl Spawn for actor::Context where - RT: PrivateSpawn, + RT: Spawn, { - fn try_spawn_setup( + fn try_spawn( &mut self, supervisor: S, new_actor: NA, - arg_fn: ArgFn, + arg: NA::Argument, options: ActorOptions, - ) -> Result, AddActorError> + ) -> Result, NA::Error> where S: Supervisor, NA: NewActor, - ArgFn: FnOnce(&mut actor::Context) -> Result, { self.runtime() - .try_spawn_setup(supervisor, new_actor, arg_fn, options) + .try_spawn(supervisor, new_actor, arg, options) } } From 3cdcef0de860a2d4e1f06afd97efee2b15202cd5 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 11 Apr 2023 15:37:45 +0200 Subject: [PATCH 051/177] Remove ActorOptions.ready It no longer makes sense as an option as all I/O types are now lazy and will not run the actor to which it's bound any longer. --- rt/examples/99_stress_memory.rs | 7 +-- rt/src/lib.rs | 9 +--- rt/src/local/scheduler/mod.rs | 7 +-- rt/src/local/scheduler/tests.rs | 91 ++++++++++++-------------------- rt/src/net/tcp/server.rs | 4 +- rt/src/shared/mod.rs | 9 +--- rt/src/shared/scheduler/mod.rs | 7 +-- rt/src/shared/scheduler/tests.rs | 19 +++---- rt/src/spawn/options.rs | 23 -------- 9 files changed, 50 insertions(+), 126 deletions(-) diff --git a/rt/examples/99_stress_memory.rs b/rt/examples/99_stress_memory.rs index 83fcd4118..d7b3e2652 100644 --- a/rt/examples/99_stress_memory.rs +++ b/rt/examples/99_stress_memory.rs @@ -22,9 +22,7 @@ fn main() -> Result<(), rt::Error> { let start = std::time::Instant::now(); for _ in 0..N { let actor = actor as fn(_) -> _; - // Don't run the actors as that will remove them from memory. - let options = ActorOptions::default().mark_ready(false); - runtime_ref.spawn_local(NoSupervisor, actor, (), options); + runtime_ref.spawn_local(NoSupervisor, actor, (), ActorOptions::default()); } info!("Spawning took {:?}", start.elapsed()); @@ -48,4 +46,7 @@ async fn actor(_: actor::Context) { async fn control_actor(_: actor::Context) { info!("Running, check the memory usage!"); info!("Send a signal (e.g. by pressing Ctrl-C) to stop."); + // NOTE: don't do this. This is only here to prevent the other actors from + // running. + std::thread::sleep(std::time::Duration::from_secs(100)); } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 35f3f9906..3d7b7d1d0 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -744,14 +744,7 @@ where let actor = new_actor.new(ctx, arg)?; // Add the actor to the scheduler. 
- actor_entry.add( - options.priority(), - supervisor, - new_actor, - actor, - manager, - options.is_ready(), - ); + actor_entry.add(options.priority(), supervisor, new_actor, actor, manager); Ok(actor_ref) } diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index 27b7142fb..2a472cba8 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -134,7 +134,6 @@ impl<'s> AddActor<'s> { new_actor: NA, actor: NA::Actor, inbox: Manager, - is_ready: bool, ) where S: Supervisor + 'static, NA: NewActor + 'static, @@ -156,10 +155,6 @@ impl<'s> AddActor<'s> { // Safe because we write into the allocation above. alloc.assume_init().into() }; - if is_ready { - scheduler.ready.push(process); - } else { - scheduler.inactive.add(process); - } + scheduler.ready.push(process); } } diff --git a/rt/src/local/scheduler/tests.rs b/rt/src/local/scheduler/tests.rs index 778dda5ed..0227f4886 100644 --- a/rt/src/local/scheduler/tests.rs +++ b/rt/src/local/scheduler/tests.rs @@ -61,16 +61,9 @@ fn add_actor() { let actor_entry = scheduler.add_actor(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - false, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); assert!(scheduler.has_process()); - assert!(!scheduler.has_ready_process()); + assert!(scheduler.has_ready_process()); } #[test] @@ -84,18 +77,35 @@ fn mark_ready() { let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - false, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + + assert!(scheduler.has_process()); + assert!(scheduler.has_ready_process()); + let process = scheduler.next_process().unwrap(); + scheduler.add_process(process); scheduler.mark_ready(pid); +} + +#[test] +fn mark_ready_before_run() { + let mut scheduler = Scheduler::new(); + + // Incorrect (outdated) pid should be ok. 
+ scheduler.mark_ready(ProcessId(1)); + + let actor_entry = scheduler.add_actor(); + let pid = actor_entry.pid(); + let new_actor = simple_actor as fn(_) -> _; + let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); + + let process = scheduler.next_process().unwrap(); + scheduler.mark_ready(pid); + scheduler.add_process(process); } #[test] @@ -106,15 +116,7 @@ fn next_process() { let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - false, - ); - scheduler.mark_ready(pid); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); if let Some(process) = scheduler.next_process() { assert_eq!(process.as_ref().id(), pid); @@ -134,24 +136,17 @@ fn next_process_order() { let actor_entry = scheduler.add_actor(); let pid1 = actor_entry.pid(); let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::LOW, NoSupervisor, new_actor, actor, inbox, true); + actor_entry.add(Priority::LOW, NoSupervisor, new_actor, actor, inbox); // Actor 2. let actor_entry = scheduler.add_actor(); let pid2 = actor_entry.pid(); let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::HIGH, NoSupervisor, new_actor, actor, inbox, true); + actor_entry.add(Priority::HIGH, NoSupervisor, new_actor, actor, inbox); // Actor 3. let actor_entry = scheduler.add_actor(); let pid3 = actor_entry.pid(); let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - true, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -182,18 +177,10 @@ fn add_process() { let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - false, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); - assert!(scheduler.next_process().is_none()); assert!(scheduler.has_process()); - assert!(!scheduler.has_ready_process()); + assert!(scheduler.has_ready_process()); scheduler.mark_ready(pid); assert!(scheduler.has_process()); @@ -210,14 +197,7 @@ fn add_process_marked_ready() { let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - true, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); let process = scheduler.next_process().unwrap(); scheduler.add_process(process); @@ -256,7 +236,7 @@ fn scheduler_run_order() { pids.push(actor_entry.pid()); let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox, true); + actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox); } assert!(scheduler.has_process()); @@ -308,7 +288,6 @@ fn assert_actor_process_unmoved() { 
TestAssertUnmovedNewActor, actor, inbox, - true, ); // Run the process multiple times, ensure it's not moved in the process. diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 4253e9943..947492149 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -50,9 +50,7 @@ //! let address = "127.0.0.1:7890".parse().unwrap(); //! // Create our TCP server. //! let new_actor = conn_actor as fn(_, _) -> _; -//! // Wait for the `TcpStream` to become ready before running the actor. -//! let options = ActorOptions::default().mark_ready(false); -//! let server = tcp::server::setup(address, conn_supervisor, new_actor, options)?; +//! let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; //! //! // We advice to give the TCP server a low priority to prioritise //! // handling of ongoing requests over accepting new requests possibly diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 344e10c6e..580790f18 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -274,14 +274,7 @@ impl RuntimeInternals { let actor = new_actor.new(ctx, arg)?; // Add the actor to the scheduler. - actor_entry.add( - options.priority(), - supervisor, - new_actor, - actor, - manager, - options.is_ready(), - ); + actor_entry.add(options.priority(), supervisor, new_actor, actor, manager); Ok(actor_ref) } diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index 2569a6531..5d22ab494 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -225,7 +225,6 @@ impl<'s> AddActor<'s> { new_actor: NA, actor: NA::Actor, inbox: Manager, - is_ready: bool, ) where S: Supervisor + Send + Sync + 'static, NA: NewActor + Send + Sync + 'static, @@ -251,10 +250,6 @@ impl<'s> AddActor<'s> { alloc.assume_init().into() }; - if is_ready { - scheduler.ready.add(process); - } else { - scheduler.add_process(process); - } + scheduler.ready.add(process); } } diff --git a/rt/src/shared/scheduler/tests.rs b/rt/src/shared/scheduler/tests.rs index b2acfe7b3..894195872 100644 --- a/rt/src/shared/scheduler/tests.rs +++ b/rt/src/shared/scheduler/tests.rs @@ -43,19 +43,13 @@ fn adding_actor() { let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; let (actor, inbox, _) = init_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - new_actor, - actor, - inbox, - false, - ); + actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); - // Newly added processes aren't ready by default. + // Newly added processes are ready by default. assert!(scheduler.has_process()); - assert!(!scheduler.has_ready_process()); - assert_eq!(scheduler.remove(), None); + assert!(scheduler.has_ready_process()); + let process = scheduler.remove().unwrap(); + scheduler.add_process(process); // After scheduling the process should be ready to run. 
scheduler.mark_ready(pid); @@ -125,7 +119,7 @@ fn scheduler_run_order() { let actor_entry = scheduler.add_actor(); pids.push(actor_entry.pid()); let (actor, inbox, _) = init_actor_with_inbox(new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox, true); + actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox); } assert!(scheduler.has_process()); @@ -177,7 +171,6 @@ fn assert_actor_process_unmoved() { TestAssertUnmovedNewActor, actor, inbox, - true, ); // Run the process multiple times, ensure it's not moved in the diff --git a/rt/src/spawn/options.rs b/rt/src/spawn/options.rs index 7d08806a7..d4d83c189 100644 --- a/rt/src/spawn/options.rs +++ b/rt/src/spawn/options.rs @@ -38,7 +38,6 @@ use std::time::Duration; #[must_use] pub struct ActorOptions { priority: Priority, - ready: bool, } impl ActorOptions { @@ -52,34 +51,12 @@ impl ActorOptions { self.priority = priority; self } - - /// Returns `true` if the actor is ready to run when spawned. - /// - /// See [`mark_ready`] for more information. - /// - /// [`mark_ready`]: ActorOptions::mark_ready - pub const fn is_ready(&self) -> bool { - self.ready - } - - /// This option will marks the actor as ready to run (or not) when spawned. - /// - /// By default newly spawned actors will be considered to be ready to run - /// once they are spawned. However some actors might not want to run - /// immediately and wait for an external event before running. For example - /// actors that want to wait with running until they receive their first - /// message. - pub const fn mark_ready(mut self, ready: bool) -> Self { - self.ready = ready; - self - } } impl Default for ActorOptions { fn default() -> ActorOptions { ActorOptions { priority: Priority::default(), - ready: true, } } } From 4e0d41223d5f93d2b6727d77f5e939ec35fb3029 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 12 Apr 2023 19:59:46 +0200 Subject: [PATCH 052/177] Add size assertions for timer types --- rt/tests/functional/timer.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/rt/tests/functional/timer.rs b/rt/tests/functional/timer.rs index f4f1bf923..b10aafd3e 100644 --- a/rt/tests/functional/timer.rs +++ b/rt/tests/functional/timer.rs @@ -11,13 +11,24 @@ use heph_rt::spawn::ActorOptions; use heph_rt::test::{init_local_actor, poll_actor, poll_future, poll_next}; use heph_rt::timer::{Deadline, DeadlinePassed, Interval, Timer}; use heph_rt::util::next; -use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal}; +use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal, ThreadSafe}; -use crate::util::{count_polls, expect_pending}; +use crate::util::{assert_size, count_polls, expect_pending}; const SMALL_TIMEOUT: Duration = Duration::from_millis(50); const TIMEOUT: Duration = Duration::from_millis(100); +#[test] +fn size() { + assert_size::>(32); + assert_size::>(32); + assert_size::>(32); + assert_size::>(32); + assert_size::>(48); + assert_size::>(48); + assert_size::(0); +} + #[test] fn deadline_passed_into_io_error() { let err: io::Error = DeadlinePassed.into(); From 0aaf29a8c5011e8b423dcaed4ca31c240d9090c8 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 12 Apr 2023 20:50:52 +0200 Subject: [PATCH 053/177] Make timers lazy Instead of setting a deadline on creation set only created it after the first poll. 
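For illustration, a minimal usage sketch of the resulting API (a hypothetical
actor; the imports and the `rt::Access + Clone` bound follow the examples and
tests updated below, this is not part of the patch itself):

    use std::time::Duration;

    use heph::actor;
    use heph_rt as rt;
    use heph_rt::timer::Timer;

    async fn sleepy<RT: rt::Access + Clone>(ctx: actor::Context<String, RT>) {
        // The timer is created from a runtime access handle, not from the
        // actor context itself, so `ctx` no longer needs to be mutable.
        let timer = Timer::after(ctx.runtime_ref().clone(), Duration::from_millis(200));
        // No deadline is registered with the runtime yet; that only happens
        // on the first poll below, and it is removed on drop only if it was
        // actually added.
        let _ = timer.await;
    }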
Also changes all the timer types to use Timer internally, to which a boolean is added which grows all the types by 8 bytes (yes 8 bytes for a single bit value). --- rt/examples/redis.rs | 4 +- rt/src/access.rs | 11 -- rt/src/lib.rs | 9 - rt/src/local/timers.rs | 29 --- rt/src/local/timers_tests.rs | 138 -------------- rt/src/shared/mod.rs | 5 - rt/src/shared/timers.rs | 22 --- rt/src/shared/timers_tests.rs | 146 -------------- rt/src/systemd.rs | 2 +- rt/src/timer.rs | 261 ++++++++----------------- rt/tests/functional/test.rs | 4 +- rt/tests/functional/timer.rs | 346 +++------------------------------- rt/tests/util/mod.rs | 2 +- 13 files changed, 118 insertions(+), 861 deletions(-) diff --git a/rt/examples/redis.rs b/rt/examples/redis.rs index 455f1ef86..ab3c6b487 100644 --- a/rt/examples/redis.rs +++ b/rt/examples/redis.rs @@ -92,7 +92,7 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy { } async fn conn_actor( - mut ctx: actor::Context, + ctx: actor::Context, stream: TcpStream, values: Arc, Arc<[u8]>>>>, ) -> io::Result<()> @@ -105,7 +105,7 @@ where let err = loop { buffer.clear(); - buffer = Deadline::after(&mut ctx, TIMEOUT, stream.recv(buffer)).await?; + buffer = Deadline::after(ctx.runtime_ref().clone(), TIMEOUT, stream.recv(buffer)).await?; if buffer.is_empty() { return Ok(()); } diff --git a/rt/src/access.rs b/rt/src/access.rs index 8253196f6..33691b863 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -97,9 +97,6 @@ mod private { /// Remove a previously set deadline. fn remove_deadline(&mut self, deadline: Instant); - /// Changes a deadline's pid from `old_pid` the current pid. - fn change_deadline(&mut self, old_pid: ProcessId, deadline: Instant); - /// Create a new [`task::Waker`]. fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker; @@ -198,10 +195,6 @@ impl PrivateAccess for ThreadLocal { self.rt.remove_deadline(self.pid, deadline); } - fn change_deadline(&mut self, old_pid: ProcessId, deadline: Instant) { - self.rt.change_deadline(old_pid, self.pid, deadline); - } - fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { runtime_ref.new_local_task_waker(pid) } @@ -345,10 +338,6 @@ impl PrivateAccess for ThreadSafe { self.rt.remove_deadline(self.pid, deadline); } - fn change_deadline(&mut self, old_pid: ProcessId, deadline: Instant) { - self.rt.change_deadline(old_pid, self.pid, deadline); - } - fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { runtime_ref.new_shared_task_waker(pid) } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 3d7b7d1d0..e1986086f 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -673,15 +673,6 @@ impl RuntimeRef { self.internals.timers.borrow_mut().remove(pid, deadline); } - /// Change the `ProcessId` of a deadline. - fn change_deadline(&mut self, from: ProcessId, to: ProcessId, deadline: Instant) { - ::log::trace!(old_pid = from.0, new_pid = to.0, deadline = as_debug!(deadline); "changing deadline"); - self.internals - .timers - .borrow_mut() - .change(from, deadline, to); - } - /// Returns a copy of the shared internals. fn clone_shared(&self) -> Arc { self.internals.shared.clone() diff --git a/rt/src/local/timers.rs b/rt/src/local/timers.rs index bf1f5744f..1db2b244d 100644 --- a/rt/src/local/timers.rs +++ b/rt/src/local/timers.rs @@ -156,23 +156,6 @@ impl Timers { self.get_timers(pid, deadline, remove_timer, remove_timer); } - /// Change the `ProcessId` of a previously added deadline. 
- pub(crate) fn change(&mut self, pid: ProcessId, deadline: Instant, new_pid: ProcessId) { - let deadline = self.checked_deadline(deadline); - // NOTE: we need to update the cache in the case where the deadline was - // never added or expired, because the `timers` module depends on the - // fact it will be scheduled once the timer expires. - self.cached_next_deadline.update(deadline); - // NOTE: don't need to update the change as it only keep track of the - // deadline, which doesn't change. - self.get_timers( - pid, - deadline, - |timers, timer| change_timer(timers, timer, new_pid), - |timers, timer| change_timer(timers, timer, new_pid), - ); - } - /// Determines in what list of timers a timer with `pid` and `deadline` /// would be/go into. Then calls the `slot_f` function for a timer list in /// the slots, or `overflow_f` with the overflow list. @@ -321,18 +304,6 @@ where } } -/// Change the pid of a previously added `timer` in `timers` -fn change_timer(timers: &mut Vec>, timer: Timer, new_pid: ProcessId) -where - Timer: Ord + Copy, -{ - match timers.binary_search(&timer) { - Ok(idx) => timers[idx].pid = new_pid, - #[rustfmt::skip] - Err(idx) => timers.insert(idx, Timer { pid: new_pid, deadline: timer.deadline }), - } -} - /// Returns the different between `epoch` and `time`, truncated to /// [`TimeOffset`]. #[allow(clippy::cast_possible_truncation)] // TODO: move to last line. diff --git a/rt/src/local/timers_tests.rs b/rt/src/local/timers_tests.rs index 4f6a62187..43faa9f60 100644 --- a/rt/src/local/timers_tests.rs +++ b/rt/src/local/timers_tests.rs @@ -218,144 +218,6 @@ fn remove_deadline_in_the_past() { assert_eq!(timers.remove_next(timers.epoch), None); } -#[test] -fn change_deadline() { - let mut timers = Timers::new(); - let deadline = timers.epoch + Duration::from_millis(10); - timers.add(PID, deadline); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); - assert_eq!(timers.remove_next(deadline), None); -} - -#[test] -fn changing_never_added_deadline_adds_it() { - let mut timers = Timers::new(); - let deadline = timers.epoch + Duration::from_millis(10); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); -} - -#[test] -fn change_expired_deadline() { - let mut timers = Timers::new(); - let deadline = timers.epoch + Duration::from_millis(10); - timers.add(PID, deadline); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); -} - -#[test] -fn change_deadline_from_all_slots() { - let mut timers = Timers::new(); - - // Add a deadline to all slots and the overflow list. 
- for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } - - let first_deadline = timers.epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); - assert_eq!(timers.index, 0); - - let mut next_deadline = first_deadline; - for n in 0..=SLOTS { - timers.change(ProcessId(n), next_deadline, ProcessId(100 + n)); - assert_eq!(timers.remove_next(next_deadline), Some(ProcessId(100 + n))); - assert_eq!(timers.remove_next(next_deadline), None); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn change_deadline_from_all_slots_interleaved() { - let mut timers = Timers::new(); - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - timers.change(ProcessId(n), deadline, ProcessId(100 + n)); - } - - let now = timers.epoch + Duration::from_nanos((SLOTS as u64 * NS_PER_SLOT as u64) + 10); - - let mut expected_index = 0; - for n in 0..=SLOTS { - assert_eq!(timers.remove_next(now), Some(ProcessId(100 + n))); - assert_eq!(timers.index, expected_index); - expected_index = (expected_index + 1) % SLOTS as u8; - } - assert_eq!(timers.index, 0); -} - -#[test] -fn change_deadline_after_epoch_advance() { - let mut timers = Timers::new(); - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } - - let first_deadline = timers.epoch + Duration::from_nanos(10); - let now = timers.epoch + DURATION_PER_SLOT; - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(now), Some(ProcessId(0))); - assert_eq!(timers.remove_next(now), None); - assert_eq!(timers.index, 1); - assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); - - let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for n in 1..=SLOTS { - timers.change(ProcessId(n), next_deadline, ProcessId(100 + n)); - assert_eq!(timers.remove_next(next_deadline), Some(ProcessId(100 + n))); - assert_eq!(timers.remove_next(next_deadline), None); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn change_deadline_in_the_past() { - let mut timers = Timers::new(); - let deadline = timers.epoch - Duration::from_secs(1); - timers.add(PID, deadline); - assert_eq!(timers.next(), Some(timers.epoch)); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(timers.epoch)); - assert_eq!(timers.remove_next(timers.epoch), Some(PID2)); -} - -#[test] -fn changing_never_added_deadline_in_the_past_adds_it() { - let mut timers = Timers::new(); - let deadline = timers.epoch - Duration::from_secs(1); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(timers.epoch)); - assert_eq!(timers.remove_next(timers.epoch), Some(PID2)); -} - #[test] fn deadlines() { let mut timers = Timers::new(); diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 580790f18..6b3999209 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -215,11 +215,6 @@ impl RuntimeInternals { 
self.timers.remove(pid, deadline); } - /// See [`Timers::change`]. - pub(super) fn change_deadline(&self, from: ProcessId, to: ProcessId, deadline: Instant) { - self.timers.change(from, deadline, to); - } - /// See [`Timers::remove_next`]. pub(crate) fn remove_next_deadline(&self, now: Instant) -> Option { self.timers.remove_next(now) diff --git a/rt/src/shared/timers.rs b/rt/src/shared/timers.rs index 2901b4c76..2cfa769d7 100644 --- a/rt/src/shared/timers.rs +++ b/rt/src/shared/timers.rs @@ -168,16 +168,6 @@ impl Timers { self.get_timers(pid, deadline, remove_timer, remove_timer); } - /// Change the `ProcessId` of a previously added deadline. - pub(crate) fn change(&self, pid: ProcessId, deadline: Instant, new_pid: ProcessId) { - self.get_timers( - pid, - deadline, - |timers, timer| change_timer(timers, timer, new_pid), - |timers, timer| change_timer(timers, timer, new_pid), - ); - } - /// Determines in what list of timers a timer with `pid` and `deadline` /// would be/go into. Then calls the `slot_f` function for a timer list in /// the slots, or `overflow_f` with the overflow list. @@ -306,18 +296,6 @@ where } } -/// Change the pid of a previously added `timer` in `timers` -fn change_timer(timers: &mut Vec>, timer: Timer, new_pid: ProcessId) -where - Timer: Ord + Copy, -{ - match timers.binary_search(&timer) { - Ok(idx) => timers[idx].pid = new_pid, - #[rustfmt::skip] - Err(idx) => timers.insert(idx, Timer { pid: new_pid, deadline: timer.deadline }), - } -} - /// Remove the first timer if it's before `time`. /// /// Returns `Ok(timer)` if there is a timer with a deadline before `time`. diff --git a/rt/src/shared/timers_tests.rs b/rt/src/shared/timers_tests.rs index af04e9e81..2af20a0df 100644 --- a/rt/src/shared/timers_tests.rs +++ b/rt/src/shared/timers_tests.rs @@ -230,149 +230,3 @@ fn remove_deadline_in_the_past() { assert_eq!(timers.next(), None); assert_eq!(timers.remove_next(epoch), None); } - -#[test] -fn change_deadline() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch + Duration::from_millis(10); - timers.add(PID, deadline); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); - assert_eq!(timers.remove_next(deadline), None); -} - -#[test] -fn changing_never_added_deadline_adds_it() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch + Duration::from_millis(10); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); -} - -#[test] -fn change_expired_deadline() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch + Duration::from_millis(10); - timers.add(PID, deadline); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID2)); -} - -#[test] -fn change_deadline_from_all_slots() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. 
- for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } - - let first_deadline = epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(epoch), None); - assert_eq!(timers.epoch.read().unwrap().index, 0); - - let mut next_deadline = first_deadline; - for n in 0..=SLOTS { - timers.change(ProcessId(n), next_deadline, ProcessId(100 + n)); - assert_eq!(timers.remove_next(next_deadline), Some(ProcessId(100 + n))); - assert_eq!(timers.remove_next(next_deadline), None); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn change_deadline_from_all_slots_interleaved() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - timers.change(ProcessId(n), deadline, ProcessId(100 + n)); - } - - let now = epoch + Duration::from_nanos((SLOTS as u64 * NS_PER_SLOT as u64) + 10); - - let mut expected_index = 0; - for n in 0..=SLOTS { - assert_eq!(timers.remove_next(now), Some(ProcessId(100 + n))); - assert_eq!(timers.epoch.read().unwrap().index, expected_index); - expected_index = (expected_index + 1) % SLOTS as u8; - } - assert_eq!(timers.epoch.read().unwrap().index, 0); -} - -#[test] -fn change_deadline_after_epoch_advance() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } - - let first_deadline = epoch + Duration::from_nanos(10); - let now = epoch + DURATION_PER_SLOT; - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(now), Some(ProcessId(0))); - assert_eq!(timers.remove_next(now), None); - assert_eq!(timers.epoch.read().unwrap().index, 1); - assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); - - let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for n in 1..=SLOTS { - timers.change(ProcessId(n), next_deadline, ProcessId(100 + n)); - assert_eq!(timers.remove_next(next_deadline), Some(ProcessId(100 + n))); - assert_eq!(timers.remove_next(next_deadline), None); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn change_deadline_in_the_past() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch - Duration::from_secs(1); - timers.add(PID, deadline); - assert_eq!(timers.next(), Some(epoch)); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(epoch)); - assert_eq!(timers.remove_next(epoch), Some(PID2)); -} - -#[test] -fn changing_never_added_deadline_in_the_past_adds_it() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch - Duration::from_secs(1); - timers.change(PID, deadline, PID2); - assert_eq!(timers.next(), Some(epoch)); - assert_eq!(timers.remove_next(epoch), Some(PID2)); -} diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 2608138ae..f32d5b67d 100644 
--- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -293,7 +293,7 @@ where if let Some(timeout) = notify.watchdog_timeout() { debug!(timeout = as_debug!(timeout); "started via systemd with watchdog"); - let mut interval = Interval::every(&mut ctx, timeout); + let mut interval = Interval::every(ctx.runtime_ref().clone(), timeout); loop { match either(ctx.receive_next(), next(&mut interval)).await { Ok(Ok(msg)) => match msg { diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 813c9da1d..0a5602e10 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -11,15 +11,12 @@ use std::async_iter::AsyncIterator; use std::future::Future; -use std::mem::ManuallyDrop; +use std::io; use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; -use std::{io, ptr}; -use heph::actor; - -use crate::{self as rt, Bound}; +use crate::{self as rt}; /// Type returned when the deadline has passed. /// @@ -42,8 +39,8 @@ impl From for io::ErrorKind { /// A [`Future`] that represents a timer. /// /// If this future returns [`Poll::Ready`]`(`[`DeadlinePassed`]`)` it means that -/// the deadline has passed. If it returns [`Poll::Pending`] it's not yet -/// passed. +/// the deadline has passed. If it returns [`Poll::Pending`] the deadline has +/// not yet passed. /// /// # Examples /// @@ -74,10 +71,10 @@ impl From for io::ErrorKind { /// # Ok(()) /// # } /// # -/// async fn actor(mut ctx: actor::Context) { +/// async fn actor(ctx: actor::Context) { /// # let start = Instant::now(); /// // Create a timer, this will be ready once the timeout has passed. -/// let timeout = Timer::after(&mut ctx, Duration::from_millis(200)); +/// let timeout = Timer::after(ctx.runtime_ref().clone(), Duration::from_millis(200)); /// # assert!(timeout.deadline() >= start + Duration::from_millis(200)); /// /// // Wait for the timer to pass. @@ -91,28 +88,25 @@ impl From for io::ErrorKind { pub struct Timer { deadline: Instant, rt: RT, - // NOTE: when adding fields also add to [`Timer::wrap`]. + /// If `true` it means we've added a timer that hasn't expired yet. + timer_pending: bool, } impl Timer { /// Create a new `Timer`. - pub fn at(ctx: &mut actor::Context, deadline: Instant) -> Timer - where - RT: Clone, - { - let mut rt = ctx.runtime().clone(); - rt.add_deadline(deadline); - Timer { deadline, rt } + pub const fn at(rt: RT, deadline: Instant) -> Timer { + Timer { + deadline, + rt, + timer_pending: false, + } } /// Create a new timer, based on a timeout. /// - /// Same as calling `Timer::at(&mut ctx, Instant::now() + timeout)`. - pub fn after(ctx: &mut actor::Context, timeout: Duration) -> Timer - where - RT: Clone, - { - Timer::at(ctx, Instant::now() + timeout) + /// Same as calling `Timer::at(rt, Instant::now() + timeout)`. + pub fn after(rt: RT, timeout: Duration) -> Timer { + Timer::at(rt, Instant::now() + timeout) } /// Returns the deadline set for this `Timer`. @@ -126,20 +120,10 @@ impl Timer { } /// Wrap a future creating a new `Deadline`. - pub fn wrap(self, future: Fut) -> Deadline { - // We don't want to run the destructor as that would remove the - // deadline, which we need in `Deadline` as well. As a bonus we can - // safely move `RT` without having to clone it (which normally can't be - // done with `Drop` types). - // Safety: See [`ManuallyDrop::take`], rather then taking the entire - // thing struct at once we read (move out of) value by value. 
- let this = ManuallyDrop::new(self); - let deadline = unsafe { ptr::addr_of!(this.deadline).read() }; - let rt = unsafe { ptr::addr_of!(this.rt).read() }; + pub const fn wrap(self, future: Fut) -> Deadline { Deadline { - deadline, + timer: self, future, - rt, } } } @@ -147,38 +131,34 @@ impl Timer { impl Future for Timer { type Output = DeadlinePassed; - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { if self.has_passed() { - Poll::Ready(DeadlinePassed) - } else { - Poll::Pending + self.timer_pending = false; + return Poll::Ready(DeadlinePassed); + } else if !self.timer_pending { + let deadline = self.deadline; + self.rt.add_deadline(deadline); + self.timer_pending = true; } + Poll::Pending } } impl Unpin for Timer {} -impl Bound for Timer { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - let old_pid = self.rt.change_pid(ctx.runtime_ref().pid()); - self.rt.change_deadline(old_pid, self.deadline); - Ok(()) - } -} - impl Drop for Timer { fn drop(&mut self) { - self.rt.remove_deadline(self.deadline); + if self.timer_pending { + self.rt.remove_deadline(self.deadline); + } } } /// A [`Future`] that wraps another future setting a deadline for it. /// -/// When this future is polled it first checks if the deadline has passed, if so -/// it returns [`Poll::Ready`]`(Err(`[`DeadlinePassed`]`.into()))`. Otherwise -/// this will poll the future it wraps. +/// When this future is polled it first checks if the underlying future `Fut` +/// can make progress. If it returns pending it will check if the deadline has +/// expired. /// /// # Notes /// @@ -222,12 +202,12 @@ impl Drop for Timer { /// # } /// # } /// # -/// async fn actor(mut ctx: actor::Context) { +/// async fn actor(ctx: actor::Context) { /// // `OtherFuture` is a type that implements `Future`. /// let future = IoFuture; /// // Create our deadline. /// # let start = Instant::now(); -/// let deadline_future = Deadline::after(&mut ctx, Duration::from_millis(100), future); +/// let deadline_future = Deadline::after(ctx.runtime_ref().clone(), Duration::from_millis(100), future); /// # assert!(deadline_future.deadline() >= start + Duration::from_millis(100)); /// /// // Now we await the results. @@ -241,54 +221,34 @@ impl Drop for Timer { #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Deadline { - deadline: Instant, + timer: Timer, future: Fut, - rt: RT, - // NOTE: when adding fields also add to [`Deadline::into_inner`]. } impl Deadline { /// Create a new `Deadline`. - pub fn at( - ctx: &mut actor::Context, - deadline: Instant, - future: Fut, - ) -> Deadline - where - RT: Clone, - { - let mut rt = ctx.runtime().clone(); - rt.add_deadline(deadline); + pub const fn at(rt: RT, deadline: Instant, future: Fut) -> Deadline { Deadline { - deadline, + timer: Timer::at(rt, deadline), future, - rt, } } /// Create a new deadline based on a timeout. /// - /// Same as calling `Deadline::at(&mut ctx, Instant::now() + timeout, - /// future)`. - pub fn after( - ctx: &mut actor::Context, - timeout: Duration, - future: Fut, - ) -> Deadline - where - RT: Clone, - { - Deadline::at(ctx, Instant::now() + timeout, future) + /// Same as calling `Deadline::at(rt, Instant::now() + timeout, future)`. + pub fn after(rt: RT, timeout: Duration, future: Fut) -> Deadline { + Deadline::at(rt, Instant::now() + timeout, future) } - /// Returns the deadline set for this `Deadline`. 
+ /// Returns the deadline set. pub const fn deadline(&self) -> Instant { - self.deadline + self.timer.deadline } /// Returns `true` if the deadline has passed. pub fn has_passed(&self) -> bool { - self.deadline <= Instant::now() + self.timer.deadline <= Instant::now() } /// Returns a reference to the wrapped future. @@ -302,37 +262,10 @@ impl Deadline { } /// Returns the wrapped future. - pub fn into_inner(mut self) -> Fut { - self.rt.remove_deadline(self.deadline); - // Safety: See [`ManuallyDrop::take`], rather then taking the entire - // thing struct at once we read (move out of) value by value. - let mut this = ManuallyDrop::new(self); - unsafe { ptr::addr_of_mut!(this.deadline).drop_in_place() } - unsafe { ptr::addr_of_mut!(this.rt).drop_in_place() } - unsafe { ptr::addr_of!(this.future).read() } - } -} - -/* TODO: add this once `specialization` feature is stabilised. -impl Future for Deadline -where - Fut: Future, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - if self.has_passed() { - Poll::Ready(Err(DeadlinePassed)) - } else { - // Safety: this is safe because we're not moving the future. - let future = unsafe { - Pin::map_unchecked_mut(self, |this| &mut this.future) - }; - future.poll(ctx).map(Ok) - } + pub fn into_inner(self) -> Fut { + self.future } } -*/ impl Future for Deadline where @@ -341,35 +274,25 @@ where { type Output = Result; - fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - if self.has_passed() { - Poll::Ready(Err(DeadlinePassed.into())) - } else { - // Safety: this is safe because we're not moving the future. - let future = unsafe { Pin::map_unchecked_mut(self, |this| &mut this.future) }; - future.poll(ctx) + fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the future. + let future = unsafe { Pin::map_unchecked_mut(self.as_mut(), |this| &mut this.future) }; + match future.poll(ctx) { + Poll::Ready(result) => Poll::Ready(result), + Poll::Pending => { + // SAFETY: not moving the timer. + let timer = unsafe { Pin::map_unchecked_mut(self, |this| &mut this.timer) }; + match timer.poll(ctx) { + Poll::Ready(deadline) => Poll::Ready(Err(deadline.into())), + Poll::Pending => Poll::Pending, + } + } } } } impl Unpin for Deadline {} -impl Bound for Deadline { - type Error = io::Error; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> io::Result<()> { - let old_pid = self.rt.change_pid(ctx.runtime_ref().pid()); - self.rt.change_deadline(old_pid, self.deadline); - Ok(()) - } -} - -impl Drop for Deadline { - fn drop(&mut self) { - self.rt.remove_deadline(self.deadline); - } -} - /// An [`AsyncIterator`] that yields an item after an interval has passed. /// /// This itertor will never return `None`, it will always set another deadline @@ -377,10 +300,13 @@ impl Drop for Deadline { /// /// # Notes /// -/// The next deadline will always will be set after this returns `Poll::Ready`. -/// This means that if the interval is very short and the iterator is not polled -/// often enough it's possible that the actual time between yielding two values -/// can become bigger then the specified interval. +/// The next deadline will always will be set for exactly the specified interval +/// after the last passed deadline. This means that if the iterator is not +/// polled often enoguh it can be that deadlines will be set that expire +/// immediately, yielding items in quick succession. 
+/// +/// If the above description behaviour is not desired, but you rather wait a +/// certain interval between work consider using a [`Timer`]. /// /// # Examples /// @@ -414,9 +340,9 @@ impl Drop for Deadline { /// # Ok(()) /// # } /// -/// async fn actor(mut ctx: actor::Context) { +/// async fn actor(ctx: actor::Context) { /// # let start = Instant::now(); -/// let mut interval = Interval::every(&mut ctx, Duration::from_millis(200)); +/// let mut interval = Interval::every(ctx.runtime_ref().clone(), Duration::from_millis(200)); /// # assert!(interval.next_deadline() >= start + Duration::from_millis(200)); /// loop { /// // Wait until the next timer expires. @@ -430,64 +356,39 @@ impl Drop for Deadline { #[derive(Debug)] #[must_use = "AsyncIterators do nothing unless polled"] pub struct Interval { - deadline: Instant, + timer: Timer, interval: Duration, - rt: RT, } impl Interval { /// Create a new `Interval`. - pub fn every(ctx: &mut actor::Context, interval: Duration) -> Interval - where - RT: Clone, - { - let deadline = Instant::now() + interval; - let mut rt = ctx.runtime().clone(); - rt.add_deadline(deadline); + pub fn every(rt: RT, interval: Duration) -> Interval { Interval { - deadline, interval, - rt, + timer: Timer::after(rt, interval), } } /// Returns the next deadline for this `Interval`. pub const fn next_deadline(&self) -> Instant { - self.deadline + self.timer.deadline } } impl AsyncIterator for Interval { type Item = DeadlinePassed; - fn poll_next(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { - if self.deadline <= Instant::now() { - // Determine the next deadline. - let next_deadline = Instant::now() + self.interval; - let this = Pin::get_mut(self); - this.deadline = next_deadline; - this.rt.add_deadline(next_deadline); - Poll::Ready(Some(DeadlinePassed)) - } else { - Poll::Pending + fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { + let this = Pin::into_inner(self); + match Pin::new(&mut this.timer).poll(ctx) { + Poll::Ready(deadline) => { + this.timer.deadline += this.interval; + this.timer.timer_pending = false; + Poll::Ready(Some(deadline)) + } + Poll::Pending => Poll::Pending, } } } impl Unpin for Interval {} - -impl Bound for Interval { - type Error = !; - - fn bind_to(&mut self, ctx: &mut actor::Context) -> Result<(), !> { - let old_pid = self.rt.change_pid(ctx.runtime_ref().pid()); - self.rt.change_deadline(old_pid, self.deadline); - Ok(()) - } -} - -impl Drop for Interval { - fn drop(&mut self) { - self.rt.remove_deadline(self.deadline); - } -} diff --git a/rt/tests/functional/test.rs b/rt/tests/functional/test.rs index 4fe15d288..55a63c711 100644 --- a/rt/tests/functional/test.rs +++ b/rt/tests/functional/test.rs @@ -98,8 +98,8 @@ fn catch_panics_spawned_future() { const SLEEP_TIME: Duration = Duration::from_millis(200); /// Actor that sleeps and then returns. 
-async fn sleepy_actor(mut ctx: actor::Context) { - let _ = Timer::after(&mut ctx, SLEEP_TIME).await; +async fn sleepy_actor(ctx: actor::Context) { + let _ = Timer::after(ctx.runtime_ref().clone(), SLEEP_TIME).await; } #[test] diff --git a/rt/tests/functional/timer.rs b/rt/tests/functional/timer.rs index b10aafd3e..537892178 100644 --- a/rt/tests/functional/timer.rs +++ b/rt/tests/functional/timer.rs @@ -5,13 +5,13 @@ use std::task::{self, Poll}; use std::thread; use std::time::{Duration, Instant}; +use heph::actor; use heph::supervisor::NoSupervisor; -use heph::{actor, ActorRef}; use heph_rt::spawn::ActorOptions; use heph_rt::test::{init_local_actor, poll_actor, poll_future, poll_next}; use heph_rt::timer::{Deadline, DeadlinePassed, Interval, Timer}; use heph_rt::util::next; -use heph_rt::{self as rt, Bound, Runtime, RuntimeRef, ThreadLocal, ThreadSafe}; +use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal, ThreadSafe}; use crate::util::{assert_size, count_polls, expect_pending}; @@ -20,12 +20,12 @@ const TIMEOUT: Duration = Duration::from_millis(100); #[test] fn size() { - assert_size::>(32); - assert_size::>(32); - assert_size::>(32); - assert_size::>(32); - assert_size::>(48); - assert_size::>(48); + assert_size::>(40); + assert_size::>(40); + assert_size::>(40); + assert_size::>(40); + assert_size::>(56); + assert_size::>(56); assert_size::(0); } @@ -37,9 +37,9 @@ fn deadline_passed_into_io_error() { #[test] fn timer() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let start = Instant::now(); - let mut timer = Timer::after(&mut ctx, TIMEOUT); + let mut timer = Timer::after(ctx.runtime_ref().clone(), TIMEOUT); assert!(timer.deadline() >= start + TIMEOUT); assert!(!timer.has_passed()); @@ -70,10 +70,10 @@ impl Future for AlwaysPending { #[test] fn timer_wrap() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let start = Instant::now(); let future = AlwaysPending; - let mut deadline = Timer::after(&mut ctx, TIMEOUT).wrap(future); + let mut deadline = Timer::after(ctx.runtime_ref().clone(), TIMEOUT).wrap(future); assert!(deadline.deadline() >= start + TIMEOUT); assert!(!deadline.has_passed()); @@ -94,10 +94,10 @@ fn timer_wrap() { #[test] fn deadline() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let start = Instant::now(); let future = AlwaysPending; - let mut deadline = Deadline::after(&mut ctx, TIMEOUT, future.clone()); + let mut deadline = Deadline::after(ctx.runtime_ref().clone(), TIMEOUT, future.clone()); assert!(deadline.deadline() >= start + TIMEOUT); assert!(!deadline.has_passed()); assert_eq!(*deadline.get_ref(), future); @@ -123,9 +123,9 @@ fn deadline() { #[test] fn interval() { - async fn actor(mut ctx: actor::Context) { + async fn actor(ctx: actor::Context) { let start = Instant::now(); - let mut interval = Interval::every(&mut ctx, TIMEOUT); + let mut interval = Interval::every(ctx.runtime_ref().clone(), TIMEOUT); assert!(interval.next_deadline() >= start + TIMEOUT); let _ = next(&mut interval).await; } @@ -141,29 +141,29 @@ fn interval() { #[test] fn triggered_timers_run_actors() { - async fn timer_actor(mut ctx: actor::Context) + async fn timer_actor(ctx: actor::Context) where RT: rt::Access + Clone, { - let timer = Timer::after(&mut ctx, TIMEOUT); + let timer = Timer::after(ctx.runtime_ref().clone(), TIMEOUT); let _ = timer.await; } - async fn deadline_actor(mut ctx: actor::Context) + async fn deadline_actor(ctx: actor::Context) where RT: rt::Access 
+ Clone, { let future = AlwaysPending; - let deadline = Deadline::after(&mut ctx, TIMEOUT, future); + let deadline = Deadline::after(ctx.runtime_ref().clone(), TIMEOUT, future); let res: Result<(), DeadlinePassed> = deadline.await; assert_eq!(res, Err(DeadlinePassed)); } - async fn interval_actor(mut ctx: actor::Context) + async fn interval_actor(ctx: actor::Context) where RT: rt::Access + Clone + Unpin, { - let mut interval = Interval::every(&mut ctx, TIMEOUT); + let mut interval = Interval::every(ctx.runtime_ref().clone(), TIMEOUT); let _ = next(&mut interval).await; } @@ -217,45 +217,45 @@ fn triggered_timers_run_actors() { #[test] fn timers_dont_trigger_after_drop() { - async fn timer_actor(mut ctx: actor::Context) + async fn timer_actor(ctx: actor::Context) where RT: rt::Access + Clone, { // Setup an initial timer. - let mut timer = Timer::after(&mut ctx, SMALL_TIMEOUT); + let mut timer = Timer::after(ctx.runtime_ref().clone(), SMALL_TIMEOUT); expect_pending(poll_future(Pin::new(&mut timer))); // Dropping it should remove the timer. drop(timer); - let timer = Timer::after(&mut ctx, TIMEOUT); + let timer = Timer::after(ctx.runtime_ref().clone(), TIMEOUT); let (_, poll_count) = count_polls(timer).await; // Should only be polled twice, the first time the deadline // hasn't passed, but the second time its called it should. assert_eq!(poll_count, 2); } - async fn deadline_actor(mut ctx: actor::Context) + async fn deadline_actor(ctx: actor::Context) where RT: rt::Access + Clone, { - let mut deadline = Deadline::after(&mut ctx, SMALL_TIMEOUT, AlwaysPending); + let mut deadline = Deadline::after(ctx.runtime_ref().clone(), SMALL_TIMEOUT, AlwaysPending); expect_pending(poll_future(Pin::new(&mut deadline))); drop(deadline); - let deadline = Deadline::after(&mut ctx, TIMEOUT, AlwaysPending); + let deadline = Deadline::after(ctx.runtime_ref().clone(), TIMEOUT, AlwaysPending); let (_, poll_count) = count_polls(deadline).await; assert_eq!(poll_count, 2); } - async fn interval_actor(mut ctx: actor::Context) + async fn interval_actor(ctx: actor::Context) where RT: rt::Access + Clone + Unpin, { - let mut interval = Interval::every(&mut ctx, SMALL_TIMEOUT); + let mut interval = Interval::every(ctx.runtime_ref().clone(), SMALL_TIMEOUT); expect_pending(poll_next(Pin::new(&mut interval))); drop(interval); - let interval = Interval::every(&mut ctx, TIMEOUT); + let interval = Interval::every(ctx.runtime_ref().clone(), TIMEOUT); let (_, poll_count) = next(count_polls(interval)).await.unwrap(); assert_eq!(poll_count, 2); } @@ -308,287 +308,3 @@ fn timers_dont_trigger_after_drop() { runtime.start().unwrap(); } - -#[test] -fn timers_actor_bound() { - async fn timer_actor1(mut ctx: actor::Context, actor_ref: ActorRef>) - where - RT: rt::Access + Clone, - { - let timer = Timer::after(&mut ctx, TIMEOUT); - actor_ref.send(timer).await.unwrap(); - } - - async fn timer_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access + Clone, - { - let mut timer = ctx.receive_next().await.unwrap(); - timer.bind_to(&mut ctx).unwrap(); - let _ = timer.await; - } - - async fn deadline_actor1( - mut ctx: actor::Context, - actor_ref: ActorRef>, - ) where - RT: rt::Access + Clone, - { - let future = AlwaysPending; - let deadline = Deadline::after(&mut ctx, TIMEOUT, future); - actor_ref.send(deadline).await.unwrap(); - } - - async fn deadline_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access, - { - let mut deadline = ctx.receive_next().await.unwrap(); - deadline.bind_to(&mut ctx).unwrap(); - let res: Result<(), 
DeadlinePassed> = deadline.await; - assert_eq!(res, Err(DeadlinePassed)); - } - - async fn interval_actor1(mut ctx: actor::Context, actor_ref: ActorRef>) - where - RT: rt::Access + Clone, - { - let interval = Interval::every(&mut ctx, TIMEOUT); - actor_ref.send(interval).await.unwrap(); - } - - async fn interval_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access + Unpin, - { - let mut interval = ctx.receive_next().await.unwrap(); - interval.bind_to(&mut ctx).unwrap(); - let _ = next(&mut interval).await; - } - - fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { - // Spawn thread-local actors. - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - timer_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - timer_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - deadline_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - deadline_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - interval_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - interval_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - Ok(()) - } - - let mut runtime = Runtime::setup().build().unwrap(); - runtime.run_on_workers(setup).unwrap(); - - // Spawn thread-safe actors. - let actor_ref = runtime.spawn( - NoSupervisor, - timer_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - timer_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - let actor_ref = runtime.spawn( - NoSupervisor, - deadline_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - deadline_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - runtime.start().unwrap(); -} - -#[test] -fn timers_dont_trigger_after_actor_bound() { - async fn timer_actor1(mut ctx: actor::Context, actor_ref: ActorRef>) - where - RT: rt::Access + Clone, - { - // Setup an initial timer. - let mut timer = Timer::after(&mut ctx, SMALL_TIMEOUT); - expect_pending(poll_future(Pin::new(&mut timer))); - // Letting another bind it should remove the timer. - actor_ref.send(timer).await.unwrap(); - - let timer = Timer::after(&mut ctx, TIMEOUT); - let (_, poll_count) = count_polls(timer).await; - // Should only be polled twice, the first time the deadline - // hasn't passed, but the second time its called it should. 
- assert_eq!(poll_count, 2); - } - - async fn timer_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access + Clone, - { - let mut timer = ctx.receive_next().await.unwrap(); - timer.bind_to(&mut ctx).unwrap(); - let _ = timer.await; - } - - async fn deadline_actor1( - mut ctx: actor::Context, - actor_ref: ActorRef>, - ) where - RT: rt::Access + Clone, - { - let mut deadline = Deadline::after(&mut ctx, SMALL_TIMEOUT, AlwaysPending); - expect_pending(poll_future(Pin::new(&mut deadline))); - actor_ref.send(deadline).await.unwrap(); - - let deadline = Deadline::after(&mut ctx, TIMEOUT, AlwaysPending); - let (_, poll_count) = count_polls(deadline).await; - assert_eq!(poll_count, 2); - } - - async fn deadline_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access, - { - let mut deadline = ctx.receive_next().await.unwrap(); - deadline.bind_to(&mut ctx).unwrap(); - let res: Result<(), DeadlinePassed> = deadline.await; - assert_eq!(res, Err(DeadlinePassed)); - } - - async fn interval_actor1(mut ctx: actor::Context, actor_ref: ActorRef>) - where - RT: rt::Access + Clone, - { - let mut interval = Interval::every(&mut ctx, SMALL_TIMEOUT); - expect_pending(poll_next(Pin::new(&mut interval))); - actor_ref.send(interval).await.unwrap(); - - let interval = Interval::every(&mut ctx, TIMEOUT); - let (_, poll_count) = next(count_polls(interval)).await.unwrap(); - assert_eq!(poll_count, 2); - } - - async fn interval_actor2(mut ctx: actor::Context, RT>) - where - RT: rt::Access + Unpin, - { - let mut interval = ctx.receive_next().await.unwrap(); - interval.bind_to(&mut ctx).unwrap(); - let _ = next(&mut interval).await; - } - - fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { - // Spawn thread-local actors. - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - timer_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - timer_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - deadline_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - deadline_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - let actor_ref = runtime_ref.spawn_local( - NoSupervisor, - interval_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime_ref.spawn_local( - NoSupervisor, - interval_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - Ok(()) - } - - let mut runtime = Runtime::setup().build().unwrap(); - runtime.run_on_workers(setup).unwrap(); - - // Spawn thread-safe actors. 
- let actor_ref = runtime.spawn( - NoSupervisor, - timer_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - timer_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - let actor_ref = runtime.spawn( - NoSupervisor, - deadline_actor2 as fn(_) -> _, - (), - ActorOptions::default(), - ); - let _ = runtime.spawn( - NoSupervisor, - deadline_actor1 as fn(_, _) -> _, - actor_ref, - ActorOptions::default(), - ); - - runtime.start().unwrap(); -} diff --git a/rt/tests/util/mod.rs b/rt/tests/util/mod.rs index 98802804c..ca2e29245 100644 --- a/rt/tests/util/mod.rs +++ b/rt/tests/util/mod.rs @@ -203,7 +203,7 @@ where match TcpStream::connect(ctx.runtime_ref(), address).await { Ok(stream) => break Ok(stream), Err(_) if i >= 1 => { - Timer::after(ctx, Duration::from_millis(1)).await; + Timer::after(ctx.runtime_ref().clone(), Duration::from_millis(1)).await; i -= 1; continue; } From 0c5814c5b40b947036274b2af10f661adc6bfa3b Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 13:12:31 +0200 Subject: [PATCH 054/177] Change shared timers to use task::Wakers Instead of ProcessIds. This another step in removing ProcessIds from the RT types ThreadLocal and ThreadSafe. It does mean the Timer type grows with 8 bytes to hold a TimerToken, which can be used to remove a timer. --- rt/src/access.rs | 27 ++-- rt/src/lib.rs | 19 ++- rt/src/local/timers.rs | 2 +- rt/src/shared/mod.rs | 25 ++-- rt/src/shared/timers.rs | 211 ++++++++++++++---------------- rt/src/shared/timers_tests.rs | 239 +++++++++++++++++++++++++--------- rt/src/timer.rs | 34 +++-- rt/src/worker.rs | 9 +- rt/tests/functional/timer.rs | 12 +- 9 files changed, 354 insertions(+), 224 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index 33691b863..5cbcb21d2 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -42,6 +42,7 @@ use mio::{event, Interest}; use crate::process::ProcessId; use crate::spawn::{ActorOptions, FutureOptions, Spawn}; +use crate::timer::TimerToken; use crate::trace::{self, Trace}; use crate::{shared, RuntimeRef}; @@ -66,6 +67,7 @@ mod private { use mio::{event, Interest}; use crate::process::ProcessId; + use crate::timer::TimerToken; use crate::{trace, RuntimeRef}; /// Actual trait behind [`rt::Access`]. @@ -91,11 +93,11 @@ mod private { where S: event::Source + ?Sized; - /// Add a deadline. - fn add_deadline(&mut self, deadline: Instant); + /// Add a new timer expiring at `deadline` waking `waker`. + fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken; - /// Remove a previously set deadline. - fn remove_deadline(&mut self, deadline: Instant); + /// Remove a previously set timer. + fn remove_timer(&mut self, deadline: Instant, expire_token: TimerToken); /// Create a new [`task::Waker`]. fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker; @@ -187,12 +189,13 @@ impl PrivateAccess for ThreadLocal { self.rt.reregister(source, self.pid.into(), interest) } - fn add_deadline(&mut self, deadline: Instant) { - self.rt.add_deadline(self.pid, deadline); + fn add_timer(&mut self, deadline: Instant, _: task::Waker) -> TimerToken { + self.rt.add_timer(self.pid, deadline); + TimerToken(self.pid.0) // NOTE: not used. 
} - fn remove_deadline(&mut self, deadline: Instant) { - self.rt.remove_deadline(self.pid, deadline); + fn remove_timer(&mut self, deadline: Instant, _: TimerToken) { + self.rt.remove_timer(self.pid, deadline); } fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { @@ -330,12 +333,12 @@ impl PrivateAccess for ThreadSafe { self.rt.reregister(source, self.pid.into(), interest) } - fn add_deadline(&mut self, deadline: Instant) { - self.rt.add_deadline(self.pid, deadline); + fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken { + self.rt.add_timer(deadline, waker) } - fn remove_deadline(&mut self, deadline: Instant) { - self.rt.remove_deadline(self.pid, deadline); + fn remove_timer(&mut self, deadline: Instant, expire_token: TimerToken) { + self.rt.remove_timer(deadline, expire_token); } fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index e1986086f..1e350b5e1 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -155,7 +155,8 @@ never_type, new_uninit, stmt_expr_attributes, - type_alias_impl_trait + type_alias_impl_trait, + waker_getters )] #![warn( anonymous_parameters, @@ -661,15 +662,19 @@ impl RuntimeRef { self.internals.shared.mark_ready(pid); } - /// Add a deadline. - fn add_deadline(&mut self, pid: ProcessId, deadline: Instant) { - ::log::trace!(pid = pid.0, deadline = as_debug!(deadline); "adding deadline"); + /// Add a timer. + /// + /// See [`Timers::add`]. + pub(crate) fn add_timer(&self, pid: ProcessId, deadline: Instant) { + ::log::trace!(deadline = as_debug!(deadline); "adding timer"); self.internals.timers.borrow_mut().add(pid, deadline); } - /// Remove a deadline. - fn remove_deadline(&mut self, pid: ProcessId, deadline: Instant) { - ::log::trace!(pid = pid.0, deadline = as_debug!(deadline); "removing deadline"); + /// Remove a previously set timer. + /// + /// See [`Timers::remove`]. + pub(crate) fn remove_timer(&self, pid: ProcessId, deadline: Instant) { + ::log::trace!(deadline = as_debug!(deadline); "removing timer"); self.internals.timers.borrow_mut().remove(pid, deadline); } diff --git a/rt/src/local/timers.rs b/rt/src/local/timers.rs index 1db2b244d..34afae716 100644 --- a/rt/src/local/timers.rs +++ b/rt/src/local/timers.rs @@ -49,7 +49,7 @@ type TimeOffset = u32; /// empty however. /// /// The `slots` hold the timers with a [`TimeOffset`] which is the number of -/// nanosecond since epoch times it's index. The `index` filed determines the +/// nanosecond since epoch times it's index. The `index` field determines the /// current zero-slot, meaning its timers will expire next and all have a /// deadline within `0..NS_PER_SLOT` nanoseconds after `epoch`. The /// `slots[index+1]` list will have timers that expire diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 6b3999209..66fb9049c 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -13,12 +13,13 @@ use heph::actor::{self, NewActor}; use heph::actor_ref::ActorRef; use heph::supervisor::Supervisor; use heph_inbox as inbox; -use log::{debug, error, trace}; +use log::{as_debug, debug, error, trace}; use mio::unix::SourceFd; use mio::{event, Events, Interest, Poll, Registry, Token}; use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; +use crate::timer::TimerToken; use crate::{trace, ProcessId, ThreadSafe}; mod scheduler; @@ -205,19 +206,27 @@ impl RuntimeInternals { self.registry.reregister(source, token, interest) } + /// Add a timer. 
+ /// /// See [`Timers::add`]. - pub(super) fn add_deadline(&self, pid: ProcessId, deadline: Instant) { - self.timers.add(pid, deadline); + pub(crate) fn add_timer(&self, deadline: Instant, waker: task::Waker) -> TimerToken { + trace!(deadline = as_debug!(deadline); "adding timer"); + self.timers.add(deadline, waker) } + /// Remove a previously set timer. + /// /// See [`Timers::remove`]. - pub(super) fn remove_deadline(&self, pid: ProcessId, deadline: Instant) { - self.timers.remove(pid, deadline); + pub(crate) fn remove_timer(&self, deadline: Instant, expire_token: TimerToken) { + trace!(deadline = as_debug!(deadline); "removing timer"); + self.timers.remove(deadline, expire_token); } - /// See [`Timers::remove_next`]. - pub(crate) fn remove_next_deadline(&self, now: Instant) -> Option { - self.timers.remove_next(now) + /// Wake all futures who's timers has expired. + /// + /// See [`Timers::expire_timers`]. + pub(crate) fn expire_timers(&self, now: Instant) -> usize { + self.timers.expire_timers(now) } /// Determine the timeout to use in polling based on the current time diff --git a/rt/src/shared/timers.rs b/rt/src/shared/timers.rs index 2cfa769d7..534920d01 100644 --- a/rt/src/shared/timers.rs +++ b/rt/src/shared/timers.rs @@ -2,11 +2,14 @@ //! //! Also see the local timers implementation. -use std::cmp::{min, Ordering}; +use std::cmp::min; use std::sync::RwLock; +use std::task::Waker; use std::time::{Duration, Instant}; -use crate::ProcessId; +use log::{as_debug, trace}; + +use crate::timer::TimerToken; #[cfg(test)] #[path = "timers_tests.rs"] @@ -50,7 +53,7 @@ type TimeOffset = u32; /// empty however. /// /// The `slots` hold the timers with a [`TimeOffset`] which is the number of -/// nanosecond since epoch times it's index. The `index` filed determines the +/// nanosecond since epoch times it's index. The `index` field determines the /// current zero-slot, meaning its timers will expire next and all have a /// deadline within `0..NS_PER_SLOT` nanoseconds after `epoch`. The /// `slots[index+1]` list will have timers that expire @@ -67,7 +70,7 @@ type TimeOffset = u32; /// /// Note that it's possible for a thread to read the epoch (index and time), /// than gets descheduled, another thread updates the epoch and finally the -/// second thread insert the time based on a now outdated epoch. This situation +/// second thread insert a timer based on a now outdated epoch. This situation /// is fine as the timer will still be added to the correct slot, but it has a /// higher change of being added to the overflow list (which /// `maybe_update_epoch` deals with correctly). @@ -87,18 +90,23 @@ struct Epoch { index: u8, } +/// A timer in [`Timers`]. +#[derive(Debug)] +struct Timer { + deadline: T, + waker: Waker, +} + impl Timers { /// Create a new collection of timers. pub(crate) fn new() -> Timers { + const EMPTY: RwLock>> = RwLock::new(Vec::new()); Timers { epoch: RwLock::new(Epoch { time: Instant::now(), index: 0, }), - // TODO: replace with `RwLock::new(Vec::new()); SLOTS]` once - // possible. 
- #[rustfmt::skip] - slots: [RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new()), RwLock::new(Vec::new())], + slots: [EMPTY; SLOTS], overflow: RwLock::new(Vec::new()), } } @@ -107,23 +115,13 @@ impl Timers { pub(crate) fn len(&self) -> usize { let mut timers = 0; for slots in &self.slots { - let slots = slots.read().unwrap(); - let len = slots.len(); - drop(slots); - timers += len; - } - { - let overflow = self.overflow.read().unwrap(); - timers += overflow.len(); + timers += slots.read().unwrap().len(); } + timers += self.overflow.read().unwrap().len(); timers } /// Returns the next deadline, if any. - /// - /// If this return `Some` `woke_from_polling` must be called after polling, - /// before removing timers. That thread must also wake other workers threads - /// as they will see `None` here, **even if there is a timer set**. pub(crate) fn next(&self) -> Option { let (epoch_time, index) = { let epoch = self.epoch.read().unwrap(); @@ -132,8 +130,7 @@ impl Timers { let (second, first) = self.slots.split_at(index); let iter = first.iter().chain(second.iter()); for (n, slot) in iter.enumerate() { - let deadline = { slot.read().unwrap().last().map(|timer| timer.deadline) }; - if let Some(deadline) = deadline { + if let Some(deadline) = { slot.read().unwrap().last().map(|timer| timer.deadline) } { let ns_since_epoch = u64::from(deadline) + (n as u64 * u64::from(NS_PER_SLOT)); let deadline = epoch_time + Duration::from_nanos(ns_since_epoch); return Some(deadline); @@ -157,24 +154,29 @@ impl Timers { } /// Add a new deadline. - pub(crate) fn add(&self, pid: ProcessId, deadline: Instant) { + pub(crate) fn add(&self, deadline: Instant, waker: Waker) -> TimerToken { // NOTE: it's possible that we call `add_timer` based on an outdated // epoch. 
- self.get_timers(pid, deadline, add_timer, add_timer); + self.get_timers(deadline, |timers| match timers { + TimerLocation::InSlot((timers, deadline)) => add_timer(timers, deadline, waker), + TimerLocation::Overflow((timers, deadline)) => add_timer(timers, deadline, waker), + }) } /// Remove a previously added deadline. - pub(crate) fn remove(&self, pid: ProcessId, deadline: Instant) { - self.get_timers(pid, deadline, remove_timer, remove_timer); + pub(crate) fn remove(&self, deadline: Instant, token: TimerToken) { + self.get_timers(deadline, |timers| match timers { + TimerLocation::InSlot((timers, deadline)) => remove_timer(timers, deadline, token), + TimerLocation::Overflow((timers, deadline)) => remove_timer(timers, deadline, token), + }); } - /// Determines in what list of timers a timer with `pid` and `deadline` - /// would be/go into. Then calls the `slot_f` function for a timer list in - /// the slots, or `overflow_f` with the overflow list. - fn get_timers(&self, pid: ProcessId, deadline: Instant, slot_f: SF, overflow_f: OF) + /// Determines in what list of timers a timer with `deadline` would be/go + /// into. Then calls the function `f` with either a slot or the overflow + /// list. + fn get_timers(&self, deadline: Instant, f: F) -> T where - SF: FnOnce(&mut Vec>, Timer), - OF: FnOnce(&mut Vec>, Timer), + F: FnOnce(TimerLocation<'_>) -> T, { let (epoch_time, epoch_index) = { let epoch = self.epoch.read().unwrap(); @@ -187,20 +189,23 @@ impl Timers { let index = ((ns_since_epoch >> NS_PER_SLOT_BITS) & ((1 << SLOT_BITS) - 1)) as usize; let index = (epoch_index as usize + index) % SLOTS; let mut timers = self.slots[index].write().unwrap(); - slot_f(&mut timers, Timer { pid, deadline }); + f(TimerLocation::InSlot((&mut *timers, deadline))) } else { // Too far into the future to fit in the slots. let mut overflow = self.overflow.write().unwrap(); - overflow_f(&mut overflow, Timer { pid, deadline }); + f(TimerLocation::Overflow((&mut *overflow, deadline))) } } - /// Remove the next deadline that passed `now` returning the pid. + /// Expire all timers that have elapsed based on `now`. Returns the amount + /// of expired timers. /// /// # Safety /// /// `now` may never go backwards between calls. - pub(crate) fn remove_next(&self, now: Instant) -> Option { + pub(crate) fn expire_timers(&self, now: Instant) -> usize { + trace!(now = as_debug!(now); "expiring timers"); + let mut amount = 0; loop { // NOTE: Each loop iteration needs to calculate the `epoch_offset` // as the epoch changes each iteration. @@ -208,29 +213,44 @@ impl Timers { let epoch = self.epoch.read().unwrap(); (epoch.time, epoch.index as usize) }; - // Safety: `now` can't go backwards, otherwise this will panic. + // SAFETY: `now` can't go backwards, otherwise this will panic. let epoch_offset = now.duration_since(epoch_time).as_nanos(); - // NOTE: this truncates, which is fine as we need a max. of + // NOTE: this truncates, which is fine as we need a max of // `NS_PER_SLOT` anyway. #[allow(clippy::cast_possible_truncation)] let epoch_offset = min(epoch_offset, u128::from(TimeOffset::MAX)) as TimeOffset; - let res = { - let mut timers = self.slots[index].write().unwrap(); - remove_if_before(&mut timers, epoch_offset) - }; - match res { - Ok(timer) => return Some(timer.pid), - Err(true) => { - // Safety: slot is empty, which makes calling - // `maybe_update_epoch` OK. - if !self.maybe_update_epoch(now) { - // Didn't update epoch, no more timers to process. 
-                        return None;
+
+            loop {
+                // NOTE: don't inline this in the `match` statement, it will
+                // cause the lock to be held for the entire match statement,
+                // which we don't want.
+                let result =
+                    { remove_if_before(&mut self.slots[index].write().unwrap(), epoch_offset) };
+                match result {
+                    // Wake up the future.
+                    Ok(timer) => {
+                        timer.waker.wake();
+                        amount += 1;
+                        // Try another timer in this slot.
+                        continue;
+                    }
+                    Err(true) => {
+                        // SAFETY: slot is empty, which makes calling
+                        // `maybe_update_epoch` OK.
+                        if !self.maybe_update_epoch(now) {
+                            // Didn't update epoch, no more timers to process.
+                            return amount;
+                        } else {
+                            // Process the next slot.
+                            break;
+                        }
+                    }
+                    // Slot has timers with a deadline past `now`, so no more
+                    // timers to process.
+                    Err(false) => {
+                        return amount;
                     }
-                    // Else try again in the next loop.
                 }
-                // Slot has timers with a deadline past `now`.
-                Err(false) => return None,
             }
         }
     }
@@ -240,8 +260,8 @@ impl Timers {
     /// # Panics
     ///
     /// This panics if the current slot is not empty.
-    #[allow(clippy::cast_possible_truncation)] // TODO: move to new `epoch.index` line.
     fn maybe_update_epoch(&self, now: Instant) -> bool {
+        trace!(now = as_debug!(now); "maybe updating epoch");
         let epoch_time = {
             let mut epoch = self.epoch.write().unwrap();
             let new_epoch = epoch.time + DURATION_PER_SLOT;
@@ -254,14 +274,17 @@ impl Timers {
             debug_assert!(self.slots[epoch.index as usize].read().unwrap().is_empty());
 
             // Move to the next slot and update the epoch.
+            #[allow(clippy::cast_possible_truncation)]
             epoch.index = (epoch.index + 1) % self.slots.len() as u8;
             epoch.time = new_epoch;
             new_epoch
         };
+        trace!(epoch_time = as_debug!(epoch_time); "new epoch time");
 
         // Next move all the overflow timers that now fit in the slots.
         let time = epoch_time + OVERFLOW_DURATION;
         while let Ok(timer) = { remove_if_before(&mut self.overflow.write().unwrap(), time) } {
+            trace!(timer = as_debug!(timer); "moving overflow timer into wheel");
             // NOTE: we can't use the same optimisation as we do in the local
             // version where we know that all timers removed here go into the
             // `self.index-1` slot.
@@ -269,80 +292,48 @@ impl Timers {
             // could be that it add a timers to the overflow list which could
             // have fit in one of the slots. So we have to deal with that
             // possbility here.
-            self.add(timer.pid, timer.deadline);
+            _ = self.add(timer.deadline, timer.waker);
         }
 
         true
     }
 }
 
-/// Add `timer` to `timers`, ensuring it remains sorted.
-fn add_timer(timers: &mut Vec>, timer: Timer)
-where
-    Timer: Ord + Copy,
-{
-    let idx = match timers.binary_search(&timer) {
+/// Location of a timer.
+enum TimerLocation<'a> {
+    /// In one of the wheel's slots.
+    InSlot((&'a mut Vec>, TimeOffset)),
+    /// In the overflow vector.
+    Overflow((&'a mut Vec>, Instant)),
+}
+
+/// Add a new timer to `timers`, ensuring it remains sorted.
+fn add_timer(timers: &mut Vec>, deadline: T, waker: Waker) -> TimerToken {
+    let idx = match timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) {
         Ok(idx) | Err(idx) => idx,
     };
-    timers.insert(idx, timer);
+    let token = TimerToken(waker.as_raw().data() as usize);
+    timers.insert(idx, Timer { deadline, waker });
+    token
 }
 
-/// Remove a previously added `timer` from `timers`, ensuring it remains sorted.
-fn remove_timer(timers: &mut Vec>, timer: Timer)
-where
-    Timer: Ord + Copy,
-{
-    if let Ok(idx) = timers.binary_search(&timer) {
-        _ = timers.remove(idx);
+/// Remove a previously added `deadline` from `timers`, ensuring it remains sorted.
+fn remove_timer(timers: &mut Vec>, deadline: T, token: TimerToken) {
+    if let Ok(idx) = timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) {
+        if timers[idx].waker.as_raw().data() as usize == token.0 {
+            _ = timers.remove(idx);
+        }
     }
 }
 
 /// Remove the first timer if it's before `time`.
 ///
 /// Returns `Ok(timer)` if there is a timer with a deadline before `time`.
-/// Returns `Err(is_empty)`, indicating if `timers` is empty. Returns
-/// `Err(true)` is `timers` is empty, `Err(false)` if the are more timers in
-/// `timers`, but none with a deadline before `time`.
-fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool>
-where
-    T: Ord + Copy,
-{
+/// Otherwise this returns `Err(true)` if `timers` is empty or `Err(false)` if
+/// there are more timers in `timers`, but none with a deadline before `time`.
+fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool> {
     match timers.last() {
-        // TODO: is the `unwrap` removed here? Or do we need `unwrap_unchecked`?
         Some(timer) if timer.deadline <= time => Ok(timers.pop().unwrap()),
         Some(_) => Err(false),
         None => Err(true),
     }
 }
-
-/// A timer.
-///
-/// # Notes
-///
-/// The [`Ord`] implementation is in reverse order, i.e. the deadline to expire
-/// first will have the highest ordering value. Furthermore the ordering is only
-/// done base on the deadline, the process id is ignored in ordering. This
-/// allows `change_timer` to not worry about order when changing the process id
-/// of a timer.
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-struct Timer {
-    pid: ProcessId,
-    deadline: T,
-}
-
-impl Ord for Timer
-where
-    T: Ord,
-{
-    fn cmp(&self, other: &Self) -> Ordering {
-        other.deadline.cmp(&self.deadline)
-    }
-}
-
-impl PartialOrd for Timer
-where
-    T: Ord,
-{
-    fn partial_cmp(&self, other: &Self) -> Option {
-        Some(self.cmp(other))
-    }
-}
diff --git a/rt/src/shared/timers_tests.rs b/rt/src/shared/timers_tests.rs
index 2af20a0df..9a9e4078a 100644
--- a/rt/src/shared/timers_tests.rs
+++ b/rt/src/shared/timers_tests.rs
@@ -1,69 +1,140 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+use std::task::{Wake, Waker};
 use std::time::Duration;
 
-use crate::process::ProcessId;
+use crate::shared::timers::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, SLOTS};
+use crate::timer::TimerToken;
 
-use super::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, SLOTS};
+struct WakerBuilder {
+    awoken: Arc<[AtomicBool; N]>,
+    n: usize,
+}
+
+impl WakerBuilder {
+    fn new() -> WakerBuilder {
+        const FALSE: AtomicBool = AtomicBool::new(false);
+        WakerBuilder {
+            awoken: Arc::new([FALSE; N]),
+            n: 0,
+        }
+    }
 
-const PID: ProcessId = ProcessId(100);
-const PID2: ProcessId = ProcessId(200);
+    fn task_waker(&mut self) -> (usize, Waker) {
+        let n = self.n;
+        self.n += 1;
+        assert!(n <= N, "created too many task::Wakers");
+        (
+            n,
+            Waker::from(Arc::new(TaskWaker {
+                awoken: self.awoken.clone(),
+                n,
+            })),
+        )
+    }
+
+    fn is_awoken(&self, n: usize) -> bool {
+        self.awoken[n].load(Ordering::SeqCst)
+    }
+}
+
+/// [`Wake`] implementation.
+struct TaskWaker { + awoken: Arc<[AtomicBool; N]>, + n: usize, +} + +impl Wake for TaskWaker { + fn wake(self: Arc) { + self.awoken[self.n].store(true, Ordering::SeqCst) + } +} #[test] fn add_deadline_first_slot() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; let deadline = epoch + Duration::from_millis(100); - timers.add(PID, deadline); + + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(epoch), None); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); + + // Not yet expired. + assert_eq!(timers.expire_timers(epoch), 0); + assert!(!wakers.is_awoken(n)); + + // Waker is called when the deadline is expired. + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + // No more timers. + assert_eq!(timers.expire_timers(deadline + Duration::from_secs(100)), 0); } #[test] fn add_deadline_second_slot() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; + let deadline = epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 100); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(epoch), None); + + assert_eq!(timers.expire_timers(epoch), 0); assert_eq!(timers.epoch.read().unwrap().index, 0); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.epoch.read().unwrap().index, 1); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(epoch), 0); } #[test] fn add_deadline_overflow() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; + let deadline = epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(epoch), None); + + assert_eq!(timers.expire_timers(epoch), 0); assert_eq!(timers.epoch.read().unwrap().index, 0); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + // Should have advanced the epoch to come back around to 0. assert_eq!(timers.epoch.read().unwrap().index, 0); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(epoch), 0); } #[test] fn add_deadline_to_all_slots() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); let epoch = timers.epoch.read().unwrap().time; // Add a deadline to all slots and the overflow list. 
for n in 0..=SLOTS { let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + _ = timers.add(deadline, waker); } let first_deadline = epoch + Duration::from_nanos(10); assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(epoch), None); + assert_eq!(timers.expire_timers(epoch), 0); assert_eq!(timers.epoch.read().unwrap().index, 0); let mut expected_next_deadline = first_deadline; @@ -71,9 +142,11 @@ fn add_deadline_to_all_slots() { for n in 0..=SLOTS { assert_eq!(timers.next(), Some(expected_next_deadline)); let now = expected_next_deadline + Duration::from_nanos(1); - assert_eq!(timers.remove_next(now), Some(ProcessId(n))); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(n)); assert_eq!(timers.epoch.read().unwrap().index, expected_index); - assert_eq!(timers.remove_next(now), None); + + assert_eq!(timers.expire_timers(now), 0); assert_eq!(timers.epoch.read().unwrap().index, expected_index); expected_index = (expected_index + 1) % SLOTS as u8; @@ -84,82 +157,112 @@ fn add_deadline_to_all_slots() { #[test] fn add_deadline_in_the_past() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; - timers.add(PID, epoch - Duration::from_secs(1)); + + let (n, waker) = wakers.task_waker(); + _ = timers.add(epoch - Duration::from_secs(1), waker); assert_eq!(timers.next(), Some(epoch)); - assert_eq!(timers.remove_next(epoch), Some(PID)); + + assert_eq!(timers.expire_timers(epoch), 1); + assert!(wakers.is_awoken(n)); } #[test] fn adding_earlier_deadline() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<2>::new(); let epoch = timers.epoch.read().unwrap().time; + + let (n1, waker) = wakers.task_waker(); let deadline1 = epoch + Duration::from_secs(2); + _ = timers.add(deadline1, waker); + assert_eq!(timers.next(), Some(deadline1)); + + let (n2, waker) = wakers.task_waker(); let deadline2 = epoch + Duration::from_secs(1); - timers.add(PID, deadline1); - timers.add(PID2, deadline2); + _ = timers.add(deadline2, waker); assert_eq!(timers.next(), Some(deadline2)); - assert_eq!(timers.remove_next(deadline1), Some(PID2)); - assert_eq!(timers.remove_next(deadline1), Some(PID)); - assert_eq!(timers.remove_next(deadline1), None); + + assert_eq!(timers.expire_timers(deadline1), 2); + assert!(wakers.is_awoken(n1)); + assert!(wakers.is_awoken(n2)); + assert_eq!(timers.expire_timers(deadline1), 0); } #[test] fn remove_deadline() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; + let deadline = epoch + Duration::from_millis(10); - timers.add(PID, deadline); - timers.remove(PID, deadline); + let (_, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(epoch), 0); } #[test] fn remove_never_added_deadline() { let timers = Timers::new(); let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch + Duration::from_millis(10); + assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); - timers.remove(PID, deadline); + assert_eq!(timers.expire_timers(epoch), 0); + let deadline = epoch + Duration::from_millis(10); + timers.remove(deadline, 
TimerToken(0)); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(epoch), 0); } #[test] fn remove_expired_deadline() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; + let deadline = epoch + Duration::from_millis(10); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); - timers.remove(PID, deadline); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(epoch), 0); } #[test] fn remove_deadline_from_all_slots() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); let epoch = timers.epoch.read().unwrap().time; // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + timers.add(deadline, waker) + }) + .collect(); let first_deadline = epoch + Duration::from_nanos(10); assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(epoch), None); + assert_eq!(timers.expire_timers(epoch), 0); assert_eq!(timers.epoch.read().unwrap().index, 0); let mut next_deadline = first_deadline; - for n in 0..=SLOTS { - timers.remove(ProcessId(n), next_deadline); + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); next_deadline += DURATION_PER_SLOT; if n == SLOTS { @@ -173,45 +276,55 @@ fn remove_deadline_from_all_slots() { #[test] fn remove_deadline_from_all_slots_interleaved() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); let epoch = timers.epoch.read().unwrap().time; // Add a deadline to all slots and the overflow list. for n in 0..=SLOTS { let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - timers.remove(ProcessId(n), deadline); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); } assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(epoch), None); + assert_eq!(timers.expire_timers(epoch), 0); assert_eq!(timers.epoch.read().unwrap().index, 0); } #[test] fn remove_deadline_after_epoch_advance() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); let epoch = timers.epoch.read().unwrap().time; // Add a deadline to all slots and the overflow list. 
- for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + timers.add(deadline, waker) + }) + .skip(1) + .collect(); let first_deadline = epoch + Duration::from_nanos(10); let now = epoch + DURATION_PER_SLOT; assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(now), Some(ProcessId(0))); - assert_eq!(timers.remove_next(now), None); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(0)); assert_eq!(timers.epoch.read().unwrap().index, 1); assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for n in 1..=SLOTS { - timers.remove(ProcessId(n), next_deadline); + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); next_deadline += DURATION_PER_SLOT; - if n == SLOTS { + if n == SLOTS - 1 { assert_eq!(timers.next(), None); } else { assert_eq!(timers.next(), Some(next_deadline)); @@ -222,11 +335,15 @@ fn remove_deadline_after_epoch_advance() { #[test] fn remove_deadline_in_the_past() { let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); let epoch = timers.epoch.read().unwrap().time; + let deadline = epoch - Duration::from_secs(1); - timers.add(PID, deadline); + let (_, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); assert_eq!(timers.next(), Some(epoch)); - timers.remove(PID, deadline); + + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(epoch), None); + assert_eq!(timers.expire_timers(epoch), 0); } diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 0a5602e10..0fc3e9560 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -18,6 +18,19 @@ use std::time::{Duration, Instant}; use crate::{self as rt}; +mod private { + //! [`TimerToken`] needs to be public because it's used in the + //! private-in-public trait [`PrivateAccess`]. + //! + //! [`PrivateAccess`]: crate::access::private::PrivateAccess + + /// Token used to expire a timer. + #[derive(Copy, Clone, Debug)] + pub struct TimerToken(pub(crate) usize); +} + +pub(crate) use private::TimerToken; + /// Type returned when the deadline has passed. /// /// Can be converted into [`io::ErrorKind::TimedOut`]. @@ -88,8 +101,8 @@ impl From for io::ErrorKind { pub struct Timer { deadline: Instant, rt: RT, - /// If `true` it means we've added a timer that hasn't expired yet. - timer_pending: bool, + /// If `Some` it means we've added a timer that hasn't expired yet. 
+ timer_pending: Option, } impl Timer { @@ -98,7 +111,7 @@ impl Timer { Timer { deadline, rt, - timer_pending: false, + timer_pending: None, } } @@ -131,14 +144,13 @@ impl Timer { impl Future for Timer { type Output = DeadlinePassed; - fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { if self.has_passed() { - self.timer_pending = false; + self.timer_pending = None; return Poll::Ready(DeadlinePassed); - } else if !self.timer_pending { + } else if self.timer_pending.is_none() { let deadline = self.deadline; - self.rt.add_deadline(deadline); - self.timer_pending = true; + self.timer_pending = Some(self.rt.add_timer(deadline, ctx.waker().clone())); } Poll::Pending } @@ -148,8 +160,8 @@ impl Unpin for Timer {} impl Drop for Timer { fn drop(&mut self) { - if self.timer_pending { - self.rt.remove_deadline(self.deadline); + if let Some(expire_token) = self.timer_pending { + self.rt.remove_timer(self.deadline, expire_token); } } } @@ -383,7 +395,7 @@ impl AsyncIterator for Interval { match Pin::new(&mut this.timer).poll(ctx) { Poll::Ready(deadline) => { this.timer.deadline += this.interval; - this.timer.timer_pending = false; + this.timer.timer_pending = None; Poll::Ready(Some(deadline)) } Poll::Pending => Poll::Pending, diff --git a/rt/src/worker.rs b/rt/src/worker.rs index b5e16a700..52abe110f 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -619,14 +619,7 @@ impl Worker { fn schedule_from_shared_timers(&mut self, now: Instant) -> usize { trace!(worker_id = self.internals.id.get(); "polling shared timers"); let timing = trace::start(&*self.internals.trace_log.borrow()); - - let mut amount: usize = 0; - while let Some(pid) = self.internals.shared.remove_next_deadline(now) { - trace!(worker_id = self.internals.id.get(), pid = pid.0; "expiring timer for shared process"); - self.internals.shared.mark_ready(pid); - amount += 1; - } - + let amount = self.internals.shared.expire_timers(now); trace::finish_rt( self.internals.trace_log.borrow_mut().as_mut(), timing, diff --git a/rt/tests/functional/timer.rs b/rt/tests/functional/timer.rs index 537892178..4f6e1cb02 100644 --- a/rt/tests/functional/timer.rs +++ b/rt/tests/functional/timer.rs @@ -20,12 +20,12 @@ const TIMEOUT: Duration = Duration::from_millis(100); #[test] fn size() { - assert_size::>(40); - assert_size::>(40); - assert_size::>(40); - assert_size::>(40); - assert_size::>(56); - assert_size::>(56); + assert_size::>(48); + assert_size::>(48); + assert_size::>(48); + assert_size::>(48); + assert_size::>(64); + assert_size::>(64); assert_size::(0); } From ad79c943ea097302d7f4f091e5486b56aa4a22e4 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 14:14:47 +0200 Subject: [PATCH 055/177] Change local timers to use task::Wakers Instead of ProcessIds. Same as what we did for shared timers. --- rt/src/access.rs | 15 +- rt/src/lib.rs | 9 +- rt/src/local/timers.rs | 222 ++++++++++++---------------- rt/src/local/timers_tests.rs | 271 +++++++++++++++++++++++------------ rt/src/shared/mod.rs | 4 +- rt/src/shared/timers.rs | 4 +- rt/src/timer.rs | 4 +- rt/src/worker.rs | 10 +- 8 files changed, 285 insertions(+), 254 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index 5cbcb21d2..bab8cb11f 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -97,7 +97,7 @@ mod private { fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken; /// Remove a previously set timer. 
- fn remove_timer(&mut self, deadline: Instant, expire_token: TimerToken); + fn remove_timer(&mut self, deadline: Instant, token: TimerToken); /// Create a new [`task::Waker`]. fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker; @@ -189,13 +189,12 @@ impl PrivateAccess for ThreadLocal { self.rt.reregister(source, self.pid.into(), interest) } - fn add_timer(&mut self, deadline: Instant, _: task::Waker) -> TimerToken { - self.rt.add_timer(self.pid, deadline); - TimerToken(self.pid.0) // NOTE: not used. + fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken { + self.rt.add_timer(deadline, waker) } - fn remove_timer(&mut self, deadline: Instant, _: TimerToken) { - self.rt.remove_timer(self.pid, deadline); + fn remove_timer(&mut self, deadline: Instant, token: TimerToken) { + self.rt.remove_timer(deadline, token); } fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { @@ -337,8 +336,8 @@ impl PrivateAccess for ThreadSafe { self.rt.add_timer(deadline, waker) } - fn remove_timer(&mut self, deadline: Instant, expire_token: TimerToken) { - self.rt.remove_timer(deadline, expire_token); + fn remove_timer(&mut self, deadline: Instant, token: TimerToken) { + self.rt.remove_timer(deadline, token); } fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 1e350b5e1..35a7b95c6 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -252,6 +252,7 @@ use coordinator::Coordinator; use local::waker::MAX_THREADS; use spawn::{ActorOptions, FutureOptions, Spawn, SyncActorOptions}; use sync_worker::SyncWorker; +use timer::TimerToken; const SYNC_WORKER_ID_START: usize = 10000; const SYNC_WORKER_ID_END: usize = SYNC_WORKER_ID_START + 10000; @@ -665,17 +666,17 @@ impl RuntimeRef { /// Add a timer. /// /// See [`Timers::add`]. - pub(crate) fn add_timer(&self, pid: ProcessId, deadline: Instant) { + pub(crate) fn add_timer(&self, deadline: Instant, waker: task::Waker) -> TimerToken { ::log::trace!(deadline = as_debug!(deadline); "adding timer"); - self.internals.timers.borrow_mut().add(pid, deadline); + self.internals.timers.borrow_mut().add(deadline, waker) } /// Remove a previously set timer. /// /// See [`Timers::remove`]. - pub(crate) fn remove_timer(&self, pid: ProcessId, deadline: Instant) { + pub(crate) fn remove_timer(&self, deadline: Instant, token: TimerToken) { ::log::trace!(deadline = as_debug!(deadline); "removing timer"); - self.internals.timers.borrow_mut().remove(pid, deadline); + self.internals.timers.borrow_mut().remove(deadline, token); } /// Returns a copy of the shared internals. diff --git a/rt/src/local/timers.rs b/rt/src/local/timers.rs index 34afae716..112d516d1 100644 --- a/rt/src/local/timers.rs +++ b/rt/src/local/timers.rs @@ -2,10 +2,11 @@ //! //! Also see the shared timers implementation. -use std::cmp::{min, Ordering}; +use std::cmp::{max, min}; +use std::task::Waker; use std::time::{Duration, Instant}; -use crate::ProcessId; +use crate::timer::TimerToken; #[cfg(test)] #[path = "timers_tests.rs"] @@ -79,6 +80,13 @@ pub(crate) struct Timers { cached_next_deadline: CachedInstant, } +/// A timer in [`Timers`]. +#[derive(Debug)] +struct Timer { + deadline: T, + waker: Waker, +} + impl Timers { /// Create a new collection of timers. pub(crate) fn new() -> Timers { @@ -143,26 +151,33 @@ impl Timers { } /// Add a new deadline. 
- pub(crate) fn add(&mut self, pid: ProcessId, deadline: Instant) { - let deadline = self.checked_deadline(deadline); + pub(crate) fn add(&mut self, deadline: Instant, waker: Waker) -> TimerToken { + // Can't have deadline before the epoch, so we'll add a deadline with + // same time as the epoch instead. + let deadline = max(deadline, self.epoch); self.cached_next_deadline.update(deadline); - self.get_timers(pid, deadline, add_timer, add_timer); + self.get_timers(deadline, |timers| match timers { + TimerLocation::InSlot((timers, deadline)) => add_timer(timers, deadline, waker), + TimerLocation::Overflow((timers, deadline)) => add_timer(timers, deadline, waker), + }) } /// Remove a previously added deadline. - pub(crate) fn remove(&mut self, pid: ProcessId, deadline: Instant) { - let deadline = self.checked_deadline(deadline); + pub(crate) fn remove(&mut self, deadline: Instant, token: TimerToken) { + let deadline = max(deadline, self.epoch); self.cached_next_deadline.invalidate(deadline); - self.get_timers(pid, deadline, remove_timer, remove_timer); + self.get_timers(deadline, |timers| match timers { + TimerLocation::InSlot((timers, deadline)) => remove_timer(timers, deadline, token), + TimerLocation::Overflow((timers, deadline)) => remove_timer(timers, deadline, token), + }); } /// Determines in what list of timers a timer with `pid` and `deadline` /// would be/go into. Then calls the `slot_f` function for a timer list in /// the slots, or `overflow_f` with the overflow list. - fn get_timers(&mut self, pid: ProcessId, deadline: Instant, slot_f: SF, overflow_f: OF) + fn get_timers(&mut self, deadline: Instant, f: F) -> T where - SF: FnOnce(&mut Vec>, Timer), - OF: FnOnce(&mut Vec>, Timer), + F: FnOnce(TimerLocation<'_>) -> T, { let ns_since_epoch = deadline.saturating_duration_since(self.epoch).as_nanos(); if ns_since_epoch < u128::from(NS_OVERFLOW) { @@ -175,52 +190,52 @@ impl Timers { self.epoch + Duration::from_nanos((index as u64 * u64::from(NS_PER_SLOT)) + u64::from(offset)) ); let index = (self.index as usize + index) % SLOTS; - let timer = Timer { - pid, - deadline: offset, - }; - slot_f(&mut self.slots[index], timer); + f(TimerLocation::InSlot((&mut self.slots[index], offset))) } else { // Too far into the future to fit in the slots. - overflow_f(&mut self.overflow, Timer { pid, deadline }); + f(TimerLocation::Overflow((&mut self.overflow, deadline))) } } - /// Returns all deadlines that have expired (i.e. deadline < `now`). - pub(crate) fn deadlines(&mut self, now: Instant) -> Deadlines<'_> { - Deadlines { timers: self, now } - } - - /// Remove the next deadline that passed `now` returning the pid. + /// Expire all timers that have elapsed based on `now`. Returns the amount + /// of expired timers. /// /// # Safety /// /// `now` may never go backwards between calls. - fn remove_next(&mut self, now: Instant) -> Option { + pub(crate) fn expire_timers(&mut self, now: Instant) -> usize { + let mut amount = 0; + self.cached_next_deadline = CachedInstant::Unset; loop { // NOTE: Each loop iteration needs to calculate the `epoch_offset` // as the epoch changes each iteration. let epoch_offset = now.duration_since(self.epoch).as_nanos(); #[allow(clippy::cast_possible_truncation)] let epoch_offset = min(epoch_offset, u128::from(TimeOffset::MAX)) as TimeOffset; - match remove_if_before(self.current_slot(), epoch_offset) { - Ok(timer) => { - // Since we've just removed the first timer, invalid the - // cache. 
- self.cached_next_deadline = CachedInstant::Unset; - return Some(timer.pid); - } - Err(true) => { - // Safety: slot is empty, which makes calling - // `maybe_update_epoch` OK. - if !self.maybe_update_epoch(epoch_offset) { - // Didn't update epoch, no more timers to process. - return None; + let slot = self.current_slot(); + loop { + match remove_if_before(slot, epoch_offset) { + Ok(timer) => { + timer.waker.wake(); + amount += 1; + // Try another timer in this slot. + continue; } - // Else try again in the next loop. + Err(true) => { + // SAFETY: slot is empty, which makes calling + // `maybe_update_epoch` OK. + if !self.maybe_update_epoch(epoch_offset) { + // Didn't update epoch, no more timers to process. + return amount; + } else { + // Process the next slot. + break; + } + } + // Slot has timers with a deadline past `now`, so no more + // timers to process. + Err(false) => return amount, } - // Slot has timers with a deadline past `now`. - Err(false) => return None, } } } @@ -230,8 +245,6 @@ impl Timers { /// # Panics /// /// This panics if the current slot is not empty. - #[allow(clippy::cast_possible_truncation)] // TODO: move to new `self.index` line. - #[allow(clippy::debug_assert_with_mut_call)] fn maybe_update_epoch(&mut self, epoch_offset: TimeOffset) -> bool { if epoch_offset < NS_PER_SLOT { // Can't move to the next slot yet. @@ -254,83 +267,74 @@ impl Timers { // We add the timers in reverse order here as we remove the timer // first to expire from overflow first. timers.push(Timer { - pid: timer.pid, deadline: as_offset(slot_epoch, timer.deadline), + waker: timer.waker, }); } // At this point the timer first to expire is the first timer, but it // needs to be the last. So we reverse the order, which ensures the list // is sorted again. timers.reverse(); - debug_assert!(timers.is_sorted()); + debug_assert!(timers.is_sorted_by(|t1, t2| Some(t1.deadline.cmp(&t2.deadline)))); true } - /// Returns the `deadline` that can safely be added to the timers. Any - /// deadline before the current epoch is set to the current epoch. - fn checked_deadline(&self, deadline: Instant) -> Instant { - if deadline < self.epoch { - self.epoch - } else { - deadline - } - } - fn current_slot(&mut self) -> &mut Vec> { // Safety: `self.index` is always valid. &mut self.slots[self.index as usize] } } -/// Add `timer` to `timers`, ensuring it remains sorted. -fn add_timer(timers: &mut Vec>, timer: Timer) -where - Timer: Ord, -{ - let idx = match timers.binary_search(&timer) { +/// Location of a timer. +enum TimerLocation<'a> { + /// In of the wheel's slots. + InSlot((&'a mut Vec>, TimeOffset)), + /// In the overflow vector. + Overflow((&'a mut Vec>, Instant)), +} + +/// Add a new timer to `timers`, ensuring it remains sorted. +fn add_timer(timers: &mut Vec>, deadline: T, waker: Waker) -> TimerToken { + let idx = match timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) { Ok(idx) | Err(idx) => idx, }; - timers.insert(idx, timer); + let token = TimerToken(waker.as_raw().data() as usize); + timers.insert(idx, Timer { deadline, waker }); + token } -/// Remove a previously added `timer` from `timers`, ensuring it remains sorted. -fn remove_timer(timers: &mut Vec>, timer: Timer) -where - Timer: Ord + Copy, -{ - if let Ok(idx) = timers.binary_search(&timer) { - _ = timers.remove(idx); +/// Remove a previously added `deadline` from `timers`, ensuring it remains sorted. 
+fn remove_timer(timers: &mut Vec>, deadline: T, token: TimerToken) { + if let Ok(idx) = timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) { + if timers[idx].waker.as_raw().data() as usize == token.0 { + _ = timers.remove(idx); + } } } -/// Returns the different between `epoch` and `time`, truncated to -/// [`TimeOffset`]. -#[allow(clippy::cast_possible_truncation)] // TODO: move to last line. -fn as_offset(epoch: Instant, time: Instant) -> TimeOffset { - let nanos = time.duration_since(epoch).as_nanos(); - debug_assert!(nanos < u128::from(NS_PER_SLOT)); - (nanos & NS_SLOT_MASK) as TimeOffset -} - /// Remove the first timer if it's before `time`. /// /// Returns `Ok(timer)` if there is a timer with a deadline before `time`. -/// Returns `Err(is_empty)`, indicating if `timers` is empty. Returns -/// `Err(true)` is `timers` is empty, `Err(false)` if the are more timers in -/// `timers`, but none with a deadline before `time`. -fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool> -where - T: Ord + Copy, -{ +/// Otherwise this returns `Err(true)` if `timers` is empty or `Err(false)` if +/// the are more timers in `timers`, but none with a deadline before `time`. +fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool> { match timers.last() { - // TODO: is the `unwrap` removed here? Or do we need `unwrap_unchecked`? Some(timer) if timer.deadline <= time => Ok(timers.pop().unwrap()), Some(_) => Err(false), None => Err(true), } } +/// Returns the different between `epoch` and `time`, truncated to +/// [`TimeOffset`]. +#[allow(clippy::cast_possible_truncation)] // TODO: move to last line. +fn as_offset(epoch: Instant, time: Instant) -> TimeOffset { + let nanos = time.duration_since(epoch).as_nanos(); + debug_assert!(nanos < u128::from(NS_PER_SLOT)); + (nanos & NS_SLOT_MASK) as TimeOffset +} + /// To avoid having to check all slots and the overflow for timers in an /// [`Timers`] this type caches the earliest deadline. This speeds up /// [`Timers::next`]. @@ -373,51 +377,3 @@ impl CachedInstant { } } } - -/// A timer. -/// -/// # Notes -/// -/// The [`Ord`] implementation is in reverse order, i.e. the deadline to expire -/// first will have the highest ordering value. Furthermore the ordering is only -/// done base on the deadline, the process id is ignored in ordering. This -/// allows `change_timer` to not worry about order when changing the process id -/// of a timer. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -struct Timer { - pid: ProcessId, - deadline: T, -} - -impl Ord for Timer -where - T: Ord, -{ - fn cmp(&self, other: &Self) -> Ordering { - other.deadline.cmp(&self.deadline) - } -} - -impl PartialOrd for Timer -where - T: Ord, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Returns all timers that have passed (since the iterator was created). 
-#[derive(Debug)] -pub(crate) struct Deadlines<'t> { - timers: &'t mut Timers, - now: Instant, -} - -impl<'t> Iterator for Deadlines<'t> { - type Item = ProcessId; - - fn next(&mut self) -> Option { - self.timers.remove_next(self.now) - } -} diff --git a/rt/src/local/timers_tests.rs b/rt/src/local/timers_tests.rs index 43faa9f60..0b6eda596 100644 --- a/rt/src/local/timers_tests.rs +++ b/rt/src/local/timers_tests.rs @@ -1,65 +1,137 @@ +#![allow(unused_imports)] + +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::task::{Wake, Waker}; use std::time::Duration; -use crate::process::ProcessId; +use crate::local::timers::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, OVERFLOW_DURATION, SLOTS}; +use crate::timer::TimerToken; + +struct WakerBuilder { + awoken: Arc<[AtomicBool; N]>, + n: usize, +} + +impl WakerBuilder { + fn new() -> WakerBuilder { + const FALSE: AtomicBool = AtomicBool::new(false); + WakerBuilder { + awoken: Arc::new([FALSE; N]), + n: 0, + } + } + + fn task_waker(&mut self) -> (usize, Waker) { + let n = self.n; + self.n += 1; + assert!(n <= N, "created too many task::Wakers"); + ( + n, + Waker::from(Arc::new(TaskWaker { + awoken: self.awoken.clone(), + n, + })), + ) + } + + fn is_awoken(&self, n: usize) -> bool { + self.awoken[n].load(Ordering::SeqCst) + } +} -use super::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, OVERFLOW_DURATION, SLOTS}; +/// [`Wake`] implementation. +struct TaskWaker { + awoken: Arc<[AtomicBool; N]>, + n: usize, +} -const PID: ProcessId = ProcessId(100); -const PID2: ProcessId = ProcessId(200); +impl Wake for TaskWaker { + fn wake(self: Arc) { + self.awoken[self.n].store(true, Ordering::SeqCst) + } +} #[test] fn add_deadline_first_slot() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch + Duration::from_millis(100); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); + + // Not yet expired. + assert_eq!(timers.expire_timers(timers.epoch), 0); + + // Waker is called when the deadline is expired. + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + // No more timers. 
+ assert_eq!(timers.expire_timers(deadline), 0); } #[test] fn add_deadline_second_slot() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 10); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); + + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 0); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); assert_eq!(timers.index, 1); - assert_eq!(timers.remove_next(deadline), None); + + assert_eq!(timers.expire_timers(timers.epoch), 0); } #[test] fn add_deadline_overflow() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); + + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 0); assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); // Should have advanced the epoch to come back around to 0. assert_eq!(timers.index, 0); - assert_eq!(timers.remove_next(deadline), None); + + assert_eq!(timers.expire_timers(timers.epoch), 0); } #[test] fn add_deadline_to_all_slots() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); // Add a deadline to all slots and the overflow list. 
for n in 0..=SLOTS { let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + _ = timers.add(deadline, waker); } let first_deadline = timers.epoch + Duration::from_nanos(10); assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 0); let mut expected_next_deadline = first_deadline; @@ -67,9 +139,11 @@ fn add_deadline_to_all_slots() { for n in 0..=SLOTS { assert_eq!(timers.next(), Some(expected_next_deadline)); let now = expected_next_deadline + Duration::from_nanos(1); - assert_eq!(timers.remove_next(now), Some(ProcessId(n))); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(n)); assert_eq!(timers.index, expected_index); - assert_eq!(timers.remove_next(now), None); + + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, expected_index); expected_index = (expected_index + 1) % SLOTS as u8; @@ -80,76 +154,105 @@ fn add_deadline_to_all_slots() { #[test] fn add_deadline_in_the_past() { let mut timers = Timers::new(); - timers.add(PID, timers.epoch - Duration::from_secs(1)); + let mut wakers = WakerBuilder::<1>::new(); + + let (n, waker) = wakers.task_waker(); + _ = timers.add(timers.epoch - Duration::from_secs(1), waker); assert_eq!(timers.next(), Some(timers.epoch)); - assert_eq!(timers.remove_next(timers.epoch), Some(PID)); + + assert_eq!(timers.expire_timers(timers.epoch), 1); + assert!(wakers.is_awoken(n)); } #[test] fn adding_earlier_deadline_updates_cache() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<2>::new(); + let deadline1 = timers.epoch + Duration::from_secs(2); + let (n1, waker) = wakers.task_waker(); + _ = timers.add(deadline1, waker); let deadline2 = timers.epoch + Duration::from_secs(1); - timers.add(PID, deadline1); - timers.add(PID2, deadline2); + let (n2, waker) = wakers.task_waker(); + _ = timers.add(deadline2, waker); assert_eq!(timers.next(), Some(deadline2)); - assert_eq!(timers.remove_next(deadline1), Some(PID2)); - assert_eq!(timers.remove_next(deadline1), Some(PID)); - assert_eq!(timers.remove_next(deadline1), None); + + assert_eq!(timers.expire_timers(deadline1), 2); + assert!(wakers.is_awoken(n1)); + assert!(wakers.is_awoken(n2)); + assert_eq!(timers.expire_timers(deadline1), 0); } #[test] fn remove_deadline() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch + Duration::from_millis(10); - timers.add(PID, deadline); - timers.remove(PID, deadline); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert!(!wakers.is_awoken(n)); } #[test] fn remove_never_added_deadline() { let mut timers = Timers::new(); + let deadline = timers.epoch + Duration::from_millis(10); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); - timers.remove(PID, deadline); + assert_eq!(timers.expire_timers(timers.epoch), 0); + timers.remove(deadline, TimerToken(0)); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); } #[test] fn remove_expired_deadline() { let mut timers = Timers::new(); + let mut 
wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch + Duration::from_millis(10); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.remove_next(deadline), Some(PID)); - assert_eq!(timers.remove_next(deadline), None); - timers.remove(PID, deadline); + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.expire_timers(deadline), 0); + + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(deadline), None); + assert_eq!(timers.expire_timers(deadline), 0); } #[test] fn remove_deadline_from_all_slots() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = + timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + timers.add(deadline, waker) + }) + .collect(); let first_deadline = timers.epoch + Duration::from_nanos(10); assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(timers.epoch), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 0); let mut next_deadline = first_deadline; - for n in 0..=SLOTS { - timers.remove(ProcessId(n), next_deadline); + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); next_deadline += DURATION_PER_SLOT; if n == SLOTS { @@ -163,43 +266,54 @@ fn remove_deadline_from_all_slots() { #[test] fn remove_deadline_from_all_slots_interleaved() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); // Add a deadline to all slots and the overflow list. for n in 0..=SLOTS { let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - timers.remove(ProcessId(n), deadline); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); } assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(timers.epoch), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 0); } #[test] fn remove_deadline_after_epoch_advance() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); // Add a deadline to all slots and the overflow list. 
- for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = + timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + timers.add(deadline, waker) + }) + .collect(); let first_deadline = timers.epoch + Duration::from_nanos(10); let now = timers.epoch + DURATION_PER_SLOT; assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.remove_next(now), Some(ProcessId(0))); - assert_eq!(timers.remove_next(now), None); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(0)); + assert_eq!(timers.expire_timers(timers.epoch), 0); assert_eq!(timers.index, 1); assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for n in 1..=SLOTS { - timers.remove(ProcessId(n), next_deadline); + for (n, token) in tokens.into_iter().skip(1).enumerate() { + timers.remove(next_deadline, token); next_deadline += DURATION_PER_SLOT; - if n == SLOTS { + if n == SLOTS - 1 { assert_eq!(timers.next(), None); } else { assert_eq!(timers.next(), Some(next_deadline)); @@ -210,43 +324,14 @@ fn remove_deadline_after_epoch_advance() { #[test] fn remove_deadline_in_the_past() { let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let deadline = timers.epoch - Duration::from_secs(1); - timers.add(PID, deadline); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); assert_eq!(timers.next(), Some(timers.epoch)); - timers.remove(PID, deadline); + timers.remove(deadline, token); assert_eq!(timers.next(), None); - assert_eq!(timers.remove_next(timers.epoch), None); -} - -#[test] -fn deadlines() { - let mut timers = Timers::new(); - - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - timers.add(ProcessId(n), deadline); - } - - let deadlines = timers.deadlines(timers.epoch + OVERFLOW_DURATION + DURATION_PER_SLOT); - let mut n = 0; - for pid in deadlines { - assert_eq!(pid, ProcessId(n)); - n += 1; - } - assert_eq!(n, SLOTS + 1); -} - -#[test] -fn empty_deadlines() { - let mut timers = Timers::new(); - let mut deadline = timers.deadlines(timers.epoch); - assert_eq!(deadline.next(), None); -} - -#[test] -fn deadlines_not_yet_expired() { - let mut timers = Timers::new(); - timers.add(PID, timers.epoch + Duration::from_secs(1)); - let mut deadline = timers.deadlines(timers.epoch); - assert_eq!(deadline.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert!(!wakers.is_awoken(n)); } diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 66fb9049c..27161e90b 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -217,9 +217,9 @@ impl RuntimeInternals { /// Remove a previously set timer. /// /// See [`Timers::remove`]. - pub(crate) fn remove_timer(&self, deadline: Instant, expire_token: TimerToken) { + pub(crate) fn remove_timer(&self, deadline: Instant, token: TimerToken) { trace!(deadline = as_debug!(deadline); "removing timer"); - self.timers.remove(deadline, expire_token); + self.timers.remove(deadline, token); } /// Wake all futures who's timers has expired. 
diff --git a/rt/src/shared/timers.rs b/rt/src/shared/timers.rs index 534920d01..9ffb8b320 100644 --- a/rt/src/shared/timers.rs +++ b/rt/src/shared/timers.rs @@ -247,9 +247,7 @@ impl Timers { } // Slot has timers with a deadline past `now`, so no more // timers to process. - Err(false) => { - return amount; - } + Err(false) => return amount, } } } diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 0fc3e9560..57db8a4c6 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -160,8 +160,8 @@ impl Unpin for Timer {} impl Drop for Timer { fn drop(&mut self) { - if let Some(expire_token) = self.timer_pending { - self.rt.remove_timer(self.deadline, expire_token); + if let Some(token) = self.timer_pending { + self.rt.remove_timer(self.deadline, token); } } } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 52abe110f..aad2f2cb8 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -597,15 +597,7 @@ impl Worker { fn schedule_from_local_timers(&mut self, now: Instant) -> usize { trace!(worker_id = self.internals.id.get(); "polling local timers"); let timing = trace::start(&*self.internals.trace_log.borrow()); - - let mut scheduler = self.internals.scheduler.borrow_mut(); - let mut amount: usize = 0; - for pid in self.internals.timers.borrow_mut().deadlines(now) { - trace!(worker_id = self.internals.id.get(), pid = pid.0; "expiring timer for local process"); - scheduler.mark_ready(pid); - amount += 1; - } - + let amount = self.internals.timers.borrow_mut().expire_timers(now); trace::finish_rt( self.internals.trace_log.borrow_mut().as_mut(), timing, From a78f3cd980a74623ca023f158bc4063c77e0f795 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 14:37:50 +0200 Subject: [PATCH 056/177] Merge timers modules Creates a new timers module at the root of the Heph-rt crate which holds both the local and shared versions. --- rt/src/access.rs | 4 +- rt/src/lib.rs | 3 +- rt/src/local/mod.rs | 3 +- rt/src/local/timers_tests.rs | 337 --------- rt/src/shared/mod.rs | 5 +- rt/src/shared/timers_tests.rs | 349 ---------- rt/src/timer.rs | 14 +- rt/src/{local/timers.rs => timers/mod.rs} | 99 ++- rt/src/{shared/timers.rs => timers/shared.rs} | 133 +--- rt/src/timers/tests.rs | 640 ++++++++++++++++++ 10 files changed, 723 insertions(+), 864 deletions(-) delete mode 100644 rt/src/local/timers_tests.rs delete mode 100644 rt/src/shared/timers_tests.rs rename rt/src/{local/timers.rs => timers/mod.rs} (83%) rename rt/src/{shared/timers.rs => timers/shared.rs} (61%) create mode 100644 rt/src/timers/tests.rs diff --git a/rt/src/access.rs b/rt/src/access.rs index bab8cb11f..b35de4fbd 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -42,7 +42,7 @@ use mio::{event, Interest}; use crate::process::ProcessId; use crate::spawn::{ActorOptions, FutureOptions, Spawn}; -use crate::timer::TimerToken; +use crate::timers::TimerToken; use crate::trace::{self, Trace}; use crate::{shared, RuntimeRef}; @@ -67,7 +67,7 @@ mod private { use mio::{event, Interest}; use crate::process::ProcessId; - use crate::timer::TimerToken; + use crate::timers::TimerToken; use crate::{trace, RuntimeRef}; /// Actual trait behind [`rt::Access`]. 
diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 35a7b95c6..673e02766 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -235,6 +235,7 @@ pub mod systemd; pub mod test; mod thread_waker; pub mod timer; +mod timers; pub mod trace; #[doc(hidden)] pub mod util; @@ -252,7 +253,7 @@ use coordinator::Coordinator; use local::waker::MAX_THREADS; use spawn::{ActorOptions, FutureOptions, Spawn, SyncActorOptions}; use sync_worker::SyncWorker; -use timer::TimerToken; +use timers::TimerToken; const SYNC_WORKER_ID_START: usize = 10000; const SYNC_WORKER_ID_END: usize = SYNC_WORKER_ID_START + 10000; diff --git a/rt/src/local/mod.rs b/rt/src/local/mod.rs index 7f9c448fd..57c615c56 100644 --- a/rt/src/local/mod.rs +++ b/rt/src/local/mod.rs @@ -10,11 +10,10 @@ use mio::Poll; use crate::{shared, trace, Signal}; mod scheduler; -mod timers; pub(super) mod waker; +use crate::timers::Timers; use scheduler::Scheduler; -use timers::Timers; use waker::WakerId; /// Internals of the runtime, to which `RuntimeRef`s have a reference. diff --git a/rt/src/local/timers_tests.rs b/rt/src/local/timers_tests.rs deleted file mode 100644 index 0b6eda596..000000000 --- a/rt/src/local/timers_tests.rs +++ /dev/null @@ -1,337 +0,0 @@ -#![allow(unused_imports)] - -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::task::{Wake, Waker}; -use std::time::Duration; - -use crate::local::timers::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, OVERFLOW_DURATION, SLOTS}; -use crate::timer::TimerToken; - -struct WakerBuilder { - awoken: Arc<[AtomicBool; N]>, - n: usize, -} - -impl WakerBuilder { - fn new() -> WakerBuilder { - const FALSE: AtomicBool = AtomicBool::new(false); - WakerBuilder { - awoken: Arc::new([FALSE; N]), - n: 0, - } - } - - fn task_waker(&mut self) -> (usize, Waker) { - let n = self.n; - self.n += 1; - assert!(n <= N, "created too many task::Wakers"); - ( - n, - Waker::from(Arc::new(TaskWaker { - awoken: self.awoken.clone(), - n, - })), - ) - } - - fn is_awoken(&self, n: usize) -> bool { - self.awoken[n].load(Ordering::SeqCst) - } -} - -/// [`Wake`] implementation. -struct TaskWaker { - awoken: Arc<[AtomicBool; N]>, - n: usize, -} - -impl Wake for TaskWaker { - fn wake(self: Arc) { - self.awoken[self.n].store(true, Ordering::SeqCst) - } -} - -#[test] -fn add_deadline_first_slot() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch + Duration::from_millis(100); - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - // Not yet expired. - assert_eq!(timers.expire_timers(timers.epoch), 0); - - // Waker is called when the deadline is expired. - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - - // No more timers. 
- assert_eq!(timers.expire_timers(deadline), 0); -} - -#[test] -fn add_deadline_second_slot() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 10); - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 0); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - assert_eq!(timers.index, 1); - - assert_eq!(timers.expire_timers(timers.epoch), 0); -} - -#[test] -fn add_deadline_overflow() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 0); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - // Should have advanced the epoch to come back around to 0. - assert_eq!(timers.index, 0); - - assert_eq!(timers.expire_timers(timers.epoch), 0); -} - -#[test] -fn add_deadline_to_all_slots() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n, n2); - _ = timers.add(deadline, waker); - } - - let first_deadline = timers.epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 0); - - let mut expected_next_deadline = first_deadline; - let mut expected_index = 0; - for n in 0..=SLOTS { - assert_eq!(timers.next(), Some(expected_next_deadline)); - let now = expected_next_deadline + Duration::from_nanos(1); - assert_eq!(timers.expire_timers(now), 1); - assert!(wakers.is_awoken(n)); - assert_eq!(timers.index, expected_index); - - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, expected_index); - - expected_index = (expected_index + 1) % SLOTS as u8; - expected_next_deadline += DURATION_PER_SLOT; - } -} - -#[test] -fn add_deadline_in_the_past() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let (n, waker) = wakers.task_waker(); - _ = timers.add(timers.epoch - Duration::from_secs(1), waker); - assert_eq!(timers.next(), Some(timers.epoch)); - - assert_eq!(timers.expire_timers(timers.epoch), 1); - assert!(wakers.is_awoken(n)); -} - -#[test] -fn adding_earlier_deadline_updates_cache() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<2>::new(); - - let deadline1 = timers.epoch + Duration::from_secs(2); - let (n1, waker) = wakers.task_waker(); - _ = timers.add(deadline1, waker); - let deadline2 = timers.epoch + Duration::from_secs(1); - let (n2, waker) = wakers.task_waker(); - _ = timers.add(deadline2, waker); - assert_eq!(timers.next(), Some(deadline2)); - - assert_eq!(timers.expire_timers(deadline1), 2); - assert!(wakers.is_awoken(n1)); - assert!(wakers.is_awoken(n2)); - assert_eq!(timers.expire_timers(deadline1), 0); -} - -#[test] -fn 
remove_deadline() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch + Duration::from_millis(10); - let (n, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert!(!wakers.is_awoken(n)); -} - -#[test] -fn remove_never_added_deadline() { - let mut timers = Timers::new(); - - let deadline = timers.epoch + Duration::from_millis(10); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(timers.epoch), 0); - timers.remove(deadline, TimerToken(0)); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(timers.epoch), 0); -} - -#[test] -fn remove_expired_deadline() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch + Duration::from_millis(10); - let (n, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - - assert_eq!(timers.next(), Some(deadline)); - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - assert_eq!(timers.expire_timers(deadline), 0); - - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(deadline), 0); -} - -#[test] -fn remove_deadline_from_all_slots() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - - // Add a deadline to all slots and the overflow list. - let tokens: Vec = (0..=SLOTS) - .into_iter() - .map(|n| { - let deadline = - timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n2, n); - timers.add(deadline, waker) - }) - .collect(); - - let first_deadline = timers.epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 0); - - let mut next_deadline = first_deadline; - for (n, token) in tokens.into_iter().enumerate() { - timers.remove(next_deadline, token); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn remove_deadline_from_all_slots_interleaved() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n2, n); - let token = timers.add(deadline, waker); - timers.remove(deadline, token); - } - - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 0); -} - -#[test] -fn remove_deadline_after_epoch_advance() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - - // Add a deadline to all slots and the overflow list. 
- let tokens: Vec = (0..=SLOTS) - .into_iter() - .map(|n| { - let deadline = - timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n2, n); - timers.add(deadline, waker) - }) - .collect(); - - let first_deadline = timers.epoch + Duration::from_nanos(10); - let now = timers.epoch + DURATION_PER_SLOT; - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(now), 1); - assert!(wakers.is_awoken(0)); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert_eq!(timers.index, 1); - assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); - - let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for (n, token) in tokens.into_iter().skip(1).enumerate() { - timers.remove(next_deadline, token); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS - 1 { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn remove_deadline_in_the_past() { - let mut timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - - let deadline = timers.epoch - Duration::from_secs(1); - let (n, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(timers.epoch)); - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(timers.epoch), 0); - assert!(!wakers.is_awoken(n)); -} diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 27161e90b..93a14400e 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -19,15 +19,14 @@ use mio::{event, Events, Interest, Poll, Registry, Token}; use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; -use crate::timer::TimerToken; +use crate::timers::TimerToken; use crate::{trace, ProcessId, ThreadSafe}; mod scheduler; -mod timers; pub(crate) mod waker; +use crate::timers::shared::Timers; use scheduler::{ProcessData, Scheduler}; -use timers::Timers; use waker::WakerId; /// Setup of [`RuntimeInternals`]. diff --git a/rt/src/shared/timers_tests.rs b/rt/src/shared/timers_tests.rs deleted file mode 100644 index 9a9e4078a..000000000 --- a/rt/src/shared/timers_tests.rs +++ /dev/null @@ -1,349 +0,0 @@ -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::task::{Wake, Waker}; -use std::time::Duration; - -use crate::shared::timers::{Timers, DURATION_PER_SLOT, NS_PER_SLOT, SLOTS}; -use crate::timer::TimerToken; - -struct WakerBuilder { - awoken: Arc<[AtomicBool; N]>, - n: usize, -} - -impl WakerBuilder { - fn new() -> WakerBuilder { - const FALSE: AtomicBool = AtomicBool::new(false); - WakerBuilder { - awoken: Arc::new([FALSE; N]), - n: 0, - } - } - - fn task_waker(&mut self) -> (usize, Waker) { - let n = self.n; - self.n += 1; - assert!(n <= N, "created too many task::Wakers"); - ( - n, - Waker::from(Arc::new(TaskWaker { - awoken: self.awoken.clone(), - n, - })), - ) - } - - fn is_awoken(&self, n: usize) -> bool { - self.awoken[n].load(Ordering::SeqCst) - } -} - -/// [`Wake`] implementation. 
-struct TaskWaker { - awoken: Arc<[AtomicBool; N]>, - n: usize, -} - -impl Wake for TaskWaker { - fn wake(self: Arc) { - self.awoken[self.n].store(true, Ordering::SeqCst) - } -} - -#[test] -fn add_deadline_first_slot() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - let deadline = epoch + Duration::from_millis(100); - - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - // Not yet expired. - assert_eq!(timers.expire_timers(epoch), 0); - assert!(!wakers.is_awoken(n)); - - // Waker is called when the deadline is expired. - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - - // No more timers. - assert_eq!(timers.expire_timers(deadline + Duration::from_secs(100)), 0); -} - -#[test] -fn add_deadline_second_slot() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let deadline = epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 100); - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(epoch), 0); - assert_eq!(timers.epoch.read().unwrap().index, 0); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - - assert_eq!(timers.epoch.read().unwrap().index, 1); - assert_eq!(timers.expire_timers(epoch), 0); -} - -#[test] -fn add_deadline_overflow() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let deadline = epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); - let (n, waker) = wakers.task_waker(); - _ = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(epoch), 0); - assert_eq!(timers.epoch.read().unwrap().index, 0); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - - // Should have advanced the epoch to come back around to 0. - assert_eq!(timers.epoch.read().unwrap().index, 0); - assert_eq!(timers.expire_timers(epoch), 0); -} - -#[test] -fn add_deadline_to_all_slots() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. 
- for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n, n2); - _ = timers.add(deadline, waker); - } - - let first_deadline = epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(epoch), 0); - assert_eq!(timers.epoch.read().unwrap().index, 0); - - let mut expected_next_deadline = first_deadline; - let mut expected_index = 0; - for n in 0..=SLOTS { - assert_eq!(timers.next(), Some(expected_next_deadline)); - let now = expected_next_deadline + Duration::from_nanos(1); - assert_eq!(timers.expire_timers(now), 1); - assert!(wakers.is_awoken(n)); - assert_eq!(timers.epoch.read().unwrap().index, expected_index); - - assert_eq!(timers.expire_timers(now), 0); - assert_eq!(timers.epoch.read().unwrap().index, expected_index); - - expected_index = (expected_index + 1) % SLOTS as u8; - expected_next_deadline += DURATION_PER_SLOT; - } -} - -#[test] -fn add_deadline_in_the_past() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let (n, waker) = wakers.task_waker(); - _ = timers.add(epoch - Duration::from_secs(1), waker); - assert_eq!(timers.next(), Some(epoch)); - - assert_eq!(timers.expire_timers(epoch), 1); - assert!(wakers.is_awoken(n)); -} - -#[test] -fn adding_earlier_deadline() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<2>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let (n1, waker) = wakers.task_waker(); - let deadline1 = epoch + Duration::from_secs(2); - _ = timers.add(deadline1, waker); - assert_eq!(timers.next(), Some(deadline1)); - - let (n2, waker) = wakers.task_waker(); - let deadline2 = epoch + Duration::from_secs(1); - _ = timers.add(deadline2, waker); - assert_eq!(timers.next(), Some(deadline2)); - - assert_eq!(timers.expire_timers(deadline1), 2); - assert!(wakers.is_awoken(n1)); - assert!(wakers.is_awoken(n2)); - assert_eq!(timers.expire_timers(deadline1), 0); -} - -#[test] -fn remove_deadline() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let deadline = epoch + Duration::from_millis(10); - let (_, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); -} - -#[test] -fn remove_never_added_deadline() { - let timers = Timers::new(); - let epoch = timers.epoch.read().unwrap().time; - - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); - let deadline = epoch + Duration::from_millis(10); - timers.remove(deadline, TimerToken(0)); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); -} - -#[test] -fn remove_expired_deadline() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let deadline = epoch + Duration::from_millis(10); - let (n, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(deadline)); - - assert_eq!(timers.expire_timers(deadline), 1); - assert!(wakers.is_awoken(n)); - - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); -} - -#[test] -fn remove_deadline_from_all_slots() { - let timers = 
Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. - let tokens: Vec = (0..=SLOTS) - .into_iter() - .map(|n| { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n, n2); - timers.add(deadline, waker) - }) - .collect(); - - let first_deadline = epoch + Duration::from_nanos(10); - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(epoch), 0); - assert_eq!(timers.epoch.read().unwrap().index, 0); - - let mut next_deadline = first_deadline; - for (n, token) in tokens.into_iter().enumerate() { - timers.remove(next_deadline, token); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn remove_deadline_from_all_slots_interleaved() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. - for n in 0..=SLOTS { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n, n2); - let token = timers.add(deadline, waker); - timers.remove(deadline, token); - } - - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); - assert_eq!(timers.epoch.read().unwrap().index, 0); -} - -#[test] -fn remove_deadline_after_epoch_advance() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); - let epoch = timers.epoch.read().unwrap().time; - - // Add a deadline to all slots and the overflow list. - let tokens: Vec = (0..=SLOTS) - .into_iter() - .map(|n| { - let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); - let (n2, waker) = wakers.task_waker(); - assert_eq!(n, n2); - timers.add(deadline, waker) - }) - .skip(1) - .collect(); - - let first_deadline = epoch + Duration::from_nanos(10); - let now = epoch + DURATION_PER_SLOT; - assert_eq!(timers.next(), Some(first_deadline)); - assert_eq!(timers.expire_timers(now), 1); - assert!(wakers.is_awoken(0)); - assert_eq!(timers.epoch.read().unwrap().index, 1); - assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); - - let mut next_deadline = first_deadline + DURATION_PER_SLOT; - for (n, token) in tokens.into_iter().enumerate() { - timers.remove(next_deadline, token); - next_deadline += DURATION_PER_SLOT; - - if n == SLOTS - 1 { - assert_eq!(timers.next(), None); - } else { - assert_eq!(timers.next(), Some(next_deadline)); - } - } -} - -#[test] -fn remove_deadline_in_the_past() { - let timers = Timers::new(); - let mut wakers = WakerBuilder::<1>::new(); - let epoch = timers.epoch.read().unwrap().time; - - let deadline = epoch - Duration::from_secs(1); - let (_, waker) = wakers.task_waker(); - let token = timers.add(deadline, waker); - assert_eq!(timers.next(), Some(epoch)); - - timers.remove(deadline, token); - assert_eq!(timers.next(), None); - assert_eq!(timers.expire_timers(epoch), 0); -} diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 57db8a4c6..739422ad0 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -16,21 +16,9 @@ use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; +use crate::timers::TimerToken; use crate::{self as rt}; -mod private { - //! 
[`TimerToken`] needs to be public because it's used in the - //! private-in-public trait [`PrivateAccess`]. - //! - //! [`PrivateAccess`]: crate::access::private::PrivateAccess - - /// Token used to expire a timer. - #[derive(Copy, Clone, Debug)] - pub struct TimerToken(pub(crate) usize); -} - -pub(crate) use private::TimerToken; - /// Type returned when the deadline has passed. /// /// Can be converted into [`io::ErrorKind::TimedOut`]. diff --git a/rt/src/local/timers.rs b/rt/src/timers/mod.rs similarity index 83% rename from rt/src/local/timers.rs rename to rt/src/timers/mod.rs index 112d516d1..90c384a88 100644 --- a/rt/src/local/timers.rs +++ b/rt/src/timers/mod.rs @@ -1,17 +1,70 @@ -//! Module with the local timers implementation. +//! Timers implementation. //! -//! Also see the shared timers implementation. +//! This module hold the timer**s** implementation, that is the collection of +//! timers currently in the runtime. Also see the [`timer`] implementation, +//! which exposes types to the user. +//! +//! [`timer`]: crate::timer +//! +//! +//! # Implementation +//! +//! This implementation is based on a Timing Wheel as discussed in the paper +//! "Hashed and hierarchical timing wheels: efficient data structures for +//! implementing a timer facility" by George Varghese and Anthony Lauck (1997). +//! +//! This uses a scheme that splits the timers based on when they're going to +//! expire. It has 64 ([`SLOTS`]) slots each representing roughly a second of +//! time ([`NS_PER_SLOT`]). This allows us to only consider a portion of all +//! timers when processing the timers. Any timers that don't fit into these +//! slots, i.e. timers with a deadline more than 68 seconds ([`NS_OVERFLOW`]) +//! past `epoch`, are put in a overflow list. Ideally this overflow list is +//! empty however. +//! +//! The `slots` hold the timers with a [`TimeOffset`] which is the number of +//! nanosecond since epoch times it's index. The `index` field determines the +//! current zero-slot, meaning its timers will expire next and all have a +//! deadline within `0..NS_PER_SLOT` nanoseconds after `epoch`. The +//! `slots[index+1]` list will have timers that expire +//! `NS_PER_SLOT..2*NS_PER_SLOT` nanoseconds after `epoch`. In other words each +//! slot holds the timers that expire in the ~second after the previous slot. +//! +//! Whenever timers are expired by `expire_timers` it will attempt to update the +//! `epoch`, which is used as anchor point to determine in what slot/overflow +//! the timer must go (see above). When updating the epoch it will increase the +//! `index` by 1 and the `epoch` by [`NS_PER_SLOT`] nanoseconds. This means the +//! next slot (now `slots[index+1]`) holds timers that expire `0..NS_PER_SLOT` +//! nanoseconds after `epoch`. +//! +//! Note that for the `shared` version, which uses the same implementation as +//! described above, it's possible for a thread to read the epoch (index and +//! time), than gets descheduled, another thread updates the epoch and finally +//! the second thread insert a timer based on a now outdated epoch. This +//! situation is fine as the timer will still be added to the correct slot, but +//! it has a higher change of being added to the overflow list (which +//! `maybe_update_epoch` deals with correctly). + +pub(crate) mod shared; +#[cfg(test)] +mod tests; + +mod private { + //! [`TimerToken`] needs to be public because it's used in the + //! private-in-public trait [`PrivateAccess`]. + //! + //! 
[`PrivateAccess`]: crate::access::private::PrivateAccess + + /// Token used to expire a timer. + #[derive(Copy, Clone, Debug)] + pub struct TimerToken(pub(crate) usize); +} + +pub(crate) use private::TimerToken; use std::cmp::{max, min}; use std::task::Waker; use std::time::{Duration, Instant}; -use crate::timer::TimerToken; - -#[cfg(test)] -#[path = "timers_tests.rs"] -mod timers_tests; - /// Bits needed for the number of slots. const SLOT_BITS: usize = 6; /// Number of slots in the [`Timers`] wheel, 64. @@ -35,34 +88,7 @@ const NS_SLOT_MASK: u128 = (1 << NS_PER_SLOT_BITS) - 1; /// Must fit [`NS_PER_SLOT`]. type TimeOffset = u32; -/// Timers. -/// -/// This implementation is based on a Timing Wheel as discussed in the paper -/// "Hashed and hierarchical timing wheels: efficient data structures for -/// implementing a timer facility" by George Varghese and Anthony Lauck (1997). -/// -/// This uses a scheme that splits the timers based on when they're going to -/// expire. It has 64 ([`SLOTS`]) slots each representing roughly a second of -/// time ([`NS_PER_SLOT`]). This allows us to only consider a portion of all -/// timers when processing the timers. Any timers that don't fit into these -/// slots, i.e. timers with a deadline more than 68 seconds ([`NS_OVERFLOW`]) -/// past `epoch`, are put in a overflow list. Ideally this overflow list is -/// empty however. -/// -/// The `slots` hold the timers with a [`TimeOffset`] which is the number of -/// nanosecond since epoch times it's index. The `index` field determines the -/// current zero-slot, meaning its timers will expire next and all have a -/// deadline within `0..NS_PER_SLOT` nanoseconds after `epoch`. The -/// `slots[index+1]` list will have timers that expire -/// `NS_PER_SLOT..2*NS_PER_SLOT` nanoseconds after `epoch`. In other words each -/// slot holds the timers that expire in the ~second after the previous slot. -/// -/// Whenever timers are removed by `remove_next` it will attempt to update the -/// `epoch`, which is used as anchor point to determine in what slot/overflow -/// the timer must go (see above). When updating the epoch it will increase the -/// `index` by 1 and the `epoch` by [`NS_PER_SLOT`] nanoseconds. This means the -/// next slot (now `slots[index+1]`) holds timers that expire `0..NS_PER_SLOT` -/// nanoseconds after `epoch`. +/// Local timers. #[derive(Debug)] pub(crate) struct Timers { /// Current epoch. @@ -328,7 +354,6 @@ fn remove_if_before(timers: &mut Vec>, time: T) -> Result TimeOffset { let nanos = time.duration_since(epoch).as_nanos(); debug_assert!(nanos < u128::from(NS_PER_SLOT)); diff --git a/rt/src/shared/timers.rs b/rt/src/timers/shared.rs similarity index 61% rename from rt/src/shared/timers.rs rename to rt/src/timers/shared.rs index 9ffb8b320..32e459ad8 100644 --- a/rt/src/shared/timers.rs +++ b/rt/src/timers/shared.rs @@ -1,79 +1,17 @@ -//! Module with the shared timers implementation. -//! -//! Also see the local timers implementation. +//! Threadsafe version of `Timers`. use std::cmp::min; use std::sync::RwLock; use std::task::Waker; use std::time::{Duration, Instant}; -use log::{as_debug, trace}; +use crate::timers::{ + add_timer, remove_if_before, remove_timer, TimeOffset, Timer, TimerLocation, TimerToken, + DURATION_PER_SLOT, NS_OVERFLOW, NS_PER_SLOT, NS_PER_SLOT_BITS, NS_SLOT_MASK, OVERFLOW_DURATION, + SLOTS, SLOT_BITS, +}; -use crate::timer::TimerToken; - -#[cfg(test)] -#[path = "timers_tests.rs"] -mod timers_tests; - -/// Bits needed for the number of slots. 
-const SLOT_BITS: usize = 6; -/// Number of slots in the [`Timers`] wheel, 64. -const SLOTS: usize = 1 << SLOT_BITS; -/// Bits needed for the nanoseconds per slot. -const NS_PER_SLOT_BITS: usize = 30; -/// Nanoseconds per slot, 1073741824 ns ~= 1 second. -const NS_PER_SLOT: TimeOffset = 1 << NS_PER_SLOT_BITS; -/// Duration per slot, [`NS_PER_SLOT`] as [`Duration`]. -const DURATION_PER_SLOT: Duration = Duration::from_nanos(NS_PER_SLOT as u64); -/// Timers within `((1 << 6) * (1 << 30))` ~= 68 seconds since the epoch fit in -/// the wheel, others get added to the overflow. -const NS_OVERFLOW: u64 = SLOTS as u64 * NS_PER_SLOT as u64; -/// Duration per slot, [`NS_OVERFLOW`] as [`Duration`]. -const OVERFLOW_DURATION: Duration = Duration::from_nanos(NS_OVERFLOW); -/// Mask to get the nanoseconds for a slot. -const NS_SLOT_MASK: u128 = (1 << NS_PER_SLOT_BITS) - 1; - -/// Time offset since the epoch of [`Timers::epoch`]. -/// -/// Must fit [`NS_PER_SLOT`]. -type TimeOffset = u32; - -/// Timers. -/// -/// This implementation is based on a Timing Wheel as discussed in the paper -/// "Hashed and hierarchical timing wheels: efficient data structures for -/// implementing a timer facility" by George Varghese and Anthony Lauck (1997). -/// -/// This uses a scheme that splits the timers based on when they're going to -/// expire. It has 64 ([`SLOTS`]) slots each representing roughly a second of -/// time ([`NS_PER_SLOT`]). This allows us to only consider a portion of all -/// timers when processing the timers. Any timers that don't fit into these -/// slots, i.e. timers with a deadline more than 68 seconds ([`NS_OVERFLOW`]) -/// past `epoch`, are put in a overflow list. Ideally this overflow list is -/// empty however. -/// -/// The `slots` hold the timers with a [`TimeOffset`] which is the number of -/// nanosecond since epoch times it's index. The `index` field determines the -/// current zero-slot, meaning its timers will expire next and all have a -/// deadline within `0..NS_PER_SLOT` nanoseconds after `epoch`. The -/// `slots[index+1]` list will have timers that expire -/// `NS_PER_SLOT..2*NS_PER_SLOT` nanoseconds after `epoch`. In other words each -/// slot holds the timers that expire in the ~second after the previous slot. -/// -/// Whenever timers are removed by `remove_next` it will attempt to update the -/// `epoch`, which is used as anchor point to determine in what slot/overflow -/// the timer must go (see above). When updating the epoch it will increase the -/// `index` by 1 and the `epoch` by [`NS_PER_SLOT`] nanoseconds in a single -/// atomic step (thus requiring a lock around `Epoch`). This means the next slot -/// (now `slots[index+1]`) holds timers that expire `0..NS_PER_SLOT` nanoseconds -/// after `epoch`. -/// -/// Note that it's possible for a thread to read the epoch (index and time), -/// than gets descheduled, another thread updates the epoch and finally the -/// second thread insert a timer based on a now outdated epoch. This situation -/// is fine as the timer will still be added to the correct slot, but it has a -/// higher change of being added to the overflow list (which -/// `maybe_update_epoch` deals with correctly). +/// Shared timers. #[derive(Debug)] pub(crate) struct Timers { epoch: RwLock, @@ -90,13 +28,6 @@ struct Epoch { index: u8, } -/// A timer in [`Timers`]. -#[derive(Debug)] -struct Timer { - deadline: T, - waker: Waker, -} - impl Timers { /// Create a new collection of timers. 
pub(crate) fn new() -> Timers { @@ -121,6 +52,12 @@ impl Timers { timers } + #[cfg(test)] + pub(crate) fn epoch(&self) -> (Instant, u8) { + let epoch = self.epoch.read().unwrap(); + (epoch.time, epoch.index) + } + /// Returns the next deadline, if any. pub(crate) fn next(&self) -> Option { let (epoch_time, index) = { @@ -204,7 +141,6 @@ impl Timers { /// /// `now` may never go backwards between calls. pub(crate) fn expire_timers(&self, now: Instant) -> usize { - trace!(now = as_debug!(now); "expiring timers"); let mut amount = 0; loop { // NOTE: Each loop iteration needs to calculate the `epoch_offset` @@ -259,7 +195,6 @@ impl Timers { /// /// This panics if the current slot is not empty. fn maybe_update_epoch(&self, now: Instant) -> bool { - trace!(now = as_debug!(now); "maybe updating epoch"); let epoch_time = { let mut epoch = self.epoch.write().unwrap(); let new_epoch = epoch.time + DURATION_PER_SLOT; @@ -277,12 +212,10 @@ impl Timers { epoch.time = new_epoch; new_epoch }; - trace!(epoch_time = as_debug!(epoch_time); "new epoch time"); // Next move all the overflow timers that now fit in the slots. let time = epoch_time + OVERFLOW_DURATION; while let Ok(timer) = { remove_if_before(&mut self.overflow.write().unwrap(), time) } { - trace!(timer = as_debug!(timer); "moving overflow timer into wheel"); // NOTE: we can't use the same optimisation as we do in the local // version where we know that all timers removed here go into the // `self.index-1` slot. @@ -295,43 +228,3 @@ impl Timers { true } } - -/// Location of a timer. -enum TimerLocation<'a> { - /// In of the wheel's slots. - InSlot((&'a mut Vec>, TimeOffset)), - /// In the overflow vector. - Overflow((&'a mut Vec>, Instant)), -} - -/// Add a new timer to `timers`, ensuring it remains sorted. -fn add_timer(timers: &mut Vec>, deadline: T, waker: Waker) -> TimerToken { - let idx = match timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) { - Ok(idx) | Err(idx) => idx, - }; - let token = TimerToken(waker.as_raw().data() as usize); - timers.insert(idx, Timer { deadline, waker }); - token -} - -/// Remove a previously added `deadline` from `timers`, ensuring it remains sorted. -fn remove_timer(timers: &mut Vec>, deadline: T, token: TimerToken) { - if let Ok(idx) = timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) { - if timers[idx].waker.as_raw().data() as usize == token.0 { - _ = timers.remove(idx); - } - } -} - -/// Remove the first timer if it's before `time`. -/// -/// Returns `Ok(timer)` if there is a timer with a deadline before `time`. -/// Otherwise this returns `Err(true)` if `timers` is empty or `Err(false)` if -/// the are more timers in `timers`, but none with a deadline before `time`. 
-fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool> { - match timers.last() { - Some(timer) if timer.deadline <= time => Ok(timers.pop().unwrap()), - Some(_) => Err(false), - None => Err(true), - } -} diff --git a/rt/src/timers/tests.rs b/rt/src/timers/tests.rs new file mode 100644 index 000000000..4ecb10125 --- /dev/null +++ b/rt/src/timers/tests.rs @@ -0,0 +1,640 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::task::{Wake, Waker}; +use std::time::Duration; + +use crate::timers::{TimerToken, Timers, DURATION_PER_SLOT, NS_PER_SLOT, SLOTS}; + +struct WakerBuilder { + awoken: Arc<[AtomicBool; N]>, + n: usize, +} + +impl WakerBuilder { + fn new() -> WakerBuilder { + const FALSE: AtomicBool = AtomicBool::new(false); + WakerBuilder { + awoken: Arc::new([FALSE; N]), + n: 0, + } + } + + fn task_waker(&mut self) -> (usize, Waker) { + let n = self.n; + self.n += 1; + assert!(n <= N, "created too many task::Wakers"); + ( + n, + Waker::from(Arc::new(TaskWaker { + awoken: self.awoken.clone(), + n, + })), + ) + } + + fn is_awoken(&self, n: usize) -> bool { + self.awoken[n].load(Ordering::SeqCst) + } +} + +/// [`Wake`] implementation. +struct TaskWaker { + awoken: Arc<[AtomicBool; N]>, + n: usize, +} + +impl Wake for TaskWaker { + fn wake(self: Arc) { + self.awoken[self.n].store(true, Ordering::SeqCst) + } +} + +#[test] +fn add_deadline_first_slot() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch + Duration::from_millis(100); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + // Not yet expired. + assert_eq!(timers.expire_timers(timers.epoch), 0); + + // Waker is called when the deadline is expired. + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + // No more timers. + assert_eq!(timers.expire_timers(deadline), 0); +} + +#[test] +fn add_deadline_second_slot() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 10); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 0); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.index, 1); + + assert_eq!(timers.expire_timers(timers.epoch), 0); +} + +#[test] +fn add_deadline_overflow() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 0); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + // Should have advanced the epoch to come back around to 0. + assert_eq!(timers.index, 0); + + assert_eq!(timers.expire_timers(timers.epoch), 0); +} + +#[test] +fn add_deadline_to_all_slots() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + + // Add a deadline to all slots and the overflow list. 
+ for n in 0..=SLOTS { + let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + _ = timers.add(deadline, waker); + } + + let first_deadline = timers.epoch + Duration::from_nanos(10); + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 0); + + let mut expected_next_deadline = first_deadline; + let mut expected_index = 0; + for n in 0..=SLOTS { + assert_eq!(timers.next(), Some(expected_next_deadline)); + let now = expected_next_deadline + Duration::from_nanos(1); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.index, expected_index); + + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, expected_index); + + expected_index = (expected_index + 1) % SLOTS as u8; + expected_next_deadline += DURATION_PER_SLOT; + } +} + +#[test] +fn add_deadline_in_the_past() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let (n, waker) = wakers.task_waker(); + _ = timers.add(timers.epoch - Duration::from_secs(1), waker); + assert_eq!(timers.next(), Some(timers.epoch)); + + assert_eq!(timers.expire_timers(timers.epoch), 1); + assert!(wakers.is_awoken(n)); +} + +#[test] +fn adding_earlier_deadline_updates_cache() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<2>::new(); + + let deadline1 = timers.epoch + Duration::from_secs(2); + let (n1, waker) = wakers.task_waker(); + _ = timers.add(deadline1, waker); + let deadline2 = timers.epoch + Duration::from_secs(1); + let (n2, waker) = wakers.task_waker(); + _ = timers.add(deadline2, waker); + assert_eq!(timers.next(), Some(deadline2)); + + assert_eq!(timers.expire_timers(deadline1), 2); + assert!(wakers.is_awoken(n1)); + assert!(wakers.is_awoken(n2)); + assert_eq!(timers.expire_timers(deadline1), 0); +} + +#[test] +fn remove_deadline() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch + Duration::from_millis(10); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert!(!wakers.is_awoken(n)); +} + +#[test] +fn remove_never_added_deadline() { + let mut timers = Timers::new(); + + let deadline = timers.epoch + Duration::from_millis(10); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + timers.remove(deadline, TimerToken(0)); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); +} + +#[test] +fn remove_expired_deadline() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch + Duration::from_millis(10); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + + assert_eq!(timers.next(), Some(deadline)); + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.expire_timers(deadline), 0); + + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(deadline), 0); +} + +#[test] +fn remove_deadline_from_all_slots() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + + // Add a deadline to all slots and the overflow list. 
+ let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = + timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + timers.add(deadline, waker) + }) + .collect(); + + let first_deadline = timers.epoch + Duration::from_nanos(10); + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 0); + + let mut next_deadline = first_deadline; + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); + next_deadline += DURATION_PER_SLOT; + + if n == SLOTS { + assert_eq!(timers.next(), None); + } else { + assert_eq!(timers.next(), Some(next_deadline)); + } + } +} + +#[test] +fn remove_deadline_from_all_slots_interleaved() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + + // Add a deadline to all slots and the overflow list. + for n in 0..=SLOTS { + let deadline = timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); + } + + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 0); +} + +#[test] +fn remove_deadline_after_epoch_advance() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + + // Add a deadline to all slots and the overflow list. + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = + timers.epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n2, n); + timers.add(deadline, waker) + }) + .collect(); + + let first_deadline = timers.epoch + Duration::from_nanos(10); + let now = timers.epoch + DURATION_PER_SLOT; + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(0)); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert_eq!(timers.index, 1); + assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); + + let mut next_deadline = first_deadline + DURATION_PER_SLOT; + for (n, token) in tokens.into_iter().skip(1).enumerate() { + timers.remove(next_deadline, token); + next_deadline += DURATION_PER_SLOT; + + if n == SLOTS - 1 { + assert_eq!(timers.next(), None); + } else { + assert_eq!(timers.next(), Some(next_deadline)); + } + } +} + +#[test] +fn remove_deadline_in_the_past() { + let mut timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + + let deadline = timers.epoch - Duration::from_secs(1); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(timers.epoch)); + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(timers.epoch), 0); + assert!(!wakers.is_awoken(n)); +} + +mod shared { + use std::time::Duration; + + use crate::timers::shared::Timers; + use crate::timers::tests::WakerBuilder; + use crate::timers::{TimerToken, DURATION_PER_SLOT, NS_PER_SLOT, SLOTS}; + + #[test] + fn add_deadline_first_slot() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + let deadline = epoch + Duration::from_millis(100); + + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); 
+ + // Not yet expired. + assert_eq!(timers.expire_timers(epoch), 0); + assert!(!wakers.is_awoken(n)); + + // Waker is called when the deadline is expired. + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + // No more timers. + assert_eq!(timers.expire_timers(deadline + Duration::from_secs(100)), 0); + } + + #[test] + fn add_deadline_second_slot() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let deadline = epoch + Duration::from_nanos(NS_PER_SLOT as u64 + 100); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(epoch), 0); + assert_eq!(timers.epoch().1, 0); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + assert_eq!(timers.epoch().1, 1); + assert_eq!(timers.expire_timers(epoch), 0); + } + + #[test] + fn add_deadline_overflow() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let deadline = epoch + Duration::from_nanos(SLOTS as u64 * NS_PER_SLOT as u64 + 10); + let (n, waker) = wakers.task_waker(); + _ = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(epoch), 0); + assert_eq!(timers.epoch().1, 0); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + // Should have advanced the epoch to come back around to 0. + assert_eq!(timers.epoch().1, 0); + assert_eq!(timers.expire_timers(epoch), 0); + } + + #[test] + fn add_deadline_to_all_slots() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + let epoch = timers.epoch().0; + + // Add a deadline to all slots and the overflow list. 
+ for n in 0..=SLOTS { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + _ = timers.add(deadline, waker); + } + + let first_deadline = epoch + Duration::from_nanos(10); + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(epoch), 0); + assert_eq!(timers.epoch().1, 0); + + let mut expected_next_deadline = first_deadline; + let mut expected_index = 0; + for n in 0..=SLOTS { + assert_eq!(timers.next(), Some(expected_next_deadline)); + let now = expected_next_deadline + Duration::from_nanos(1); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(n)); + assert_eq!(timers.epoch().1, expected_index); + + assert_eq!(timers.expire_timers(now), 0); + assert_eq!(timers.epoch().1, expected_index); + + expected_index = (expected_index + 1) % SLOTS as u8; + expected_next_deadline += DURATION_PER_SLOT; + } + } + + #[test] + fn add_deadline_in_the_past() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let (n, waker) = wakers.task_waker(); + _ = timers.add(epoch - Duration::from_secs(1), waker); + assert_eq!(timers.next(), Some(epoch)); + + assert_eq!(timers.expire_timers(epoch), 1); + assert!(wakers.is_awoken(n)); + } + + #[test] + fn adding_earlier_deadline() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<2>::new(); + let epoch = timers.epoch().0; + + let (n1, waker) = wakers.task_waker(); + let deadline1 = epoch + Duration::from_secs(2); + _ = timers.add(deadline1, waker); + assert_eq!(timers.next(), Some(deadline1)); + + let (n2, waker) = wakers.task_waker(); + let deadline2 = epoch + Duration::from_secs(1); + _ = timers.add(deadline2, waker); + assert_eq!(timers.next(), Some(deadline2)); + + assert_eq!(timers.expire_timers(deadline1), 2); + assert!(wakers.is_awoken(n1)); + assert!(wakers.is_awoken(n2)); + assert_eq!(timers.expire_timers(deadline1), 0); + } + + #[test] + fn remove_deadline() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let deadline = epoch + Duration::from_millis(10); + let (_, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + } + + #[test] + fn remove_never_added_deadline() { + let timers = Timers::new(); + let epoch = timers.epoch().0; + + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + let deadline = epoch + Duration::from_millis(10); + timers.remove(deadline, TimerToken(0)); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + } + + #[test] + fn remove_expired_deadline() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let deadline = epoch + Duration::from_millis(10); + let (n, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(deadline)); + + assert_eq!(timers.expire_timers(deadline), 1); + assert!(wakers.is_awoken(n)); + + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + } + + #[test] + fn remove_deadline_from_all_slots() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + let epoch = timers.epoch().0; + + // Add a 
deadline to all slots and the overflow list. + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + timers.add(deadline, waker) + }) + .collect(); + + let first_deadline = epoch + Duration::from_nanos(10); + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(epoch), 0); + assert_eq!(timers.epoch().1, 0); + + let mut next_deadline = first_deadline; + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); + next_deadline += DURATION_PER_SLOT; + + if n == SLOTS { + assert_eq!(timers.next(), None); + } else { + assert_eq!(timers.next(), Some(next_deadline)); + } + } + } + + #[test] + fn remove_deadline_from_all_slots_interleaved() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + let epoch = timers.epoch().0; + + // Add a deadline to all slots and the overflow list. + for n in 0..=SLOTS { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + let token = timers.add(deadline, waker); + timers.remove(deadline, token); + } + + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + assert_eq!(timers.epoch().1, 0); + } + + #[test] + fn remove_deadline_after_epoch_advance() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<{ SLOTS + 1 }>::new(); + let epoch = timers.epoch().0; + + // Add a deadline to all slots and the overflow list. + let tokens: Vec = (0..=SLOTS) + .into_iter() + .map(|n| { + let deadline = epoch + Duration::from_nanos((n as u64 * NS_PER_SLOT as u64) + 10); + let (n2, waker) = wakers.task_waker(); + assert_eq!(n, n2); + timers.add(deadline, waker) + }) + .skip(1) + .collect(); + + let first_deadline = epoch + Duration::from_nanos(10); + let now = epoch + DURATION_PER_SLOT; + assert_eq!(timers.next(), Some(first_deadline)); + assert_eq!(timers.expire_timers(now), 1); + assert!(wakers.is_awoken(0)); + assert_eq!(timers.epoch().1, 1); + assert_eq!(timers.next(), Some(first_deadline + DURATION_PER_SLOT)); + + let mut next_deadline = first_deadline + DURATION_PER_SLOT; + for (n, token) in tokens.into_iter().enumerate() { + timers.remove(next_deadline, token); + next_deadline += DURATION_PER_SLOT; + + if n == SLOTS - 1 { + assert_eq!(timers.next(), None); + } else { + assert_eq!(timers.next(), Some(next_deadline)); + } + } + } + + #[test] + fn remove_deadline_in_the_past() { + let timers = Timers::new(); + let mut wakers = WakerBuilder::<1>::new(); + let epoch = timers.epoch().0; + + let deadline = epoch - Duration::from_secs(1); + let (_, waker) = wakers.task_waker(); + let token = timers.add(deadline, waker); + assert_eq!(timers.next(), Some(epoch)); + + timers.remove(deadline, token); + assert_eq!(timers.next(), None); + assert_eq!(timers.expire_timers(epoch), 0); + } +} From d7d51e6185cc6f840746763e3aa2b42c2ab15f9b Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 14:41:40 +0200 Subject: [PATCH 057/177] Derive Default for ActorOptions Now that it's possible. 
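For reference, the derived impl behaves the same as the hand-written one removed below: `derive(Default)` fills in each field with its own default, and `priority` is the struct's only field. Roughly:

    #[derive(Clone, Debug, Default)]
    #[must_use]
    pub struct ActorOptions {
        priority: Priority,
    }

    // The derived impl is equivalent to the removed one:
    // ActorOptions::default() yields ActorOptions { priority: Priority::default() }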
--- rt/src/spawn/options.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/rt/src/spawn/options.rs b/rt/src/spawn/options.rs index d4d83c189..857897b79 100644 --- a/rt/src/spawn/options.rs +++ b/rt/src/spawn/options.rs @@ -34,7 +34,7 @@ use std::time::Duration; /// let opts = ActorOptions::default().with_priority(Priority::HIGH); /// # drop(opts); // Silence unused variable warning. /// ``` -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] #[must_use] pub struct ActorOptions { priority: Priority, @@ -53,14 +53,6 @@ impl ActorOptions { } } -impl Default for ActorOptions { - fn default() -> ActorOptions { - ActorOptions { - priority: Priority::default(), - } - } -} - /// Priority for an actor or future in the scheduler. /// /// Actors and futures with a higher priority will be scheduled to run more From 40ea26b4f60f1153d86748f4f266df4ca1da2056 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 14:42:00 +0200 Subject: [PATCH 058/177] Fix and ignore Clippy lints --- rt/src/timers/mod.rs | 8 +++++--- rt/src/timers/shared.rs | 8 ++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/rt/src/timers/mod.rs b/rt/src/timers/mod.rs index 90c384a88..911a97573 100644 --- a/rt/src/timers/mod.rs +++ b/rt/src/timers/mod.rs @@ -253,10 +253,9 @@ impl Timers { if !self.maybe_update_epoch(epoch_offset) { // Didn't update epoch, no more timers to process. return amount; - } else { - // Process the next slot. - break; } + // Process the next slot. + break; } // Slot has timers with a deadline past `now`, so no more // timers to process. @@ -271,6 +270,7 @@ impl Timers { /// # Panics /// /// This panics if the current slot is not empty. + #[allow(clippy::debug_assert_with_mut_call, clippy::cast_possible_truncation)] fn maybe_update_epoch(&mut self, epoch_offset: TimeOffset) -> bool { if epoch_offset < NS_PER_SLOT { // Can't move to the next slot yet. @@ -331,6 +331,7 @@ fn add_timer(timers: &mut Vec>, deadline: T, waker: Waker) -> T } /// Remove a previously added `deadline` from `timers`, ensuring it remains sorted. +#[allow(clippy::needless_pass_by_value)] fn remove_timer(timers: &mut Vec>, deadline: T, token: TimerToken) { if let Ok(idx) = timers.binary_search_by(|timer| timer.deadline.cmp(&deadline)) { if timers[idx].waker.as_raw().data() as usize == token.0 { @@ -344,6 +345,7 @@ fn remove_timer(timers: &mut Vec>, deadline: T, token: TimerTok /// Returns `Ok(timer)` if there is a timer with a deadline before `time`. /// Otherwise this returns `Err(true)` if `timers` is empty or `Err(false)` if /// the are more timers in `timers`, but none with a deadline before `time`. +#[allow(clippy::needless_pass_by_value)] fn remove_if_before(timers: &mut Vec>, time: T) -> Result, bool> { match timers.last() { Some(timer) if timer.deadline <= time => Ok(timers.pop().unwrap()), diff --git a/rt/src/timers/shared.rs b/rt/src/timers/shared.rs index 32e459ad8..e493ccdda 100644 --- a/rt/src/timers/shared.rs +++ b/rt/src/timers/shared.rs @@ -31,6 +31,7 @@ struct Epoch { impl Timers { /// Create a new collection of timers. pub(crate) fn new() -> Timers { + #[allow(clippy::declare_interior_mutable_const)] const EMPTY: RwLock>> = RwLock::new(Vec::new()); Timers { epoch: RwLock::new(Epoch { @@ -176,10 +177,9 @@ impl Timers { if !self.maybe_update_epoch(now) { // Didn't update epoch, no more timers to process. return amount; - } else { - // Process the next slot. - break; } + // Process the next slot. 
+ break; } // Slot has timers with a deadline past `now`, so no more // timers to process. @@ -194,6 +194,7 @@ impl Timers { /// # Panics /// /// This panics if the current slot is not empty. + #[allow(clippy::cast_possible_truncation)] // For `epoch.index`. fn maybe_update_epoch(&self, now: Instant) -> bool { let epoch_time = { let mut epoch = self.epoch.write().unwrap(); @@ -207,7 +208,6 @@ impl Timers { debug_assert!(self.slots[epoch.index as usize].read().unwrap().is_empty()); // Move to the next slot and update the epoch. - #[allow(clippy::cast_possible_truncation)] epoch.index = (epoch.index + 1) % self.slots.len() as u8; epoch.time = new_epoch; new_epoch From 54501b382b95d9c54d3afb9229a391115a873c22 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 14:43:15 +0200 Subject: [PATCH 059/177] Remove actor::Bound trait No longer used! --- rt/src/lib.rs | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 673e02766..f561017c8 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -788,31 +788,3 @@ fn cpu_usage(clock_id: libc::clockid_t) -> Duration { ) } } - -/// Types that are bound to an [`Actor`]. -/// -/// A marker trait to indicate the type is bound to an [`Actor`]. How the type -/// is bound to the actor is different for each type. For most futures it means -/// that if progress can be made (when the [future is awoken]) the actor will be -/// run. This has the unfortunate consequence that those types can't be moved -/// away from the actor without [(re)binding] it first, otherwise the new actor -/// will never be run and the actor that created the type will run instead. -/// -/// Most types that are bound can only be created with a (mutable) reference to -/// an [`actor::Context`]. Examples of this are `TcpStream`, `UdpSocket` and -/// all futures in the `heph_rt::timer` module. -/// -/// [`Actor`]: actor::Actor -/// [future is awoken]: std::task::Waker::wake -/// [(re)binding]: Bound::bind_to -pub trait Bound { - /// Error type used in [`bind_to`]. - /// - /// [`bind_to`]: Bound::bind_to - type Error; - - /// Bind a type to the [`Actor`] that owns the `ctx`. - /// - /// [`Actor`]: actor::Actor - fn bind_to(&mut self, ctx: &mut actor::Context) -> Result<(), Self::Error>; -} From 6a93beb91543089d8576c047e227079653a3f13d Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 18:28:04 +0200 Subject: [PATCH 060/177] Add Limited buffer wrapper Limits the amount of bytes read or written from/to the buffer. --- rt/src/io/buf.rs | 88 +++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 9 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index aa1369b90..1a5b8d759 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -1,5 +1,6 @@ //! Buffers. +use std::cmp::min; use std::mem::MaybeUninit; use std::sync::Arc; @@ -13,7 +14,7 @@ use std::sync::Arc; /// # Safety /// /// Unlike normal buffers the buffer implementations for Heph have additional -/// requirements because Heph uses I/O uring. +/// requirements because Heph uses io_uring. /// /// If the operation (that uses this buffer) is not polled to completion, i.e. /// the `Future` is dropped before it returns `Poll::Ready`, the kernel still @@ -74,7 +75,20 @@ pub unsafe trait BufMut: 'static { /// Returns `true` if the buffer has spare capacity. 
fn has_spare_capacity(&self) -> bool { - self.spare_capacity() == 0 + self.spare_capacity() != 0 + } + + /// Wrap the buffer in `Limited`, which limits the amount of bytes used to + /// `limit`. + /// + /// [`Limited::into_inner`] can be used to retrieve the buffer again, + /// or a mutable reference to the buffer can be used and the limited buffer + /// be dropped after usage. + fn limit(self, limit: usize) -> Limited + where + Self: Sized, + { + Limited { buf: self, limit } } } @@ -87,7 +101,7 @@ pub unsafe trait BufMut: 'static { /// untouched. /// /// ``` -/// use heph_rt::bytes::Bytes; +/// use heph_rt::io::BufMut; /// /// let mut buf = Vec::with_capacity(100); /// buf.extend(b"Hello world!"); @@ -96,15 +110,15 @@ pub unsafe trait BufMut: 'static { /// /// assert_eq!(&*buf, b"Hello world! Hello mars!"); /// -/// fn write_bytes(src: &[u8], mut buf: B) where B: Bytes { +/// fn write_bytes(src: &[u8], buf: &mut B) { /// // Writes `src` to `buf`. -/// # let dst = buf.as_bytes(); -/// # let len = std::cmp::min(src.len(), dst.len()); +/// # let (dst, len) = unsafe { buf.parts_mut() }; +/// # let len = std::cmp::min(src.len(), len); /// # // Safety: both the src and dst pointers are good. And we've ensured /// # // that the length is correct, not overwriting data we don't own or /// # // reading data we don't own. /// # unsafe { -/// # std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), len); +/// # std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len); /// # buf.update_length(len); /// # } /// } @@ -153,7 +167,7 @@ pub trait BufMutSlice: private::BufMutSlice + 'static { /// Returns `true` at least one of the buffer has spare capacity. fn has_spare_capacity(&self) -> bool { - self.total_spare_capacity() == 0 + self.total_spare_capacity() != 0 } } @@ -215,7 +229,7 @@ unsafe impl private::BufMutSlice for [B; N] { /// # Safety /// /// Unlike normal buffers the buffer implementations for Heph have additional -/// requirements because Heph uses I/O uring. +/// requirements because Heph uses io_uring. /// /// If the operation (that uses this buffer) is not polled to completion, i.e. /// the `Future` is dropped before it returns `Poll::Ready`, the kernel still @@ -236,6 +250,19 @@ pub unsafe trait Buf: 'static { /// other words the memory the pointer and length are pointing to must be a /// valid memory address and owned by the buffer. unsafe fn parts(&self) -> (*const u8, usize); + + /// Wrap the buffer in `Limited`, which limits the amount of bytes used to + /// `limit`. + /// + /// [`Limited::into_inner`] can be used to retrieve the buffer again, or a + /// mutable reference to the buffer can be used and the limited buffer be + /// dropped after usage. + fn limit(self, limit: usize) -> Limited + where + Self: Sized, + { + Limited { buf: self, limit } + } } // SAFETY: `Vec` manages the allocation of the bytes, so as long as it's @@ -544,3 +571,46 @@ unsafe impl, const N: usize> private::BufSlice for BufWrapper< self.0.as_iovecs() } } + +/// Wrapper to limit the number of bytes `B` can use. +/// +/// See [`Buf::limit`] and [`BufMut::limit`]. +#[derive(Debug)] +pub struct Limited { + buf: B, + limit: usize, +} + +impl Limited { + /// Returns the underlying buffer. 
+ pub fn into_inner(self) -> B { + self.buf + } +} + +unsafe impl BufMut for Limited { + unsafe fn parts_mut(&mut self) -> (*mut u8, usize) { + let (ptr, size) = self.buf.parts_mut(); + (ptr, min(size, self.limit)) + } + + unsafe fn update_length(&mut self, n: usize) { + self.limit -= n; // For use in read N bytes kind of calls. + self.buf.update_length(n); + } + + fn spare_capacity(&self) -> usize { + min(self.buf.spare_capacity(), self.limit) + } + + fn has_spare_capacity(&self) -> bool { + self.limit != 0 && self.buf.has_spare_capacity() + } +} + +unsafe impl Buf for Limited { + unsafe fn parts(&self) -> (*const u8, usize) { + let (ptr, size) = self.buf.parts(); + (ptr, min(size, self.limit)) + } +} From 05febaff592d904d308f1c37e37774a354224083 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 18:28:49 +0200 Subject: [PATCH 061/177] Remove the bytes module The traits are no longer used as switched to the traits in the io module. --- rt/src/bytes.rs | 601 ------------------------------------------ rt/src/coordinator.rs | 4 +- rt/src/io/mod.rs | 31 ++- rt/src/lib.rs | 1 - rt/src/local/mod.rs | 2 +- rt/src/shared/mod.rs | 10 +- rt/src/timer.rs | 2 + rt/src/worker.rs | 10 +- 8 files changed, 38 insertions(+), 623 deletions(-) delete mode 100644 rt/src/bytes.rs diff --git a/rt/src/bytes.rs b/rt/src/bytes.rs deleted file mode 100644 index 168dd8a64..000000000 --- a/rt/src/bytes.rs +++ /dev/null @@ -1,601 +0,0 @@ -//! Traits to work with bytes. -//! -//! This module contains two traits: -//! * [`Bytes`] to work with a single buffer, e.g. `Vec`, and -//! * [`BytesVectored`] to work with multiple buffers and using vectored I/O, -//! e.g. `Vec>`. -//! -//! The basic design of both traits is the same and is fairly simple. It's split -//! into two methods. Usage starts with a call to [`as_bytes`]/[`as_bufs`], -//! which returns a slice of uninitialised bytes. The caller should then fill -//! that slice with valid bytes, e.g. by receiving bytes from a socket. Once the -//! slice is (partially) filled the caller should call -//! [`update_length`]/[`update_lengths`], which updates the length of the -//! buffer(s). -//! -//! [`as_bytes`]: Bytes::as_bytes -//! [`as_bufs`]: BytesVectored::as_bufs -//! [`update_length`]: Bytes::update_length -//! [`update_lengths`]: BytesVectored::update_lengths - -use std::cmp::min; -use std::mem::MaybeUninit; -use std::ops::{Deref, DerefMut}; -use std::{fmt, slice}; - -/// Trait to make easier to work with uninitialised buffers. -/// -/// This is implemented for common types such as `Vec`, [see below]. -/// -/// [see below]: #foreign-impls -pub trait Bytes { - /// Returns itself as a slice of bytes that may or may not be initialised. - /// - /// # Notes - /// - /// The implementation must guarantee that two calls (without a call to - /// [`update_length`] in between) returns the same slice of bytes. - /// - /// [`update_length`]: Bytes::update_length - fn as_bytes(&mut self) -> &mut [MaybeUninit]; - - /// Returns the length of the buffer as returned by [`as_bytes`]. - /// - /// [`as_bytes`]: Bytes::as_bytes - fn spare_capacity(&self) -> usize; - - /// Returns `true` if the buffer has spare capacity. - fn has_spare_capacity(&self) -> bool { - self.spare_capacity() == 0 - } - - /// Update the length of the byte slice, marking `n` bytes as initialised. - /// - /// # Safety - /// - /// The caller must ensure that at least the first `n` bytes returned by - /// [`as_bytes`] are initialised. 
- /// - /// [`as_bytes`]: Bytes::as_bytes - /// - /// # Notes - /// - /// If this method is not implemented correctly methods such as - /// [`TcpStream::recv_n`] will not work correctly (as the buffer will - /// overwrite itself on successive reads). - /// - /// [`TcpStream::recv_n`]: crate::net::TcpStream::recv_n - unsafe fn update_length(&mut self, n: usize); - - /// Wrap the buffer in `LimitedBytes`, which limits the amount of bytes used - /// to `limit`. - /// - /// [`LimitedBytes::into_inner`] can be used to retrieve the buffer again, - /// or a mutable reference to the buffer can be used and the limited buffer - /// be dropped after usage. - fn limit(self, limit: usize) -> LimitedBytes - where - Self: Sized, - { - LimitedBytes { buf: self, limit } - } -} - -impl Bytes for &mut B -where - B: Bytes + ?Sized, -{ - fn as_bytes(&mut self) -> &mut [MaybeUninit] { - (**self).as_bytes() - } - - fn spare_capacity(&self) -> usize { - (**self).spare_capacity() - } - - fn has_spare_capacity(&self) -> bool { - (**self).has_spare_capacity() - } - - unsafe fn update_length(&mut self, n: usize) { - (**self).update_length(n); - } -} - -/// The implementation for `Vec` only uses the uninitialised capacity of the -/// vector. In other words the bytes currently in the vector remain untouched. -/// -/// # Examples -/// -/// The following example shows that the bytes already in the vector remain -/// untouched. -/// -/// ``` -/// use heph_rt::bytes::Bytes; -/// -/// let mut buf = Vec::with_capacity(100); -/// buf.extend(b"Hello world!"); -/// -/// write_bytes(b" Hello mars!", &mut buf); -/// -/// assert_eq!(&*buf, b"Hello world! Hello mars!"); -/// -/// fn write_bytes(src: &[u8], mut buf: B) where B: Bytes { -/// // Writes `src` to `buf`. -/// # let dst = buf.as_bytes(); -/// # let len = std::cmp::min(src.len(), dst.len()); -/// # // Safety: both the src and dst pointers are good. And we've ensured -/// # // that the length is correct, not overwriting data we don't own or -/// # // reading data we don't own. -/// # unsafe { -/// # std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), len); -/// # buf.update_length(len); -/// # } -/// } -/// ``` -impl Bytes for Vec { - fn as_bytes(&mut self) -> &mut [MaybeUninit] { - self.spare_capacity_mut() - } - - fn spare_capacity(&self) -> usize { - self.capacity() - self.len() - } - - fn has_spare_capacity(&self) -> bool { - self.capacity() != self.len() - } - - unsafe fn update_length(&mut self, n: usize) { - let new = self.len() + n; - debug_assert!(self.capacity() >= new); - self.set_len(new); - } -} - -/// A version of [`IoSliceMut`] that allows the buffer to be uninitialised. -/// -/// [`IoSliceMut`]: std::io::IoSliceMut -#[repr(transparent)] -pub struct MaybeUninitSlice<'a>(socket2::MaybeUninitSlice<'a>); - -impl<'a> fmt::Debug for MaybeUninitSlice<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl<'a> MaybeUninitSlice<'a> { - /// Creates a new `MaybeUninitSlice` wrapping a byte slice. - /// - /// # Panics - /// - /// Panics on Windows if the slice is larger than 4GB. - pub fn new(buf: &'a mut [MaybeUninit]) -> MaybeUninitSlice<'a> { - MaybeUninitSlice(socket2::MaybeUninitSlice::new(buf)) - } - - /// Creates a new `MaybeUninitSlice` from a [`Vec`]tor. - /// - /// Similar to the [`Bytes`] implementation for `Vec` this only uses the - /// uninitialised capacity of the vector. - /// - /// # Panics - /// - /// Panics on Windows if the vector's uninitialised capacity is larger than - /// 4GB. 
- pub fn from_vec(buf: &'a mut Vec) -> MaybeUninitSlice<'a> { - MaybeUninitSlice(socket2::MaybeUninitSlice::new(buf.as_bytes())) - } - - fn limit(&mut self, limit: usize) { - let len = self.len(); - assert!(len >= limit); - self.0 = unsafe { - // SAFETY: this should be the line below, but I couldn't figure out - // the lifetime. Since we're only making the slices smaller (as - // checked by the assert above) this should be safe. - //self.0 = socket2::MaybeUninitSlice::new(&mut self[..limit]); - socket2::MaybeUninitSlice::new(slice::from_raw_parts_mut(self.0.as_mut_ptr(), limit)) - }; - } -} - -impl<'a> Deref for MaybeUninitSlice<'a> { - type Target = [MaybeUninit]; - - fn deref(&self) -> &[MaybeUninit] { - &self.0 - } -} - -impl<'a> DerefMut for MaybeUninitSlice<'a> { - fn deref_mut(&mut self) -> &mut [MaybeUninit] { - &mut self.0 - } -} - -/// Trait to work with uninitialised buffers using vectored I/O. -/// -/// This trait is implemented for arrays and tuples. When all of buffers are -/// *homogeneous*, i.e. of the same type, the array implementation is the -/// easiest to use along side with the [`Bytes`] trait. If however the buffers -/// are *heterogeneous*, i.e. of different types, the tuple implementation can -/// be used. See the examples below. -/// -/// # Examples -/// -/// Using the homogeneous array implementation. -/// -/// ``` -/// # #![feature(maybe_uninit_write_slice)] -/// use heph_rt::bytes::BytesVectored; -/// -/// let mut buf1 = Vec::with_capacity(12); -/// let mut buf2 = Vec::with_capacity(1); -/// let mut buf3 = Vec::with_capacity(5); -/// let mut buf4 = Vec::with_capacity(10); // Has extra capacity. -/// -/// let bufs = [&mut buf1, &mut buf2, &mut buf3, &mut buf4]; -/// let text = b"Hello world. From mars!"; -/// let bytes_written = write_vectored(bufs, text); -/// assert_eq!(bytes_written, text.len()); -/// -/// assert_eq!(buf1, b"Hello world."); -/// assert_eq!(buf2, b" "); -/// assert_eq!(buf3, b"From "); -/// assert_eq!(buf4, b"mars!"); -/// -/// /// Writes `text` to the `bufs`. -/// fn write_vectored(mut bufs: B, text: &[u8]) -> usize -/// where B: BytesVectored, -/// { -/// // Implementation is not relevant to the example. -/// # let mut written = 0; -/// # let mut left = text; -/// # for buf in bufs.as_bufs().as_mut().iter_mut() { -/// # let n = std::cmp::min(buf.len(), left.len()); -/// # let _ = std::mem::MaybeUninit::write_slice(&mut buf[..n], &left[..n]); -/// # left = &left[n..]; -/// # written += n; -/// # if left.is_empty() { -/// # break; -/// # } -/// # } -/// # // NOTE: we could update the length of the buffers in the loop above, -/// # // but this also acts as a smoke test for the implementation and this is -/// # // what would happen with actual vectored I/O. -/// # unsafe { bufs.update_lengths(written); } -/// # written -/// } -/// ``` -/// -/// Using the heterogeneous tuple implementation. -/// -/// ``` -/// # #![feature(maybe_uninit_uninit_array, maybe_uninit_slice, maybe_uninit_write_slice)] -/// use std::mem::MaybeUninit; -/// -/// use heph_rt::bytes::{Bytes, BytesVectored}; -/// -/// // Buffers of different types. -/// let mut buf1 = Vec::with_capacity(12); -/// let mut buf2 = StackBuf::new(); // Has extra capacity. -/// -/// // Using tuples we can use different kind of buffers. Here we use a `Vec` and -/// // our own `StackBuf` type. -/// let bufs = (&mut buf1, &mut buf2); -/// let text = b"Hello world. 
From mars!"; -/// let bytes_written = write_vectored(bufs, text); -/// assert_eq!(bytes_written, text.len()); -/// -/// assert_eq!(buf1, b"Hello world."); -/// assert_eq!(buf2.bytes(), b" From mars!"); -/// -/// /// Writes `text` to the `bufs`. -/// fn write_vectored(mut bufs: B, text: &[u8]) -> usize -/// where B: BytesVectored, -/// { -/// // Implementation is not relevant to the example. -/// # let mut written = 0; -/// # let mut left = text; -/// # for buf in bufs.as_bufs().as_mut().iter_mut() { -/// # let n = std::cmp::min(buf.len(), left.len()); -/// # let _ = MaybeUninit::write_slice(&mut buf[..n], &left[..n]); -/// # left = &left[n..]; -/// # written += n; -/// # if left.is_empty() { -/// # break; -/// # } -/// # } -/// # // NOTE: we could update the length of the buffers in the loop above, -/// # // but this also acts as a smoke test for the implementation and this is -/// # // what would happen with actual vectored I/O. -/// # unsafe { bufs.update_lengths(written); } -/// # written -/// } -/// -/// /// Custom stack buffer type that implements the `Bytes` trait. -/// struct StackBuf { -/// bytes: [MaybeUninit; 4096], -/// initialised: usize, -/// } -/// -/// impl StackBuf { -/// fn new() -> StackBuf { -/// StackBuf { -/// bytes: MaybeUninit::uninit_array(), -/// initialised: 0, -/// } -/// } -/// -/// fn bytes(&self) -> &[u8] { -/// unsafe { MaybeUninit::slice_assume_init_ref(&self.bytes[..self.initialised]) } -/// } -/// } -/// -/// impl Bytes for StackBuf { -/// fn as_bytes(&mut self) -> &mut [MaybeUninit] { -/// &mut self.bytes[self.initialised..] -/// } -/// -/// fn spare_capacity(&self) -> usize { -/// self.bytes.len() - self.initialised -/// } -/// -/// fn has_spare_capacity(&self) -> bool { -/// self.bytes.len() != self.initialised -/// } -/// -/// unsafe fn update_length(&mut self, n: usize) { -/// self.initialised += n; -/// } -/// } -/// ``` -pub trait BytesVectored { - /// Type used as slice of buffers, usually this is an array. - type Bufs<'b>: AsMut<[MaybeUninitSlice<'b>]> - where - Self: 'b; - - /// Returns itself as a slice of [`MaybeUninitSlice`]. - fn as_bufs<'b>(&'b mut self) -> Self::Bufs<'b>; - - /// Returns the total length of the buffers as returned by [`as_bufs`]. - /// - /// [`as_bufs`]: BytesVectored::as_bufs - fn spare_capacity(&self) -> usize; - - /// Returns `true` if (one of) the buffers has spare capacity. - fn has_spare_capacity(&self) -> bool { - self.spare_capacity() == 0 - } - - /// Update the length of the buffers in the slice. - /// - /// # Safety - /// - /// The caller must ensure that at least the first `n` bytes returned by - /// [`as_bufs`] are initialised, starting at the first buffer continuing - /// into the next one, etc. - /// - /// [`as_bufs`]: BytesVectored::as_bufs - /// - /// # Notes - /// - /// If this method is not implemented correctly methods such as - /// [`TcpStream::recv_n_vectored`] will not work correctly (as the buffer - /// will overwrite itself on successive reads). - /// - /// [`TcpStream::recv_n_vectored`]: crate::net::TcpStream::recv_n_vectored - unsafe fn update_lengths(&mut self, n: usize); - - /// Wrap the buffer in `LimitedBytes`, which limits the amount of bytes used - /// to `limit`. - /// - /// [`LimitedBytes::into_inner`] can be used to retrieve the buffer again, - /// or a mutable reference to the buffer can be used and the limited buffer - /// be dropped after usage. 
- fn limit(self, limit: usize) -> LimitedBytes - where - Self: Sized, - { - LimitedBytes { buf: self, limit } - } -} - -impl BytesVectored for &mut B -where - B: BytesVectored + ?Sized, -{ - type Bufs<'b> = B::Bufs<'b> where Self: 'b; - - fn as_bufs<'b>(&'b mut self) -> Self::Bufs<'b> { - (**self).as_bufs() - } - - fn spare_capacity(&self) -> usize { - (**self).spare_capacity() - } - - fn has_spare_capacity(&self) -> bool { - (**self).has_spare_capacity() - } - - unsafe fn update_lengths(&mut self, n: usize) { - (**self).update_lengths(n); - } -} - -impl BytesVectored for [B; N] -where - B: Bytes, -{ - type Bufs<'b> = [MaybeUninitSlice<'b>; N] where Self: 'b; - - fn as_bufs<'b>(&'b mut self) -> Self::Bufs<'b> { - let mut bufs = MaybeUninit::uninit_array::(); - for (i, buf) in self.iter_mut().enumerate() { - _ = bufs[i].write(MaybeUninitSlice::new(buf.as_bytes())); - } - // Safety: initialised the buffers above. - unsafe { MaybeUninit::array_assume_init(bufs) } - } - - fn spare_capacity(&self) -> usize { - self.iter().map(Bytes::spare_capacity).sum() - } - - fn has_spare_capacity(&self) -> bool { - self.iter().any(Bytes::has_spare_capacity) - } - - unsafe fn update_lengths(&mut self, n: usize) { - let mut left = n; - for buf in self.iter_mut() { - let n = min(left, buf.spare_capacity()); - buf.update_length(n); - left -= n; - if left == 0 { - return; - } - } - } -} - -macro_rules! impl_vectored_bytes_tuple { - ( $N: tt : $( $t: ident $idx: tt ),+ ) => { - impl<$( $t ),+> BytesVectored for ( $( $t ),+ ) - where $( $t: Bytes ),+ - { - type Bufs<'b> = [MaybeUninitSlice<'b>; $N] where Self: 'b; - - fn as_bufs<'b>(&'b mut self) -> Self::Bufs<'b> { - let mut bufs = MaybeUninit::uninit_array::<$N>(); - $( - _ = bufs[$idx].write(MaybeUninitSlice::new(self.$idx.as_bytes())); - )+ - // Safety: initialised the buffers above. - unsafe { MaybeUninit::array_assume_init(bufs) } - } - - fn spare_capacity(&self) -> usize { - $( self.$idx.spare_capacity() + )+ 0 - } - - fn has_spare_capacity(&self) -> bool { - $( self.$idx.has_spare_capacity() || )+ false - } - - unsafe fn update_lengths(&mut self, n: usize) { - let mut left = n; - $( - let n = min(left, self.$idx.spare_capacity()); - self.$idx.update_length(n); - left -= n; - if left == 0 { - return; - } - )+ - } - } - }; -} - -impl_vectored_bytes_tuple! { 12: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6, B7 7, B8 8, B9 9, B10 10, B11 11 } -impl_vectored_bytes_tuple! { 11: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6, B7 7, B8 8, B9 9, B10 10 } -impl_vectored_bytes_tuple! { 10: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6, B7 7, B8 8, B9 9 } -impl_vectored_bytes_tuple! { 9: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6, B7 7, B8 8 } -impl_vectored_bytes_tuple! { 8: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6, B7 7 } -impl_vectored_bytes_tuple! { 7: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5, B6 6 } -impl_vectored_bytes_tuple! { 6: B0 0, B1 1, B2 2, B3 3, B4 4, B5 5 } -impl_vectored_bytes_tuple! { 5: B0 0, B1 1, B2 2, B3 3, B4 4 } -impl_vectored_bytes_tuple! { 4: B0 0, B1 1, B2 2, B3 3 } -impl_vectored_bytes_tuple! { 3: B0 0, B1 1, B2 2 } -impl_vectored_bytes_tuple! { 2: B0 0, B1 1 } - -/// Wrapper to limit the number of bytes `B` can use. -/// -/// See [`Bytes::limit`] and [`BytesVectored::limit`]. -#[derive(Debug)] -pub struct LimitedBytes { - buf: B, - limit: usize, -} - -impl LimitedBytes { - /// Returns the underlying buffer. 
- pub fn into_inner(self) -> B { - self.buf - } -} - -impl Bytes for LimitedBytes -where - B: Bytes, -{ - fn as_bytes(&mut self) -> &mut [MaybeUninit] { - let bytes = self.buf.as_bytes(); - if bytes.len() > self.limit { - &mut bytes[..self.limit] - } else { - bytes - } - } - - fn spare_capacity(&self) -> usize { - min(self.buf.spare_capacity(), self.limit) - } - - fn has_spare_capacity(&self) -> bool { - self.spare_capacity() > 0 - } - - unsafe fn update_length(&mut self, n: usize) { - self.buf.update_length(n); - self.limit -= n; - } -} - -impl BytesVectored for LimitedBytes -where - B: BytesVectored, -{ - type Bufs<'b> = B::Bufs<'b> where Self: 'b; - - fn as_bufs<'b>(&'b mut self) -> Self::Bufs<'b> { - let mut bufs = self.buf.as_bufs(); - let mut left = self.limit; - let mut iter = bufs.as_mut().iter_mut(); - while let Some(buf) = iter.next() { - let len = buf.len(); - if left > len { - left -= len; - } else { - buf.limit(left); - for buf in iter { - *buf = MaybeUninitSlice::new(&mut []); - } - break; - } - } - bufs - } - - fn spare_capacity(&self) -> usize { - if self.limit == 0 { - 0 - } else { - min(self.buf.spare_capacity(), self.limit) - } - } - - fn has_spare_capacity(&self) -> bool { - self.limit != 0 && self.buf.has_spare_capacity() - } - - unsafe fn update_lengths(&mut self, n: usize) { - self.buf.update_lengths(n); - self.limit -= n; - } -} diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index 8d5521348..911359e4b 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -45,7 +45,7 @@ const RING: Token = Token(usize::MAX - 1); /// Coordinator responsible for coordinating the Heph runtime. #[derive(Debug)] pub(super) struct Coordinator { - /// I/O uring. + /// io_uring completion ring. ring: a10::Ring, /// OS poll, used to poll the status of the (sync) worker threads and /// process `signals`. @@ -200,7 +200,7 @@ impl Coordinator { fn poll_os(&mut self, events: &mut Events) -> io::Result<()> { match self.poll.poll(events, None) { Ok(()) => Ok(()), - // The I/O uring will interrupt us. + // The io_uring will interrupt us. Err(ref err) if err.kind() == io::ErrorKind::Interrupted => Ok(()), Err(err) => Err(err), } diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 6122860e3..7b2234c9b 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -1,21 +1,36 @@ //! Type definitions for I/O functionality. //! -//! The main types of this module are the [`Buf`] and [`BufMut`] traits, which -//! define the requirements on buffers in use in I/O. Additionally the -//! [`BufSlice`] and [`BufMutSlice`] traits define the behaviour of buffers in -//! vectored I/O. +//! # Working with Buffers //! -//! Finally this module contains a number of [`Future`] implementations that -//! facilitate I/O operations. +//! For working with buffers we define two plus two traits. For "regular", i.e. +//! single buffer I/O, we have the following traits: +//! * [`Buf`] is used in writing/sending. +//! * [`BufMut`] is used in reading/receiving. //! -//! [`Future`]: std::future::Future +//! The basic design of both traits is the same and is fairly simple. Usage +//! starts with a call to [`parts`]/[`parts_mut`], which returns a pointer to +//! the bytes in the bufer to read from or writing into. For `BufMut` the caller +//! an write into the buffer and update the length using [`update_length`], +//! though normally this is done by an I/O operation. +//! +//! For vectored I/O we have the same two traits as above, but suffixed with +//! `Slice`: +//! 
* [`BufSlice`] is used in vectored writing/sending. +//! * [`BufMutSlice`] is used in vectored reading/receiving. +//! +//! Neither of these traits can be implemented outside of the crate, but it's +//! already implemented for tuples and arrays. +//! +//! [`parts`]: Buf::parts +//! [`parts_mut`]: BufMut::parts_mut +//! [`update_length`]: BufMut::update_length // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; mod buf; pub(crate) use buf::BufWrapper; -pub use buf::{Buf, BufMut, BufMutSlice, BufSlice}; +pub use buf::{Buf, BufMut, BufMutSlice, BufSlice, Limited}; mod futures; pub(crate) use futures::{ diff --git a/rt/src/lib.rs b/rt/src/lib.rs index f561017c8..eacdd20c7 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -214,7 +214,6 @@ use heph_inbox as inbox; use mio::{event, Interest, Token}; pub mod access; -pub mod bytes; mod channel; mod coordinator; mod error; diff --git a/rt/src/local/mod.rs b/rt/src/local/mod.rs index 57c615c56..e776be699 100644 --- a/rt/src/local/mod.rs +++ b/rt/src/local/mod.rs @@ -29,7 +29,7 @@ pub(super) struct RuntimeInternals { pub(super) scheduler: RefCell, /// OS poll, used for event notifications to support non-blocking I/O. pub(super) poll: RefCell, - /// I/O uring. + /// io_uring completion ring. pub(super) ring: RefCell, /// Timers, deadlines and timeouts. pub(crate) timers: RefCell, diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 93a14400e..09ae228db 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -84,7 +84,7 @@ pub(crate) struct RuntimeInternals { /// Poll instance for all shared event sources. This is polled by the worker /// thread. poll: Mutex, - /// I/O uring. + /// io_uring completion ring. ring: Mutex, /// SubmissionQueue for the `ring`. sq: a10::SubmissionQueue, @@ -160,21 +160,21 @@ impl RuntimeInternals { } } - /// Polls the I/O uring if it's currently not being polled. + /// Polls the io_uring completion ring if it's currently not being polled. pub(crate) fn try_poll_ring(&self) -> io::Result<()> { match self.ring.try_lock() { Ok(mut ring) => ring.poll(Some(Duration::ZERO)), Err(TryLockError::WouldBlock) => Ok(()), - Err(TryLockError::Poisoned(err)) => panic!("failed to lock shared I/O uring: {err}"), + Err(TryLockError::Poisoned(err)) => panic!("failed to lock shared io_uring: {err}"), } } - /// Return the file descriptor for the I/O uring. + /// Return the file descriptor for the io_uring. pub(crate) fn ring_fd(&self) -> RawFd { self.ring.lock().unwrap().as_fd().as_raw_fd() } - /// Returns the I/O uring submission queue. + /// Returns the io_uring submission queue. pub(crate) fn submission_queue(&self) -> &a10::SubmissionQueue { &self.sq } diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 739422ad0..135947b8d 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -166,6 +166,8 @@ impl Drop for Timer { /// dealing with lifetime issue, e.g. when calling /// [`actor::Context::receive_next`] and wrapping that in a `Deadline`. /// +/// [`actor::Context::receive_next`]: heph::actor::Context::receive_next +/// /// # Examples /// /// Setting a timeout for a future. diff --git a/rt/src/worker.rs b/rt/src/worker.rs index aad2f2cb8..88fde601b 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -52,9 +52,9 @@ const COMMS: Token = Token(usize::MAX - 1); /// Token used to indicate the shared [`Poll`] (in [`shared::RuntimeInternals`]) /// has events. const SHARED_POLL: Token = Token(usize::MAX - 2); -/// Token used to indicate the I/O uring has events. 
+/// Token used to indicate the io_uring completion ring has events. const RING: Token = Token(usize::MAX - 3); -/// Token used to indicate the shared I/O uring has events. +/// Token used to indicate the shared io_uring completion ring has events. const SHARED_RING: Token = Token(usize::MAX - 4); /// Setup a new worker thread. @@ -88,7 +88,7 @@ pub(super) struct WorkerSetup { /// Poll instance for the worker thread. This is needed before starting the /// thread to initialise the [`rt::local::waker`]. poll: Poll, - /// I/O uring. + /// io_uring completion ring. ring: a10::Ring, /// Waker id used to create a `Waker` for thread-local actors. waker_id: WakerId, @@ -222,13 +222,13 @@ impl Worker { // Register the shared poll intance. let poll = setup.poll; - trace!(worker_id = setup.id.get(); "registering I/O uring"); + trace!(worker_id = setup.id.get(); "registering io_uring completion ring"); let ring = setup.ring; let ring_fd = ring.as_fd().as_raw_fd(); poll.registry() .register(&mut SourceFd(&ring_fd), RING, Interest::READABLE) .map_err(Error::Init)?; - trace!(worker_id = setup.id.get(); "registering shared I/O uring"); + trace!(worker_id = setup.id.get(); "registering shared io_uring completion ring"); let shared_ring_fd = shared_internals.ring_fd(); poll.registry() .register( From 3133ac2dea2f9ace9bb9482e73f4cc966db3c290 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 18:30:12 +0200 Subject: [PATCH 062/177] Implement Buf for Box<[u8]> --- rt/src/io/buf.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 1a5b8d759..7cdb099f3 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -284,9 +284,19 @@ unsafe impl Buf for String { } } -// SAFETY: `Arc` manages the allocation of the bytes, so as long as it's -// alive, so is the slice of bytes. When the `Vec`tor is leaked the allocation -// will also be leaked. +// SAFETY: `Box<[u8]>` manages the allocation of the bytes, so as long as it's +// alive, so is the slice of bytes. When the `Box` is leaked the allocation will +// also be leaked. +unsafe impl Buf for Box<[u8]> { + unsafe fn parts(&self) -> (*const u8, usize) { + let slice: &[u8] = self; + (slice.as_ptr().cast(), slice.len()) + } +} + +// SAFETY: `Arc<[u8]>` manages the allocation of the bytes, so as long as it's +// alive, so is the slice of bytes. When the `Arc` is leaked the allocation will +// also be leaked. unsafe impl Buf for Arc<[u8]> { unsafe fn parts(&self) -> (*const u8, usize) { let slice: &[u8] = self; From 42c9fad19ed6f56b928e14ebd8c3183cb627459a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 18:44:15 +0200 Subject: [PATCH 063/177] Add io::{stdin, stdout, stderr} Handles to standard in, out and error I/O streams. --- rt/src/io/mod.rs | 165 +++++++++++++++++++++++++++++++++++++++++++++++ rt/src/pipe.rs | 2 +- 2 files changed, 166 insertions(+), 1 deletion(-) diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 7b2234c9b..a0490417c 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -24,6 +24,21 @@ //! [`parts`]: Buf::parts //! [`parts_mut`]: BufMut::parts_mut //! [`update_length`]: BufMut::update_length +//! +//! # Working with Standard I/O Stream +//! +//! The [`stdin`], [`stdout`] and [`stderr`] function provide handles to +//! standard I/O streams of all Unix processes. All I/O performed using these +//! handles will use io_uring. +//! +//! Note that these handles are **not** buffered, unlike the ones found in the +//! standard library (e.g. 
[`std::io::stdout`]). Furthermore these handle do not +//! flush the buffer used by the standard library, so it's not advised to use +//! both the handle from standard library and Heph simultaneously. + +use a10::Extract; + +use crate as rt; // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; @@ -36,3 +51,153 @@ mod futures; pub(crate) use futures::{ Read, ReadN, ReadNVectored, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored, }; + +macro_rules! stdio { + ( + $fn: ident () -> $name: ident, $fd: expr + ) => { + #[doc = concat!("Create a new `", stringify!($name), "`.\n\n")] + pub fn $fn(rt: &RT) -> $name { + $name(std::mem::ManuallyDrop::new(unsafe { a10::AsyncFd::new( + $fd, + rt.submission_queue(), + )})) + } + + #[doc = concat!( + "A handle for ", stringify!($fn), " of the process.\n\n", + "# Notes\n\n", + "This directly writes to the raw file descriptor, which means it's not buffered and will not flush anything buffered by the standard library.\n\n", + "When this type is dropped it will not close ", stringify!($fn), ".", + )] + #[derive(Debug)] + pub struct $name(std::mem::ManuallyDrop); + }; +} + +stdio!(stdin() -> Stdin, libc::STDIN_FILENO); +stdio!(stdout() -> Stdout, libc::STDOUT_FILENO); +stdio!(stderr() -> Stderr, libc::STDERR_FILENO); + +impl Stdin { + /// Read bytes from standard in, writing them into `buf`. + pub async fn read(&self, buf: B) -> Result { + Read(self.0.read(BufWrapper(buf))).await + } + + /// Read at least `n` bytes from standard in, writing them into `buf`. + /// + /// This returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes + /// could be read. + /// + /// [`io::ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof + pub async fn read_n(&self, buf: B, n: usize) -> Result { + debug_assert!( + buf.spare_capacity() >= n, + "called `Receiver::read_n` with a buffer smaller than `n`", + ); + ReadN(self.0.read_n(BufWrapper(buf), n)).await + } + + /// Read bytes from standard in, writing them into `bufs`. + pub async fn read_vectored, const N: usize>(&self, bufs: B) -> Result { + ReadVectored(self.0.read_vectored(BufWrapper(bufs))).await + } + + /// Read at least `n` bytes from standard in, writing them into `bufs`. + pub async fn read_n_vectored, const N: usize>( + &self, + bufs: B, + n: usize, + ) -> Result { + debug_assert!( + bufs.total_spare_capacity() >= n, + "called `Receiver::read_n_vectored` with buffers smaller than `n`" + ); + ReadNVectored(self.0.read_n_vectored(BufWrapper(bufs), n)).await + } +} + +impl Stdout { + /// Write the bytes in `buf` to standard out. + /// + /// Return the number of bytes written. This may we fewer than the length of + /// `buf`. To ensure that all bytes are written use [`Stdout::write_all`]. + pub async fn write(&self, buf: B) -> Result<(B, usize)> { + Write(self.0.write(BufWrapper(buf)).extract()).await + } + + /// Write the all bytes in `buf` to standard out. + /// + /// If this fails to write all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + /// + /// [`io::ErrorKind::WriteZero`]: std::io::ErrorKind::WriteZero + pub async fn write_all(&self, buf: B) -> Result { + WriteAll(self.0.write_all(BufWrapper(buf)).extract()).await + } + + /// Write the bytes in `bufs` to standard out. + /// + /// Return the number of bytes written. This may we fewer than the length of + /// `bufs`. To ensure that all bytes are written use + /// [`Stdout::write_vectored_all`]. 
+ pub async fn write_vectored, const N: usize>( + &self, + bufs: B, + ) -> Result<(B, usize)> { + WriteVectored(self.0.write_vectored(BufWrapper(bufs)).extract()).await + } + + /// Write the all bytes in `bufs` to standard out. + /// + /// If this fails to write all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + /// + /// [`io::ErrorKind::WriteZero`]: std::io::ErrorKind::WriteZero + pub async fn write_vectored_all, const N: usize>(&self, bufs: B) -> Result { + WriteAllVectored(self.0.write_all_vectored(BufWrapper(bufs)).extract()).await + } +} + +impl Stderr { + /// Write the bytes in `buf` to standard error. + /// + /// Return the number of bytes written. This may we fewer than the length of + /// `buf`. To ensure that all bytes are written use [`Stderr::write_all`]. + pub async fn write(&self, buf: B) -> Result<(B, usize)> { + Write(self.0.write(BufWrapper(buf)).extract()).await + } + + /// Write the all bytes in `buf` to standard error. + /// + /// If this fails to write all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + /// + /// [`io::ErrorKind::WriteZero`]: std::io::ErrorKind::WriteZero + pub async fn write_all(&self, buf: B) -> Result { + WriteAll(self.0.write_all(BufWrapper(buf)).extract()).await + } + + /// Write the bytes in `bufs` to standard error. + /// + /// Return the number of bytes written. This may we fewer than the length of + /// `bufs`. To ensure that all bytes are written use + /// [`Stderr::write_vectored_all`]. + pub async fn write_vectored, const N: usize>( + &self, + bufs: B, + ) -> Result<(B, usize)> { + WriteVectored(self.0.write_vectored(BufWrapper(bufs)).extract()).await + } + + /// Write the all bytes in `bufs` to standard error. + /// + /// If this fails to write all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + /// + /// [`io::ErrorKind::WriteZero`]: std::io::ErrorKind::WriteZero + pub async fn write_vectored_all, const N: usize>(&self, bufs: B) -> Result { + WriteAllVectored(self.0.write_all_vectored(BufWrapper(bufs)).extract()).await + } +} diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 861482db5..886b85c75 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -166,7 +166,7 @@ impl Sender { WriteAll(self.fd.write_all(BufWrapper(buf)).extract()).await } - /// Write the bytes in `bufs` intoto the pipe. + /// Write the bytes in `bufs` into the pipe. /// /// Return the number of bytes written. This may we fewer than the length of /// `bufs`. 
To ensure that all bytes are written use From 56a5d11efcee152bf6578a9286e18530397fd861 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 20:12:58 +0200 Subject: [PATCH 064/177] Update the buffer tests --- rt/tests/functional.rs | 2 +- rt/tests/functional/bytes.rs | 170 ----------------------------------- rt/tests/functional/io.rs | 170 +++++++++++++++++++++++++++++++++++ 3 files changed, 171 insertions(+), 171 deletions(-) delete mode 100644 rt/tests/functional/bytes.rs create mode 100644 rt/tests/functional/io.rs diff --git a/rt/tests/functional.rs b/rt/tests/functional.rs index 4c5d0b0d0..8af72eb4d 100644 --- a/rt/tests/functional.rs +++ b/rt/tests/functional.rs @@ -18,9 +18,9 @@ mod functional { mod actor_context; mod actor_group; mod actor_ref; - mod bytes; mod from_message; mod future; + mod io; mod pipe; mod restart_supervisor; mod runtime; diff --git a/rt/tests/functional/bytes.rs b/rt/tests/functional/bytes.rs deleted file mode 100644 index 6bdc82571..000000000 --- a/rt/tests/functional/bytes.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! Tests for the [`Bytes`] trait. - -use std::cmp::min; -use std::ptr; - -use heph_rt::bytes::{Bytes, BytesVectored}; - -const DATA: &[u8] = b"Hello world!"; -const DATA2: &[u8] = b"Hello mars."; - -fn write_bytes(src: &[u8], mut buf: B) -> usize -where - B: Bytes, -{ - let spare_capacity = buf.spare_capacity(); - let dst = buf.as_bytes(); - assert_eq!(dst.len(), spare_capacity); - let len = min(src.len(), dst.len()); - // Safety: both the `src` and `dst` pointers are good. And we've ensured - // that the length is correct, not overwriting data we don't own or reading - // data we don't own. - unsafe { - ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), len); - buf.update_length(len); - } - len -} - -fn write_bytes_vectored(src: &[u8], mut bufs: B) -> usize -where - B: BytesVectored, -{ - let mut written = 0; - let mut left = src; - for buf in bufs.as_bufs().as_mut().iter_mut() { - let len = min(left.len(), buf.len()); - // Safety: both the `left` and `dst` pointers are good. And we've - // ensured that the length is correct, not overwriting data we don't own - // or reading data we don't own. - unsafe { - ptr::copy_nonoverlapping(left.as_ptr(), buf.as_mut_ptr().cast(), len); - } - - written += len; - left = &left[len..]; - if left.is_empty() { - break; - } - } - unsafe { bufs.update_lengths(written) } - written -} - -#[test] -fn impl_for_vec() { - let mut buf = Vec::::with_capacity(2 * DATA.len()); - assert_eq!(buf.spare_capacity(), 2 * DATA.len()); - assert!(buf.has_spare_capacity()); - let n = write_bytes(DATA, &mut buf); - assert_eq!(n, DATA.len()); - assert_eq!(buf.len(), DATA.len()); - assert_eq!(&*buf, DATA); - assert_eq!(buf.spare_capacity(), DATA.len()); - assert!(buf.has_spare_capacity()); -} - -#[test] -fn dont_overwrite_existing_bytes_in_vec() { - let mut buf = Vec::::with_capacity(2 * DATA.len()); - assert_eq!(buf.spare_capacity(), 2 * DATA.len()); - assert!(buf.has_spare_capacity()); - buf.extend(DATA2); - assert_eq!(buf.spare_capacity(), 2 * DATA.len() - DATA2.len()); - assert!(buf.has_spare_capacity()); - let start = buf.len(); - let n = write_bytes(DATA, &mut buf); - assert_eq!(n, DATA.len()); - assert_eq!(buf.len(), DATA2.len() + DATA.len()); - assert_eq!(&buf[..start], DATA2); // Original bytes untouched. 
- assert_eq!(&buf[start..start + n], DATA); - assert_eq!(buf.spare_capacity(), 1); - assert!(buf.has_spare_capacity()); - buf.push(b'a'); - assert_eq!(buf.spare_capacity(), 0); - assert!(!buf.has_spare_capacity()); -} - -#[test] -fn limited_bytes() { - const LIMIT: usize = 5; - let mut buf = Vec::::with_capacity(2 * DATA.len()).limit(LIMIT); - assert_eq!(buf.spare_capacity(), 5); - assert!(buf.has_spare_capacity()); - - let n = write_bytes(DATA, &mut buf); - assert_eq!(n, LIMIT); - assert_eq!(buf.spare_capacity(), 0); - assert!(!buf.has_spare_capacity()); - let buf = buf.into_inner(); - assert_eq!(&*buf, &DATA[..LIMIT]); - assert_eq!(buf.len(), LIMIT); -} - -#[test] -fn vectored_array() { - let mut bufs = [Vec::with_capacity(1), Vec::with_capacity(DATA.len())]; - assert_eq!(bufs.spare_capacity(), 1 + DATA.len()); - assert!(bufs.has_spare_capacity()); - let n = write_bytes_vectored(DATA, &mut bufs); - assert_eq!(n, DATA.len()); - assert_eq!(bufs[0].len(), 1); - assert_eq!(bufs[1].len(), DATA.len() - 1); - assert_eq!(bufs[0], &DATA[..1]); - assert_eq!(bufs[1], &DATA[1..]); - assert_eq!(bufs.spare_capacity(), 1); - assert!(bufs.has_spare_capacity()); - bufs[1].push(b'a'); - assert_eq!(bufs.spare_capacity(), 0); - assert!(!bufs.has_spare_capacity()); -} - -#[test] -fn vectored_tuple() { - let mut bufs = ( - Vec::with_capacity(1), - Vec::with_capacity(3), - Vec::with_capacity(DATA.len()), - ); - assert_eq!(bufs.spare_capacity(), 1 + 3 + DATA.len()); - assert!(bufs.has_spare_capacity()); - let n = write_bytes_vectored(DATA, &mut bufs); - assert_eq!(n, DATA.len()); - assert_eq!(bufs.0.len(), 1); - assert_eq!(bufs.1.len(), 3); - assert_eq!(bufs.2.len(), DATA.len() - 4); - assert_eq!(bufs.0, &DATA[..1]); - assert_eq!(bufs.1, &DATA[1..4]); - assert_eq!(bufs.2, &DATA[4..]); - assert_eq!(bufs.spare_capacity(), 4); - assert!(bufs.has_spare_capacity()); - bufs.2.extend_from_slice(b"aaaa"); - assert_eq!(bufs.spare_capacity(), 0); - assert!(!bufs.has_spare_capacity()); -} - -#[test] -fn limited_bytes_vectored() { - const LIMIT: usize = 5; - - let mut bufs = [ - Vec::with_capacity(1), - Vec::with_capacity(DATA.len()), - Vec::with_capacity(10), - ] - .limit(LIMIT); - assert_eq!(bufs.spare_capacity(), LIMIT); - assert!(bufs.has_spare_capacity()); - - let n = write_bytes_vectored(DATA, &mut bufs); - assert_eq!(n, LIMIT); - assert_eq!(bufs.spare_capacity(), 0); - assert!(!bufs.has_spare_capacity()); - let bufs = bufs.into_inner(); - assert_eq!(bufs[0].len(), 1); - assert_eq!(bufs[1].len(), LIMIT - 1); - assert_eq!(bufs[2].len(), 0); - assert_eq!(bufs[0], &DATA[..1]); - assert_eq!(bufs[1], &DATA[1..LIMIT]); - assert_eq!(bufs[2], &[]); -} diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs new file mode 100644 index 000000000..6637e42a7 --- /dev/null +++ b/rt/tests/functional/io.rs @@ -0,0 +1,170 @@ +//! Tests for the io module. + +use std::cmp::min; +use std::ptr; +use std::sync::Arc; + +use heph_rt::io::{Buf, BufMut, BufMutSlice}; + +const DATA: &[u8] = b"Hello world!"; +const DATA2: &[u8] = b"Hello mars."; + +fn write_bytes(src: &[u8], buf: &mut B) -> usize { + let spare_capacity = buf.spare_capacity(); + let (dst, len) = unsafe { buf.parts_mut() }; + assert_eq!(len, spare_capacity); + let len = min(src.len(), len); + // Safety: both the `src` and `dst` pointers are good. And we've ensured + // that the length is correct, not overwriting data we don't own or reading + // data we don't own. 
+ unsafe { + ptr::copy_nonoverlapping(src.as_ptr(), dst, len); + buf.update_length(len); + } + len +} + +fn write_bytes_vectored, const N: usize>(src: &[u8], bufs: &mut B) -> usize { + let mut written = 0; + let mut left = src; + for iovec in unsafe { bufs.as_iovecs_mut() } { + let len = min(left.len(), iovec.iov_len); + // Safety: both the `left` and `dst` pointers are good. And we've + // ensured that the length is correct, not overwriting data we don't own + // or reading data we don't own. + unsafe { + ptr::copy_nonoverlapping(left.as_ptr(), iovec.iov_base.cast(), len); + } + + written += len; + left = &left[len..]; + if left.is_empty() { + break; + } + } + unsafe { bufs.update_length(written) } + written +} + +#[test] +fn buf_mut_for_vec() { + test_buf_mut(Vec::with_capacity(TEST_BUF_MUT_CAPACITY)); +} + +#[test] +fn buf_mut_for_limited() { + let buf = Vec::with_capacity(TEST_BUF_MUT_CAPACITY + 10); + test_buf_mut(BufMut::limit(buf, TEST_BUF_MUT_CAPACITY)); +} + +const TEST_BUF_MUT_CAPACITY: usize = DATA.len() + DATA2.len(); + +fn test_buf_mut(mut buf: B) { + let capacity = TEST_BUF_MUT_CAPACITY; + let (_, len) = unsafe { buf.parts_mut() }; + assert_eq!(len, capacity); + assert_eq!(buf.spare_capacity(), capacity); + assert!(buf.has_spare_capacity()); + + let written = write_bytes(DATA, &mut buf); + let capacity_left = capacity - written; + let (_, len) = unsafe { buf.parts_mut() }; + assert_eq!(len, capacity_left); + assert_eq!(buf.spare_capacity(), capacity_left); + assert!(buf.has_spare_capacity()); + + let written = write_bytes(DATA2, &mut buf); + assert_eq!(written, capacity_left); + let (_, len) = unsafe { buf.parts_mut() }; + assert_eq!(len, 0); + assert_eq!(buf.spare_capacity(), 0); + assert!(!buf.has_spare_capacity()); +} + +#[test] +fn buf_for_vec() { + test_buf(Vec::from(DATA)) +} + +#[test] +fn buf_for_string() { + test_buf(String::from(std::str::from_utf8(DATA).unwrap())) +} + +#[test] +fn buf_for_boxed_slice() { + test_buf(Box::<[u8]>::from(DATA)) +} + +#[test] +fn buf_for_arc_slice() { + test_buf(Arc::<[u8]>::from(DATA)) +} + +#[test] +fn buf_for_static_slice() { + test_buf(DATA) +} + +#[test] +fn buf_for_static_str() { + test_buf(DATA) +} + +fn test_buf(buf: B) { + let (ptr, len) = unsafe { buf.parts() }; + let got = unsafe { std::slice::from_raw_parts(ptr, len) }; + assert_eq!(got, DATA); +} + +#[test] +fn buf_for_limited() { + test_buf(DATA.limit(DATA.len())); // Same length. + test_buf(DATA.limit(DATA.len() + 1)); // Larger. + let mut buf = Vec::new(); + buf.push(DATA); + buf.push(DATA2); + test_buf(DATA.limit(DATA.len())); // Smaller. 
+} + +#[test] +fn buf_mut_slice_for_array() { + let mut bufs = [Vec::with_capacity(1), Vec::with_capacity(DATA.len())]; + assert_eq!(bufs.total_spare_capacity(), 1 + DATA.len()); + assert!(bufs.has_spare_capacity()); + let n = write_bytes_vectored(DATA, &mut bufs); + assert_eq!(n, DATA.len()); + assert_eq!(bufs[0].len(), 1); + assert_eq!(bufs[1].len(), DATA.len() - 1); + assert_eq!(bufs[0], &DATA[..1]); + assert_eq!(bufs[1], &DATA[1..]); + assert_eq!(bufs.total_spare_capacity(), 1); + assert!(bufs.has_spare_capacity()); + bufs[1].push(b'a'); + assert_eq!(bufs.total_spare_capacity(), 0); + assert!(!bufs.has_spare_capacity()); +} + +#[test] +fn buf_mut_slice_for_tuple() { + let mut bufs = ( + Vec::with_capacity(1), + Vec::with_capacity(3), + Vec::with_capacity(DATA.len()), + ); + assert_eq!(bufs.total_spare_capacity(), 1 + 3 + DATA.len()); + assert!(bufs.has_spare_capacity()); + let n = write_bytes_vectored(DATA, &mut bufs); + assert_eq!(n, DATA.len()); + assert_eq!(bufs.0.len(), 1); + assert_eq!(bufs.1.len(), 3); + assert_eq!(bufs.2.len(), DATA.len() - 4); + assert_eq!(bufs.0, &DATA[..1]); + assert_eq!(bufs.1, &DATA[1..4]); + assert_eq!(bufs.2, &DATA[4..]); + assert_eq!(bufs.total_spare_capacity(), 4); + assert!(bufs.has_spare_capacity()); + bufs.2.extend_from_slice(b"aaaa"); + assert_eq!(bufs.total_spare_capacity(), 0); + assert!(!bufs.has_spare_capacity()); +} From 6155fd9e09f8d81fce40354ec1b1f7c57541cbd4 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 14 Apr 2023 20:24:49 +0200 Subject: [PATCH 065/177] Remove unused methods from PrivateAccess trait These are no longer used. --- rt/src/access.rs | 68 ++------------------------------------------ rt/src/lib.rs | 25 ---------------- rt/src/shared/mod.rs | 39 ++----------------------- 3 files changed, 4 insertions(+), 128 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index b35de4fbd..1e9f4be31 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -29,16 +29,14 @@ //! [`TcpStream::connect`]: crate::net::TcpStream::connect use std::future::Future; -use std::mem::replace; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use std::time::Instant; -use std::{fmt, io, task}; +use std::{fmt, task}; use heph::actor::{self, NewActor, SyncContext}; use heph::actor_ref::ActorRef; use heph::supervisor::Supervisor; -use mio::{event, Interest}; use crate::process::ProcessId; use crate::spawn::{ActorOptions, FutureOptions, Spawn}; @@ -61,10 +59,8 @@ use crate::{shared, RuntimeRef}; pub trait Access: PrivateAccess {} mod private { + use std::task; use std::time::Instant; - use std::{io, task}; - - use mio::{event, Interest}; use crate::process::ProcessId; use crate::timers::TimerToken; @@ -74,25 +70,9 @@ mod private { /// /// [`rt::Access`]: crate::Access pub trait PrivateAccess { - /// Returns the process id. - fn pid(&self) -> ProcessId; - - /// Changes the process id to `new_pid`, returning the old process id. - fn change_pid(&mut self, new_pid: ProcessId) -> ProcessId; - /// Get access to the `SubmissionQueue`. fn submission_queue(&self) -> a10::SubmissionQueue; - /// Registers the `source`. - fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized; - - /// Reregisters the `source`. - fn reregister(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized; - /// Add a new timer expiring at `deadline` waking `waker`. 
fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken; @@ -163,32 +143,10 @@ impl DerefMut for ThreadLocal { impl Access for ThreadLocal {} impl PrivateAccess for ThreadLocal { - fn pid(&self) -> ProcessId { - self.pid - } - - fn change_pid(&mut self, new_pid: ProcessId) -> ProcessId { - replace(&mut self.pid, new_pid) - } - fn submission_queue(&self) -> a10::SubmissionQueue { self.rt.internals.ring.borrow().submission_queue().clone() } - fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.rt.register(source, self.pid.into(), interest) - } - - fn reregister(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.rt.reregister(source, self.pid.into(), interest) - } - fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken { self.rt.add_timer(deadline, waker) } @@ -306,32 +264,10 @@ impl ThreadSafe { impl Access for ThreadSafe {} impl PrivateAccess for ThreadSafe { - fn pid(&self) -> ProcessId { - self.pid - } - - fn change_pid(&mut self, new_pid: ProcessId) -> ProcessId { - replace(&mut self.pid, new_pid) - } - fn submission_queue(&self) -> a10::SubmissionQueue { self.rt.submission_queue().clone() } - fn register(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.rt.register(source, self.pid.into(), interest) - } - - fn reregister(&mut self, source: &mut S, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.rt.reregister(source, self.pid.into(), interest) - } - fn add_timer(&mut self, deadline: Instant, waker: task::Waker) -> TimerToken { self.rt.add_timer(deadline, waker) } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index eacdd20c7..f8d5dc39f 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -211,7 +211,6 @@ use heph::actor::{self, NewActor, SyncActor}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; use heph_inbox as inbox; -use mio::{event, Interest, Token}; pub mod access; mod channel; @@ -614,30 +613,6 @@ impl RuntimeRef { .add_unique(actor_ref); } - /// Register an `event::Source`, see [`mio::Registry::register`]. - fn register(&mut self, source: &mut S, token: Token, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.internals - .poll - .borrow() - .registry() - .register(source, token, interest) - } - - /// Reregister an `event::Source`, see [`mio::Registry::reregister`]. - fn reregister(&mut self, source: &mut S, token: Token, interest: Interest) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.internals - .poll - .borrow() - .registry() - .reregister(source, token, interest) - } - /// Get a clone of the sending end of the notification channel. 
/// /// # Notes diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 09ae228db..b2747d5d1 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -15,7 +15,7 @@ use heph::supervisor::Supervisor; use heph_inbox as inbox; use log::{as_debug, debug, error, trace}; use mio::unix::SourceFd; -use mio::{event, Events, Interest, Poll, Registry, Token}; +use mio::{Events, Interest, Poll, Registry, Token}; use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; @@ -42,7 +42,6 @@ use waker::WakerId; pub(crate) struct RuntimeSetup { poll: Poll, ring: a10::Ring, - registry: Registry, } impl RuntimeSetup { @@ -63,7 +62,6 @@ impl RuntimeSetup { poll: Mutex::new(self.poll), ring: Mutex::new(self.ring), sq, - registry: self.registry, scheduler: Scheduler::new(), timers: Timers::new(), trace_log, @@ -88,8 +86,6 @@ pub(crate) struct RuntimeInternals { ring: Mutex, /// SubmissionQueue for the `ring`. sq: a10::SubmissionQueue, - /// Registry for the `Coordinator`'s `Poll` instance. - registry: Registry, /// Scheduler for thread-safe actors. scheduler: Scheduler, /// Timers for thread-safe actors. @@ -118,12 +114,7 @@ impl RuntimeInternals { let poll = Poll::new()?; // TODO: configure ring. let ring = a10::Ring::new(512)?; - let registry = poll.registry().try_clone()?; - Ok(RuntimeSetup { - poll, - ring, - registry, - }) + Ok(RuntimeSetup { poll, ring }) } /// Returns metrics about the shared scheduler and timers. @@ -179,32 +170,6 @@ impl RuntimeInternals { &self.sq } - /// Register an `event::Source`, see [`mio::Registry::register`]. - pub(crate) fn register( - &self, - source: &mut S, - token: Token, - interest: Interest, - ) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.registry.register(source, token, interest) - } - - /// Reregister an `event::Source`, see [`mio::Registry::reregister`]. - pub(crate) fn reregister( - &self, - source: &mut S, - token: Token, - interest: Interest, - ) -> io::Result<()> - where - S: event::Source + ?Sized, - { - self.registry.reregister(source, token, interest) - } - /// Add a timer. /// /// See [`Timers::add`]. From 50d4beffd5bfdf779381ad8e053cdaf183ec5502 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 15 Apr 2023 13:42:05 +0200 Subject: [PATCH 066/177] Couple of small cleanups in trace module --- rt/src/trace.rs | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/rt/src/trace.rs b/rt/src/trace.rs index 7588c1262..e0acb9b2f 100644 --- a/rt/src/trace.rs +++ b/rt/src/trace.rs @@ -66,7 +66,7 @@ //! [Trace Format]: https://github.com/Thomasdezeeuw/heph/blob/main/doc/Trace%20Format.md //! [Chrome's Trace Event Format]: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview //! [Catapult]: https://chromium.googlesource.com/catapult/+/refs/heads/master/tracing/README.md -//! [Example 8 "Runtime Tracing"]: https://github.com/Thomasdezeeuw/heph/blob/main/examples/README.md#8-runtime-tracing +//! [Example 8 "Runtime Tracing"]: https://github.com/Thomasdezeeuw/heph/tree/main/rt/examples#8-runtime-tracing use std::cell::RefCell; use std::fs::{File, OpenOptions}; @@ -229,7 +229,7 @@ impl CoordinatorLog { /// Returns the next stream counter. fn next_stream_count(&mut self) -> u32 { - // Safety: needs to sync with itself. + // SAFETY: needs to sync with itself. 
self.shared.counter.fetch_add(1, atomic::Ordering::AcqRel) } } @@ -291,13 +291,13 @@ fn write_epoch_metadata(buf: &mut Vec, time: SystemTime) { #[allow(clippy::unreadable_literal)] const MAGIC: u32 = 0x75D11D4D; const PACKET_SIZE: u32 = 23; - // Safety: `OPTION` is small enough to fit it's length in `u16`. + // SAFETY: `OPTION` is small enough to fit it's length in `u16`. #[allow(clippy::cast_possible_truncation)] const OPTION_LENGTH: u16 = OPTION.len() as u16; const OPTION: &[u8] = b"epoch"; // Number of nanoseconds since Unix epoch as u64. - // Safety: this overflows in the year 2500+, so this will be good for a + // SAFETY: this overflows in the year 2500+, so this will be good for a // while. #[allow(clippy::cast_possible_truncation)] let nanos_since_unix = time @@ -348,11 +348,7 @@ pub(crate) fn start(log: &Option) -> Option where L: TraceLog, { - if log.is_some() { - Some(EventTiming::start()) - } else { - None - } + log.is_some().then(EventTiming::start) } /// Trait to call [`finish`] on both [`CoordinatorLog`] and [`Log`]. @@ -416,7 +412,7 @@ impl<'a> TraceLog for &'a SharedLog { BUF.with(|buf| { let mut buf = buf.borrow_mut(); - // Safety: needs to sync with itself. + // SAFETY: needs to sync with itself. let stream_count = self.counter.fetch_add(1, atomic::Ordering::AcqRel); format_event( &mut buf, @@ -447,7 +443,7 @@ fn format_event( let start_nanos: u64 = nanos_since_epoch(epoch, event.start); let end_nanos: u64 = nanos_since_epoch(epoch, event.end); let description: &[u8] = event.description.as_bytes(); - // Safety: length has a debug_assert in `finish`. + // SAFETY: length has a debug_assert in `finish`. #[allow(clippy::cast_possible_truncation)] let description_len: u16 = description.len() as u16; @@ -480,7 +476,7 @@ fn format_event( #[track_caller] #[allow(clippy::cast_possible_truncation)] fn nanos_since_epoch(epoch: Instant, time: Instant) -> u64 { - // Safety: this overflows after 500+ years as per the function doc. + // SAFETY: this overflows after 500+ years as per the function doc. time.duration_since(epoch).as_nanos() as u64 } @@ -540,10 +536,9 @@ impl EventTiming { description: &'e str, attributes: &'e [(&'e str, &'e dyn AttributeValue)], ) -> Event<'e> { - let end = Instant::now(); Event { start: self.start, - end, + end: Instant::now(), description, attributes, } @@ -551,7 +546,6 @@ impl EventTiming { } /// A trace event. -// NOTE: `pub(crate)` because of `TraceLog`. pub(crate) struct Event<'e> { start: Instant, end: Instant, From 3cd7c2538a66001f46d4f796b0daa94581149ceb Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 15 Apr 2023 13:45:36 +0200 Subject: [PATCH 067/177] Don't log panics using std-logger Useful in production, not so much during testing. 
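If a particular test does still want panics routed through the logger, a small
sketch of an alternative (not part of this change; the helper name
`log_panics` is made up, and it assumes the `log` crate that is already a
dependency) is to install a panic hook from the test itself:

    use std::panic;

    fn log_panics() {
        let default_hook = panic::take_hook();
        panic::set_hook(Box::new(move |info| {
            // Log the panic, then fall through to the default stderr output.
            log::error!("panic: {info}");
            default_hook(info);
        }));
    }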
--- rt/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rt/Cargo.toml b/rt/Cargo.toml index f5a30d3f4..88078afa6 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -30,7 +30,7 @@ socket2 = { version = "0.5.2", default-features = false, features = [" [dev-dependencies] getrandom = { version = "0.2.2", default-features = false, features = ["std"] } -std-logger = { version = "0.5.0", default-features = false, features = ["log-panic", "nightly"] } +std-logger = { version = "0.5.0", default-features = false, features = ["nightly"] } [[test]] name = "functional" From 15be1221506510faf581877f069c83e692bccf4c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 15 Apr 2023 13:51:08 +0200 Subject: [PATCH 068/177] Move coordinator into it's own directory --- rt/src/{coordinator.rs => coordinator/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename rt/src/{coordinator.rs => coordinator/mod.rs} (100%) diff --git a/rt/src/coordinator.rs b/rt/src/coordinator/mod.rs similarity index 100% rename from rt/src/coordinator.rs rename to rt/src/coordinator/mod.rs From 4d07ceb6e0013ace8769da4d8a277d07a596d344 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 15 Apr 2023 17:32:36 +0200 Subject: [PATCH 069/177] Allow the coordinator to run Futures These will be special futures that handle asynchronous processes for the coordinator, e.g. handling process signals. --- rt/src/coordinator/bitmap.rs | 201 +++++++++++++++++++++++++++++++++++ rt/src/coordinator/mod.rs | 57 ++++++++-- rt/src/coordinator/waker.rs | 111 +++++++++++++++++++ rt/src/lib.rs | 1 + 4 files changed, 362 insertions(+), 8 deletions(-) create mode 100644 rt/src/coordinator/bitmap.rs create mode 100644 rt/src/coordinator/waker.rs diff --git a/rt/src/coordinator/bitmap.rs b/rt/src/coordinator/bitmap.rs new file mode 100644 index 000000000..ce86d7a55 --- /dev/null +++ b/rt/src/coordinator/bitmap.rs @@ -0,0 +1,201 @@ +//! Atomic bit map. +//! +//! See [`AtomicBitMap`]. + +use std::fmt; +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +/// Variable sized atomic bitmap. +#[repr(transparent)] +pub(crate) struct AtomicBitMap { + data: [AtomicUsize], +} + +impl AtomicBitMap { + /// Create a new `AtomicBitMap`. + /// + /// # Notes + /// + /// `entries` is a minimum number of slots, use `capacity` to determine the + /// actual number of bits available. + pub(crate) fn new(entries: usize) -> Arc { + let mut size = entries / usize::BITS as usize; + if (entries % usize::BITS as usize) != 0 { + size += 1; + } + let arc: Arc<[MaybeUninit]> = Arc::new_zeroed_slice(size); + // SAFETY: This cast does two things: + // * `[MaybeUninit]` -> `[AtomicUsize]`: this is safe + // because all zeroes is valid for `AtomicUsize`. + // * `Arc<[AtomicUsize]>` -> `Arc`: this is safe because + // of the use of `repr(transparent)` on `AtomicBitMap` ensuring it + // has the same layout as `[AtomicUsize]`. + unsafe { Arc::from_raw(Arc::into_raw(arc) as _) } + } + + /// Returns the number of indices the bitmap can manage. + pub(crate) const fn capacity(&self) -> usize { + self.data.len() * usize::BITS as usize + } + + /// Returns the index of the set slot, or `None`. + pub(crate) fn next_set(&self) -> Option { + for (idx, data) in self.data.iter().enumerate() { + let mut value = data.load(Ordering::Relaxed); + let mut i = value.trailing_zeros(); + while i < usize::BITS { + // Attempt to unset the bit, claiming the slot. 
+ value = data.fetch_and(!(1 << i), Ordering::SeqCst); + // Another thread could have attempted to unset the same bit + // we're setting, so we need to make sure we actually set the + // bit (i.e. check if was set in the previous state). + if is_set(value, i as usize) { + return Some((idx * usize::BITS as usize) + i as usize); + } + i += (value >> i).trailing_zeros(); + } + } + None + } + + /// Set the bit at `index`. + pub(crate) fn set(&self, index: usize) { + let idx = index / usize::BITS as usize; + let n = index % usize::BITS as usize; + let old_value = self.data[idx].fetch_or(1 << n, Ordering::SeqCst); + debug_assert!(!is_set(old_value, n)); + } +} + +/// Returns true if bit `n` is set in `value`. `n` is zero indexed, i.e. must be +/// in the range 0..usize::BITS (64). +const fn is_set(value: usize, n: usize) -> bool { + ((value >> n) & 1) == 1 +} + +impl fmt::Debug for AtomicBitMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const WIDTH: usize = usize::BITS as usize; + for data in self.data.iter() { + let value = data.load(Ordering::Relaxed); + write!(f, "{value:0WIDTH$b}")?; + } + Ok(()) + } +} + +#[test] +fn setting_and_unsetting_one() { + setting_and_unsetting(64) +} + +#[test] +fn setting_and_unsetting_two() { + setting_and_unsetting(128) +} + +#[test] +fn setting_and_unsetting_three() { + setting_and_unsetting(192) +} + +#[test] +fn setting_and_unsetting_four() { + setting_and_unsetting(256) +} + +#[test] +fn setting_and_unsetting_eight() { + setting_and_unsetting(512) +} + +#[test] +fn setting_and_unsetting_sixteen() { + setting_and_unsetting(1024) +} + +#[cfg(test)] +fn setting_and_unsetting(entries: usize) { + let map = AtomicBitMap::new(entries); + assert_eq!(map.capacity(), entries); + + // Set all indices. + for n in 0..entries { + map.set(n); + } + + // All bits should be set. + for data in &map.data { + assert!(data.load(Ordering::Relaxed) == usize::MAX); + } + + // Unset all indices again. + for n in 0..entries { + assert_eq!(map.next_set(), Some(n)); + } + // Bitmap should be zeroed. + for data in &map.data { + assert!(data.load(Ordering::Relaxed) == 0); + } + + // Test unsetting an index not in order. + map.set(63); + map.set(0); + assert!(matches!(map.next_set(), Some(i) if i == 0)); + assert!(matches!(map.next_set(), Some(i) if i == 63)); + + // Next avaiable index should be 0 again. + assert_eq!(map.next_set(), None); +} + +#[test] +fn setting_and_unsetting_concurrent() { + use std::sync::{Arc, Barrier}; + use std::thread; + + const N: usize = 4; + const M: usize = 1024; + + let bitmap = Arc::new(AtomicBitMap::new(N * M)); + + for n in 0..N * M { + bitmap.set(n); + } + + let barrier = Arc::new(Barrier::new(N + 1)); + let handles = (0..N) + .map(|i| { + let bitmap = bitmap.clone(); + let barrier = barrier.clone(); + thread::spawn(move || { + let mut indices = Vec::with_capacity(M); + _ = barrier.wait(); + + if i % 2 == 0 { + for _ in 0..M { + let idx = bitmap.next_set().expect("failed to get index"); + indices.push(idx); + } + + for idx in indices { + bitmap.set(idx); + } + } else { + for _ in 0..M { + let idx = bitmap.next_set().expect("failed to get index"); + bitmap.set(idx); + } + } + }) + }) + .collect::>(); + + _ = barrier.wait(); + handles + .into_iter() + .map(|handle| handle.join()) + .collect::>() + .unwrap(); +} diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 911359e4b..83251a9f7 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -16,11 +16,13 @@ //! 
[sync worker threads]: crate::sync_worker use std::env::consts::ARCH; +use std::future::{pending, Future}; use std::os::fd::{AsFd, AsRawFd}; use std::os::unix::process::parent_id; +use std::pin::Pin; use std::sync::Arc; use std::time::{Duration, Instant}; -use std::{fmt, io, process}; +use std::{fmt, io, process, task}; use heph::actor_ref::{ActorGroup, Delivery}; use log::{as_debug, as_display, debug, error, info, trace}; @@ -30,20 +32,22 @@ use mio::{Events, Interest, Poll, Registry, Token}; use mio_signals::{SignalSet, Signals}; use crate::setup::{host_id, host_info, Uuid}; -use crate::shared::waker; use crate::thread_waker::ThreadWaker; -use crate::trace; use crate::{ - self as rt, cpu_usage, shared, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, + self as rt, cpu_usage, shared, trace, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, }; +mod bitmap; +mod waker; + +use bitmap::AtomicBitMap; + /// Token used to receive process signals. const SIGNAL: Token = Token(usize::MAX); const RING: Token = Token(usize::MAX - 1); /// Coordinator responsible for coordinating the Heph runtime. -#[derive(Debug)] pub(super) struct Coordinator { /// io_uring completion ring. ring: a10::Ring, @@ -52,6 +56,10 @@ pub(super) struct Coordinator { poll: Poll, /// Process signal notifications. signals: Signals, + /// Collection of coordinator [`Future`]s. + futures: Box<[Pin>>]>, + /// Bitmap indicating which `futures` are ready to run. + futures_ready: Arc, /// Internals shared between the coordinator and all workers. internals: Arc, @@ -88,7 +96,7 @@ impl Coordinator { let setup = shared::RuntimeInternals::setup()?; let internals = Arc::new_cyclic(|shared_internals| { - let waker_id = waker::init(shared_internals.clone()); + let waker_id = shared::waker::init(shared_internals.clone()); setup.complete(waker_id, worker_wakers, trace_log) }); @@ -98,6 +106,8 @@ impl Coordinator { ring, poll, signals, + futures: Box::new([]), + futures_ready: AtomicBitMap::new(0), internals, start: Instant::now(), app_name, @@ -134,8 +144,9 @@ impl Coordinator { trace::finish_rt(trace_log.as_mut(), timing, "Polling for OS events", &[]); let timing = trace::start(&trace_log); + // Poll for events. for event in events.iter() { - trace!("got OS event: {event:?}"); + trace!(event = as_debug!(event); "got OS event"); match event.token() { SIGNAL => { @@ -185,7 +196,20 @@ impl Coordinator { &[], ); } - _ => debug!("unexpected OS event: {event:?}"), + _ => debug!(event = as_debug!(event); "unexpected OS event"), + } + } + + // Run all coordinator futures that are ready. + while let Some(idx) = self.futures_ready.next_set() { + let waker = waker::new(self.futures_ready.clone(), idx); + let mut ctx = task::Context::from_waker(&waker); + match self.futures[idx].as_mut().poll(&mut ctx) { + task::Poll::Ready(()) => { + // Ensure we don't poll the future again. + self.futures[idx] = Box::pin(pending()); + } + task::Poll::Pending => { /* Nothing to do. 
*/ } } } trace::finish_rt(trace_log.as_mut(), timing, "Handling OS events", &[]); @@ -295,6 +319,23 @@ impl Coordinator { } } +impl fmt::Debug for Coordinator { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Coordinator") + .field("ring", &self.ring) + .field("poll", &self.poll) + .field("signals", &self.signals) + .field("internals", &self.internals) + .field("futures_ready", &self.futures_ready) + .field("start", &self.start) + .field("app_name", &self.app_name) + .field("host_os", &self.host_os) + .field("host_name", &self.host_name) + .field("host_id", &self.host_id) + .finish() + } +} + /// Set of signals we're listening for. const SIGNAL_SET: SignalSet = SignalSet::all(); diff --git a/rt/src/coordinator/waker.rs b/rt/src/coordinator/waker.rs new file mode 100644 index 000000000..2963c6f70 --- /dev/null +++ b/rt/src/coordinator/waker.rs @@ -0,0 +1,111 @@ +//! Waker implementation for the coordinator. +//! +//! # Implementation +//! +//! The implementation is fairly simple. All it does is set a bit in an +//! [`AtomicBitMap`] contained in an [`Arc`]. + +use std::sync::Arc; +use std::{ptr, task}; + +use crate::coordinator::bitmap::AtomicBitMap; + +/// Maximum number of wakers this module supports. +pub(crate) const MAX_WAKERS: usize = 1 << PTR_BITS_UNUSED; +/// Number of bits we expect a 64 bit pointer to not used, leaving them for us +/// to fill with our index (into `AtomicBitMap`). +const PTR_BITS_UNUSED: usize = 16; +/// Amount of bits to shift to not overwrite the pointer address. +const PTR_DATA_SHIFT: usize = usize::BITS as usize - PTR_BITS_UNUSED; +/// Mask to get the data from a pointer. +const DATA_MASK: usize = ((1 << PTR_BITS_UNUSED) - 1) << PTR_DATA_SHIFT; +/// Mask to get the pointer to the `AtomicBitMap`. +const PTR_MASK: usize = (1 << PTR_DATA_SHIFT) - 1; + +pub(super) fn new(bitmap: Arc, id: usize) -> task::Waker { + let data = into_data_ptr(bitmap, id); + let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); + unsafe { task::Waker::from_raw(raw_waker) } +} + +/// # Panics +/// +/// This will panic if the capacity of `bitmap` is smaller than `id`. `id` must +/// be smallar then [`MAX_WAKERS`]. +fn into_data_ptr(bitmap: Arc, id: usize) -> *const () { + // Check the input is valid. + assert!(bitmap.capacity() >= id); + assert!(id <= MAX_WAKERS); + + // This is a "fat" pointer, a pointer to `AtomicBitMap` and a length. + let bitmap_ptr = Arc::into_raw(bitmap); + // This will point to the start of the `AtomicBitMap` as is "thin". + let bitmap_start = bitmap_ptr as *const (); + // Ensure we have bit to put our `id`. + assert!(bitmap_start as usize & PTR_BITS_UNUSED == 0); + // Squash the pointer and our `id` together. + ((bitmap_start as usize) & (id << PTR_DATA_SHIFT)) as *const () +} + +static WAKER_VTABLE: task::RawWakerVTable = + task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); + +unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { + let (bitmap_ptr, _) = data_as_raw_ptr(data); + Arc::increment_strong_count(bitmap_ptr); + // After we incremented the strong count we can reuse the same data. 
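+ // (The waker's index lives in the high tag bits of `data` itself, so after
+ // bumping the `Arc` count the exact same pointer value can be handed to the
+ // new `RawWaker`.)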
+ task::RawWaker::new(data, &WAKER_VTABLE) +} + +unsafe fn wake(data: *const ()) { + let (bitmap, id) = from_data_ptr(data); + bitmap.set(id); +} + +unsafe fn wake_by_ref(data: *const ()) { + let (bitmap_ptr, id) = data_as_raw_ptr(data); + let bitmap = &*bitmap_ptr; + bitmap.set(id); +} + +unsafe fn drop_wake_data(data: *const ()) { + drop(from_data_ptr(data)); +} + +/// # Safety +/// +/// `data` MUST be created by [`into_data_ptr`]. +unsafe fn from_data_ptr(data: *const ()) -> (Arc, usize) { + let (bitmap_ptr, id) = data_as_raw_ptr(data); + (Arc::from_raw(bitmap_ptr), id) +} + +/// Returns a raw pointer to the `AtomicBitMap` inside of an `Arc`. +/// +/// # Safety +/// +/// `data` MUST be created by [`into_data_ptr`]. +unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { + // SAFETY: the caller must ensure that `data` is created using + // `into_data_ptr`. That guarantees us two things, 1) `id` is valid and 2) + // that the pointer is valid and the bitmap has enough capacity for the + // `id`. + // The above guarantees ensure that calling `min_bitmap_size` results in a + // bitmap that has at least enough capacity that we can set the `id`-th bit. + // The returned pointer might be a shorter than the true length of + // `AtomicBitMap`, but we can work with that. + let id = data as usize & DATA_MASK; + let bitmap_start = (data as usize & PTR_MASK) as *const (); + let bitmap_size = min_bitmap_size(id); + let bitmap_ptr = ptr::from_raw_parts(bitmap_start, bitmap_size); + (bitmap_ptr, id) +} + +/// Returns the minimum bitmap size such that `id` can be set. +fn min_bitmap_size(id: usize) -> usize { + let mut bitmap_size = id / usize::BITS as usize; + if (id % usize::BITS as usize) != 0 { + bitmap_size += 1; + } + bitmap_size +} diff --git a/rt/src/lib.rs b/rt/src/lib.rs index f8d5dc39f..9a2e16421 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -154,6 +154,7 @@ maybe_uninit_uninit_array, never_type, new_uninit, + ptr_metadata, stmt_expr_attributes, type_alias_impl_trait, waker_getters From 6317c1af0cd745a2b285199592e1e3761b2d0ca7 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 16 Apr 2023 13:58:07 +0200 Subject: [PATCH 070/177] Reuse io_uring kernel resources By attaching all a10::Rings together. --- rt/src/coordinator/mod.rs | 4 ++-- rt/src/setup.rs | 11 ++++++++--- rt/src/shared/mod.rs | 13 +++++++++++-- rt/src/shared/waker.rs | 2 +- rt/src/test.rs | 2 +- rt/src/worker.rs | 15 ++++++++++----- 6 files changed, 33 insertions(+), 14 deletions(-) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 83251a9f7..ce66248f4 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -84,17 +84,17 @@ impl Coordinator { /// This must be called before creating the worker threads to properly catch /// process signals. pub(super) fn init( + ring: a10::Ring, app_name: Box, worker_wakers: Box<[&'static ThreadWaker]>, trace_log: Option>, ) -> io::Result { - let ring = a10::Ring::config(512).build()?; let poll = Poll::new()?; // NOTE: on Linux this MUST be created before starting the worker // threads. 
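// Rationale: threads inherit the signal mask of the thread that spawns them,
// so blocking the signals here, before any worker or sync worker thread
// exists, ensures they are delivered to the coordinator's signal handling (a
// signalfd on Linux) rather than to an arbitrary thread.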
let signals = setup_signals(poll.registry())?; - let setup = shared::RuntimeInternals::setup()?; + let setup = shared::RuntimeInternals::setup(ring.submission_queue())?; let internals = Arc::new_cyclic(|shared_internals| { let waker_id = shared::waker::init(shared_internals.clone()); setup.complete(waker_id, worker_wakers, trace_log) diff --git a/rt/src/setup.rs b/rt/src/setup.rs index f8c07dd64..53e061fc2 100644 --- a/rt/src/setup.rs +++ b/rt/src/setup.rs @@ -151,6 +151,8 @@ impl Setup { let name = name.unwrap_or_else(default_app_name).into_boxed_str(); debug!(name = name, workers = threads; "building Heph runtime"); + let coordinator_ring = a10::Ring::new(512).map_err(Error::init_coordinator)?; + // Setup the worker threads. let timing = trace::start(&trace_log); let mut worker_setups = Vec::with_capacity(threads); @@ -158,7 +160,9 @@ impl Setup { for id in 1..=threads { // Coordinator has id 0. let id = NonZeroUsize::new(id).unwrap(); - let (worker_setup, thread_waker) = worker::setup(id).map_err(Error::start_worker)?; + let (worker_setup, thread_waker) = + worker::setup(id, coordinator_ring.submission_queue()) + .map_err(Error::start_worker)?; worker_setups.push(worker_setup); thread_wakers.push(thread_waker); } @@ -166,8 +170,9 @@ impl Setup { // Create the coordinator to oversee all workers. let thread_wakers = thread_wakers.into_boxed_slice(); let shared_trace_log = trace_log.as_ref().map(trace::CoordinatorLog::clone_shared); - let coordinator = Coordinator::init(name, thread_wakers, shared_trace_log) - .map_err(Error::init_coordinator)?; + let coordinator = + Coordinator::init(coordinator_ring, name, thread_wakers, shared_trace_log) + .map_err(Error::init_coordinator)?; // Spawn the worker threads. let workers = worker_setups diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index b2747d5d1..6be3e90e0 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -110,9 +110,18 @@ pub(crate) struct Metrics { impl RuntimeInternals { /// Setup new runtime internals. - pub(crate) fn setup() -> io::Result { + pub(crate) fn setup(coordinator_sq: &a10::SubmissionQueue) -> io::Result { + let poll = Poll::new()?; + let ring = a10::Ring::config(512) + .attach_queue(coordinator_sq) + .build()?; + Ok(RuntimeSetup { poll, ring }) + } + + /// Same as [`setup`], but doesn't attach to an existing [`a10::Ring`]. + #[cfg(any(test, feature = "test"))] + pub(crate) fn test_setup() -> io::Result { let poll = Poll::new()?; - // TODO: configure ring. 
let ring = a10::Ring::new(512)?; Ok(RuntimeSetup { poll, ring }) } diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs index fd59cd147..e76eb6bd5 100644 --- a/rt/src/shared/waker.rs +++ b/rt/src/shared/waker.rs @@ -318,7 +318,7 @@ mod tests { } fn new_internals() -> Arc { - let setup = RuntimeInternals::setup().unwrap(); + let setup = RuntimeInternals::test_setup().unwrap(); Arc::new_cyclic(|shared_internals| { let waker_id = waker::init(shared_internals.clone()); let worker_wakers = vec![test::noop_waker()].into_boxed_slice(); diff --git a/rt/src/test.rs b/rt/src/test.rs index 99606c47d..b0aed739c 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -92,7 +92,7 @@ fn shared_internals() -> Arc { static SHARED_INTERNALS: OnceLock> = OnceLock::new(); SHARED_INTERNALS .get_or_init(|| { - let setup = shared::RuntimeInternals::setup() + let setup = shared::RuntimeInternals::test_setup() .expect("failed to setup runtime internals for test module"); Arc::new_cyclic(|shared_internals| { let waker_id = waker::init(shared_internals.clone()); diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 88fde601b..270f9c3bd 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -60,10 +60,14 @@ const SHARED_RING: Token = Token(usize::MAX - 4); /// Setup a new worker thread. /// /// Use [`WorkerSetup::start`] to spawn the worker thread. -pub(super) fn setup(id: NonZeroUsize) -> io::Result<(WorkerSetup, &'static ThreadWaker)> { +pub(super) fn setup( + id: NonZeroUsize, + coordinator_sq: &a10::SubmissionQueue, +) -> io::Result<(WorkerSetup, &'static ThreadWaker)> { let poll = Poll::new()?; - // TODO: configure ring. - let ring = a10::Ring::new(512)?; + let ring = a10::Ring::config(512) + .attach_queue(coordinator_sq) + .build()?; // Setup the waking mechanism. let (waker_sender, waker_events) = crossbeam_channel::unbounded(); @@ -283,8 +287,9 @@ impl Worker { mut receiver: rt::channel::Receiver, ) -> io::Result { let poll = Poll::new()?; - // TODO: configure ring. - let ring = a10::Ring::new(512)?; + let ring = a10::Ring::config(512) + .attach_queue(shared_internals.submission_queue()) + .build()?; // TODO: this channel will grow unbounded as the waker implementation // sends pids into it. From 32093002c4cf0bd5f9e55a93a6002413dfbd9369 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 16 Apr 2023 14:08:48 +0200 Subject: [PATCH 071/177] Cleanup some error related documentation --- rt/src/error.rs | 60 ++++++++++++++++++++++-------------------------- rt/src/worker.rs | 17 +++++++------- 2 files changed, 36 insertions(+), 41 deletions(-) diff --git a/rt/src/error.rs b/rt/src/error.rs index 052a0ab29..5825da205 100644 --- a/rt/src/error.rs +++ b/rt/src/error.rs @@ -38,8 +38,6 @@ enum ErrorInner { } impl Error { - const DESC: &'static str = "error running Heph runtime"; - /// Create an error to act as user-defined setup error. 
/// /// The `err` will be converted into a [`String`] (using [`ToString`], which @@ -85,9 +83,8 @@ impl Error { } pub(super) fn worker_panic(err: Box) -> Error { - let msg = convert_panic(err); Error { - inner: ErrorInner::WorkerPanic(msg), + inner: ErrorInner::WorkerPanic(convert_panic(err)), } } @@ -98,9 +95,8 @@ impl Error { } pub(super) fn sync_actor_panic(err: Box) -> Error { - let msg = convert_panic(err); Error { - inner: ErrorInner::SyncActorPanic(msg), + inner: ErrorInner::SyncActorPanic(convert_panic(err)), } } } @@ -132,31 +128,30 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use ErrorInner::*; - let desc = Self::DESC; + const DESC: &'static str = "error running Heph runtime"; match self.inner { - Setup(ref err) => { - write!(f, "{desc}: error in user-defined setup: {err}") + ErrorInner::Setup(ref err) => { + write!(f, "{DESC}: error in user-defined setup: {err}") } - SetupTrace(ref err) => { - write!(f, "{desc}: error setting up trace infrastructure: {err}") + ErrorInner::SetupTrace(ref err) => { + write!(f, "{DESC}: error setting up trace infrastructure: {err}") } - InitCoordinator(ref err) => { - write!(f, "{desc}: error creating coordinator: {err}") + ErrorInner::InitCoordinator(ref err) => { + write!(f, "{DESC}: error creating coordinator: {err}") } - Coordinator(ref err) => { - write!(f, "{desc}: error in coordinator thread: {err}") + ErrorInner::Coordinator(ref err) => { + write!(f, "{DESC}: error in coordinator thread: {err}") } - StartWorker(ref err) => { - write!(f, "{desc}: error starting worker thread: {err}") + ErrorInner::StartWorker(ref err) => { + write!(f, "{DESC}: error starting worker thread: {err}") } - Worker(ref err) => write!(f, "{desc}: error in worker thread: {err}"), - WorkerPanic(ref err) => write!(f, "{desc}: panic in worker thread: {err}"), - StartSyncActor(ref err) => { - write!(f, "{desc}: error starting synchronous actor: {err}") + ErrorInner::Worker(ref err) => write!(f, "{DESC}: error in worker thread: {err}"), + ErrorInner::WorkerPanic(ref err) => write!(f, "{DESC}: panic in worker thread: {err}"), + ErrorInner::StartSyncActor(ref err) => { + write!(f, "{DESC}: error starting synchronous actor: {err}") } - SyncActorPanic(ref err) => { - write!(f, "{desc}: panic in synchronous actor thread: {err}") + ErrorInner::SyncActorPanic(ref err) => { + write!(f, "{DESC}: panic in synchronous actor thread: {err}") } } } @@ -164,17 +159,18 @@ impl fmt::Display for Error { impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - use ErrorInner::*; match self.inner { // All `io::Error`. - SetupTrace(ref err) - | InitCoordinator(ref err) - | StartWorker(ref err) - | StartSyncActor(ref err) => Some(err), - Coordinator(ref err) => Some(err), - Worker(ref err) => Some(err), + ErrorInner::SetupTrace(ref err) + | ErrorInner::InitCoordinator(ref err) + | ErrorInner::StartWorker(ref err) + | ErrorInner::StartSyncActor(ref err) => Some(err), + ErrorInner::Coordinator(ref err) => Some(err), + ErrorInner::Worker(ref err) => Some(err), // All `StringError`. 
- Setup(ref err) | WorkerPanic(ref err) | SyncActorPanic(ref err) => Some(err), + ErrorInner::Setup(ref err) + | ErrorInner::WorkerPanic(ref err) + | ErrorInner::SyncActorPanic(ref err) => Some(err), } } } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 270f9c3bd..75088a36c 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -854,12 +854,12 @@ impl Worker { } } -/// Error running a [`Runtime`]. +/// Error running a [`Worker`]. #[derive(Debug)] pub(crate) enum Error { - /// Error in [`Runtime::new`]. + /// Error in [`Worker::setup`]. Init(io::Error), - /// Error polling [`Poll`]. + /// Error polling for OS events. Polling(io::Error), /// Error receiving message from communication channel. RecvMsg(io::Error), @@ -872,16 +872,15 @@ pub(crate) enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Error::*; match self { - Init(err) => write!(f, "error initialising local runtime: {err}"), - Polling(err) => write!(f, "error polling OS: {err}"), - RecvMsg(err) => write!(f, "error receiving message(s): {err}"), - ProcessInterrupted => write!( + Error::Init(err) => write!(f, "error initialising local runtime: {err}"), + Error::Polling(err) => write!(f, "error polling OS: {err}"), + Error::RecvMsg(err) => write!(f, "error receiving message(s): {err}"), + Error::ProcessInterrupted => write!( f, "received process signal, but no receivers for it: stopping runtime" ), - UserFunction(err) => write!(f, "error running user function: {err}"), + Error::UserFunction(err) => write!(f, "error running user function: {err}"), } } } From 3792af2e90d1a6a439cd2d01c9a465fa742668b2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 14:11:29 +0200 Subject: [PATCH 072/177] Don't use wildcard imports --- rt/src/coordinator/mod.rs | 28 ++++++++++++++-------------- rt/src/net/tcp/server.rs | 5 ++--- rt/src/worker.rs | 15 ++++++--------- 3 files changed, 22 insertions(+), 26 deletions(-) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index ce66248f4..04cbab45e 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -480,30 +480,30 @@ pub(super) enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Error::*; match self { - Startup(err) => write!(f, "error starting coordinator: {err}"), - RegisteringWorkers(err) => write!(f, "error registering worker threads: {err}"), - RegisteringSyncActors(err) => { + Error::Startup(err) => write!(f, "error starting coordinator: {err}"), + Error::RegisteringWorkers(err) => write!(f, "error registering worker threads: {err}"), + Error::RegisteringSyncActors(err) => { write!(f, "error registering synchronous actor threads: {err}") } - Polling(err) => write!(f, "error polling for OS events: {err}"), - SendingStartSignal(err) => write!(f, "error sending start signal to worker: {err}"), - SendingFunc(err) => write!(f, "error sending function to worker: {err}"), + Error::Polling(err) => write!(f, "error polling for OS events: {err}"), + Error::SendingStartSignal(err) => { + write!(f, "error sending start signal to worker: {err}") + } + Error::SendingFunc(err) => write!(f, "error sending function to worker: {err}"), } } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - use Error::*; match self { - Startup(ref err) - | RegisteringWorkers(ref err) - | RegisteringSyncActors(ref err) - | Polling(ref err) - | SendingStartSignal(ref err) - | SendingFunc(ref err) => Some(err), + 
Error::Startup(ref err) + | Error::RegisteringWorkers(ref err) + | Error::RegisteringSyncActors(ref err) + | Error::Polling(ref err) + | Error::SendingStartSignal(ref err) + | Error::SendingFunc(ref err) => Some(err), } } } diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 947492149..2f2ec5ab9 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -525,10 +525,9 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Error::*; match self { - Accept(err) => write!(f, "error accepting TCP stream: {err}"), - NewActor(err) => write!(f, "error creating new actor: {err}"), + Error::Accept(err) => write!(f, "error accepting TCP stream: {err}"), + Error::NewActor(err) => write!(f, "error creating new actor: {err}"), } } } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 75088a36c..2ccc3b678 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -887,11 +887,10 @@ impl fmt::Display for Error { impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - use Error::*; match self { - Init(ref err) | Polling(ref err) | RecvMsg(ref err) => Some(err), - ProcessInterrupted => None, - UserFunction(ref err) => Some(err), + Error::Init(ref err) | Error::Polling(ref err) | Error::RecvMsg(ref err) => Some(err), + Error::ProcessInterrupted => None, + Error::UserFunction(ref err) => Some(err), } } } @@ -909,12 +908,10 @@ pub(crate) enum Control { impl fmt::Debug for Control { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Control::*; - f.write_str("Control::")?; match self { - Started => f.write_str("Started"), - Signal(signal) => f.debug_tuple("Signal").field(&signal).finish(), - Run(..) => f.write_str("Run(..)"), + Control::Started => f.write_str("Control::Started"), + Control::Signal(signal) => f.debug_tuple("Control::Signal").field(&signal).finish(), + Control::Run(..) => f.write_str("Control::Run(..)"), } } } From 716955587c9c8a1a50d550481b5c381c46550615 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 14:59:35 +0200 Subject: [PATCH 073/177] Use ActorFuture for thread-safe actors --- rt/src/shared/mod.rs | 15 ++++++--------- rt/src/shared/scheduler/mod.rs | 22 +++++----------------- rt/src/shared/scheduler/tests.rs | 22 ++++++++-------------- rt/src/test.rs | 18 ++++++++++++++++++ 4 files changed, 37 insertions(+), 40 deletions(-) diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 6be3e90e0..2252bc9c5 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -9,10 +9,9 @@ use std::sync::{Arc, Mutex, TryLockError}; use std::time::{Duration, Instant}; use std::{io, task}; -use heph::actor::{self, NewActor}; +use heph::actor::{ActorFuture, NewActor}; use heph::actor_ref::ActorRef; use heph::supervisor::Supervisor; -use heph_inbox as inbox; use log::{as_debug, debug, error, trace}; use mio::unix::SourceFd; use mio::{Events, Interest, Poll, Registry, Token}; @@ -228,7 +227,7 @@ impl RuntimeInternals { pub(crate) fn try_spawn( self: &Arc, supervisor: S, - mut new_actor: NA, + new_actor: NA, arg: NA::Argument, options: ActorOptions, ) -> Result, NA::Error> @@ -244,14 +243,12 @@ impl RuntimeInternals { let name = NA::name(); debug!(pid = pid.0, name = name; "spawning thread-safe actor"); - // Create our actor context and our actor with it. 
- let (manager, sender, receiver) = inbox::Manager::new_small_channel(); - let actor_ref = ActorRef::local(sender); - let ctx = actor::Context::new(receiver, ThreadSafe::new(pid, self.clone())); - let actor = new_actor.new(ctx, arg)?; + // Create the `ActorFuture`. + let rt = ThreadSafe::new(pid, self.clone()); + let (future, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; // Add the actor to the scheduler. - actor_entry.add(options.priority(), supervisor, new_actor, actor, manager); + actor_entry.add(future, options.priority()); Ok(actor_ref) } diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index 5d22ab494..8994ff280 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -8,12 +8,9 @@ use std::future::Future; use std::mem::MaybeUninit; use std::pin::Pin; -use heph::actor::NewActor; -use heph::supervisor::Supervisor; -use heph_inbox::Manager; use log::{debug, trace}; -use crate::process::{self, ActorProcess, FutureProcess, Process, ProcessId}; +use crate::process::{self, FutureProcess, Process, ProcessId}; use crate::spawn::options::Priority; use crate::{ptr_as_usize, ThreadSafe}; @@ -218,18 +215,9 @@ impl<'s> AddActor<'s> { } /// Add a new thread-safe actor to the scheduler. - pub(super) fn add( - self, - priority: Priority, - supervisor: S, - new_actor: NA, - actor: NA::Actor, - inbox: Manager, - ) where - S: Supervisor + Send + Sync + 'static, - NA: NewActor + Send + Sync + 'static, - NA::Actor: Send + Sync + 'static, - NA::Message: Send, + pub(super) fn add(self, future: Fut, priority: Priority) + where + Fut: Future + Send + Sync + 'static, { debug_assert!( inactive::ok_ptr(self.alloc.as_ptr().cast()), @@ -238,7 +226,7 @@ impl<'s> AddActor<'s> { let process = ProcessData::new( priority, - Box::pin(ActorProcess::new(supervisor, new_actor, actor, inbox)), + Box::pin(FutureProcess::::new(future)), ); let AddActor { scheduler, diff --git a/rt/src/shared/scheduler/tests.rs b/rt/src/shared/scheduler/tests.rs index 894195872..de09d5afd 100644 --- a/rt/src/shared/scheduler/tests.rs +++ b/rt/src/shared/scheduler/tests.rs @@ -9,7 +9,7 @@ use heph::supervisor::NoSupervisor; use crate::process::{ProcessId, ProcessResult}; use crate::shared::scheduler::{Priority, ProcessData, Scheduler}; -use crate::test::{self, init_actor_with_inbox, AssertUnmoved}; +use crate::test::{self, init_actor_future, AssertUnmoved}; use crate::ThreadSafe; fn assert_size(expected: usize) { @@ -42,8 +42,8 @@ fn adding_actor() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); // Newly added processes are ready by default. 
assert!(scheduler.has_process()); @@ -118,8 +118,9 @@ fn scheduler_run_order() { for (id, priority) in priorities.iter().enumerate() { let actor_entry = scheduler.add_actor(); pids.push(actor_entry.pid()); - let (actor, inbox, _) = init_actor_with_inbox(new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox); + let (future, _) = + init_actor_future(NoSupervisor, new_actor, (id, run_order.clone())).unwrap(); + actor_entry.add(future, *priority); } assert!(scheduler.has_process()); @@ -161,17 +162,10 @@ fn assert_actor_process_unmoved() { let scheduler = Scheduler::new(); let mut runtime_ref = test::runtime(); - let (actor, inbox, _) = init_actor_with_inbox(TestAssertUnmovedNewActor, ()).unwrap(); - let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - TestAssertUnmovedNewActor, - actor, - inbox, - ); + let (future, _) = init_actor_future(NoSupervisor, TestAssertUnmovedNewActor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); // Run the process multiple times, ensure it's not moved in the // process. diff --git a/rt/src/test.rs b/rt/src/test.rs index b0aed739c..ce9018dc2 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -56,6 +56,8 @@ use std::task::{self, Poll}; use std::time::{Duration, Instant}; use std::{io, slice, thread}; +#[cfg(test)] +use heph::actor::ActorFuture; use heph::actor::{self, Actor, NewActor, SyncActor, SyncWaker}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; @@ -385,6 +387,22 @@ where init_actor_with_inbox(new_actor, arg).map(|(actor, _, actor_ref)| (actor, actor_ref)) } +/// Initialise a thread-safe `ActorFuture`. +#[allow(clippy::type_complexity)] +#[cfg(test)] +pub(crate) fn init_actor_future( + supervisor: S, + new_actor: NA, + argument: NA::Argument, +) -> Result<(ActorFuture, ActorRef), NA::Error> +where + S: Supervisor, + NA: NewActor, +{ + let rt = ThreadSafe::new(TEST_PID, shared_internals()); + ActorFuture::new(supervisor, new_actor, argument, rt) +} + /// Initialise a thread-local actor with access to it's inbox. #[allow(clippy::type_complexity)] pub(crate) fn init_local_actor_with_inbox( From eb1e9115e2d76629be8fc50db2dd99871129b0ef Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 15:14:23 +0200 Subject: [PATCH 074/177] Use ActorFuture for thead-local actors Removes the ActorProcess type as it's no longer used. --- rt/src/lib.rs | 26 +--- rt/src/local/scheduler/mod.rs | 20 +-- rt/src/local/scheduler/tests.rs | 55 ++++---- rt/src/process/actor.rs | 228 -------------------------------- rt/src/process/mod.rs | 2 - rt/src/process/tests.rs | 145 +------------------- rt/src/test.rs | 16 ++- 7 files changed, 53 insertions(+), 439 deletions(-) delete mode 100644 rt/src/process/actor.rs diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 9a2e16421..7ce013a04 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -208,10 +208,9 @@ use std::task; use std::time::{Duration, Instant}; use ::log::{as_debug, debug, warn}; -use heph::actor::{self, NewActor, SyncActor}; +use heph::actor::{ActorFuture, NewActor, SyncActor}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; -use heph_inbox as inbox; pub mod access; mod channel; @@ -629,16 +628,6 @@ impl RuntimeRef { self.internals.shared.new_task_waker(pid) } - /// Mark the process, with `pid`, as ready to run. 
- fn mark_ready_local(&mut self, pid: ProcessId) { - self.internals.scheduler.borrow_mut().mark_ready(pid); - } - - /// Mark the shared process, with `pid`, as ready to run. - fn mark_ready_shared(&mut self, pid: ProcessId) { - self.internals.shared.mark_ready(pid); - } - /// Add a timer. /// /// See [`Timers::add`]. @@ -694,7 +683,7 @@ where fn try_spawn( &mut self, supervisor: S, - mut new_actor: NA, + new_actor: NA, arg: NA::Argument, options: ActorOptions, ) -> Result, NA::Error> @@ -709,15 +698,12 @@ where let name = NA::name(); debug!(pid = pid.0, name = name; "spawning thread-local actor"); - // Create our actor context and our actor with it. - let (manager, sender, receiver) = inbox::Manager::new_small_channel(); - let actor_ref = ActorRef::local(sender); - let ctx = actor::Context::new(receiver, ThreadLocal::new(pid, self.clone())); - // Create our actor argument, running any setup required by the caller. - let actor = new_actor.new(ctx, arg)?; + // Create the `ActorFuture`. + let rt = ThreadLocal::new(pid, self.clone()); + let (future, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; // Add the actor to the scheduler. - actor_entry.add(options.priority(), supervisor, new_actor, actor, manager); + actor_entry.add(future, options.priority()); Ok(actor_ref) } diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index 2a472cba8..a3e66a347 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -9,12 +9,9 @@ use std::future::Future; use std::mem::MaybeUninit; use std::pin::Pin; -use heph::actor::NewActor; -use heph::supervisor::Supervisor; -use heph_inbox::Manager; use log::{debug, trace}; -use crate::process::{self, ActorProcess, FutureProcess, ProcessId}; +use crate::process::{self, FutureProcess, ProcessId}; use crate::spawn::options::Priority; use crate::{ptr_as_usize, ThreadLocal}; @@ -127,16 +124,9 @@ impl<'s> AddActor<'s> { } /// Add a new inactive actor to the scheduler. 
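+ /// After this change the scheduler is only handed a ready-made `Future`;
+ /// the caller builds it first, roughly as the `try_spawn` change above does
+ /// (sketch):
+ ///
+ ///     let (future, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?;
+ ///     actor_entry.add(future, options.priority());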
- pub(crate) fn add( - self, - priority: Priority, - supervisor: S, - new_actor: NA, - actor: NA::Actor, - inbox: Manager, - ) where - S: Supervisor + 'static, - NA: NewActor + 'static, + pub(crate) fn add(self, future: Fut, priority: Priority) + where + Fut: Future + 'static, { debug_assert!( inactive::ok_ptr(self.alloc.as_ptr().cast::<()>()), @@ -144,7 +134,7 @@ impl<'s> AddActor<'s> { ); let process = ProcessData::new( priority, - Box::pin(ActorProcess::new(supervisor, new_actor, actor, inbox)), + Box::pin(FutureProcess::::new(future)), ); let AddActor { scheduler, diff --git a/rt/src/local/scheduler/tests.rs b/rt/src/local/scheduler/tests.rs index 0227f4886..d4e7c3c21 100644 --- a/rt/src/local/scheduler/tests.rs +++ b/rt/src/local/scheduler/tests.rs @@ -12,7 +12,7 @@ use heph::supervisor::NoSupervisor; use crate::local::scheduler::{ProcessData, Scheduler}; use crate::process::{Process, ProcessId, ProcessResult}; use crate::spawn::options::Priority; -use crate::test::{self, init_local_actor_with_inbox, AssertUnmoved}; +use crate::test::{self, init_local_actor_future, AssertUnmoved}; use crate::{RuntimeRef, ThreadLocal}; fn assert_size(expected: usize) { @@ -60,8 +60,8 @@ fn add_actor() { let actor_entry = scheduler.add_actor(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); } @@ -76,8 +76,8 @@ fn mark_ready() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -97,8 +97,8 @@ fn mark_ready_before_run() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -115,8 +115,8 @@ fn next_process() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); if let Some(process) = scheduler.next_process() { assert_eq!(process.as_ref().id(), pid); @@ -135,18 +135,18 @@ fn next_process_order() { // Actor 1. 
let actor_entry = scheduler.add_actor(); let pid1 = actor_entry.pid(); - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::LOW, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::LOW); // Actor 2. let actor_entry = scheduler.add_actor(); let pid2 = actor_entry.pid(); - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::HIGH, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::HIGH); // Actor 3. let actor_entry = scheduler.add_actor(); let pid3 = actor_entry.pid(); - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -176,8 +176,8 @@ fn add_process() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -196,8 +196,8 @@ fn add_process_marked_ready() { let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); let new_actor = simple_actor as fn(_) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - actor_entry.add(Priority::NORMAL, NoSupervisor, new_actor, actor, inbox); + let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); let process = scheduler.next_process().unwrap(); scheduler.add_process(process); @@ -234,9 +234,9 @@ fn scheduler_run_order() { for (id, priority) in priorities.iter().enumerate() { let actor_entry = scheduler.add_actor(); pids.push(actor_entry.pid()); - let (actor, inbox, _) = - init_local_actor_with_inbox(new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(*priority, NoSupervisor, new_actor, actor, inbox); + let (future, _) = + init_local_actor_future(NoSupervisor, new_actor, (id, run_order.clone())).unwrap(); + actor_entry.add(future, *priority); } assert!(scheduler.has_process()); @@ -278,17 +278,10 @@ fn assert_actor_process_unmoved() { let mut scheduler = Scheduler::new(); let mut runtime_ref = test::runtime(); - let (actor, inbox, _) = init_local_actor_with_inbox(TestAssertUnmovedNewActor, ()).unwrap(); - let actor_entry = scheduler.add_actor(); let pid = actor_entry.pid(); - actor_entry.add( - Priority::NORMAL, - NoSupervisor, - TestAssertUnmovedNewActor, - actor, - inbox, - ); + let (future, _) = init_local_actor_future(NoSupervisor, TestAssertUnmovedNewActor, ()).unwrap(); + actor_entry.add(future, Priority::NORMAL); // Run the process multiple times, ensure it's not moved in the process. 
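// (`AssertUnmoved` records the address at which it is first polled and panics
// if a later poll observes the wrapped future at a different address, so
// running the process several times is what actually exercises the check.)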
let mut process = scheduler.next_process().unwrap(); diff --git a/rt/src/process/actor.rs b/rt/src/process/actor.rs deleted file mode 100644 index c0b2a622d..000000000 --- a/rt/src/process/actor.rs +++ /dev/null @@ -1,228 +0,0 @@ -//! Module containing the implementation of the [`Process`] trait for -//! [`Actor`]s. - -use std::any::Any; -use std::panic::{catch_unwind, AssertUnwindSafe}; -use std::pin::Pin; -use std::task::{self, Poll}; - -use heph::actor::{self, Actor, NewActor}; -use heph::supervisor::{Supervisor, SupervisorStrategy}; -use heph_inbox::{Manager, Receiver}; -use log::error; - -use crate::access::PrivateAccess; -use crate::process::{panic_message, Process, ProcessId, ProcessResult}; -use crate::{self as rt, RuntimeRef, ThreadLocal, ThreadSafe}; - -/// A process that represent an [`Actor`]. -pub(crate) struct ActorProcess { - /// The actor's supervisor used to determine what to do when the actor, or - /// the [`NewActor`] implementation, returns an error. - supervisor: S, - /// The [`NewActor`] implementation used to restart the actor. - new_actor: NA, - /// The inbox of the actor, used in creating a new [`actor::Context`] - /// if the actor is restarted. - inbox: Manager, - /// The running actor. - actor: NA::Actor, -} - -impl ActorProcess -where - S: Supervisor, - NA: NewActor, - NA::RuntimeAccess: RuntimeSupport, -{ - /// Create a new `ActorProcess`. - pub(crate) const fn new( - supervisor: S, - new_actor: NA, - actor: NA::Actor, - inbox: Manager, - ) -> ActorProcess { - ActorProcess { - supervisor, - new_actor, - inbox, - actor, - } - } - - /// Returns `ProcessResult::Pending` if the actor was successfully - /// restarted, `ProcessResult::Complete` if the actor wasn't restarted or an - /// error if the actor failed to restart. - fn handle_actor_error( - &mut self, - runtime_ref: &mut RuntimeRef, - pid: ProcessId, - err: ::Error, - ) -> ProcessResult { - match self.supervisor.decide(err) { - SupervisorStrategy::Restart(arg) => { - match self.create_new_actor(runtime_ref, pid, arg) { - Ok(()) => { - // Mark the actor as ready just in case progress can be - // made already, this required because we use edge - // triggers for I/O. - NA::RuntimeAccess::mark_ready(runtime_ref, pid); - ProcessResult::Pending - } - Err(err) => self.handle_restart_error(runtime_ref, pid, err), - } - } - SupervisorStrategy::Stop => ProcessResult::Complete, - _ => unreachable!(), - } - } - - /// Returns `ProcessResult::Pending` if the actor was successfully - /// restarted, `ProcessResult::Complete` if the actor wasn't restarted. - fn handle_actor_panic( - &mut self, - runtime_ref: &mut RuntimeRef, - pid: ProcessId, - panic: Box, - ) -> ProcessResult { - match self.supervisor.decide_on_panic(panic) { - SupervisorStrategy::Restart(arg) => { - match self.create_new_actor(runtime_ref, pid, arg) { - Ok(()) => { - // Mark the actor as ready just in case progress can be - // made already, this required because we use edge - // triggers for I/O. - NA::RuntimeAccess::mark_ready(runtime_ref, pid); - ProcessResult::Pending - } - Err(err) => self.handle_restart_error(runtime_ref, pid, err), - } - } - SupervisorStrategy::Stop => ProcessResult::Complete, - _ => unreachable!(), - } - } - - /// Same as `handle_actor_error` but handles [`NewActor::Error`]s instead. 
- fn handle_restart_error( - &mut self, - runtime_ref: &mut RuntimeRef, - pid: ProcessId, - err: NA::Error, - ) -> ProcessResult { - match self.supervisor.decide_on_restart_error(err) { - SupervisorStrategy::Restart(arg) => { - match self.create_new_actor(runtime_ref, pid, arg) { - Ok(()) => { - // Mark the actor as ready, same reason as for - // `handle_actor_error`. - NA::RuntimeAccess::mark_ready(runtime_ref, pid); - ProcessResult::Pending - } - Err(err) => { - // Let the supervisor know. - self.supervisor.second_restart_error(err); - ProcessResult::Complete - } - } - } - SupervisorStrategy::Stop => ProcessResult::Complete, - _ => unreachable!(), - } - } - - /// Creates a new actor and, if successful, replaces the old actor with it. - fn create_new_actor( - &mut self, - runtime_ref: &mut RuntimeRef, - pid: ProcessId, - arg: NA::Argument, - ) -> Result<(), NA::Error> { - let receiver = self.inbox.new_receiver().expect( - "failed to create new receiver for actor's inbox. Was the `actor::Context` leaked?", - ); - let ctx = NA::RuntimeAccess::new_context(pid, receiver, runtime_ref); - self.new_actor.new(ctx, arg).map(|actor| { - // We pin the actor here to ensure its dropped in place when - // replacing it with out new actor. - unsafe { Pin::new_unchecked(&mut self.actor) }.set(actor); - }) - } -} - -impl Process for ActorProcess -where - S: Supervisor, - NA: NewActor, - NA::RuntimeAccess: rt::Access + RuntimeSupport, -{ - fn name(&self) -> &'static str { - NA::name() - } - - fn run(self: Pin<&mut Self>, runtime_ref: &mut RuntimeRef, pid: ProcessId) -> ProcessResult { - // This is safe because we're not moving the actor. - let this = unsafe { Pin::get_unchecked_mut(self) }; - // The actor need to be called with `Pin`. So we're undoing the previous - // operation, still ensuring that the actor is not moved. - let mut actor = unsafe { Pin::new_unchecked(&mut this.actor) }; - - let waker = NA::RuntimeAccess::new_task_waker(runtime_ref, pid); - let mut task_ctx = task::Context::from_waker(&waker); - match catch_unwind(AssertUnwindSafe(|| actor.as_mut().try_poll(&mut task_ctx))) { - Ok(Poll::Ready(Ok(()))) => ProcessResult::Complete, - Ok(Poll::Ready(Err(err))) => this.handle_actor_error(runtime_ref, pid, err), - Ok(Poll::Pending) => ProcessResult::Pending, - Err(panic) => { - let name = NA::name(); - let msg = panic_message(&*panic); - error!("actor '{name}' panicked at '{msg}'"); - this.handle_actor_panic(runtime_ref, pid, panic) - } - } - } -} - -/// Trait to support different kind of runtime access, e.g. [`ThreadSafe`] and -/// [`ThreadLocal`], within the same implementation of [`ActorProcess`]. -pub(crate) trait RuntimeSupport { - /// Creates a new context. - fn new_context( - pid: ProcessId, - inbox: Receiver, - runtime_ref: &mut RuntimeRef, - ) -> actor::Context - where - Self: Sized; - - /// Schedule the actor with `pid` for running (used after restart). 
- fn mark_ready(runtime_ref: &mut RuntimeRef, pid: ProcessId); -} - -impl RuntimeSupport for ThreadLocal { - fn new_context( - pid: ProcessId, - inbox: Receiver, - runtime_ref: &mut RuntimeRef, - ) -> actor::Context { - actor::Context::new(inbox, ThreadLocal::new(pid, runtime_ref.clone())) - } - - fn mark_ready(runtime_ref: &mut RuntimeRef, pid: ProcessId) { - runtime_ref.mark_ready_local(pid); - } -} - -impl RuntimeSupport for ThreadSafe { - fn new_context( - pid: ProcessId, - inbox: Receiver, - runtime_ref: &mut RuntimeRef, - ) -> actor::Context { - actor::Context::new(inbox, ThreadSafe::new(pid, runtime_ref.clone_shared())) - } - - fn mark_ready(runtime_ref: &mut RuntimeRef, pid: ProcessId) { - runtime_ref.mark_ready_shared(pid); - } -} diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 6c45fcaf6..74dd570e8 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -12,12 +12,10 @@ use mio::Token; use crate::spawn::options::Priority; use crate::RuntimeRef; -mod actor; mod future; #[cfg(test)] mod tests; -pub(crate) use actor::ActorProcess; pub(crate) use future::FutureProcess; /// Process id, or pid for short, is an identifier for a process in an diff --git a/rt/src/process/tests.rs b/rt/src/process/tests.rs index 58e80a7e1..d33f9875b 100644 --- a/rt/src/process/tests.rs +++ b/rt/src/process/tests.rs @@ -1,21 +1,17 @@ //! Tests for the process module. use std::cmp::Ordering; -use std::future::{pending, Pending}; +use std::future::pending; use std::mem::size_of; use std::pin::Pin; -use std::sync::atomic::{self, AtomicBool}; -use std::sync::Arc; use std::thread::sleep; use std::time::Duration; -use heph::actor::{self, Actor, NewActor}; -use heph::supervisor::{NoSupervisor, Supervisor, SupervisorStrategy}; use mio::Token; -use crate::process::{ActorProcess, FutureProcess, Process, ProcessData, ProcessId, ProcessResult}; +use crate::process::{FutureProcess, Process, ProcessData, ProcessId, ProcessResult}; use crate::spawn::options::Priority; -use crate::test::{self, init_local_actor_with_inbox, AssertUnmoved, TEST_PID}; +use crate::test::{self, AssertUnmoved}; use crate::{RuntimeRef, ThreadLocal, ThreadSafe}; #[test] @@ -156,141 +152,6 @@ fn process_data_runtime_increase() { assert!(process.fair_runtime >= SLEEP_TIME); } -async fn ok_actor(mut ctx: actor::Context<(), ThreadLocal>) { - assert_eq!(ctx.receive_next().await, Ok(())); -} - -#[test] -fn actor_process() { - // Create our actor. - let new_actor = ok_actor as fn(_) -> _; - let (actor, inbox, actor_ref) = init_local_actor_with_inbox(new_actor, ()).unwrap(); - - // Create our process. - let process = ActorProcess::new(NoSupervisor, new_actor, actor, inbox); - let mut process = Box::pin(process); - - // Actor should return `Poll::Pending` in the first call, since no message - // is available. - let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, TEST_PID); - assert_eq!(res, ProcessResult::Pending); - - // Send the message and the actor should return Ok. - actor_ref.try_send(()).unwrap(); - let res = process.as_mut().run(&mut runtime_ref, TEST_PID); - assert_eq!(res, ProcessResult::Complete); -} - -async fn error_actor(mut ctx: actor::Context<(), ThreadLocal>, fail: bool) -> Result<(), ()> { - if fail { - Err(()) - } else { - assert_eq!(ctx.receive_next().await, Ok(())); - Ok(()) - } -} - -#[test] -fn erroneous_actor_process() { - // Create our actor. 
- let new_actor = error_actor as fn(_, _) -> _; - let (actor, inbox, _) = init_local_actor_with_inbox(new_actor, true).unwrap(); - - // Create our process. - let process = ActorProcess::new(|_| SupervisorStrategy::Stop, new_actor, actor, inbox); - let mut process = Box::pin(process); - - // Actor should return Err. - let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, TEST_PID); - assert_eq!(res, ProcessResult::Complete); -} - -#[test] -fn restarting_erroneous_actor_process() { - struct TestSupervisor(Arc); - - impl Supervisor for TestSupervisor - where - NA: NewActor, - { - fn decide(&mut self, _: ::Error) -> SupervisorStrategy { - self.0.store(true, atomic::Ordering::SeqCst); - SupervisorStrategy::Restart(false) - } - - fn decide_on_restart_error(&mut self, _: NA::Error) -> SupervisorStrategy { - self.0.store(true, atomic::Ordering::SeqCst); - SupervisorStrategy::Restart(false) - } - - fn second_restart_error(&mut self, _: NA::Error) { - unreachable!("test call to second_restart_error in ActorProcess"); - } - } - - // Create our actor. - let new_actor = error_actor as fn(_, _) -> _; - let (actor, inbox, actor_ref) = init_local_actor_with_inbox(new_actor, true).unwrap(); - - let supervisor_called = Arc::new(AtomicBool::new(false)); - let supervisor = TestSupervisor(Arc::clone(&supervisor_called)); - - // Create our process. - let process = ActorProcess::new(supervisor, new_actor, actor, inbox); - let mut process: Pin> = Box::pin(process); - - // In the first call to run the actor should return an error. Then it should - // be restarted. The restarted actor waits for a message, returning - // `Poll::Pending`. - let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - // Supervisor must be called and the actor restarted. - assert!(supervisor_called.load(atomic::Ordering::SeqCst)); - - // Now we send a message to the restarted actor, which should return `Ok`. - actor_ref.try_send(()).unwrap(); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Complete); -} - -struct TestAssertUnmovedNewActor; - -impl NewActor for TestAssertUnmovedNewActor { - type Message = (); - type Argument = (); - type Actor = AssertUnmoved>>; - type Error = !; - type RuntimeAccess = ThreadLocal; - - fn new( - &mut self, - _: actor::Context, - _: Self::Argument, - ) -> Result { - Ok(AssertUnmoved::new(pending())) - } -} - -#[test] -fn actor_process_assert_actor_unmoved() { - let (actor, inbox, _) = init_local_actor_with_inbox(TestAssertUnmovedNewActor, ()).unwrap(); - let process = ActorProcess::new(NoSupervisor, TestAssertUnmovedNewActor, actor, inbox); - let mut process: Pin> = Box::pin(process); - - // All we do is run it a couple of times, it should panic if the actor is - // moved. 
- let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); -} - #[test] fn future_process_thread_local_assert_future_unmoved() { let process = FutureProcess::<_, ThreadLocal>::new(AssertUnmoved::new(pending())); diff --git a/rt/src/test.rs b/rt/src/test.rs index ce9018dc2..c793b5257 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -387,8 +387,22 @@ where init_actor_with_inbox(new_actor, arg).map(|(actor, _, actor_ref)| (actor, actor_ref)) } +/// Initialise a thread-local `ActorFuture`. +#[cfg(test)] +pub(crate) fn init_local_actor_future( + supervisor: S, + new_actor: NA, + argument: NA::Argument, +) -> Result<(ActorFuture, ActorRef), NA::Error> +where + S: Supervisor, + NA: NewActor, +{ + let rt = ThreadLocal::new(TEST_PID, runtime()); + ActorFuture::new(supervisor, new_actor, argument, rt) +} + /// Initialise a thread-safe `ActorFuture`. -#[allow(clippy::type_complexity)] #[cfg(test)] pub(crate) fn init_actor_future( supervisor: S, From 12e433aad63d3fe200841adef6a3c1e5086dc7a9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 16:01:00 +0200 Subject: [PATCH 075/177] Rewrite process module Instead of having our own Process::run method depend on the Future trait. --- rt/src/access.rs | 14 +----- rt/src/lib.rs | 6 --- rt/src/local/scheduler/mod.rs | 18 +++---- rt/src/process/future.rs | 63 ----------------------- rt/src/process/mod.rs | 91 +++++++++++++--------------------- rt/src/shared/scheduler/mod.rs | 14 ++---- rt/src/worker.rs | 31 ++++++------ 7 files changed, 63 insertions(+), 174 deletions(-) delete mode 100644 rt/src/process/future.rs diff --git a/rt/src/access.rs b/rt/src/access.rs index 1e9f4be31..4e3b10498 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -62,9 +62,8 @@ mod private { use std::task; use std::time::Instant; - use crate::process::ProcessId; use crate::timers::TimerToken; - use crate::{trace, RuntimeRef}; + use crate::trace; /// Actual trait behind [`rt::Access`]. /// @@ -79,9 +78,6 @@ mod private { /// Remove a previously set timer. fn remove_timer(&mut self, deadline: Instant, token: TimerToken); - /// Create a new [`task::Waker`]. - fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker; - /// Returns the CPU the thread is bound to, if any. fn cpu(&self) -> Option; @@ -155,10 +151,6 @@ impl PrivateAccess for ThreadLocal { self.rt.remove_timer(deadline, token); } - fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { - runtime_ref.new_local_task_waker(pid) - } - fn cpu(&self) -> Option { self.rt.cpu() } @@ -276,10 +268,6 @@ impl PrivateAccess for ThreadSafe { self.rt.remove_timer(deadline, token); } - fn new_task_waker(runtime_ref: &mut RuntimeRef, pid: ProcessId) -> task::Waker { - runtime_ref.new_shared_task_waker(pid) - } - fn cpu(&self) -> Option { None } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 7ce013a04..d9da40b98 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -622,12 +622,6 @@ impl RuntimeRef { local::waker::new(self.internals.waker_id, pid) } - /// Same as [`RuntimeRef::new_local_task_waker`] but provides a waker - /// implementation for thread-safe actors. 
- fn new_shared_task_waker(&self, pid: ProcessId) -> task::Waker { - self.internals.shared.new_task_waker(pid) - } - /// Add a timer. /// /// See [`Timers::add`]. diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index a3e66a347..776d21a3a 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -9,11 +9,13 @@ use std::future::Future; use std::mem::MaybeUninit; use std::pin::Pin; +use heph::actor::{self, Actor, ActorFuture, NewActor}; +use heph::supervisor::Supervisor; use log::{debug, trace}; -use crate::process::{self, FutureProcess, ProcessId}; +use crate::process::{self, FutureProcess, Process, ProcessId}; +use crate::ptr_as_usize; use crate::spawn::options::Priority; -use crate::{ptr_as_usize, ThreadLocal}; mod inactive; #[cfg(test)] @@ -74,10 +76,7 @@ impl Scheduler { where Fut: Future + 'static, { - let process = Box::pin(ProcessData::new( - priority, - Box::pin(FutureProcess::::new(future)), - )); + let process = Box::pin(ProcessData::new(priority, Box::pin(FutureProcess(future)))); debug!(pid = process.as_ref().id().0; "spawning thread-local future"); self.ready.push(process); } @@ -126,16 +125,13 @@ impl<'s> AddActor<'s> { /// Add a new inactive actor to the scheduler. pub(crate) fn add(self, future: Fut, priority: Priority) where - Fut: Future + 'static, + Fut: Process + 'static, { debug_assert!( inactive::ok_ptr(self.alloc.as_ptr().cast::<()>()), "SKIP_BITS invalid" ); - let process = ProcessData::new( - priority, - Box::pin(FutureProcess::::new(future)), - ); + let process = ProcessData::new(priority, Box::pin(future)); let AddActor { scheduler, mut alloc, diff --git a/rt/src/process/future.rs b/rt/src/process/future.rs deleted file mode 100644 index 8c8306a4c..000000000 --- a/rt/src/process/future.rs +++ /dev/null @@ -1,63 +0,0 @@ -//! Module containing the implementation of the [`Process`] trait for -//! [`Future`]s. - -use std::future::Future; -use std::marker::PhantomData; -use std::panic::{catch_unwind, AssertUnwindSafe}; -use std::pin::Pin; -use std::task::{self, Poll}; - -use log::error; - -use crate::process::{panic_message, Process, ProcessId, ProcessResult}; -use crate::{self as rt, RuntimeRef}; - -/// A process that represent a [`Future`]. -pub(crate) struct FutureProcess { - future: Fut, - /// We need to know whether we need to create thread-local or thread-safe - /// waker. - _phantom: PhantomData, -} - -impl FutureProcess { - pub(crate) const fn new(future: Fut) -> FutureProcess { - FutureProcess { - future, - _phantom: PhantomData, - } - } -} - -impl Process for FutureProcess -where - Fut: Future, - RT: rt::Access, -{ - fn name(&self) -> &'static str { - name::() - } - - fn run(self: Pin<&mut Self>, runtime_ref: &mut RuntimeRef, pid: ProcessId) -> ProcessResult { - // This is safe because we're not moving the future. 
- let future = unsafe { Pin::map_unchecked_mut(self, |this| &mut this.future) }; - - let waker = RT::new_task_waker(runtime_ref, pid); - let mut task_ctx = task::Context::from_waker(&waker); - - match catch_unwind(AssertUnwindSafe(|| Future::poll(future, &mut task_ctx))) { - Ok(Poll::Ready(())) => ProcessResult::Complete, - Ok(Poll::Pending) => ProcessResult::Pending, - Err(panic) => { - let name = name::(); - let msg = panic_message(&*panic); - error!("future '{name}' panicked at '{msg}'"); - ProcessResult::Complete - } - } - } -} - -fn name() -> &'static str { - heph::actor::name::() -} diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 74dd570e8..ec1772fdf 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -1,35 +1,34 @@ //! Module containing the `Process` trait, related types and implementations. +#![allow(unused_imports)] // FIXME. + use std::any::Any; use std::cmp::Ordering; use std::fmt; +use std::future::Future; use std::pin::Pin; +use std::task::{self, Poll}; use std::time::{Duration, Instant}; +use heph::actor::{self, Actor, ActorFuture, NewActor}; +use heph::supervisor::Supervisor; use log::{as_debug, trace}; use mio::Token; use crate::spawn::options::Priority; use crate::RuntimeRef; -mod future; #[cfg(test)] mod tests; -pub(crate) use future::FutureProcess; - -/// Process id, or pid for short, is an identifier for a process in an -/// [`Runtime`]. +/// Process id, or pid for short, is an identifier for a process in the runtime. /// /// This can only be created by one of the schedulers and should be seen as an /// opaque type for the rest of the crate. For convince this can converted from /// and into an [`Token`] as used by Mio. -/// -/// [`Runtime`]: crate::Runtime -// NOTE: public because it used in the `RuntimeAccess` trait. #[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] #[repr(transparent)] -pub struct ProcessId(pub(crate) usize); +pub(crate) struct ProcessId(pub(crate) usize); impl From for ProcessId { fn from(id: Token) -> ProcessId { @@ -56,54 +55,36 @@ impl fmt::Display for ProcessId { } /// The trait that represents a process. -/// -/// This currently has a single implementation: -/// - `ActorProcess`, which wraps an `Actor` to implement this trait. -pub(crate) trait Process { +pub(crate) trait Process: Future { /// Return the name of this process, used in logging. - fn name(&self) -> &'static str; - - /// Run the process. - /// - /// Once the process returns `ProcessResult::Complete` it will be removed - /// from the scheduler and will no longer run. - /// - /// If it returns `ProcessResult::Pending` it will be considered inactive - /// and the process itself must make sure its gets scheduled again. - fn run(self: Pin<&mut Self>, runtime_ref: &mut RuntimeRef, pid: ProcessId) -> ProcessResult; + fn name(&self) -> &'static str { + // Best we can do. + actor::name::() + } } -/// The result of running a [`Process`]. -/// -/// See [`Process::run`]. -#[must_use] -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub(crate) enum ProcessResult { - /// The process is complete. - /// - /// Similar to [`Poll::Ready`]. - /// - /// [`Poll::Ready`]: std::task::Poll::Ready - Complete, - /// Process completion is pending, but for now no further progress can be - /// made without blocking. The process itself is responsible for scheduling - /// itself again. - /// - /// Similar to [`Poll::Pending`]. - /// - /// [`Poll::Pending`]: std::task::Poll::Pending - Pending, +/// Wrapper around a [`Future`] to implement [`Process`]. 
+pub(crate) struct FutureProcess<Fut>(pub(crate) Fut);
+
+impl<Fut: Future> Future for FutureProcess<Fut> {
+    type Output = Fut::Output;
+
+    fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<Self::Output> {
+        // SAFETY: not moving the `Fut`ure.
+        unsafe { Fut::poll(Pin::map_unchecked_mut(self, |s| &mut s.0), ctx) }
+    }
 }
 
-/// Attempts to extract a message from a panic, defaulting to `<unknown>`.
-/// Note: be sure to derefence the `Box`!
-fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str {
-    match panic.downcast_ref::<&'static str>() {
-        Some(s) => s,
-        None => match panic.downcast_ref::<String>() {
-            Some(s) => s,
-            None => "<unknown>",
-        },
+impl<Fut> Process for FutureProcess<Fut> where Fut: Future<Output = ()> {}
+
+impl<S, NA, RT> Process for ActorFuture<S, NA, RT>
+where
+    S: Supervisor<NA>,
+    NA: NewActor<RuntimeAccess = RT>,
+    RT: Clone,
+{
+    fn name(&self) -> &'static str {
+        NA::name()
     }
 }
 
@@ -158,13 +139,13 @@ impl<P: Process + ?Sized> ProcessData<P> {
     /// Run the process.
     ///
     /// Returns the completion state of the process.
-    pub(crate) fn run(mut self: Pin<&mut Self>, runtime_ref: &mut RuntimeRef) -> ProcessResult {
+    pub(crate) fn run(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<()> {
         let pid = self.as_ref().id();
         let name = self.process.name();
         trace!(pid = pid.0, name = name; "running process");
 
         let start = Instant::now();
-        let result = self.process.as_mut().run(runtime_ref, pid);
+        let result = self.process.as_mut().poll(ctx);
         let elapsed = start.elapsed();
         let fair_elapsed = elapsed * self.priority;
         self.fair_runtime += fair_elapsed;
@@ -181,7 +162,6 @@ impl<P: Process + ?Sized> Eq for ProcessData<P> {}
 
 impl<P: Process + ?Sized> PartialEq for ProcessData<P> {
     fn eq(&self, other: &Self) -> bool {
-        // FIXME: is this correct?
         Pin::new(self).id() == Pin::new(other).id()
     }
 }
@@ -203,7 +183,6 @@ impl<P: Process + ?Sized> PartialOrd for ProcessData<P> {
 
 impl<P: Process + ?Sized> fmt::Debug for ProcessData<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Process") - // FIXME: is this unsafe? .field("id", &Pin::new(self).id()) .field("name", &self.process.name()) .field("priority", &self.priority) diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index 8994ff280..ffca8d42c 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -11,8 +11,8 @@ use std::pin::Pin; use log::{debug, trace}; use crate::process::{self, FutureProcess, Process, ProcessId}; +use crate::ptr_as_usize; use crate::spawn::options::Priority; -use crate::{ptr_as_usize, ThreadSafe}; mod inactive; mod runqueue; @@ -151,10 +151,7 @@ impl Scheduler { where Fut: Future + Send + Sync + 'static, { - let process = Box::pin(ProcessData::new( - priority, - Box::pin(FutureProcess::::new(future)), - )); + let process = Box::pin(ProcessData::new(priority, Box::pin(future))); debug!(pid = process.as_ref().id().0; "spawning thread-safe future"); self.ready.add(process); } @@ -217,17 +214,14 @@ impl<'s> AddActor<'s> { /// Add a new thread-safe actor to the scheduler. pub(super) fn add(self, future: Fut, priority: Priority) where - Fut: Future + Send + Sync + 'static, + Fut: Process + Send + Sync + 'static, { debug_assert!( inactive::ok_ptr(self.alloc.as_ptr().cast()), "SKIP_BITS invalid" ); - let process = ProcessData::new( - priority, - Box::pin(FutureProcess::::new(future)), - ); + let process = ProcessData::new(priority, Box::pin(future)); let AddActor { scheduler, mut alloc, diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 2ccc3b678..fb7f0ac01 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -22,7 +22,7 @@ use std::panic::{catch_unwind, AssertUnwindSafe}; use std::rc::Rc; use std::sync::Arc; use std::time::{Duration, Instant}; -use std::{fmt, io, thread}; +use std::{fmt, io, task, thread}; use crossbeam_channel::{self, Receiver}; use heph::actor_ref::{Delivery, SendError}; @@ -33,7 +33,7 @@ use mio::{Events, Interest, Poll, Registry, Token}; use crate::error::StringError; use crate::local::waker::{self, WakerId}; use crate::local::RuntimeInternals; -use crate::process::{ProcessId, ProcessResult}; +use crate::process::ProcessId; use crate::setup::set_cpu_affinity; use crate::thread_waker::ThreadWaker; use crate::{self as rt, cpu_usage, shared, trace, RuntimeRef, Signal}; @@ -326,22 +326,19 @@ impl Worker { /// Run the worker. pub(crate) fn run(mut self) -> Result<(), Error> { debug!(worker_id = self.internals.id.get(); "starting worker"); - // Runtime reference used in running the processes. - let mut runtime_ref = self.create_ref(); - loop { // We first run the processes and only poll after to ensure that we // return if there are no processes to run. trace!(worker_id = self.internals.id.get(); "running processes"); let mut n = 0; while n < RUN_POLL_RATIO { - if !self.run_local_process(&mut runtime_ref) { + if !self.run_local_process() { break; } n += 1; } while n < RUN_POLL_RATIO { - if !self.run_shared_process(&mut runtime_ref) { + if !self.run_shared_process() { break; } n += 1; @@ -359,19 +356,21 @@ impl Worker { /// Attempts to run a single local process. Returns `true` if it ran a /// process, `false` otherwise. 
- fn run_local_process(&mut self, runtime_ref: &mut RuntimeRef) -> bool { + fn run_local_process(&mut self) -> bool { let process = self.internals.scheduler.borrow_mut().next_process(); match process { Some(mut process) => { let timing = trace::start(&*self.internals.trace_log.borrow()); let pid = process.as_ref().id(); let name = process.as_ref().name(); - match process.as_mut().run(runtime_ref) { - ProcessResult::Complete => { + let waker = waker::new(self.internals.waker_id, pid); + let mut ctx = task::Context::from_waker(&waker); + match process.as_mut().run(&mut ctx) { + task::Poll::Ready(()) => { // Don't want to panic when dropping the process. drop(catch_unwind(AssertUnwindSafe(move || drop(process)))); } - ProcessResult::Pending => { + task::Poll::Pending => { self.internals.scheduler.borrow_mut().add_process(process); } } @@ -389,18 +388,20 @@ impl Worker { /// Attempts to run a single shared process. Returns `true` if it ran a /// process, `false` otherwise. - fn run_shared_process(&mut self, runtime_ref: &mut RuntimeRef) -> bool { + fn run_shared_process(&mut self) -> bool { let process = self.internals.shared.remove_process(); match process { Some(mut process) => { let timing = trace::start(&*self.internals.trace_log.borrow()); let pid = process.as_ref().id(); let name = process.as_ref().name(); - match process.as_mut().run(runtime_ref) { - ProcessResult::Complete => { + let waker = self.internals.shared.new_task_waker(pid); + let mut ctx = task::Context::from_waker(&waker); + match process.as_mut().run(&mut ctx) { + task::Poll::Ready(()) => { self.internals.shared.complete(process); } - ProcessResult::Pending => { + task::Poll::Pending => { self.internals.shared.add_process(process); } } From 80905411454a5bc3180b99cf7fd4c84d051dc0b5 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 16:01:54 +0200 Subject: [PATCH 076/177] Add ActorFuture::name Returns the name of the actor. --- src/actor/future.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/actor/future.rs b/src/actor/future.rs index 0fc9f3f10..968fb8f28 100644 --- a/src/actor/future.rs +++ b/src/actor/future.rs @@ -74,6 +74,13 @@ where Ok((future, actor_ref)) } + /// Returns the name of the actor. + /// + /// Based on the [`NewActor::name`] implementation. + pub fn name() -> &'static str { + NA::name() + } + /// Returns `Poll::Pending` if the actor was successfully restarted, /// `Poll::Ready` if the actor wasn't restarted (or failed to restart). fn handle_actor_error( From be83d01b4a011c0dbe6e71f120af5d2165de726d Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 16:02:07 +0200 Subject: [PATCH 077/177] Allow name to be called on unsized actors --- src/actor/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/actor/mod.rs b/src/actor/mod.rs index bdbcb4aad..301da9c62 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -557,11 +557,10 @@ mod private { /// /// This is the default implementation of [`NewActor::name`]. #[doc(hidden)] // Not part of the stable API. -pub fn name() -> &'static str { +pub fn name() -> &'static str { format_name(type_name::()) } -// NOTE: split for easier testing. 
fn format_name(full_name: &'static str) -> &'static str { const GEN_FUTURE: &str = "GenFuture<"; const GENERIC_START: &str = "<"; From 9961b300189e0ee54c07f233516c9bcb3af062de Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 16:24:11 +0200 Subject: [PATCH 078/177] Rework adding process to shared scheduler Instead of returning a type we use a callback function. We also use it for adding Future so we can add a debug log message. --- rt/src/process/mod.rs | 6 +-- rt/src/shared/mod.rs | 37 +++++++------ rt/src/shared/scheduler/mod.rs | 98 ++++++++++------------------------ rt/src/worker.rs | 2 +- 4 files changed, 48 insertions(+), 95 deletions(-) diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index ec1772fdf..5275212e6 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -1,8 +1,5 @@ //! Module containing the `Process` trait, related types and implementations. -#![allow(unused_imports)] // FIXME. - -use std::any::Any; use std::cmp::Ordering; use std::fmt; use std::future::Future; @@ -10,13 +7,12 @@ use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; -use heph::actor::{self, Actor, ActorFuture, NewActor}; +use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::Supervisor; use log::{as_debug, trace}; use mio::Token; use crate::spawn::options::Priority; -use crate::RuntimeRef; #[cfg(test)] mod tests; diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 2252bc9c5..f7055b5f3 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -16,10 +16,11 @@ use log::{as_debug, debug, error, trace}; use mio::unix::SourceFd; use mio::{Events, Interest, Poll, Registry, Token}; +use crate::process::{FutureProcess, Process, ProcessId}; use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; use crate::timers::TimerToken; -use crate::{trace, ProcessId, ThreadSafe}; +use crate::{trace, ThreadSafe}; mod scheduler; pub(crate) mod waker; @@ -223,6 +224,7 @@ impl RuntimeInternals { } } + /// Spawn a thread-safe actor. #[allow(clippy::needless_pass_by_value)] // For `ActorOptions`. pub(crate) fn try_spawn( self: &Arc, @@ -237,20 +239,12 @@ impl RuntimeInternals { NA::Actor: Send + Sync + 'static, NA::Message: Send, { - // Setup adding a new process to the scheduler. - let actor_entry = self.scheduler.add_actor(); - let pid = actor_entry.pid(); - let name = NA::name(); - debug!(pid = pid.0, name = name; "spawning thread-safe actor"); - - // Create the `ActorFuture`. - let rt = ThreadSafe::new(pid, self.clone()); - let (future, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; - - // Add the actor to the scheduler. - actor_entry.add(future, options.priority()); - - Ok(actor_ref) + self.scheduler.add_new_process(options.priority(), |pid| { + let name = NA::name(); + debug!(pid = pid.0, name = name; "spawning thread-safe actor"); + let rt = ThreadSafe::new(pid, self.clone()); + ActorFuture::new(supervisor, new_actor, arg, rt) + }) } /// Spawn a thread-safe `future`. @@ -259,7 +253,12 @@ impl RuntimeInternals { where Fut: Future + Send + Sync + 'static, { - self.scheduler.add_future(future, options.priority()); + _ = self.scheduler.add_new_process(options.priority(), |pid| { + let process = FutureProcess(future); + let name = process.name(); + debug!(pid = pid.0, name = name; "spawning thread-safe future"); + Ok::<_, !>((process, ())) + }); } /// See [`Scheduler::mark_ready`]. 
@@ -321,9 +320,9 @@ impl RuntimeInternals { self.scheduler.remove() } - /// See [`Scheduler::add_process`]. - pub(crate) fn add_process(&self, process: Pin>) { - self.scheduler.add_process(process); + /// See [`Scheduler::add_back_process`]. + pub(crate) fn add_back_process(&self, process: Pin>) { + self.scheduler.add_back_process(process); } /// See [`Scheduler::complete`]. diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index ffca8d42c..741876e64 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -4,13 +4,12 @@ //! //! [`RuntimeRef::try_spawn`]: crate::RuntimeRef::try_spawn -use std::future::Future; use std::mem::MaybeUninit; use std::pin::Pin; -use log::{debug, trace}; +use log::trace; -use crate::process::{self, FutureProcess, Process, ProcessId}; +use crate::process::{self, Process, ProcessId}; use crate::ptr_as_usize; use crate::spawn::options::Priority; @@ -51,17 +50,11 @@ pub(super) type ProcessData = process::ProcessData; /// * Stopped: final state of a process, at this point its deallocated and its /// resources cleaned up. /// -/// ## Adding actors (processes) +/// ## Adding processes /// -/// Adding new actors to the scheduler is a two step process. First, the -/// resources are allocated in [`Scheduler::add_actor`], which returns an -/// [`AddActor`] structure. This `AddActor` can be used to determine the -/// [`ProcessId`] (pid) of the actor and can be used in setting up the actor, -/// before the actor itself is initialised. -/// -/// Second, after the actor is initialised, it can be added to the scheduler -/// using [`AddActor::add`]. This adds to the [`RunQueue`] or [`Inactive`] list -/// depending on whether its ready to run. +/// Adding new processes can be done using [`Scheduler::add_new_process`]. It +/// accepts a callback function to get access to the PID before the process is +/// actually added. /// /// ## Marking a process as ready to run /// @@ -84,7 +77,7 @@ pub(super) type ProcessData = process::ProcessData; /// /// If `remove` returns `Some(process)` the process must be run. Depending on /// the result of the process it should be added back the schduler using -/// [`Scheduler::add_process`], adding it back to the [`Inactive`] list, or +/// [`Scheduler::add_back_process`], adding it back to the [`Inactive`] list, or /// marked as completed using [`Scheduler::complete`], which cleans up any /// resources assiociated with the process. /// @@ -139,21 +132,28 @@ impl Scheduler { self.ready.has_process() } - /// Add a new actor to the scheduler. - pub(super) fn add_actor<'s>(&'s self) -> AddActor<'s> { - AddActor { - scheduler: self, - alloc: Box::new_uninit(), - } - } - - pub(super) fn add_future(&self, future: Fut, priority: Priority) + /// Add a new proces to the scheduler. + pub(crate) fn add_new_process(&self, priority: Priority, setup: F) -> Result where - Fut: Future + Send + Sync + 'static, + F: FnOnce(ProcessId) -> Result<(P, T), E>, + P: Process + Send + Sync + 'static, { - let process = Box::pin(ProcessData::new(priority, Box::pin(future))); - debug!(pid = process.as_ref().id().0; "spawning thread-safe future"); + // Allocate some memory for the process. + let mut alloc: Box> = Box::new_uninit(); + debug_assert!(inactive::ok_ptr(alloc.as_ptr().cast()), "SKIP_BITS invalid"); + // Based on the allocation we can determine its process id. + let pid = ProcessId(ptr_as_usize(alloc.as_ptr())); + // Let the caller create the actual process (using the pid). 
+ let (process, ret) = setup(pid)?; + let process = ProcessData::new(priority, Box::pin(process)); + // SAFETY: we write the processes and then safetly assume it's initialised. + let process = unsafe { + _ = alloc.write(process); + Pin::from(alloc.assume_init()) + }; + // Finally add it to ready queue. self.ready.add(process); + Ok(ret) } /// Mark the process, with `pid`, as ready to run. @@ -165,7 +165,7 @@ impl Scheduler { trace!(pid = pid.0; "marking process as ready"); self.inactive.mark_ready(pid, &self.ready); // NOTE: if the process in currently not in the `Inactive` list it will - // be marked as ready-to-run and `Scheduler::add_process` will add it to + // be marked as ready-to-run and `Scheduler::add_back_process` will add it to // the run queue once its done running. } @@ -179,7 +179,7 @@ impl Scheduler { /// Add back a process that was previously removed via /// [`Scheduler::remove`] and add it to the inactive list. - pub(super) fn add_process(&self, process: Pin>) { + pub(super) fn add_back_process(&self, process: Pin>) { let pid = process.as_ref().id(); trace!(pid = pid.0; "adding back process"); self.inactive.add(process, &self.ready); @@ -193,45 +193,3 @@ impl Scheduler { self.inactive.complete(process); } } - -/// A handle to add a process to the scheduler. -/// -/// This allows the `ProcessId` to be determined before the process is actually -/// added. This is used in registering with the system poller. -pub(super) struct AddActor<'s> { - scheduler: &'s Scheduler, - /// Already allocated `ProcessData`, used to determine the `ProcessId`. - alloc: Box>, -} - -impl<'s> AddActor<'s> { - /// Get the would be `ProcessId` for the process. - pub(super) const fn pid(&self) -> ProcessId { - #[allow(clippy::borrow_as_ptr)] - ProcessId(ptr_as_usize(&*self.alloc as *const _)) - } - - /// Add a new thread-safe actor to the scheduler. - pub(super) fn add(self, future: Fut, priority: Priority) - where - Fut: Process + Send + Sync + 'static, - { - debug_assert!( - inactive::ok_ptr(self.alloc.as_ptr().cast()), - "SKIP_BITS invalid" - ); - - let process = ProcessData::new(priority, Box::pin(future)); - let AddActor { - scheduler, - mut alloc, - } = self; - let process: Pin<_> = unsafe { - _ = alloc.write(process); - // Safe because we write into the allocation above. - alloc.assume_init().into() - }; - - scheduler.ready.add(process); - } -} diff --git a/rt/src/worker.rs b/rt/src/worker.rs index fb7f0ac01..4cbd8eeb6 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -402,7 +402,7 @@ impl Worker { self.internals.shared.complete(process); } task::Poll::Pending => { - self.internals.shared.add_process(process); + self.internals.shared.add_back_process(process); } } trace::finish_rt( From e430f3178e39fb6957220146acd61263079a2495 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 16:31:36 +0200 Subject: [PATCH 079/177] Rework adding processes to local scheduler Same change we made to the shared scheduler. 
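Both this change and the previous one follow the same allocate-then-initialise pattern:
reserve the `ProcessData` allocation first, derive the `ProcessId` from the allocation's
address, hand that pid to the fallible `setup` closure, and only then write and queue the
process. The sketch below is a minimal, std-only illustration of that pattern, not the
crate's real code: `ProcessData`, `add_new_process` and the plain `usize` id are
simplified stand-ins, and priorities and the inactive list are left out.

    use std::mem::MaybeUninit;
    use std::pin::Pin;

    // Toy stand-in for the scheduler's process data (illustrative only).
    struct ProcessData {
        id: usize,
        name: &'static str,
    }

    // Allocate first, derive the id from the allocation's address, then let the
    // caller build the process (and, say, an actor reference) using that id.
    fn add_new_process<F, T, E>(setup: F) -> Result<(Pin<Box<ProcessData>>, T), E>
    where
        F: FnOnce(usize) -> Result<(&'static str, T), E>,
    {
        // Uninitialised allocation; its address doubles as the process id.
        let mut alloc: Box<MaybeUninit<ProcessData>> = Box::new(MaybeUninit::uninit());
        let id = (*alloc).as_ptr() as usize;
        // The caller sees the id before the process exists and may fail.
        let (name, ret) = setup(id)?;
        _ = (*alloc).write(ProcessData { id, name });
        // SAFETY: the value was written directly above, so the cast is sound.
        let process = unsafe { Box::from_raw(Box::into_raw(alloc).cast::<ProcessData>()) };
        Ok((Pin::from(process), ret))
    }

    fn main() {
        let (process, actor_ref) = add_new_process(|id| {
            println!("setting up process {id}");
            Ok::<_, ()>(("example_actor", "a handle to the actor"))
        })
        .unwrap();
        // The id really is the allocation's address.
        assert_eq!(process.id, &*process as *const ProcessData as usize);
        println!("spawned {} with id {}; got back: {actor_ref}", process.name, process.id);
    }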
--- rt/src/lib.rs | 35 +++++++------- rt/src/local/scheduler/mod.rs | 86 +++++++++++------------------------ rt/src/worker.rs | 5 +- 3 files changed, 49 insertions(+), 77 deletions(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index d9da40b98..093f5e8f3 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -247,6 +247,7 @@ pub use error::Error; pub use setup::Setup; pub use signal::Signal; +use crate::process::{FutureProcess, Process}; use coordinator::Coordinator; use local::waker::MAX_THREADS; use spawn::{ActorOptions, FutureOptions, Spawn, SyncActorOptions}; @@ -582,10 +583,16 @@ impl RuntimeRef { where Fut: Future + 'static, { - self.internals + _ = self + .internals .scheduler .borrow_mut() - .add_future(future, options.priority()); + .add_new_process(options.priority(), |pid| { + let process = FutureProcess(future); + let name = process.name(); + debug!(pid = pid.0, name = name; "spawning thread-local future"); + Ok::<_, !>((process, ())) + }); } /// Spawn a thread-safe [`Future`]. @@ -685,21 +692,15 @@ where S: Supervisor, NA: NewActor, { - // Setup adding a new process to the scheduler. - let mut scheduler = self.internals.scheduler.borrow_mut(); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let name = NA::name(); - debug!(pid = pid.0, name = name; "spawning thread-local actor"); - - // Create the `ActorFuture`. - let rt = ThreadLocal::new(pid, self.clone()); - let (future, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; - - // Add the actor to the scheduler. - actor_entry.add(future, options.priority()); - - Ok(actor_ref) + self.internals + .scheduler + .borrow_mut() + .add_new_process(options.priority(), |pid| { + let name = NA::name(); + debug!(pid = pid.0, name = name; "spawning thread-local actor"); + let rt = ThreadLocal::new(pid, self.clone()); + ActorFuture::new(supervisor, new_actor, arg, rt) + }) } } diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index 776d21a3a..867cbc6a5 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -5,15 +5,12 @@ //! [`RuntimeRef::try_spawn_local`]: crate::RuntimeRef::try_spawn_local use std::collections::BinaryHeap; -use std::future::Future; use std::mem::MaybeUninit; use std::pin::Pin; -use heph::actor::{self, Actor, ActorFuture, NewActor}; -use heph::supervisor::Supervisor; -use log::{debug, trace}; +use log::trace; -use crate::process::{self, FutureProcess, Process, ProcessId}; +use crate::process::{self, Process, ProcessId}; use crate::ptr_as_usize; use crate::spawn::options::Priority; @@ -23,7 +20,7 @@ mod tests; use inactive::Inactive; -type ProcessData = process::ProcessData; +type ProcessData = process::ProcessData; #[derive(Debug)] pub(crate) struct Scheduler { @@ -64,21 +61,32 @@ impl Scheduler { !self.ready.is_empty() } - /// Add an actor to the scheduler. - pub(crate) fn add_actor<'s>(&'s mut self) -> AddActor<'s> { - AddActor { - scheduler: self, - alloc: Box::new_uninit(), - } - } - - pub(crate) fn add_future(&mut self, future: Fut, priority: Priority) + /// Add a new proces to the scheduler. + pub(crate) fn add_new_process( + &mut self, + priority: Priority, + setup: F, + ) -> Result where - Fut: Future + 'static, + F: FnOnce(ProcessId) -> Result<(P, T), E>, + P: Process + 'static, { - let process = Box::pin(ProcessData::new(priority, Box::pin(FutureProcess(future)))); - debug!(pid = process.as_ref().id().0; "spawning thread-local future"); + // Allocate some memory for the process. 
+        let mut alloc: Box<MaybeUninit<ProcessData>> = Box::new_uninit();
+        debug_assert!(inactive::ok_ptr(alloc.as_ptr().cast()), "SKIP_BITS invalid");
+        // Based on the allocation we can determine its process id.
+        let pid = ProcessId(ptr_as_usize(alloc.as_ptr()));
+        // Let the caller create the actual process (using the pid).
+        let (process, ret) = setup(pid)?;
+        let process = ProcessData::new(priority, Box::pin(process));
+        // SAFETY: we write the process and then safely assume it's initialised.
+        let process = unsafe {
+            _ = alloc.write(process);
+            Pin::from(alloc.assume_init())
+        };
+        // Finally add it to ready queue.
         self.ready.push(process);
+        Ok(ret)
     }
 
     /// Mark the process, with `pid`, as ready to run.
@@ -100,47 +108,7 @@ impl Scheduler {
 
     /// Add back a process that was previously removed via
     /// [`Scheduler::next_process`].
-    pub(crate) fn add_process(&mut self, process: Pin<Box<ProcessData>>) {
+    pub(crate) fn add_back_process(&mut self, process: Pin<Box<ProcessData>>) {
         self.inactive.add(process);
     }
 }
-
-/// A handle to add a process to the scheduler.
-///
-/// This allows the `ProcessId` to be determined before the process is actually
-/// added. This is used in registering with the system poller.
-pub(crate) struct AddActor<'s> {
-    scheduler: &'s mut Scheduler,
-    /// Already allocated `ProcessData`, used to determine the `ProcessId`.
-    alloc: Box<MaybeUninit<ProcessData>>,
-}
-
-impl<'s> AddActor<'s> {
-    /// Get the would be `ProcessId` for the process.
-    pub(crate) const fn pid(&self) -> ProcessId {
-        #[allow(clippy::borrow_as_ptr)]
-        ProcessId(ptr_as_usize(&*self.alloc as *const _))
-    }
-
-    /// Add a new inactive actor to the scheduler.
-    pub(crate) fn add<Fut>(self, future: Fut, priority: Priority)
-    where
-        Fut: Process + 'static,
-    {
-        debug_assert!(
-            inactive::ok_ptr(self.alloc.as_ptr().cast::<()>()),
-            "SKIP_BITS invalid"
-        );
-        let process = ProcessData::new(priority, Box::pin(future));
-        let AddActor {
-            scheduler,
-            mut alloc,
-        } = self;
-        let process: Pin<_> = unsafe {
-            _ = alloc.write(process);
-            // Safe because we write into the allocation above.
-            alloc.assume_init().into()
-        };
-        scheduler.ready.push(process);
-    }
-}
diff --git a/rt/src/worker.rs b/rt/src/worker.rs
index 4cbd8eeb6..d8195e08b 100644
--- a/rt/src/worker.rs
+++ b/rt/src/worker.rs
@@ -371,7 +371,10 @@ impl Worker {
                     drop(catch_unwind(AssertUnwindSafe(move || drop(process))));
                 }
                 task::Poll::Pending => {
-                    self.internals.scheduler.borrow_mut().add_process(process);
+                    self.internals
+                        .scheduler
+                        .borrow_mut()
+                        .add_back_process(process);
                 }
             }
             trace::finish_rt(

From b0d5cc6e254d53534672eba5893939448295c483 Mon Sep 17 00:00:00 2001
From: Thomas de Zeeuw
Date: Tue, 18 Apr 2023 17:34:22 +0200
Subject: [PATCH 080/177] Fixup scheduler (related) tests

Updates all the tests to use the new API.
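The tests now drive a process the same way the workers do: build a `task::Context` from a
waker that does nothing and poll until `Poll::Ready(())`. The `nop_task_waker()` helper
they import from `crate::test` is not part of this patch, so the sketch below is only an
illustrative, std-only stand-in for that pattern (newer versions of std also offer
`Waker::noop()` for the same job).

    use std::future::Future;
    use std::ptr;
    use std::task::{self, Poll, RawWaker, RawWakerVTable, Waker};

    // A waker that does nothing: enough to poll a future by hand in a test.
    fn nop_waker() -> Waker {
        fn clone(_: *const ()) -> RawWaker {
            RawWaker::new(ptr::null(), &VTABLE)
        }
        fn nop(_: *const ()) {}
        static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, nop, nop, nop);
        // SAFETY: every vtable function ignores its data pointer.
        unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &VTABLE)) }
    }

    fn main() {
        let waker = nop_waker();
        let mut ctx = task::Context::from_waker(&waker);

        // A future that is never ready stands in for a process waiting on a message.
        let mut waiting = Box::pin(std::future::pending::<()>());
        assert_eq!(waiting.as_mut().poll(&mut ctx), Poll::Pending);

        // And one that completes on the first poll.
        let mut done = Box::pin(std::future::ready(()));
        assert_eq!(done.as_mut().poll(&mut ctx), Poll::Ready(()));
    }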
--- rt/src/local/scheduler/inactive.rs | 29 ++-- rt/src/local/scheduler/mod.rs | 6 +- rt/src/local/scheduler/tests.rs | 196 ++++++++++++---------------- rt/src/process/tests.rs | 76 +++++------ rt/src/shared/scheduler/inactive.rs | 29 ++-- rt/src/shared/scheduler/mod.rs | 6 +- rt/src/shared/scheduler/runqueue.rs | 17 ++- rt/src/shared/scheduler/tests.rs | 113 ++++++++-------- rt/src/shared/waker.rs | 40 ++++-- rt/src/test.rs | 47 ++----- rt/src/timers/mod.rs | 1 - 11 files changed, 270 insertions(+), 290 deletions(-) diff --git a/rt/src/local/scheduler/inactive.rs b/rt/src/local/scheduler/inactive.rs index 383b27c21..c86bce62f 100644 --- a/rt/src/local/scheduler/inactive.rs +++ b/rt/src/local/scheduler/inactive.rs @@ -300,14 +300,15 @@ const fn skip_bits(pid: ProcessId, depth: usize) -> usize { #[cfg(test)] mod tests { use std::cmp::max; + use std::future::Future; use std::mem::{align_of, size_of}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; + use std::task::{self, Poll}; - use crate::process::{Process, ProcessId, ProcessResult}; + use crate::process::{Process, ProcessId}; use crate::spawn::options::Priority; - use crate::RuntimeRef; use super::{ diff_branch_depth, Branch, Inactive, Pointer, ProcessData, LEVEL_SHIFT, N_BRANCHES, @@ -316,14 +317,18 @@ mod tests { struct TestProcess; + impl Future for TestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for TestProcess { fn name(&self) -> &'static str { "TestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!() - } } fn test_process() -> Pin> { @@ -445,14 +450,18 @@ mod tests { } } + impl Future for DropTest { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for DropTest { fn name(&self) -> &'static str { "DropTest" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!() - } } let dropped = Arc::new(AtomicUsize::new(0)); diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index 867cbc6a5..e48365098 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -1,8 +1,4 @@ -//! Module with the thread-local scheduler. -//! -//! Scheduler for the actors started with [`RuntimeRef::try_spawn_local`]. -//! -//! [`RuntimeRef::try_spawn_local`]: crate::RuntimeRef::try_spawn_local +//! Thread-local scheduler. use std::collections::BinaryHeap; use std::mem::MaybeUninit; diff --git a/rt/src/local/scheduler/tests.rs b/rt/src/local/scheduler/tests.rs index d4e7c3c21..57dd606ed 100644 --- a/rt/src/local/scheduler/tests.rs +++ b/rt/src/local/scheduler/tests.rs @@ -1,19 +1,21 @@ //! Tests for the local scheduler. 
use std::cell::RefCell; +use std::future::Future; use std::future::{pending, Pending}; use std::mem; use std::pin::Pin; use std::rc::Rc; +use std::task::{self, Poll}; -use heph::actor::{self, NewActor}; +use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::NoSupervisor; use crate::local::scheduler::{ProcessData, Scheduler}; -use crate::process::{Process, ProcessId, ProcessResult}; +use crate::process::{FutureProcess, Process, ProcessId}; use crate::spawn::options::Priority; -use crate::test::{self, init_local_actor_future, AssertUnmoved}; -use crate::{RuntimeRef, ThreadLocal}; +use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; +use crate::ThreadLocal; fn assert_size(expected: usize) { assert_eq!(mem::size_of::(), expected); @@ -27,14 +29,18 @@ fn size_assertions() { #[derive(Debug)] struct NopTestProcess; +impl Future for NopTestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } +} + impl Process for NopTestProcess { fn name(&self) -> &'static str { "NopTestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!(); - } } #[test] @@ -43,13 +49,11 @@ fn has_process() { assert!(!scheduler.has_process()); assert!(!scheduler.has_ready_process()); - let process: Pin> = Box::pin(ProcessData::new( - Priority::default(), - Box::pin(NopTestProcess), - )); - scheduler.add_process(process); + let _ = scheduler.add_new_process(Priority::NORMAL, |_| { + Ok::<_, !>((FutureProcess(NopTestProcess), ())) + }); assert!(scheduler.has_process()); - assert!(!scheduler.has_ready_process()); + assert!(scheduler.has_ready_process()); } async fn simple_actor(_: actor::Context) {} @@ -57,11 +61,13 @@ async fn simple_actor(_: actor::Context) {} #[test] fn add_actor() { let mut scheduler = Scheduler::new(); - - let actor_entry = scheduler.add_actor(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let _ = scheduler + .add_new_process(Priority::NORMAL, |_| { + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(TEST_PID, test::runtime()); + ActorFuture::new(NoSupervisor, new_actor, (), rt) + }) + .unwrap(); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); } @@ -73,17 +79,19 @@ fn mark_ready() { // Incorrect (outdated) pid should be ok. scheduler.mark_ready(ProcessId(1)); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = scheduler + .add_new_process(Priority::NORMAL, |pid| { + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(TEST_PID, test::runtime()); + ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) + }) + .unwrap(); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); let process = scheduler.next_process().unwrap(); - scheduler.add_process(process); + scheduler.add_back_process(process); scheduler.mark_ready(pid); } @@ -94,29 +102,21 @@ fn mark_ready_before_run() { // Incorrect (outdated) pid should be ok. 
scheduler.mark_ready(ProcessId(1)); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = add_test_actor(&mut scheduler, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); let process = scheduler.next_process().unwrap(); scheduler.mark_ready(pid); - scheduler.add_process(process); + scheduler.add_back_process(process); } #[test] fn next_process() { let mut scheduler = Scheduler::new(); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = add_test_actor(&mut scheduler, Priority::NORMAL); if let Some(process) = scheduler.next_process() { assert_eq!(process.as_ref().id(), pid); @@ -131,22 +131,9 @@ fn next_process() { fn next_process_order() { let mut scheduler = Scheduler::new(); - let new_actor = simple_actor as fn(_) -> _; - // Actor 1. - let actor_entry = scheduler.add_actor(); - let pid1 = actor_entry.pid(); - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::LOW); - // Actor 2. - let actor_entry = scheduler.add_actor(); - let pid2 = actor_entry.pid(); - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::HIGH); - // Actor 3. - let actor_entry = scheduler.add_actor(); - let pid3 = actor_entry.pid(); - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid1 = add_test_actor(&mut scheduler, Priority::LOW); + let pid2 = add_test_actor(&mut scheduler, Priority::HIGH); + let pid3 = add_test_actor(&mut scheduler, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -173,11 +160,7 @@ fn next_process_order() { fn add_process() { let mut scheduler = Scheduler::new(); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = add_test_actor(&mut scheduler, Priority::NORMAL); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -193,14 +176,10 @@ fn add_process() { fn add_process_marked_ready() { let mut scheduler = Scheduler::new(); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_local_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = add_test_actor(&mut scheduler, Priority::NORMAL); let process = scheduler.next_process().unwrap(); - scheduler.add_process(process); + scheduler.add_back_process(process); assert!(scheduler.has_process()); assert!(!scheduler.has_ready_process()); @@ -222,7 +201,8 @@ fn scheduler_run_order() { } let mut scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); // The order in which the processes have been run. 
let run_order = Rc::new(RefCell::new(Vec::new())); @@ -232,11 +212,14 @@ fn scheduler_run_order() { let priorities = [Priority::LOW, Priority::NORMAL, Priority::HIGH]; let mut pids = vec![]; for (id, priority) in priorities.iter().enumerate() { - let actor_entry = scheduler.add_actor(); - pids.push(actor_entry.pid()); - let (future, _) = - init_local_actor_future(NoSupervisor, new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(future, *priority); + let pid = scheduler + .add_new_process(*priority, |pid| { + let rt = ThreadLocal::new(TEST_PID, test::runtime()); + ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) + .map(|(future, _)| (future, pid)) + }) + .unwrap(); + pids.push(pid); } assert!(scheduler.has_process()); @@ -246,10 +229,7 @@ fn scheduler_run_order() { // are equal). for _ in 0..3 { let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Complete - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Ready(())); } assert!(!scheduler.has_process()); assert_eq!(*run_order.borrow(), vec![2_usize, 1, 0]); @@ -276,66 +256,64 @@ impl NewActor for TestAssertUnmovedNewActor { #[test] fn assert_actor_process_unmoved() { let mut scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let (future, _) = init_local_actor_future(NoSupervisor, TestAssertUnmovedNewActor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = scheduler + .add_new_process(Priority::NORMAL, |pid| { + let rt = ThreadLocal::new(TEST_PID, test::runtime()); + ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) + .map(|(future, _)| (future, pid)) + }) + .unwrap(); // Run the process multiple times, ensure it's not moved in the process. let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); } #[test] fn assert_future_process_unmoved() { let mut scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); - let future = AssertUnmoved::new(pending()); - scheduler.add_future(future, Priority::NORMAL); + let _ = scheduler.add_new_process(Priority::NORMAL, |_| { + Ok::<_, !>((FutureProcess(AssertUnmoved::new(pending())), ())) + }); // Run the process multiple times, ensure it's not moved in the process. 
let mut process = scheduler.next_process().unwrap(); let pid = process.as_ref().id(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.next_process().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); +} + +fn add_test_actor(scheduler: &mut Scheduler, priority: Priority) -> ProcessId { + scheduler + .add_new_process(priority, |pid| { + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(TEST_PID, test::runtime()); + ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) + }) + .unwrap() } diff --git a/rt/src/process/tests.rs b/rt/src/process/tests.rs index d33f9875b..7f041f76d 100644 --- a/rt/src/process/tests.rs +++ b/rt/src/process/tests.rs @@ -1,18 +1,18 @@ //! Tests for the process module. use std::cmp::Ordering; -use std::future::pending; +use std::future::{pending, Future}; use std::mem::size_of; use std::pin::Pin; +use std::task::{self, Poll}; use std::thread::sleep; use std::time::Duration; use mio::Token; -use crate::process::{FutureProcess, Process, ProcessData, ProcessId, ProcessResult}; +use crate::process::{FutureProcess, Process, ProcessData, ProcessId}; use crate::spawn::options::Priority; -use crate::test::{self, AssertUnmoved}; -use crate::{RuntimeRef, ThreadLocal, ThreadSafe}; +use crate::test::{nop_task_waker, AssertUnmoved}; #[test] fn pid() { @@ -51,14 +51,18 @@ fn size_assertions() { #[derive(Debug)] struct NopTestProcess; +impl Future for NopTestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } +} + impl Process for NopTestProcess { fn name(&self) -> &'static str { "NopTestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!(); - } } #[test] @@ -124,15 +128,19 @@ fn process_data_ordering() { #[derive(Debug)] struct SleepyProcess(Duration); +impl Future for SleepyProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + sleep(self.0); + Poll::Pending + } +} + impl Process for SleepyProcess { fn name(&self) -> &'static str { "SleepyProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - sleep(self.0); - ProcessResult::Pending - } } #[test] @@ -146,40 +154,26 @@ fn process_data_runtime_increase() { process.fair_runtime = Duration::from_millis(10); // Runtime must increase after running. 
- let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref); - assert_eq!(res, ProcessResult::Pending); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); + let res = process.as_mut().run(&mut ctx); + assert_eq!(res, Poll::Pending); assert!(process.fair_runtime >= SLEEP_TIME); } #[test] -fn future_process_thread_local_assert_future_unmoved() { - let process = FutureProcess::<_, ThreadLocal>::new(AssertUnmoved::new(pending())); - let mut process: Pin> = Box::pin(process); - - // All we do is run it a couple of times, it should panic if the actor is - // moved. - let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); -} - -#[test] -fn future_process_thread_safe_assert_future_unmoved() { - let process = FutureProcess::<_, ThreadSafe>::new(AssertUnmoved::new(pending())); +fn future_process_assert_future_unmoved() { + let process = FutureProcess(AssertUnmoved::new(pending())); let mut process: Pin> = Box::pin(process); // All we do is run it a couple of times, it should panic if the actor is // moved. - let mut runtime_ref = test::runtime(); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); - let res = process.as_mut().run(&mut runtime_ref, ProcessId(0)); - assert_eq!(res, ProcessResult::Pending); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); + let res = process.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + let res = process.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + let res = process.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); } diff --git a/rt/src/shared/scheduler/inactive.rs b/rt/src/shared/scheduler/inactive.rs index f3858f242..8d6e20c19 100644 --- a/rt/src/shared/scheduler/inactive.rs +++ b/rt/src/shared/scheduler/inactive.rs @@ -674,16 +674,17 @@ unsafe fn drop_tagged_pointer(ptr: TaggedPointer) { #[cfg(test)] mod tests { + use std::future::Future; use std::mem::{align_of, size_of}; use std::pin::Pin; use std::ptr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; + use std::task::{self, Poll}; - use crate::process::{Process, ProcessId, ProcessResult}; + use crate::process::{Process, ProcessId}; use crate::shared::scheduler::RunQueue; use crate::spawn::options::Priority; - use crate::RuntimeRef; use super::{ as_pid, branch_from_tagged, diff_branch_depth, drop_tagged_pointer, is_branch, is_process, @@ -707,14 +708,18 @@ mod tests { struct TestProcess; + impl Future for TestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for TestProcess { fn name(&self) -> &'static str { "TestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!() - } } fn test_process() -> Pin> { @@ -804,14 +809,18 @@ mod tests { } } + impl Future for DropTest { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for DropTest { fn name(&self) -> 
&'static str { "DropTest" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!() - } } #[test] diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index 741876e64..474c730e6 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -1,8 +1,4 @@ -//! Module with the thread-safe scheduler. -//! -//! Scheduler for the actors started with [`RuntimeRef::try_spawn`]. -//! -//! [`RuntimeRef::try_spawn`]: crate::RuntimeRef::try_spawn +//! Thread-safe scheduler. use std::mem::MaybeUninit; use std::pin::Pin; diff --git a/rt/src/shared/scheduler/runqueue.rs b/rt/src/shared/scheduler/runqueue.rs index e074865c3..bdb37f8ba 100644 --- a/rt/src/shared/scheduler/runqueue.rs +++ b/rt/src/shared/scheduler/runqueue.rs @@ -121,13 +121,14 @@ impl Node { #[cfg(test)] mod tests { + use std::future::Future; use std::mem::size_of; use std::pin::Pin; + use std::task::{self, Poll}; use std::time::Duration; - use crate::process::{Process, ProcessId, ProcessResult}; + use crate::process::{Process, ProcessId}; use crate::spawn::options::Priority; - use crate::RuntimeRef; use super::{Node, ProcessData, RunQueue}; @@ -141,14 +142,18 @@ mod tests { struct TestProcess; + impl Future for TestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for TestProcess { fn name(&self) -> &'static str { "TestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - ProcessResult::Complete - } } fn add_process(run_queue: &RunQueue, fair_runtime: Duration) -> ProcessId { diff --git a/rt/src/shared/scheduler/tests.rs b/rt/src/shared/scheduler/tests.rs index de09d5afd..18028e6ee 100644 --- a/rt/src/shared/scheduler/tests.rs +++ b/rt/src/shared/scheduler/tests.rs @@ -3,13 +3,14 @@ use std::future::{pending, Pending}; use std::mem::size_of; use std::sync::{Arc, Mutex}; +use std::task::{self, Poll}; -use heph::actor::{self, NewActor}; +use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::NoSupervisor; -use crate::process::{ProcessId, ProcessResult}; +use crate::process::{FutureProcess, ProcessId}; use crate::shared::scheduler::{Priority, ProcessData, Scheduler}; -use crate::test::{self, init_actor_future, AssertUnmoved}; +use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; use crate::ThreadSafe; fn assert_size(expected: usize) { @@ -38,18 +39,13 @@ fn adding_actor() { assert!(!scheduler.has_ready_process()); assert_eq!(scheduler.remove(), None); - // Add an actor to the scheduler. - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let new_actor = simple_actor as fn(_) -> _; - let (future, _) = init_actor_future(NoSupervisor, new_actor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = add_test_actor(&scheduler, Priority::NORMAL); // Newly added processes are ready by default. assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); let process = scheduler.remove().unwrap(); - scheduler.add_process(process); + scheduler.add_back_process(process); // After scheduling the process should be ready to run. scheduler.mark_ready(pid); @@ -58,8 +54,8 @@ fn adding_actor() { let process = scheduler.remove().unwrap(); assert_eq!(process.as_ref().id(), pid); - // After the process is run, and returned `ProcessResult::Complete`, it - // should be removed. 
+ // After the process is run, and returned `Poll::Ready(()`, it should be + // removed. assert!(!scheduler.has_process()); assert!(!scheduler.has_ready_process()); assert_eq!(scheduler.remove(), None); @@ -67,7 +63,7 @@ fn adding_actor() { assert!(!scheduler.has_ready_process()); // Adding the process back means its not ready. - scheduler.add_process(process); + scheduler.add_back_process(process); assert!(scheduler.has_process()); assert!(!scheduler.has_ready_process()); assert_eq!(scheduler.remove(), None); @@ -106,7 +102,8 @@ fn scheduler_run_order() { } let scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); // The order in which the processes have been run. let run_order = Arc::new(Mutex::new(Vec::new())); @@ -116,11 +113,14 @@ fn scheduler_run_order() { let priorities = [Priority::LOW, Priority::NORMAL, Priority::HIGH]; let mut pids = vec![]; for (id, priority) in priorities.iter().enumerate() { - let actor_entry = scheduler.add_actor(); - pids.push(actor_entry.pid()); - let (future, _) = - init_actor_future(NoSupervisor, new_actor, (id, run_order.clone())).unwrap(); - actor_entry.add(future, *priority); + let pid = scheduler + .add_new_process(*priority, |pid| { + let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) + .map(|(future, _)| (future, pid)) + }) + .unwrap(); + pids.push(pid); } assert!(scheduler.has_process()); @@ -130,10 +130,7 @@ fn scheduler_run_order() { // are equal). for _ in 0..3 { let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Complete - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Ready(())); } assert!(!scheduler.has_process()); assert_eq!(*run_order.lock().unwrap(), vec![2_usize, 1, 0]); @@ -160,68 +157,66 @@ impl NewActor for TestAssertUnmovedNewActor { #[test] fn assert_actor_process_unmoved() { let scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); - let actor_entry = scheduler.add_actor(); - let pid = actor_entry.pid(); - let (future, _) = init_actor_future(NoSupervisor, TestAssertUnmovedNewActor, ()).unwrap(); - actor_entry.add(future, Priority::NORMAL); + let pid = scheduler + .add_new_process(Priority::NORMAL, |pid| { + let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) + .map(|(future, _)| (future, pid)) + }) + .unwrap(); // Run the process multiple times, ensure it's not moved in the // process. 
let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); } #[test] fn assert_future_process_unmoved() { let scheduler = Scheduler::new(); - let mut runtime_ref = test::runtime(); + let waker = nop_task_waker(); + let mut ctx = task::Context::from_waker(&waker); - let future = AssertUnmoved::new(pending()); - scheduler.add_future(future, Priority::NORMAL); + let _ = scheduler.add_new_process(Priority::NORMAL, |_| { + Ok::<_, !>((FutureProcess(AssertUnmoved::new(pending())), ())) + }); // Run the process multiple times, ensure it's not moved in the // process. let mut process = scheduler.remove().unwrap(); let pid = process.as_ref().id(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); - scheduler.add_process(process); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); + scheduler.add_back_process(process); scheduler.mark_ready(pid); let mut process = scheduler.remove().unwrap(); - assert_eq!( - process.as_mut().run(&mut runtime_ref), - ProcessResult::Pending - ); + assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); +} + +fn add_test_actor(scheduler: &Scheduler, priority: Priority) -> ProcessId { + scheduler + .add_new_process(priority, |pid| { + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) + }) + .unwrap() } diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs index e76eb6bd5..c8bd2ec59 100644 --- a/rt/src/shared/waker.rs +++ b/rt/src/shared/waker.rs @@ -174,17 +174,19 @@ unsafe fn drop_wake_data(_: *const ()) { #[cfg(test)] mod tests { + use std::future::Future; use std::mem::size_of; use std::pin::Pin; use std::sync::{Arc, Weak}; + use std::task::{self, Poll}; use std::thread::{self, sleep}; use std::time::Duration; - use crate::process::{Process, ProcessData, ProcessId, ProcessResult}; + use crate::process::{FutureProcess, Process, ProcessId}; use crate::shared::waker::{self, WakerData}; use crate::shared::{RuntimeInternals, Scheduler}; use crate::spawn::options::Priority; - use crate::{test, RuntimeRef}; + use crate::test; const PID1: ProcessId = ProcessId(1); const PID2: ProcessId = ProcessId(2); @@ -196,14 +198,18 @@ mod tests { struct TestProcess; + impl Future for TestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + impl Process for TestProcess { fn name(&self) -> &'static str { 
"TestProcess" } - - fn run(self: Pin<&mut Self>, _: &mut RuntimeRef, _: ProcessId) -> ProcessResult { - unimplemented!(); - } } #[test] @@ -212,6 +218,10 @@ mod tests { let pid = add_process(&shared_internals.scheduler); assert!(shared_internals.scheduler.has_process()); + assert!(shared_internals.scheduler.has_ready_process()); + let process = shared_internals.scheduler.remove().unwrap(); + shared_internals.scheduler.add_back_process(process); + assert!(shared_internals.scheduler.has_process()); assert!(!shared_internals.scheduler.has_ready_process()); // Create a new waker. @@ -240,6 +250,10 @@ mod tests { // Add a test process. let pid = add_process(&shared_internals.scheduler); assert!(shared_internals.scheduler.has_process()); + assert!(shared_internals.scheduler.has_ready_process()); + let process = shared_internals.scheduler.remove().unwrap(); + shared_internals.scheduler.add_back_process(process); + assert!(shared_internals.scheduler.has_process()); assert!(!shared_internals.scheduler.has_ready_process()); // Create a cloned waker. @@ -261,6 +275,10 @@ mod tests { let pid = add_process(&shared_internals.scheduler); assert!(shared_internals.scheduler.has_process()); + assert!(shared_internals.scheduler.has_ready_process()); + let process = shared_internals.scheduler.remove().unwrap(); + shared_internals.scheduler.add_back_process(process); + assert!(shared_internals.scheduler.has_process()); assert!(!shared_internals.scheduler.has_ready_process()); let shared_internals2 = shared_internals.clone(); @@ -327,10 +345,10 @@ mod tests { } fn add_process(scheduler: &Scheduler) -> ProcessId { - let process: Pin> = Box::pin(TestProcess); - let process_data = Box::pin(ProcessData::new(Priority::NORMAL, process)); - let pid = process_data.as_ref().id(); - scheduler.add_process(process_data); - pid + scheduler + .add_new_process(Priority::NORMAL, |pid| { + Ok::<_, !>((FutureProcess(TestProcess), pid)) + }) + .unwrap() } } diff --git a/rt/src/test.rs b/rt/src/test.rs index c793b5257..f44a1fd3c 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -56,8 +56,6 @@ use std::task::{self, Poll}; use std::time::{Duration, Instant}; use std::{io, slice, thread}; -#[cfg(test)] -use heph::actor::ActorFuture; use heph::actor::{self, Actor, NewActor, SyncActor, SyncWaker}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; @@ -90,7 +88,7 @@ pub(crate) fn noop_waker() -> &'static ThreadWaker { }) } -fn shared_internals() -> Arc { +pub(crate) fn shared_internals() -> Arc { static SHARED_INTERNALS: OnceLock> = OnceLock::new(); SHARED_INTERNALS .get_or_init(|| { @@ -387,36 +385,6 @@ where init_actor_with_inbox(new_actor, arg).map(|(actor, _, actor_ref)| (actor, actor_ref)) } -/// Initialise a thread-local `ActorFuture`. -#[cfg(test)] -pub(crate) fn init_local_actor_future( - supervisor: S, - new_actor: NA, - argument: NA::Argument, -) -> Result<(ActorFuture, ActorRef), NA::Error> -where - S: Supervisor, - NA: NewActor, -{ - let rt = ThreadLocal::new(TEST_PID, runtime()); - ActorFuture::new(supervisor, new_actor, argument, rt) -} - -/// Initialise a thread-safe `ActorFuture`. -#[cfg(test)] -pub(crate) fn init_actor_future( - supervisor: S, - new_actor: NA, - argument: NA::Argument, -) -> Result<(ActorFuture, ActorRef), NA::Error> -where - S: Supervisor, - NA: NewActor, -{ - let rt = ThreadSafe::new(TEST_PID, shared_internals()); - ActorFuture::new(supervisor, new_actor, argument, rt) -} - /// Initialise a thread-local actor with access to it's inbox. 
#[allow(clippy::type_complexity)] pub(crate) fn init_local_actor_with_inbox( @@ -576,3 +544,16 @@ where unsafe impl Send for AssertUnmoved {} #[cfg(test)] unsafe impl std::marker::Sync for AssertUnmoved {} + +/// Returns a no-op [`task::Waker`]. +#[cfg(test)] +pub(crate) fn nop_task_waker() -> task::Waker { + use std::task::{RawWaker, RawWakerVTable}; + static WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( + |_| RawWaker::new(std::ptr::null(), &WAKER_VTABLE), + |_| {}, + |_| {}, + |_| {}, + ); + unsafe { task::Waker::from_raw(RawWaker::new(std::ptr::null(), &WAKER_VTABLE)) } +} diff --git a/rt/src/timers/mod.rs b/rt/src/timers/mod.rs index 911a97573..e2edac4eb 100644 --- a/rt/src/timers/mod.rs +++ b/rt/src/timers/mod.rs @@ -6,7 +6,6 @@ //! //! [`timer`]: crate::timer //! -//! //! # Implementation //! //! This implementation is based on a Timing Wheel as discussed in the paper From 28a6b5a12e13ed19de567a692875fd9fd4e6d0b0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 17:45:43 +0200 Subject: [PATCH 081/177] Catch panics in FutureProcess So we don't have to do that when running the processes any more as ActorFuture already catches and handles panics. --- rt/src/local/scheduler/mod.rs | 11 ++++++++++ rt/src/process/mod.rs | 39 +++++++++++++++++++++++++++++----- rt/src/shared/scheduler/mod.rs | 1 - rt/src/worker.rs | 4 +--- src/actor/future.rs | 2 +- 5 files changed, 47 insertions(+), 10 deletions(-) diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/local/scheduler/mod.rs index e48365098..498c0d920 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/local/scheduler/mod.rs @@ -2,6 +2,7 @@ use std::collections::BinaryHeap; use std::mem::MaybeUninit; +use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use log::trace; @@ -105,6 +106,16 @@ impl Scheduler { /// Add back a process that was previously removed via /// [`Scheduler::next_process`]. pub(crate) fn add_back_process(&mut self, process: Pin>) { + let pid = process.as_ref().id(); + trace!(pid = pid.0; "adding back process"); self.inactive.add(process); } + + /// Mark `process` as complete, removing it from the scheduler. + pub(crate) fn complete(&self, process: Pin>) { + let pid = process.as_ref().id(); + trace!(pid = pid.0; "removing process"); + // Don't want to panic when dropping the process. + drop(catch_unwind(AssertUnwindSafe(move || drop(process)))); + } } diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 5275212e6..299405b33 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -1,15 +1,17 @@ //! Module containing the `Process` trait, related types and implementations. +use std::any::Any; use std::cmp::Ordering; use std::fmt; use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::Supervisor; -use log::{as_debug, trace}; +use log::{as_debug, error, trace}; use mio::Token; use crate::spawn::options::Priority; @@ -51,6 +53,10 @@ impl fmt::Display for ProcessId { } /// The trait that represents a process. +/// +/// # Panics +/// +/// The implementation of the [`Future`] MUST catch panics. pub(crate) trait Process: Future { /// Return the name of this process, used in logging. fn name(&self) -> &'static str { @@ -62,17 +68,40 @@ pub(crate) trait Process: Future { /// Wrapper around a [`Future`] to implement [`Process`]. 
pub(crate) struct FutureProcess(pub(crate) Fut); -impl Future for FutureProcess { - type Output = Fut::Output; +impl> Future for FutureProcess { + type Output = (); - fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { // SAFETY: not moving the `Fut`ure. - unsafe { Fut::poll(Pin::map_unchecked_mut(self, |s| &mut s.0), ctx) } + let future = unsafe { Pin::map_unchecked_mut(self.as_mut(), |s| &mut s.0) }; + match catch_unwind(AssertUnwindSafe(|| future.poll(ctx))) { + Ok(Poll::Ready(())) => Poll::Ready(()), + Ok(Poll::Pending) => Poll::Pending, + Err(panic) => { + let msg = panic_message(&*panic); + let name = self.name(); + error!("future '{name}' panicked at '{msg}'"); + Poll::Ready(()) + } + } + } +} + +/// Attempts to extract a message from a panic, defaulting to ``. +/// NOTE: be sure to derefence the `Box`! +fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str { + match panic.downcast_ref::<&'static str>() { + Some(s) => s, + None => match panic.downcast_ref::() { + Some(s) => s, + None => "", + }, } } impl Process for FutureProcess where Fut: Future {} +// NOTE: `ActorFuture` already catches panics for us. impl Process for ActorFuture where S: Supervisor, diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/shared/scheduler/mod.rs index 474c730e6..54710f2c8 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/shared/scheduler/mod.rs @@ -182,7 +182,6 @@ impl Scheduler { } /// Mark `process` as complete, removing it from the scheduler. - #[allow(clippy::unused_self)] // See NOTE below. pub(super) fn complete(&self, process: Pin>) { let pid = process.as_ref().id(); trace!(pid = pid.0; "removing process"); diff --git a/rt/src/worker.rs b/rt/src/worker.rs index d8195e08b..fb3515671 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -18,7 +18,6 @@ use std::cell::RefMut; use std::num::NonZeroUsize; use std::os::fd::{AsFd, AsRawFd}; -use std::panic::{catch_unwind, AssertUnwindSafe}; use std::rc::Rc; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -367,8 +366,7 @@ impl Worker { let mut ctx = task::Context::from_waker(&waker); match process.as_mut().run(&mut ctx) { task::Poll::Ready(()) => { - // Don't want to panic when dropping the process. - drop(catch_unwind(AssertUnwindSafe(move || drop(process)))); + self.internals.scheduler.borrow_mut().complete(process); } task::Poll::Pending => { self.internals diff --git a/src/actor/future.rs b/src/actor/future.rs index 968fb8f28..686fb236f 100644 --- a/src/actor/future.rs +++ b/src/actor/future.rs @@ -199,7 +199,7 @@ where } /// Attempts to extract a message from a panic, defaulting to ``. -/// Note: be sure to derefence the `Box`! +/// NOTE: be sure to derefence the `Box`! fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str { match panic.downcast_ref::<&'static str>() { Some(s) => s, From ed380f6f11180da86e74a737ef0fc9023a43cbe3 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:18:29 +0200 Subject: [PATCH 082/177] Update format_name The name of async functions has changed slightly. 
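For reference, a minimal standalone sketch of the raw names `format_name` now
receives (the module and function names here are made up for illustration, and
the exact string depends on the compiler version):

    async fn greeter_actor() { /* ... */ }

    fn main() {
        fn type_name_of<T>(_: &T) -> &'static str {
            std::any::type_name::<T>()
        }
        let future = greeter_actor();
        // Recent compilers print something like
        // "example::greeter_actor::{{closure}}", where older compilers wrapped
        // it as "core::future::from_generator::GenFuture<...::{{closure}}>".
        println!("{}", type_name_of(&future));
    }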
--- src/actor/context.rs | 2 +- src/actor/mod.rs | 95 ++++++++++++++++++++------------------------ src/actor/tests.rs | 4 ++ 3 files changed, 47 insertions(+), 54 deletions(-) diff --git a/src/actor/context.rs b/src/actor/context.rs index c93bb1e70..8c4911956 100644 --- a/src/actor/context.rs +++ b/src/actor/context.rs @@ -96,7 +96,7 @@ impl Context { /// async fn print_actor(mut ctx: actor::Context) { /// // Create a timer, this will be ready once the timeout has /// // passed. - /// let timeout = Timer::after(&mut ctx, Duration::from_millis(100)); + /// let timeout = Timer::after(ctx.runtime().clone(), Duration::from_millis(100)); /// // Create a future to receive a message. /// let msg_future = ctx.receive_next(); /// diff --git a/src/actor/mod.rs b/src/actor/mod.rs index 301da9c62..92274c2f8 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -314,7 +314,7 @@ pub trait NewActor { /// // Actor that handles a connection. /// async fn conn_actor( /// ctx: actor::Context, - /// mut stream: TcpStream, + /// stream: TcpStream, /// greet_mars: bool /// ) -> io::Result<()> { /// # drop(ctx); // Silence dead code warnings. @@ -565,7 +565,7 @@ fn format_name(full_name: &'static str) -> &'static str { const GEN_FUTURE: &str = "GenFuture<"; const GENERIC_START: &str = "<"; const GENERIC_END: &str = ">"; - const CLOSURE: &str = "{{closure}}"; + const CLOSURE: &str = "::{{closure}}"; let mut name = full_name; @@ -584,51 +584,6 @@ fn format_name(full_name: &'static str) -> &'static str { (Some(start_index), Some(i)) if start_index < i => { // Outer type is `GenFuture`, remove that. name = &name[start_index + GEN_FUTURE.len()..name.len() - GENERIC_END.len()]; - - // Async functions often end with `::{{closure}}`; also remove that, - // e.g. from `1_hello_world::greeter_actor::{{closure}}` to - // `1_hello_world::greeter_actor`. - if let Some(start_index) = name.rfind("::") { - let last_part = &name[start_index + 2..]; - if last_part == CLOSURE { - name = &name[..start_index]; - } - } - - // Remove generic parameters, e.g. - // `deadline_actor` to - // `deadline_actor`. - if name.ends_with('>') { - if let Some(start_index) = name.find('<') { - name = &name[..start_index]; - } - } - - // Function named `actor` in a module named `actor`. We'll drop the - // function name and keep the `actor` part of the module, e.g. - // `storage::actor::actor` to `storage::actor`. - if name.ends_with("actor::actor") { - // Remove `::actor`. - name = &name[..name.len() - 7]; - } - - // Either take the last part of the name's path, e.g. from - // `1_hello_world::greeter_actor` to `greeter_actor`. - // Or keep the module name in case the actor's name would be `actor`, - // e.g. from `1_hello_world::some::nested::module::greeter::actor` to - // `greeter::actor`. - if let Some(start_index) = name.rfind("::") { - let actor_name = &name[start_index + 2..]; - if actor_name == "actor" { - // If the actor's name is `actor` will keep the last module - // name as part of the name. - if let Some(module_index) = name[..start_index].rfind("::") { - name = &name[module_index + 2..]; - } // Else only a single module in path. - } else { - name = actor_name; - } - } } _ => { // Otherwise we trait it like a normal type and remove the generic @@ -637,15 +592,49 @@ fn format_name(full_name: &'static str) -> &'static str { // to `heph::net::tcp::server::TcpServer`. if let Some(start_index) = name.find(GENERIC_START) { name = &name[..start_index]; - - if let Some(start_index) = name.rfind("::") { - // Next we remove the module path, e.g. 
from - // `heph::net::tcp::server::TcpServer` to `TcpServer`. - name = &name[start_index + 2..]; - } } } } + // Async functions often end with `::{{closure}}`; also remove that, + // e.g. from `1_hello_world::greeter_actor::{{closure}}` to + // `1_hello_world::greeter_actor`. + name = name.trim_end_matches(CLOSURE); + + // Remove generic parameters, e.g. + // `deadline_actor` to + // `deadline_actor`. + if name.ends_with('>') { + if let Some(start_index) = name.find('<') { + name = &name[..start_index]; + } + } + + // Function named `actor` in a module named `actor`. We'll drop the + // function name and keep the `actor` part of the module, e.g. + // `storage::actor::actor` to `storage::actor`. + if name.ends_with("actor::actor") { + // Remove `::actor`. + name = &name[..name.len() - 7]; + } + + // Either take the last part of the name's path, e.g. from + // `1_hello_world::greeter_actor` to `greeter_actor`. + // Or keep the module name in case the actor's name would be `actor`, + // e.g. from `1_hello_world::some::nested::module::greeter::actor` to + // `greeter::actor`. + if let Some(start_index) = name.rfind("::") { + let actor_name = &name[start_index + 2..]; + if actor_name == "actor" { + // If the actor's name is `actor` will keep the last module + // name as part of the name. + if let Some(module_index) = name[..start_index].rfind("::") { + name = &name[module_index + 2..]; + } // Else only a single module in path. + } else if !actor_name.is_empty() { + name = actor_name; + } + } + name } diff --git a/src/actor/tests.rs b/src/actor/tests.rs index 4f0fa5d3a..0b551ed1f 100644 --- a/src/actor/tests.rs +++ b/src/actor/tests.rs @@ -19,6 +19,10 @@ fn actor_name() { "core::future::from_generator::GenFuture<2_my_ip::conn_actor::{{closure}}>", "conn_actor" ), + ( + "2_my_ip::conn_actor::{{closure}}", + "conn_actor", + ), // Generic parameter(s) wrapped in GenFuture. ( "core::future::from_generator::GenFuture::{{closure}}>", From d6ca7e864c265729052008ecd42cf324b7b73334 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:21:07 +0200 Subject: [PATCH 083/177] Fix unused code warning It's used in the test module. --- rt/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 093f5e8f3..fb98c1c4f 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -203,6 +203,7 @@ macro_rules! syscall { use std::convert::TryInto; use std::future::Future; use std::rc::Rc; +#[cfg(any(test, feature = "test"))] use std::sync::Arc; use std::task; use std::time::{Duration, Instant}; @@ -625,6 +626,7 @@ impl RuntimeRef { /// # Notes /// /// Prefer `new_waker` if possible, only use `task::Waker` for `Future`s. + #[cfg(any(test, feature = "test"))] fn new_local_task_waker(&self, pid: ProcessId) -> task::Waker { local::waker::new(self.internals.waker_id, pid) } @@ -646,6 +648,7 @@ impl RuntimeRef { } /// Returns a copy of the shared internals. + #[cfg(any(test, feature = "test"))] fn clone_shared(&self) -> Arc { self.internals.shared.clone() } From e288679b5e8c7d4839dc3374a3091886c9f52524 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:37:42 +0200 Subject: [PATCH 084/177] Create scheduler module And move the local scheduler into it. Same as what we did for the timers implementation. 
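The resulting layout mirrors the timers module; the thread-safe half moves in
the next patch (see the renames in the diffstats below):

    rt/src/scheduler/mod.rs       // was rt/src/local/scheduler/mod.rs
    rt/src/scheduler/inactive.rs  // was rt/src/local/scheduler/inactive.rs
    rt/src/scheduler/tests.rs     // was rt/src/local/scheduler/tests.rs
    rt/src/scheduler/shared/      // follows from rt/src/shared/scheduler/

Call sites change from `use scheduler::Scheduler;` (inside rt/src/local/mod.rs)
to `use crate::scheduler::Scheduler;`.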
--- rt/src/lib.rs | 1 + rt/src/local/mod.rs | 3 +-- rt/src/{local => }/scheduler/inactive.rs | 2 +- rt/src/{local => }/scheduler/mod.rs | 2 +- rt/src/{local => }/scheduler/tests.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) rename rt/src/{local => }/scheduler/inactive.rs (99%) rename rt/src/{local => }/scheduler/mod.rs (99%) rename rt/src/{local => }/scheduler/tests.rs (99%) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index fb98c1c4f..24adda70c 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -223,6 +223,7 @@ pub mod log; pub mod net; pub mod pipe; mod process; +mod scheduler; mod setup; mod shared; mod signal; diff --git a/rt/src/local/mod.rs b/rt/src/local/mod.rs index e776be699..4f75118c9 100644 --- a/rt/src/local/mod.rs +++ b/rt/src/local/mod.rs @@ -9,11 +9,10 @@ use mio::Poll; use crate::{shared, trace, Signal}; -mod scheduler; pub(super) mod waker; +use crate::scheduler::Scheduler; use crate::timers::Timers; -use scheduler::Scheduler; use waker::WakerId; /// Internals of the runtime, to which `RuntimeRef`s have a reference. diff --git a/rt/src/local/scheduler/inactive.rs b/rt/src/scheduler/inactive.rs similarity index 99% rename from rt/src/local/scheduler/inactive.rs rename to rt/src/scheduler/inactive.rs index c86bce62f..9628ab6a4 100644 --- a/rt/src/local/scheduler/inactive.rs +++ b/rt/src/scheduler/inactive.rs @@ -4,8 +4,8 @@ use std::mem::{forget, replace}; use std::pin::Pin; use std::ptr::NonNull; -use crate::local::scheduler::ProcessData; use crate::process::ProcessId; +use crate::scheduler::ProcessData; /// Number of bits to shift per level. const LEVEL_SHIFT: usize = 4; diff --git a/rt/src/local/scheduler/mod.rs b/rt/src/scheduler/mod.rs similarity index 99% rename from rt/src/local/scheduler/mod.rs rename to rt/src/scheduler/mod.rs index 498c0d920..b9d844457 100644 --- a/rt/src/local/scheduler/mod.rs +++ b/rt/src/scheduler/mod.rs @@ -1,4 +1,4 @@ -//! Thread-local scheduler. +//! Scheduler implementation. 
use std::collections::BinaryHeap; use std::mem::MaybeUninit; diff --git a/rt/src/local/scheduler/tests.rs b/rt/src/scheduler/tests.rs similarity index 99% rename from rt/src/local/scheduler/tests.rs rename to rt/src/scheduler/tests.rs index 57dd606ed..7e8878deb 100644 --- a/rt/src/local/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -11,8 +11,8 @@ use std::task::{self, Poll}; use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::NoSupervisor; -use crate::local::scheduler::{ProcessData, Scheduler}; use crate::process::{FutureProcess, Process, ProcessId}; +use crate::scheduler::{ProcessData, Scheduler}; use crate::spawn::options::Priority; use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; use crate::ThreadLocal; From b406cc696ebc68872eedf372200cbecff9b14b58 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:44:14 +0200 Subject: [PATCH 085/177] Move shared Scheduler into scheduler module --- rt/src/scheduler/mod.rs | 1 + .../shared}/inactive.rs | 4 ++-- .../scheduler => scheduler/shared}/mod.rs | 22 +++++++++---------- .../shared}/runqueue.rs | 14 ++++++------ .../scheduler => scheduler/shared}/tests.rs | 2 +- rt/src/shared/mod.rs | 3 +-- rt/src/timers/shared.rs | 2 +- 7 files changed, 24 insertions(+), 24 deletions(-) rename rt/src/{shared/scheduler => scheduler/shared}/inactive.rs (99%) rename rt/src/{shared/scheduler => scheduler/shared}/mod.rs (91%) rename rt/src/{shared/scheduler => scheduler/shared}/runqueue.rs (97%) rename rt/src/{shared/scheduler => scheduler/shared}/tests.rs (99%) diff --git a/rt/src/scheduler/mod.rs b/rt/src/scheduler/mod.rs index b9d844457..d0f014396 100644 --- a/rt/src/scheduler/mod.rs +++ b/rt/src/scheduler/mod.rs @@ -12,6 +12,7 @@ use crate::ptr_as_usize; use crate::spawn::options::Priority; mod inactive; +pub(crate) mod shared; #[cfg(test)] mod tests; diff --git a/rt/src/shared/scheduler/inactive.rs b/rt/src/scheduler/shared/inactive.rs similarity index 99% rename from rt/src/shared/scheduler/inactive.rs rename to rt/src/scheduler/shared/inactive.rs index 8d6e20c19..26ab1b063 100644 --- a/rt/src/shared/scheduler/inactive.rs +++ b/rt/src/scheduler/shared/inactive.rs @@ -4,7 +4,7 @@ use std::pin::Pin; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::{fmt, ptr}; -use crate::shared::scheduler::{ProcessData, RunQueue}; +use crate::scheduler::shared::{ProcessData, RunQueue}; use crate::ProcessId; /// Number of bits to shift per level. @@ -683,7 +683,7 @@ mod tests { use std::task::{self, Poll}; use crate::process::{Process, ProcessId}; - use crate::shared::scheduler::RunQueue; + use crate::scheduler::shared::RunQueue; use crate::spawn::options::Priority; use super::{ diff --git a/rt/src/shared/scheduler/mod.rs b/rt/src/scheduler/shared/mod.rs similarity index 91% rename from rt/src/shared/scheduler/mod.rs rename to rt/src/scheduler/shared/mod.rs index 54710f2c8..d2eb157f5 100644 --- a/rt/src/shared/scheduler/mod.rs +++ b/rt/src/scheduler/shared/mod.rs @@ -1,11 +1,11 @@ -//! Thread-safe scheduler. +//! Thread-safe version of `Scheduler`. 
use std::mem::MaybeUninit; use std::pin::Pin; use log::trace; -use crate::process::{self, Process, ProcessId}; +use crate::process::{Process, ProcessId}; use crate::ptr_as_usize; use crate::spawn::options::Priority; @@ -17,7 +17,7 @@ mod tests; use inactive::Inactive; use runqueue::RunQueue; -pub(super) type ProcessData = process::ProcessData; +pub(crate) type ProcessData = crate::process::ProcessData; /// The thread-safe scheduler, responsible for scheduling processes that can run /// one any of the worker threads, e.g. thread-safe actors. @@ -81,7 +81,7 @@ pub(super) type ProcessData = process::ProcessData; /// section above, it will not be added to the [`Inactive`] list but instead be /// moved to the [`RunQueue`] again. #[derive(Debug)] -pub(super) struct Scheduler { +pub(crate) struct Scheduler { /// Processes that are ready to run. ready: RunQueue, /// Inactive processes that are not ready to run. @@ -90,7 +90,7 @@ pub(super) struct Scheduler { impl Scheduler { /// Create a new `Scheduler`. - pub(super) fn new() -> Scheduler { + pub(crate) fn new() -> Scheduler { Scheduler { ready: RunQueue::empty(), inactive: Inactive::empty(), @@ -113,7 +113,7 @@ impl Scheduler { /// # Notes /// /// Once this function returns the value could already be outdated. - pub(super) fn has_process(&self) -> bool { + pub(crate) fn has_process(&self) -> bool { let has_inactive = self.inactive.has_process(); has_inactive || self.has_ready_process() } @@ -124,7 +124,7 @@ impl Scheduler { /// # Notes /// /// Once this function returns the value could already be outdated. - pub(super) fn has_ready_process(&self) -> bool { + pub(crate) fn has_ready_process(&self) -> bool { self.ready.has_process() } @@ -157,7 +157,7 @@ impl Scheduler { /// # Notes /// /// Calling this with an invalid or outdated `pid` will be silently ignored. - pub(super) fn mark_ready(&self, pid: ProcessId) { + pub(crate) fn mark_ready(&self, pid: ProcessId) { trace!(pid = pid.0; "marking process as ready"); self.inactive.mark_ready(pid, &self.ready); // NOTE: if the process in currently not in the `Inactive` list it will @@ -169,20 +169,20 @@ impl Scheduler { /// /// Returns `Ok(Some(..))` if a process was successfully removed or /// `Ok(None)` if no processes are available to run. - pub(super) fn remove(&self) -> Option>> { + pub(crate) fn remove(&self) -> Option>> { self.ready.remove() } /// Add back a process that was previously removed via /// [`Scheduler::remove`] and add it to the inactive list. - pub(super) fn add_back_process(&self, process: Pin>) { + pub(crate) fn add_back_process(&self, process: Pin>) { let pid = process.as_ref().id(); trace!(pid = pid.0; "adding back process"); self.inactive.add(process, &self.ready); } /// Mark `process` as complete, removing it from the scheduler. - pub(super) fn complete(&self, process: Pin>) { + pub(crate) fn complete(&self, process: Pin>) { let pid = process.as_ref().id(); trace!(pid = pid.0; "removing process"); self.inactive.complete(process); diff --git a/rt/src/shared/scheduler/runqueue.rs b/rt/src/scheduler/shared/runqueue.rs similarity index 97% rename from rt/src/shared/scheduler/runqueue.rs rename to rt/src/scheduler/shared/runqueue.rs index bdb37f8ba..9df09aafb 100644 --- a/rt/src/shared/scheduler/runqueue.rs +++ b/rt/src/scheduler/shared/runqueue.rs @@ -2,7 +2,7 @@ use std::mem::replace; use std::pin::Pin; use std::sync::Mutex; -use super::ProcessData; +use crate::scheduler::shared::ProcessData; // TODO: currently this creates and drops Node on almost every operation. 
Maybe // we can keep (some of) the structure in place, changing `Node.process` into an @@ -12,7 +12,7 @@ use super::ProcessData; /// /// Implemented as a simple binary tree. #[derive(Debug)] -pub(super) struct RunQueue { +pub(crate) struct RunQueue { root: Mutex, } @@ -27,7 +27,7 @@ struct Node { impl RunQueue { /// Returns an empty `RunQueue`. - pub(super) const fn empty() -> RunQueue { + pub(crate) const fn empty() -> RunQueue { RunQueue { root: Mutex::new(None), } @@ -38,7 +38,7 @@ impl RunQueue { /// # Notes /// /// Don't call this often, it's terrible for performance. - pub(super) fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { match &mut *self.root.lock().unwrap() { Some(branch) => branch.len(), None => 0, @@ -46,12 +46,12 @@ impl RunQueue { } /// Returns `true` if the queue contains any process. - pub(super) fn has_process(&self) -> bool { + pub(crate) fn has_process(&self) -> bool { self.root.lock().unwrap().is_some() } /// Add `process` to the queue of running processes. - pub(super) fn add(&self, process: Pin>) { + pub(crate) fn add(&self, process: Pin>) { let mut next_node = &mut *self.root.lock().unwrap(); loop { match next_node { @@ -74,7 +74,7 @@ impl RunQueue { } /// Remove the next process to run from the queue. - pub(super) fn remove(&self) -> Option>> { + pub(crate) fn remove(&self) -> Option>> { let mut next_node = &mut *self.root.lock().unwrap(); loop { match next_node { diff --git a/rt/src/shared/scheduler/tests.rs b/rt/src/scheduler/shared/tests.rs similarity index 99% rename from rt/src/shared/scheduler/tests.rs rename to rt/src/scheduler/shared/tests.rs index 18028e6ee..9eb37bbf0 100644 --- a/rt/src/shared/scheduler/tests.rs +++ b/rt/src/scheduler/shared/tests.rs @@ -9,7 +9,7 @@ use heph::actor::{self, ActorFuture, NewActor}; use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, ProcessId}; -use crate::shared::scheduler::{Priority, ProcessData, Scheduler}; +use crate::scheduler::shared::{Priority, ProcessData, Scheduler}; use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; use crate::ThreadSafe; diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index f7055b5f3..4889c08a0 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -22,11 +22,10 @@ use crate::thread_waker::ThreadWaker; use crate::timers::TimerToken; use crate::{trace, ThreadSafe}; -mod scheduler; pub(crate) mod waker; +use crate::scheduler::shared::{ProcessData, Scheduler}; use crate::timers::shared::Timers; -use scheduler::{ProcessData, Scheduler}; use waker::WakerId; /// Setup of [`RuntimeInternals`]. diff --git a/rt/src/timers/shared.rs b/rt/src/timers/shared.rs index e493ccdda..c678dbdaa 100644 --- a/rt/src/timers/shared.rs +++ b/rt/src/timers/shared.rs @@ -1,4 +1,4 @@ -//! Threadsafe version of `Timers`. +//! Thread-safe version of `Timers`. use std::cmp::min; use std::sync::RwLock; From bc99fd6d28cee420c372e1183132c3d0d75823df Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:48:13 +0200 Subject: [PATCH 086/177] Fix a couple of Clippy lints --- rt/src/coordinator/waker.rs | 2 +- rt/src/error.rs | 2 +- rt/src/scheduler/mod.rs | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/rt/src/coordinator/waker.rs b/rt/src/coordinator/waker.rs index 2963c6f70..69ec91d21 100644 --- a/rt/src/coordinator/waker.rs +++ b/rt/src/coordinator/waker.rs @@ -40,7 +40,7 @@ fn into_data_ptr(bitmap: Arc, id: usize) -> *const () { // This is a "fat" pointer, a pointer to `AtomicBitMap` and a length. 
let bitmap_ptr = Arc::into_raw(bitmap); // This will point to the start of the `AtomicBitMap` as is "thin". - let bitmap_start = bitmap_ptr as *const (); + let bitmap_start = bitmap_ptr.cast::<()>(); // Ensure we have bit to put our `id`. assert!(bitmap_start as usize & PTR_BITS_UNUSED == 0); // Squash the pointer and our `id` together. diff --git a/rt/src/error.rs b/rt/src/error.rs index 5825da205..0e03449b7 100644 --- a/rt/src/error.rs +++ b/rt/src/error.rs @@ -128,7 +128,7 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - const DESC: &'static str = "error running Heph runtime"; + const DESC: &str = "error running Heph runtime"; match self.inner { ErrorInner::Setup(ref err) => { write!(f, "{DESC}: error in user-defined setup: {err}") diff --git a/rt/src/scheduler/mod.rs b/rt/src/scheduler/mod.rs index d0f014396..e7f563f1b 100644 --- a/rt/src/scheduler/mod.rs +++ b/rt/src/scheduler/mod.rs @@ -113,6 +113,7 @@ impl Scheduler { } /// Mark `process` as complete, removing it from the scheduler. + #[allow(clippy::unused_self)] pub(crate) fn complete(&self, process: Pin>) { let pid = process.as_ref().id(); trace!(pid = pid.0; "removing process"); From 6bcafff013dbf9ca73dd598612790664f95b11e2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 18:48:24 +0200 Subject: [PATCH 087/177] Ignore doc-markdown Clippy lint Too many false positives, it's just getting annoying. --- Makefile.include | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile.include b/Makefile.include index 10c55f068..c8df4bc84 100644 --- a/Makefile.include +++ b/Makefile.include @@ -39,6 +39,7 @@ $(TARGETS): # Reasons to allow lints: # `cargo-common-metadata`: for `benches` and `tools`. +# `doc-markdown`: too many false positives. # `equatable-if-let`: bad lint. # `future-not-send`: we don't want to require all generic parameters to be `Send`. # `manual-let-else`: not really a fan of this. @@ -69,6 +70,7 @@ clippy: --deny clippy::nursery \ --deny clippy::cargo \ --allow clippy::cargo-common-metadata \ + --allow clippy::doc-markdown \ --allow clippy::enum-glob-use \ --allow clippy::equatable-if-let \ --allow clippy::future-not-send \ From 8a7d21e3918bc3d5bbe4ecb4834ae89b50cf33ab Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 18 Apr 2023 20:34:55 +0200 Subject: [PATCH 088/177] Add some tests for ActorFuture --- src/actor/future.rs | 6 +- src/actor/tests.rs | 235 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 237 insertions(+), 4 deletions(-) diff --git a/src/actor/future.rs b/src/actor/future.rs index 686fb236f..6ec9a3af0 100644 --- a/src/actor/future.rs +++ b/src/actor/future.rs @@ -163,10 +163,10 @@ where type Output = (); fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // This is safe because we're not moving the actor. + // SAFETY: not moving the actor. let this = unsafe { Pin::get_unchecked_mut(self) }; - // The actor need to be called with `Pin`. So we're undoing the previous - // operation, still ensuring that the actor is not moved. + // SAFETY: undoing the previous operation, still ensuring that the actor + // is not moved. 
let mut actor = unsafe { Pin::new_unchecked(&mut this.actor) }; match catch_unwind(AssertUnwindSafe(|| actor.as_mut().try_poll(ctx))) { diff --git a/src/actor/tests.rs b/src/actor/tests.rs index 0b551ed1f..4c7d91586 100644 --- a/src/actor/tests.rs +++ b/src/actor/tests.rs @@ -1,4 +1,13 @@ -use crate::actor; +use std::any::Any; +use std::cell::Cell; +use std::future::Future; +use std::pin::pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::task::{self, Poll}; + +use crate::actor::{self, Actor, ActorFuture, NewActor}; +use crate::supervisor::{NoSupervisor, Supervisor, SupervisorStrategy}; #[test] fn actor_name() { @@ -90,3 +99,227 @@ fn actor_name() { assert_eq!(got, *expected, "input: {input}"); } } + +async fn ok_actor(mut ctx: actor::Context<()>) { + assert_eq!(ctx.receive_next().await, Ok(())); +} + +#[test] +fn actor_future() { + let new_actor = ok_actor as fn(_) -> _; + let (actor, actor_ref) = ActorFuture::new(NoSupervisor, new_actor, (), ()).unwrap(); + let mut actor = pin!(actor); + + let (waker, count) = task_wake_counter(); + let mut ctx = task::Context::from_waker(&waker); + + // Actor should return `Poll::Pending` in the first call, since no message + // is available. + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + + // Send a message and the actor should return Ok. + actor_ref.try_send(()).unwrap(); + assert_eq!(count.load(Ordering::SeqCst), 1); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Ready(())); +} + +async fn error_actor(mut ctx: actor::Context<()>, fail: bool) -> Result<(), ()> { + if fail { + Err(()) + } else { + assert_eq!(ctx.receive_next().await, Ok(())); + Ok(()) + } +} + +#[test] +fn erroneous_actor_process() { + let mut supervisor_called_count = 0; + let supervisor = |()| { + supervisor_called_count += 1; + SupervisorStrategy::Stop + }; + let new_actor = error_actor as fn(_, _) -> _; + let (actor, _) = ActorFuture::new(supervisor, new_actor, true, ()).unwrap(); + let mut actor = pin!(actor); + + // Actor should return an error and be stopped. + let (waker, count) = task_wake_counter(); + let mut ctx = task::Context::from_waker(&waker); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Ready(())); + assert_eq!(supervisor_called_count, 1); + assert_eq!(count.load(Ordering::SeqCst), 0); +} + +#[test] +fn restarting_erroneous_actor_process() { + let supervisor_called_count = Cell::new(0); + let supervisor = |()| { + supervisor_called_count.set(supervisor_called_count.get() + 1); + SupervisorStrategy::Restart(false) + }; + let new_actor = error_actor as fn(_, _) -> _; + let (actor, actor_ref) = ActorFuture::new(supervisor, new_actor, true, ()).unwrap(); + let mut actor = pin!(actor); + + // Actor should return an error and be restarted. + let (waker, count) = task_wake_counter(); + let mut ctx = task::Context::from_waker(&waker); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + assert_eq!(supervisor_called_count.get(), 1); + // The future to wake itself after a restart to ensure it gets run again. + assert_eq!(count.load(Ordering::SeqCst), 1); + + // After a restart the actor should continue without issues. + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + assert_eq!(supervisor_called_count.get(), 1); + + // Finally after sending it a message it should complete. 
+ actor_ref.try_send(()).unwrap(); + assert_eq!(count.load(Ordering::SeqCst), 2); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Ready(())); + assert_eq!(supervisor_called_count.get(), 1); +} + +async fn panic_actor(mut ctx: actor::Context<()>, fail: bool) -> Result<(), ()> { + if fail { + panic!("oops!") + } else { + assert_eq!(ctx.receive_next().await, Ok(())); + Ok(()) + } +} + +#[test] +fn panicking_actor_process() { + struct TestSupervisor<'a>(&'a mut usize); + + impl Supervisor for TestSupervisor<'_> + where + NA: NewActor, + { + fn decide(&mut self, _: ::Error) -> SupervisorStrategy { + unreachable!() + } + + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy { + // This can't be called. + err + } + + fn second_restart_error(&mut self, err: !) { + // This can't be called. + err + } + + fn decide_on_panic( + &mut self, + panic: Box, + ) -> SupervisorStrategy { + drop(panic); + *self.0 += 1; + SupervisorStrategy::Stop + } + } + + let mut supervisor_called_count = 0; + let supervisor = TestSupervisor(&mut supervisor_called_count); + let new_actor = panic_actor as fn(_, _) -> _; + let (actor, _) = ActorFuture::new(supervisor, new_actor, true, ()).unwrap(); + let mut actor = pin!(actor); + + // Actor should panic and be stopped. + let (waker, count) = task_wake_counter(); + let mut ctx = task::Context::from_waker(&waker); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Ready(())); + assert_eq!(supervisor_called_count, 1); + assert_eq!(count.load(Ordering::SeqCst), 0); +} + +#[test] +fn restarting_panicking_actor_process() { + struct TestSupervisor<'a>(&'a Cell); + + impl Supervisor for TestSupervisor<'_> + where + NA: NewActor, + { + fn decide(&mut self, _: ::Error) -> SupervisorStrategy { + unreachable!() + } + + fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy { + // This can't be called. + err + } + + fn second_restart_error(&mut self, err: !) { + // This can't be called. + err + } + + fn decide_on_panic( + &mut self, + panic: Box, + ) -> SupervisorStrategy { + drop(panic); + self.0.set(self.0.get() + 1); + SupervisorStrategy::Restart(false) + } + } + + let supervisor_called_count = Cell::new(0); + let supervisor = TestSupervisor(&supervisor_called_count); + let new_actor = panic_actor as fn(_, _) -> _; + let (actor, actor_ref) = ActorFuture::new(supervisor, new_actor, true, ()).unwrap(); + let mut actor = pin!(actor); + + // Actor should panic and be restarted. + let (waker, count) = task_wake_counter(); + let mut ctx = task::Context::from_waker(&waker); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + assert_eq!(supervisor_called_count.get(), 1); + // The future to wake itself after a restart to ensure it gets run again. + assert_eq!(count.load(Ordering::SeqCst), 1); + + // After a restart the actor should continue without issues. + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Pending); + assert_eq!(supervisor_called_count.get(), 1); + + // Finally after sending it a message it should complete. + actor_ref.try_send(()).unwrap(); + assert_eq!(count.load(Ordering::SeqCst), 2); + let res = actor.as_mut().poll(&mut ctx); + assert_eq!(res, Poll::Ready(())); + assert_eq!(supervisor_called_count.get(), 1); +} + +/// Returns a [`task::Waker`] that counts the times it's called in `call_count`. 
+pub(crate) fn task_wake_counter() -> (task::Waker, Arc) { + #[repr(transparent)] + struct WakeCounter(AtomicUsize); + + impl task::Wake for WakeCounter { + fn wake(self: Arc) { + _ = self.0.fetch_add(1, Ordering::SeqCst); + } + } + + let call_count = Arc::new(AtomicUsize::new(0)); + ( + // SAFETY: safe with `repr(transparent)`. + task::Waker::from(unsafe { + std::mem::transmute::, Arc>(call_count.clone()) + }), + call_count, + ) +} From 062244f70628f651a4a3f2f16d064f788e6b71c8 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 10:11:19 +0200 Subject: [PATCH 089/177] Fix a number of internal doc links --- rt/src/coordinator/mod.rs | 6 +++--- rt/src/io/buf.rs | 10 +++++++--- rt/src/lib.rs | 8 ++------ rt/src/shared/mod.rs | 2 +- rt/src/timers/mod.rs | 5 +++-- rt/src/worker.rs | 7 +++---- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 04cbab45e..58e17ed9c 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -63,14 +63,14 @@ pub(super) struct Coordinator { /// Internals shared between the coordinator and all workers. internals: Arc, - // Data used in [`Metrics`]. - /// Start time, used to calculate [`Metrics`]'s uptime. + // Data used in [`Coordinator::log_metrics`]. + /// Start time of the application. start: Instant, /// Name of the application. app_name: Box, /// OS name and version, from `uname(2)`. host_os: Box, - /// Name of the host. `nodename` field from `uname(2)`. + /// Name of the host, `nodename` field from `uname(2)`. host_name: Box, /// Id of the host. host_id: Uuid, diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 7cdb099f3..983912c98 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -455,8 +455,10 @@ mod private { /// /// # Safety /// - /// This has the same safety requirements as [`BufMut::parts`], but then for - /// all buffers used. + /// This has the same safety requirements as [`BufMut::parts_mut`], but + /// then for all buffers used. + /// + /// [`BufMut::parts_mut`]: crate::io::BufMut::parts_mut unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N]; /// Mark `n` bytes as initialised. @@ -464,7 +466,7 @@ mod private { /// # Safety /// /// The caller must ensure that `n` bytes are initialised in the vectors - /// return by [`BufMutSlice::as_iovec`]. + /// return by [`BufMutSlice::as_iovecs_mut`]. /// /// The implementation must ensure that that proper buffer(s) are /// initialised. For example when this is called with `n = 10` with two @@ -487,6 +489,8 @@ mod private { /// /// This has the same safety requirements as [`Buf::parts`], but then for /// all buffers used. + /// + /// [`Buf::parts`]: crate::io::Buf::parts unsafe fn as_iovecs(&self) -> [libc::iovec; N]; } } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 24adda70c..c280a0f96 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -579,7 +579,7 @@ impl RuntimeRef { /// /// Similar to thread-local actors this will only run on a single thread. /// See the discussion of thread-local vs. thread-safe actors in the - /// [`actor`] module for additional information. + /// [`heph::actor`] module for additional information. #[allow(clippy::needless_pass_by_value)] pub fn spawn_local_future(&mut self, future: Fut, options: FutureOptions) where @@ -601,7 +601,7 @@ impl RuntimeRef { /// /// Similar to thread-safe actors this can run on any of the workers /// threads. See the discussion of thread-local vs. thread-safe actors in - /// the [`actor`] module for additional information. 
+ /// the [`heph::actor`] module for additional information. pub fn spawn_future(&mut self, future: Fut, options: FutureOptions) where Fut: Future + Send + std::marker::Sync + 'static, @@ -633,16 +633,12 @@ impl RuntimeRef { } /// Add a timer. - /// - /// See [`Timers::add`]. pub(crate) fn add_timer(&self, deadline: Instant, waker: task::Waker) -> TimerToken { ::log::trace!(deadline = as_debug!(deadline); "adding timer"); self.internals.timers.borrow_mut().add(deadline, waker) } /// Remove a previously set timer. - /// - /// See [`Timers::remove`]. pub(crate) fn remove_timer(&self, deadline: Instant, token: TimerToken) { ::log::trace!(deadline = as_debug!(deadline); "removing timer"); self.internals.timers.borrow_mut().remove(deadline, token); diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 4889c08a0..b33d30478 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -117,7 +117,7 @@ impl RuntimeInternals { Ok(RuntimeSetup { poll, ring }) } - /// Same as [`setup`], but doesn't attach to an existing [`a10::Ring`]. + /// Same as [`RuntimeInternals::setup`], but doesn't attach to an existing [`a10::Ring`]. #[cfg(any(test, feature = "test"))] pub(crate) fn test_setup() -> io::Result { let poll = Poll::new()?; diff --git a/rt/src/timers/mod.rs b/rt/src/timers/mod.rs index e2edac4eb..8671c7922 100644 --- a/rt/src/timers/mod.rs +++ b/rt/src/timers/mod.rs @@ -49,9 +49,10 @@ mod tests; mod private { //! [`TimerToken`] needs to be public because it's used in the - //! private-in-public trait [`PrivateAccess`]. + //! private-in-public trait [`PrivateAccess`], so we use the same trick + //! here. //! - //! [`PrivateAccess`]: crate::access::private::PrivateAccess + //! [`PrivateAccess`]: crate::access::PrivateAccess /// Token used to expire a timer. #[derive(Copy, Clone, Debug)] diff --git a/rt/src/worker.rs b/rt/src/worker.rs index fb3515671..e46f66dd2 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -86,7 +86,7 @@ pub(super) fn setup( /// Setup work required before starting a worker thread, see [`setup`]. pub(super) struct WorkerSetup { - /// See [`Worker::id`]. + /// See [`WorkerSetup::id`]. id: NonZeroUsize, /// Poll instance for the worker thread. This is needed before starting the /// thread to initialise the [`rt::local::waker`]. @@ -153,8 +153,7 @@ impl Handle { self.id.get() } - /// Registers the channel used to communicate with the thread. Uses the - /// [`Worker::id`] as [`Token`]. + /// Registers the channel used to communicate with the thread. pub(super) fn register(&mut self, registry: &Registry) -> io::Result<()> { self.channel.register(registry, Token(self.id())) } @@ -897,7 +896,7 @@ impl std::error::Error for Error { } } -/// Control the [`Runtime`]. +/// Control message send to the worker threads. #[allow(variant_size_differences)] // Can't make `Run` smaller. pub(crate) enum Control { /// Runtime has started, i.e. [`rt::Runtime::start`] was called. From 4d13821bde5264a143976225bdba73b98370f0a7 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 10:28:49 +0200 Subject: [PATCH 090/177] Improve the docs of the access and spawn modules And the items within them. --- rt/src/access.rs | 14 ++++++++------ rt/src/lib.rs | 8 +++----- rt/src/spawn/mod.rs | 15 +++++++++++++-- rt/src/spawn/options.rs | 6 +++--- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index 4e3b10498..e638a351c 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -20,9 +20,10 @@ //! 
* [`ThreadSafe`]: passed to thread-safe actors and gives access to the //! runtime parts that are shared between threads. //! -//! Finally we have [`Sync`], which passed to synchronous actors and also gives -//! access to the runtime parts that are shared between threads. However it -//! doesn't actually implement the `Access` trait. +//! Finally we have [`Sync`], which is passed to synchronous actors and also +//! gives access to the runtime parts that are shared between threads. However +//! it doesn't actually implement the `Access` trait as synchronous actors can +//! block. //! //! [`rt::Access`]: crate::Access //! [`SyncActor::RuntimeAccess`]: heph::actor::SyncActor::RuntimeAccess @@ -44,10 +45,11 @@ use crate::timers::TimerToken; use crate::trace::{self, Trace}; use crate::{shared, RuntimeRef}; -/// Trait to indicate an API needs access to the Heph runtime. +/// Runtime Access Trait. /// -/// This is used by various API to get access to the runtime, but its only -/// usable inside the Heph crate. +/// This trait is used to indicate an API needs access to the Heph runtime. It +/// is used by various API to get access to the runtime, but its only usable +/// inside the Heph crate. /// /// Also see [`NewActor::RuntimeAccess`] and [`SyncActor::RuntimeAccess`]. /// diff --git a/rt/src/lib.rs b/rt/src/lib.rs index c280a0f96..7f39cf3cc 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -367,9 +367,7 @@ impl Runtime { /// Spawn an synchronous actor that runs on its own thread. /// /// For more information and examples of synchronous actors see the - /// [`actor`] module. - /// - /// [`actor`]: heph::actor + /// [`heph::actor`] module. pub fn spawn_sync_actor( &mut self, supervisor: S, @@ -579,7 +577,7 @@ impl RuntimeRef { /// /// Similar to thread-local actors this will only run on a single thread. /// See the discussion of thread-local vs. thread-safe actors in the - /// [`heph::actor`] module for additional information. + /// [`spawn`] module for additional information. #[allow(clippy::needless_pass_by_value)] pub fn spawn_local_future(&mut self, future: Fut, options: FutureOptions) where @@ -601,7 +599,7 @@ impl RuntimeRef { /// /// Similar to thread-safe actors this can run on any of the workers /// threads. See the discussion of thread-local vs. thread-safe actors in - /// the [`heph::actor`] module for additional information. + /// the [`spawn`] module for additional information. pub fn spawn_future(&mut self, future: Fut, options: FutureOptions) where Fut: Future + Send + std::marker::Sync + 'static, diff --git a/rt/src/spawn/mod.rs b/rt/src/spawn/mod.rs index b24674552..f262540b3 100644 --- a/rt/src/spawn/mod.rs +++ b/rt/src/spawn/mod.rs @@ -31,12 +31,13 @@ //! //! The upside of running a thread-local actor is that it doesn't have to be //! [`Send`] or [`Sync`], allowing it to use cheaper types that don't require -//! synchronisation. Futhermore these kind of actors are the cheapest to run. +//! synchronisation. Futhermore these kind of actors are the cheapest to run +//! from a runtime perspective. //! //! The downside is that if a single actor blocks it will block *all* actors on //! the thread the actor is running on. Something that some runtimes work around //! with actor/futures/tasks that transparently move between threads and hide -//! blocking/bad actors, Heph does not (for thread-local actor). +//! blocking/bad actors, Heph does not (for thread-local actors). //! //! [`RuntimeRef::try_spawn_local`]: crate::RuntimeRef::try_spawn_local //! 
[`ThreadLocal`]: crate::access::ThreadLocal @@ -69,6 +70,16 @@ pub mod options; pub use options::{ActorOptions, FutureOptions, SyncActorOptions}; /// The `Spawn` trait defines how new actors are added to the runtime. +/// +/// This trait can be implemented using the two flavours of `RT`, either +/// [`ThreadLocal`] or [`ThreadSafe`], because of this it's implemented twice +/// for types that support spawning both thread-local and thread-safe actors. +/// For information on the difference between thread-local and thread-safe +/// actors see the [`spawn`] module. +/// +/// [`spawn`]: crate::spawn +/// [`ThreadLocal`]: crate::ThreadLocal +/// [`ThreadSafe`]: crate::ThreadSafe pub trait Spawn { /// Attempts to spawn an actor. /// diff --git a/rt/src/spawn/options.rs b/rt/src/spawn/options.rs index 857897b79..afcd34de4 100644 --- a/rt/src/spawn/options.rs +++ b/rt/src/spawn/options.rs @@ -178,10 +178,10 @@ impl SyncActorOptions { self.thread_name } - /// Set the name of the actor. This is for example used in the naming of the - /// thread in which the actor runs. + /// Set the name of the actor. This is used in the naming of the thread in + /// which the actor runs. /// - /// Defaults to "Sync actor `$n`", where `$n` is some number. + /// Defaults to "Sync actor *n*", where *n* is some number. pub fn with_name(mut self, thread_name: String) -> Self { self.thread_name = Some(thread_name); self From a1d309e4ecd454f9648e265135edbe2e2671879e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 10:42:37 +0200 Subject: [PATCH 091/177] Some more doc improvements Small stuff. --- rt/src/io/mod.rs | 8 ++++---- rt/src/systemd.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index a0490417c..2bbfce624 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -3,15 +3,15 @@ //! # Working with Buffers //! //! For working with buffers we define two plus two traits. For "regular", i.e. -//! single buffer I/O, we have the following traits: +//! single buffer I/O, we have the following two traits: //! * [`Buf`] is used in writing/sending. //! * [`BufMut`] is used in reading/receiving. //! //! The basic design of both traits is the same and is fairly simple. Usage //! starts with a call to [`parts`]/[`parts_mut`], which returns a pointer to -//! the bytes in the bufer to read from or writing into. For `BufMut` the caller -//! an write into the buffer and update the length using [`update_length`], -//! though normally this is done by an I/O operation. +//! the bytes in the bufer to read from or write into. For `BufMut` the caller +//! writes into the buffer and updates the length using [`update_length`], though +//! normally this is done by an I/O operation. //! //! For vectored I/O we have the same two traits as above, but suffixed with //! `Slice`: diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index f32d5b67d..c62b9c4ea 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -257,8 +257,8 @@ pub enum State { /// Actor that manages the communication to the service manager. /// /// It will set the application state (with the service manager) to ready when -/// it is spawned. Once it receives a signal (in the form of a message) it will -/// update the state accordingly. +/// it is spawned. Once it receives a signal (in the form of a +/// [`ServiceMessage`]) it will update the state accordingly. /// /// Finally it will ping the service manager if a watchdog is active. 
It will
 /// check using `health_check` on the current status of the application.

From d7fbe98ae70a92121b215027027fa178f829bdeb Mon Sep 17 00:00:00 2001
From: Thomas de Zeeuw
Date: Wed, 19 Apr 2023 11:19:12 +0200
Subject: [PATCH 092/177] Remove Option from shared task::Waker implementation

Not required now that Weak::new is const, though unstable.
---
 rt/src/lib.rs          |  1 +
 rt/src/shared/waker.rs | 12 ++++--------
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/rt/src/lib.rs b/rt/src/lib.rs
index 7f39cf3cc..e16753033 100644
--- a/rt/src/lib.rs
+++ b/rt/src/lib.rs
@@ -145,6 +145,7 @@
 #![feature(
     async_iterator,
     const_option,
+    const_weak_new,
     doc_auto_cfg,
     doc_cfg_hide,
     drain_filter,
diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs
index c8bd2ec59..7108a4b00 100644
--- a/rt/src/shared/waker.rs
+++ b/rt/src/shared/waker.rs
@@ -38,10 +38,10 @@ pub(crate) struct WakerId(u8);
 /// that only a single write happens to each element of the array. And because
 /// after the initial write each element is read only there are no further data
 /// races possible.
-static mut RUNTIMES: [Option<Weak<RuntimeInternals>>; MAX_RUNTIMES] = [NO_RUNTIME; MAX_RUNTIMES];
+static mut RUNTIMES: [Weak<RuntimeInternals>; MAX_RUNTIMES] = [NO_RUNTIME; MAX_RUNTIMES];
 // NOTE: this is only here because `NO_WAKER` is not `Copy`, thus
 // `[None; MAX_THREADS]` doesn't work, but explicitly using a `const` does.
-const NO_RUNTIME: Option<Weak<RuntimeInternals>> = None;
+const NO_RUNTIME: Weak<RuntimeInternals> = Weak::new();
 
 /// Initialise a new waker.
 ///
@@ -59,7 +59,7 @@ pub(crate) fn init(internals: Weak<RuntimeInternals>) -> WakerId {
 
     // Safety: this is safe because we are the only thread that has write access
     // to the given index. See documentation of `WAKERS` for more.
-    unsafe { RUNTIMES[id as usize] = Some(internals) }
+    unsafe { RUNTIMES[id as usize] = internals }
     WakerId(id)
 }
 
@@ -78,11 +78,7 @@ fn get(waker_id: WakerId) -> &'static Weak<RuntimeInternals> {
     // Safety: `WakerId` is only created by `init`, which ensures its valid.
     // Furthermore `init` ensures that `RUNTIMES[waker_id]` is initialised and
     // is read-only after that. See `RUNTIMES` documentation for more.
-    unsafe {
-        RUNTIMES[waker_id.0 as usize]
-            .as_ref()
-            .expect("tried to get a waker for a thread that isn't initialised")
-    }
+    unsafe { &RUNTIMES[waker_id.0 as usize] }
 }
 
 /// Waker data passed to the [`task::Waker`] implementation.

From bf1b1c6f29a7a332516b02c582eb776fd4dfe7e7 Mon Sep 17 00:00:00 2001
From: Thomas de Zeeuw
Date: Wed, 19 Apr 2023 12:29:21 +0200
Subject: [PATCH 093/177] Allow more runtimes during testing

Instead of 32 we allow 256. To facilitate this change we change the bits
used from the least significant to the most significant bits; since
ProcessIds are effectively pointers this should be fine and could scale
up to 16 bits.
---
 rt/src/shared/waker.rs | 56 ++++++++++++++++++++++--------------------
 1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs
index 7108a4b00..bea2b5215 100644
--- a/rt/src/shared/waker.rs
+++ b/rt/src/shared/waker.rs
@@ -1,4 +1,5 @@
-//! Module containing the `task::Waker` implementation for thread-safe actors.
+//! Module containing the `task::Waker` implementation for thread-safe actors
+//! and futures.
 
 use std::sync::atomic::{AtomicU8, Ordering};
 use std::sync::Weak;
@@ -9,16 +9,20 @@ use crate::{ptr_as_usize, ProcessId};
 
 /// Maximum number of runtimes supported.
 pub(crate) const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS;
+/// Number of most significant bits used for the [`WakerId`].
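// [Editor's illustration, not part of the patch.] A standalone sketch of the
// bit layout the commit message above describes, assuming a 64-bit target and
// the 8 waker-id bits used under the "test" feature; the constant and field
// names mirror the ones introduced in this diff.
#[test]
fn waker_data_bit_layout_sketch() {
    const MAX_RUNTIMES_BITS: usize = 8; // 256 runtimes.
    const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS;
    const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; // 56 on 64 bit.
    const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT;
    const PID_MASK: usize = !WAKER_ID_MASK;

    // A process id is effectively a pointer, so it stays well below 2^56.
    let pid: usize = 0x7f8a_1234_5678;
    let waker_id: u8 = 3;

    // Pack: waker id in the most significant bits, pid in the remaining bits.
    let data = pid | ((waker_id as usize) << WAKER_ID_SHIFT);

    // Unpack both fields again.
    assert_eq!(((data & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8, waker_id);
    assert_eq!(data & PID_MASK, pid);
}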
#[cfg(not(any(test, feature = "test")))] -pub(crate) const MAX_RUNTIMES_BITS: usize = 0; // 1. +const MAX_RUNTIMES_BITS: usize = 0; // 1. #[cfg(any(test, feature = "test"))] -pub(crate) const MAX_RUNTIMES_BITS: usize = 5; // 32. +const MAX_RUNTIMES_BITS: usize = 8; // 256. +const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; +const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT; +const PID_MASK: usize = !WAKER_ID_MASK; /// An id for a waker. /// /// Returned by [`init`] and used in [`new`] to create a new [`task::Waker`]. -// -// This serves as index into `WAKERS`. +/// +/// This serves as index into `WAKERS`. #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(transparent)] pub(crate) struct WakerId(u8); @@ -57,7 +62,7 @@ pub(crate) fn init(internals: Weak) -> WakerId { "Created too many Heph `Runtime`s, maximum of {MAX_RUNTIMES}", ); - // Safety: this is safe because we are the only thread that has write access + // SAFETY: this is safe because we are the only thread that has write access // to the given index. See documentation of `WAKERS` for more. unsafe { RUNTIMES[id as usize] = internals } WakerId(id) @@ -69,13 +74,13 @@ pub(crate) fn init(internals: Weak) -> WakerId { pub(crate) fn new(waker_id: WakerId, pid: ProcessId) -> task::Waker { let data = WakerData::new(waker_id, pid).into_raw_data(); let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); - // Safety: we follow the contract on `RawWaker`. + // SAFETY: we follow the contract on `RawWaker`. unsafe { task::Waker::from_raw(raw_waker) } } /// Get the internals for `waker_id`. fn get(waker_id: WakerId) -> &'static Weak { - // Safety: `WakerId` is only created by `init`, which ensures its valid. + // SAFETY: `WakerId` is only created by `init`, which ensures its valid. // Furthermore `init` ensures that `RUNTIMES[waker_id]` is initialised and // is read-only after that. See `RUNTIMES` documentation for more. unsafe { &RUNTIMES[waker_id.0 as usize] } @@ -85,37 +90,36 @@ fn get(waker_id: WakerId) -> &'static Weak { /// /// # Layout /// -/// The [`MAX_RUNTIMES_BITS`] least significant bits are the [`WakerId`]. The +/// The [`MAX_RUNTIMES_BITS`] most significant bits are the [`WakerId`]. The /// remaining bits are the [`ProcessId`], from which at least /// `MAX_RUNTIMES_BITS` most significant bits are not used. #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(transparent)] struct WakerData(usize); -const WAKER_ID_MASK: usize = (1 << MAX_RUNTIMES_BITS) - 1; - impl WakerData { /// Create new `WakerData`. fn new(waker_id: WakerId, pid: ProcessId) -> WakerData { - let data = - WakerData((pid.0 << MAX_RUNTIMES_BITS) | ((waker_id.0 as usize) & WAKER_ID_MASK)); - assert!(data.pid() == pid, "`ProcessId` too large for `WakerData`"); + let data = WakerData(pid.0 | ((waker_id.0 as usize) << WAKER_ID_SHIFT)); + debug_assert!( + data.pid() == pid && data.waker_id() == waker_id, + "`ProcessId` too large for `WakerData`" + ); data } /// Get the waker id. const fn waker_id(self) -> WakerId { - // Safety: we know we won't truncate the waker id as it's an u8. + // SAFETY: we know we won't truncate the waker id as it's an u8. #[allow(clippy::cast_possible_truncation)] - WakerId((self.0 & WAKER_ID_MASK) as u8) + WakerId(((self.0 & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8) } /// Get the process id. const fn pid(self) -> ProcessId { - // Safety: we know we won't truncate the pid, we check in + // SAFETY: we know we won't truncate the pid, we checked in // `WakerData::new`. 
- #[allow(clippy::cast_possible_truncation)] - ProcessId(self.0 >> MAX_RUNTIMES_BITS) + ProcessId(self.0 & PID_MASK) } /// Convert raw data from [`task::RawWaker`] into [`WakerData`]. @@ -147,9 +151,9 @@ unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { } unsafe fn wake(data: *const ()) { - // This is safe because we received the data from the `RawWaker`, which - // doesn't modify the data. - let data = WakerData::from_raw_data(data); + // SAFETY: we received the data from the `RawWaker`, which doesn't modify + // `data`. + let data = unsafe { WakerData::from_raw_data(data) }; if let Some(shared_internals) = get(data.waker_id()).upgrade() { shared_internals.mark_ready(data.pid()); shared_internals.wake_workers(1); @@ -158,14 +162,14 @@ unsafe fn wake(data: *const ()) { unsafe fn wake_by_ref(data: *const ()) { assert_copy::(); - // Since `WakerData` is `Copy` `wake` doesn't actually consume any data, so - // we can just call it. - wake(data); + // SAFETY: Since `WakerData` is `Copy` `wake` doesn't actually consume any + // data, so we can just call it. + unsafe { wake(data) }; } unsafe fn drop_wake_data(_: *const ()) { assert_copy::(); - // Since the data is `Copy` we don't have to do anything. + // Since `WakerData` is `Copy` we don't have to do anything. } #[cfg(test)] From cbc3d1d136650c2d02c5f407497ea683a833edd0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 12:48:47 +0200 Subject: [PATCH 094/177] Move shared::waker to waker::share module --- rt/src/coordinator/mod.rs | 3 +- rt/src/lib.rs | 1 + rt/src/shared/mod.rs | 28 ++- rt/src/shared/waker.rs | 354 -------------------------------------- rt/src/test.rs | 4 +- rt/src/waker/mod.rs | 5 + rt/src/waker/shared.rs | 173 +++++++++++++++++++ rt/src/waker/tests.rs | 173 +++++++++++++++++++ 8 files changed, 377 insertions(+), 364 deletions(-) delete mode 100644 rt/src/shared/waker.rs create mode 100644 rt/src/waker/mod.rs create mode 100644 rt/src/waker/shared.rs create mode 100644 rt/src/waker/tests.rs diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 58e17ed9c..a84f8c1fb 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -33,6 +33,7 @@ use mio_signals::{SignalSet, Signals}; use crate::setup::{host_id, host_info, Uuid}; use crate::thread_waker::ThreadWaker; +use crate::waker::shared::init_shared_waker; use crate::{ self as rt, cpu_usage, shared, trace, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, @@ -96,7 +97,7 @@ impl Coordinator { let setup = shared::RuntimeInternals::setup(ring.submission_queue())?; let internals = Arc::new_cyclic(|shared_internals| { - let waker_id = shared::waker::init(shared_internals.clone()); + let waker_id = init_shared_waker(shared_internals.clone()); setup.complete(waker_id, worker_wakers, trace_log) }); diff --git a/rt/src/lib.rs b/rt/src/lib.rs index e16753033..c9b3a9072 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -240,6 +240,7 @@ mod timers; pub mod trace; #[doc(hidden)] pub mod util; +mod waker; mod worker; use process::ProcessId; diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index b33d30478..09fc85ea0 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -17,17 +17,16 @@ use mio::unix::SourceFd; use mio::{Events, Interest, Poll, Registry, Token}; use crate::process::{FutureProcess, Process, ProcessId}; +use crate::scheduler::shared::{ProcessData, Scheduler}; +#[cfg(test)] +use crate::spawn::options::Priority; use crate::spawn::{ActorOptions, FutureOptions}; use 
crate::thread_waker::ThreadWaker; +use crate::timers::shared::Timers; use crate::timers::TimerToken; +use crate::waker::shared::{new_shared_task_waker, WakerId}; use crate::{trace, ThreadSafe}; -pub(crate) mod waker; - -use crate::scheduler::shared::{ProcessData, Scheduler}; -use crate::timers::shared::Timers; -use waker::WakerId; - /// Setup of [`RuntimeInternals`]. /// /// # Notes @@ -137,7 +136,12 @@ impl RuntimeInternals { /// Returns a new [`task::Waker`] for the thread-safe actor with `pid`. pub(crate) fn new_task_waker(&self, pid: ProcessId) -> task::Waker { - waker::new(self.shared_id, pid) + new_shared_task_waker(self.shared_id, pid) + } + + #[cfg(test)] + pub(crate) fn waker_id(&self) -> WakerId { + self.shared_id } /// Register the shared [`Poll`] instance with `registry`. @@ -260,6 +264,16 @@ impl RuntimeInternals { }); } + /// Add a new proces to the scheduler. + #[cfg(test)] + pub(crate) fn add_new_process(&self, priority: Priority, setup: F) -> Result + where + F: FnOnce(ProcessId) -> Result<(P, T), E>, + P: Process + Send + Sync + 'static, + { + self.scheduler.add_new_process(priority, setup) + } + /// See [`Scheduler::mark_ready`]. pub(crate) fn mark_ready(&self, pid: ProcessId) { self.scheduler.mark_ready(pid); diff --git a/rt/src/shared/waker.rs b/rt/src/shared/waker.rs deleted file mode 100644 index bea2b5215..000000000 --- a/rt/src/shared/waker.rs +++ /dev/null @@ -1,354 +0,0 @@ -//! Module containing the `task::Waker` implementation for thread-safe actors -//! and futures. - -use std::sync::atomic::{AtomicU8, Ordering}; -use std::sync::Weak; -use std::task; - -use crate::shared::RuntimeInternals; -use crate::{ptr_as_usize, ProcessId}; - -/// Maximum number of runtimes supported. -pub(crate) const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS; -/// Number of most significate bits used for the [`WakerId`]. -#[cfg(not(any(test, feature = "test")))] -const MAX_RUNTIMES_BITS: usize = 0; // 1. -#[cfg(any(test, feature = "test"))] -const MAX_RUNTIMES_BITS: usize = 8; // 256. -const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; -const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT; -const PID_MASK: usize = !WAKER_ID_MASK; - -/// An id for a waker. -/// -/// Returned by [`init`] and used in [`new`] to create a new [`task::Waker`]. -/// -/// This serves as index into `WAKERS`. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -#[repr(transparent)] -pub(crate) struct WakerId(u8); - -/// Each coordinator has a unique [`WakerId`] which is used as index into this -/// array. -/// -/// # Safety -/// -/// Only [`init`] may write to this array. After the initial write, no more -/// writes are allowed and the array element is read only. To get a -/// [`task::Waker`] use the [`new`] function. -/// -/// Following the rules above means that there are no data races. The array can -/// only be indexed by [`WakerId`], which is only created by [`init`], which -/// ensures the waker is setup before returning the [`WakerId`]. This ensures -/// that only a single write happens to each element of the array. And because -/// after the initial write each element is read only there are no further data -/// races possible. -static mut RUNTIMES: [Weak; MAX_RUNTIMES] = [NO_RUNTIME; MAX_RUNTIMES]; -// NOTE: this is only here because `NO_WAKER` is not `Copy`, thus -// `[None; MAX_THREADS]` doesn't work, but explicitly using a `const` does. -const NO_RUNTIME: Weak = Weak::new(); - -/// Initialise a new waker. 
-/// -/// This returns a [`WakerId`] which can be used to create a new [`task::Waker`] -/// using [`new`]. -pub(crate) fn init(internals: Weak) -> WakerId { - /// Static used to determine unique indices into `RUNTIMES`. - static IDS: AtomicU8 = AtomicU8::new(0); - - let id = IDS.fetch_add(1, Ordering::SeqCst); - assert!( - (id as usize) < MAX_RUNTIMES, - "Created too many Heph `Runtime`s, maximum of {MAX_RUNTIMES}", - ); - - // SAFETY: this is safe because we are the only thread that has write access - // to the given index. See documentation of `WAKERS` for more. - unsafe { RUNTIMES[id as usize] = internals } - WakerId(id) -} - -/// Create a new [`task::Waker`]. -/// -/// [`init`] must be called before calling this function to get a [`WakerId`]. -pub(crate) fn new(waker_id: WakerId, pid: ProcessId) -> task::Waker { - let data = WakerData::new(waker_id, pid).into_raw_data(); - let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); - // SAFETY: we follow the contract on `RawWaker`. - unsafe { task::Waker::from_raw(raw_waker) } -} - -/// Get the internals for `waker_id`. -fn get(waker_id: WakerId) -> &'static Weak { - // SAFETY: `WakerId` is only created by `init`, which ensures its valid. - // Furthermore `init` ensures that `RUNTIMES[waker_id]` is initialised and - // is read-only after that. See `RUNTIMES` documentation for more. - unsafe { &RUNTIMES[waker_id.0 as usize] } -} - -/// Waker data passed to the [`task::Waker`] implementation. -/// -/// # Layout -/// -/// The [`MAX_RUNTIMES_BITS`] most significant bits are the [`WakerId`]. The -/// remaining bits are the [`ProcessId`], from which at least -/// `MAX_RUNTIMES_BITS` most significant bits are not used. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -#[repr(transparent)] -struct WakerData(usize); - -impl WakerData { - /// Create new `WakerData`. - fn new(waker_id: WakerId, pid: ProcessId) -> WakerData { - let data = WakerData(pid.0 | ((waker_id.0 as usize) << WAKER_ID_SHIFT)); - debug_assert!( - data.pid() == pid && data.waker_id() == waker_id, - "`ProcessId` too large for `WakerData`" - ); - data - } - - /// Get the waker id. - const fn waker_id(self) -> WakerId { - // SAFETY: we know we won't truncate the waker id as it's an u8. - #[allow(clippy::cast_possible_truncation)] - WakerId(((self.0 & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8) - } - - /// Get the process id. - const fn pid(self) -> ProcessId { - // SAFETY: we know we won't truncate the pid, we checked in - // `WakerData::new`. - ProcessId(self.0 & PID_MASK) - } - - /// Convert raw data from [`task::RawWaker`] into [`WakerData`]. - /// - /// # Safety - /// - /// This doesn't check if the provided `data` is valid, the caller is - /// responsible for this. - const unsafe fn from_raw_data(data: *const ()) -> WakerData { - WakerData(ptr_as_usize(data)) - } - - /// Convert [`WakerData`] into raw data for [`task::RawWaker`]. - const fn into_raw_data(self) -> *const () { - self.0 as *const () - } -} - -/// Virtual table used by the `Waker` implementation. -static WAKER_VTABLE: task::RawWakerVTable = - task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); - -fn assert_copy() {} - -unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { - assert_copy::(); - // Since the data is `Copy`, we just copy it. - task::RawWaker::new(data, &WAKER_VTABLE) -} - -unsafe fn wake(data: *const ()) { - // SAFETY: we received the data from the `RawWaker`, which doesn't modify - // `data`. 
- let data = unsafe { WakerData::from_raw_data(data) }; - if let Some(shared_internals) = get(data.waker_id()).upgrade() { - shared_internals.mark_ready(data.pid()); - shared_internals.wake_workers(1); - } -} - -unsafe fn wake_by_ref(data: *const ()) { - assert_copy::(); - // SAFETY: Since `WakerData` is `Copy` `wake` doesn't actually consume any - // data, so we can just call it. - unsafe { wake(data) }; -} - -unsafe fn drop_wake_data(_: *const ()) { - assert_copy::(); - // Since `WakerData` is `Copy` we don't have to do anything. -} - -#[cfg(test)] -mod tests { - use std::future::Future; - use std::mem::size_of; - use std::pin::Pin; - use std::sync::{Arc, Weak}; - use std::task::{self, Poll}; - use std::thread::{self, sleep}; - use std::time::Duration; - - use crate::process::{FutureProcess, Process, ProcessId}; - use crate::shared::waker::{self, WakerData}; - use crate::shared::{RuntimeInternals, Scheduler}; - use crate::spawn::options::Priority; - use crate::test; - - const PID1: ProcessId = ProcessId(1); - const PID2: ProcessId = ProcessId(2); - - #[test] - fn assert_waker_data_size() { - assert_eq!(size_of::<*const ()>(), size_of::()); - } - - struct TestProcess; - - impl Future for TestProcess { - type Output = (); - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { - unimplemented!(); - } - } - - impl Process for TestProcess { - fn name(&self) -> &'static str { - "TestProcess" - } - } - - #[test] - fn waker() { - let shared_internals = new_internals(); - - let pid = add_process(&shared_internals.scheduler); - assert!(shared_internals.scheduler.has_process()); - assert!(shared_internals.scheduler.has_ready_process()); - let process = shared_internals.scheduler.remove().unwrap(); - shared_internals.scheduler.add_back_process(process); - assert!(shared_internals.scheduler.has_process()); - assert!(!shared_internals.scheduler.has_ready_process()); - - // Create a new waker. - let waker = waker::new(shared_internals.shared_id, pid); - - // Waking should move the process to the ready queue. - waker.wake_by_ref(); - assert!(shared_internals.scheduler.has_process()); - assert!(shared_internals.scheduler.has_ready_process()); - let process = shared_internals.scheduler.remove().unwrap(); - assert_eq!(process.as_ref().id(), pid); - - // Waking a process that isn't in the scheduler should be fine. - waker.wake(); - assert!(!shared_internals.scheduler.has_process()); - assert!(!shared_internals.scheduler.has_ready_process()); - shared_internals.complete(process); - assert!(!shared_internals.scheduler.has_process()); - assert!(!shared_internals.scheduler.has_ready_process()); - } - - #[test] - fn cloned_waker() { - let shared_internals = new_internals(); - - // Add a test process. - let pid = add_process(&shared_internals.scheduler); - assert!(shared_internals.scheduler.has_process()); - assert!(shared_internals.scheduler.has_ready_process()); - let process = shared_internals.scheduler.remove().unwrap(); - shared_internals.scheduler.add_back_process(process); - assert!(shared_internals.scheduler.has_process()); - assert!(!shared_internals.scheduler.has_ready_process()); - - // Create a cloned waker. - let waker1 = waker::new(shared_internals.shared_id, pid); - let waker2 = waker1.clone(); - drop(waker1); - - // Waking should move the process to the ready queue. 
- waker2.wake(); - assert!(shared_internals.scheduler.has_process()); - assert!(shared_internals.scheduler.has_ready_process()); - let process = shared_internals.scheduler.remove().unwrap(); - assert_eq!(process.as_ref().id(), pid); - } - - #[test] - fn wake_from_different_thread() { - let shared_internals = new_internals(); - - let pid = add_process(&shared_internals.scheduler); - assert!(shared_internals.scheduler.has_process()); - assert!(shared_internals.scheduler.has_ready_process()); - let process = shared_internals.scheduler.remove().unwrap(); - shared_internals.scheduler.add_back_process(process); - assert!(shared_internals.scheduler.has_process()); - assert!(!shared_internals.scheduler.has_ready_process()); - - let shared_internals2 = shared_internals.clone(); - let handle = thread::spawn(move || { - let waker = waker::new(shared_internals2.shared_id, pid); - waker.wake_by_ref(); - waker.wake(); - }); - - loop { - if let Some(process) = shared_internals.scheduler.remove() { - assert_eq!(process.as_ref().id(), pid); - shared_internals.complete(process); - break; - } - - sleep(Duration::from_millis(1)); - } - - handle.join().unwrap(); - } - - #[test] - fn no_internals() { - let waker_id = waker::init(Weak::new()); - let waker = waker::new(waker_id, PID1); - - // This shouldn't be a problem. - waker.wake_by_ref(); - waker.wake(); - } - - #[test] - fn will_wake() { - let waker_id = waker::init(Weak::new()); - let waker1a = waker::new(waker_id, PID1); - let waker1b = waker::new(waker_id, PID1); - let waker2a = waker::new(waker_id, PID2); - let waker2b = waker2a.clone(); - - assert!(waker1a.will_wake(&waker1a)); - assert!(waker1a.will_wake(&waker1b)); - assert!(!waker1a.will_wake(&waker2a)); - assert!(!waker1a.will_wake(&waker2b)); - - assert!(waker1b.will_wake(&waker1a)); - assert!(waker1b.will_wake(&waker1b)); - assert!(!waker1b.will_wake(&waker2a)); - assert!(!waker1b.will_wake(&waker2b)); - - assert!(!waker2a.will_wake(&waker1a)); - assert!(!waker2a.will_wake(&waker1b)); - assert!(waker2a.will_wake(&waker2a)); - assert!(waker2a.will_wake(&waker2b)); - } - - fn new_internals() -> Arc { - let setup = RuntimeInternals::test_setup().unwrap(); - Arc::new_cyclic(|shared_internals| { - let waker_id = waker::init(shared_internals.clone()); - let worker_wakers = vec![test::noop_waker()].into_boxed_slice(); - setup.complete(waker_id, worker_wakers, None) - }) - } - - fn add_process(scheduler: &Scheduler) -> ProcessId { - scheduler - .add_new_process(Priority::NORMAL, |pid| { - Ok::<_, !>((FutureProcess(TestProcess), pid)) - }) - .unwrap() - } -} diff --git a/rt/src/test.rs b/rt/src/test.rs index f44a1fd3c..ed21de617 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -62,10 +62,10 @@ use heph::supervisor::{Supervisor, SyncSupervisor}; use heph_inbox::oneshot::new_oneshot; use heph_inbox::Manager; -use crate::shared::waker; use crate::spawn::{ActorOptions, FutureOptions, SyncActorOptions}; use crate::sync_worker::SyncWorker; use crate::thread_waker::ThreadWaker; +use crate::waker::shared::init_shared_waker; use crate::worker::{Control, Worker}; use crate::{ self as rt, shared, ProcessId, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END, @@ -95,7 +95,7 @@ pub(crate) fn shared_internals() -> Arc { let setup = shared::RuntimeInternals::test_setup() .expect("failed to setup runtime internals for test module"); Arc::new_cyclic(|shared_internals| { - let waker_id = waker::init(shared_internals.clone()); + let waker_id = init_shared_waker(shared_internals.clone()); let worker_wakers = 
vec![noop_waker()].into_boxed_slice(); setup.complete(waker_id, worker_wakers, None) }) diff --git a/rt/src/waker/mod.rs b/rt/src/waker/mod.rs new file mode 100644 index 000000000..5ff67d643 --- /dev/null +++ b/rt/src/waker/mod.rs @@ -0,0 +1,5 @@ +//! `task::Waker` implementation. + +pub(crate) mod shared; +#[cfg(test)] +mod tests; diff --git a/rt/src/waker/shared.rs b/rt/src/waker/shared.rs new file mode 100644 index 000000000..6e708f492 --- /dev/null +++ b/rt/src/waker/shared.rs @@ -0,0 +1,173 @@ +//! Module containing the `task::Waker` implementation for thread-safe actors +//! and futures. + +use std::sync::atomic::{AtomicU8, Ordering}; +use std::sync::Weak; +use std::task; + +use crate::shared::RuntimeInternals; +use crate::{ptr_as_usize, ProcessId}; + +/// Maximum number of runtimes supported. +const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS; +/// Number of most significate bits used for the [`WakerId`]. +#[cfg(not(any(test, feature = "test")))] +const MAX_RUNTIMES_BITS: usize = 0; // 1. +#[cfg(any(test, feature = "test"))] +const MAX_RUNTIMES_BITS: usize = 8; // 256. +const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; +const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT; +const PID_MASK: usize = !WAKER_ID_MASK; + +/// An id for a waker. +/// +/// Returned by [`init`] and used in [`new`] to create a new [`task::Waker`]. +/// +/// This serves as index into `WAKERS`. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[repr(transparent)] +pub(crate) struct WakerId(u8); + +/// Each coordinator has a unique [`WakerId`] which is used as index into this +/// array. +/// +/// # Safety +/// +/// Only [`init`] may write to this array. After the initial write, no more +/// writes are allowed and the array element is read only. To get a +/// [`task::Waker`] use the [`new`] function. +/// +/// Following the rules above means that there are no data races. The array can +/// only be indexed by [`WakerId`], which is only created by [`init`], which +/// ensures the waker is setup before returning the [`WakerId`]. This ensures +/// that only a single write happens to each element of the array. And because +/// after the initial write each element is read only there are no further data +/// races possible. +static mut RUNTIMES: [Weak; MAX_RUNTIMES] = [NO_RUNTIME; MAX_RUNTIMES]; +// NOTE: this is only here because `NO_WAKER` is not `Copy`, thus +// `[None; MAX_THREADS]` doesn't work, but explicitly using a `const` does. +const NO_RUNTIME: Weak = Weak::new(); + +/// Initialise a new waker. +/// +/// This returns a [`WakerId`] which can be used to create a new [`task::Waker`] +/// using [`new`]. +pub(crate) fn init_shared_waker(internals: Weak) -> WakerId { + /// Static used to determine unique indices into `RUNTIMES`. + static IDS: AtomicU8 = AtomicU8::new(0); + + let id = IDS.fetch_add(1, Ordering::SeqCst); + assert!( + (id as usize) < MAX_RUNTIMES, + "Created too many Heph `Runtime`s, maximum of {MAX_RUNTIMES}", + ); + + // SAFETY: this is safe because we are the only thread that has write access + // to the given index. See documentation of `WAKERS` for more. + unsafe { RUNTIMES[id as usize] = internals } + WakerId(id) +} + +/// Create a new [`task::Waker`]. +/// +/// [`init`] must be called before calling this function to get a [`WakerId`]. 
+pub(crate) fn new_shared_task_waker(waker_id: WakerId, pid: ProcessId) -> task::Waker { + let data = WakerData::new(waker_id, pid).into_raw_data(); + let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); + // SAFETY: we follow the contract on `RawWaker`. + unsafe { task::Waker::from_raw(raw_waker) } +} + +/// Get the internals for `waker_id`. +fn get(waker_id: WakerId) -> &'static Weak { + // SAFETY: `WakerId` is only created by `init`, which ensures its valid. + // Furthermore `init` ensures that `RUNTIMES[waker_id]` is initialised and + // is read-only after that. See `RUNTIMES` documentation for more. + unsafe { &RUNTIMES[waker_id.0 as usize] } +} + +/// Waker data passed to the [`task::Waker`] implementation. +/// +/// # Layout +/// +/// The [`MAX_RUNTIMES_BITS`] most significant bits are the [`WakerId`]. The +/// remaining bits are the [`ProcessId`], from which at least +/// `MAX_RUNTIMES_BITS` most significant bits are not used. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[repr(transparent)] +struct WakerData(usize); + +impl WakerData { + /// Create new `WakerData`. + fn new(waker_id: WakerId, pid: ProcessId) -> WakerData { + let data = WakerData(pid.0 | ((waker_id.0 as usize) << WAKER_ID_SHIFT)); + debug_assert!( + data.pid() == pid && data.waker_id() == waker_id, + "`ProcessId` too large for `WakerData`" + ); + data + } + + /// Get the waker id. + const fn waker_id(self) -> WakerId { + // SAFETY: we know we won't truncate the waker id as it's an u8. + #[allow(clippy::cast_possible_truncation)] + WakerId(((self.0 & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8) + } + + /// Get the process id. + const fn pid(self) -> ProcessId { + // SAFETY: we know we won't truncate the pid, we checked in + // `WakerData::new`. + ProcessId(self.0 & PID_MASK) + } + + /// Convert raw data from [`task::RawWaker`] into [`WakerData`]. + /// + /// # Safety + /// + /// This doesn't check if the provided `data` is valid, the caller is + /// responsible for this. + const unsafe fn from_raw_data(data: *const ()) -> WakerData { + WakerData(ptr_as_usize(data)) + } + + /// Convert [`WakerData`] into raw data for [`task::RawWaker`]. + const fn into_raw_data(self) -> *const () { + self.0 as *const () + } +} + +/// Virtual table used by the `Waker` implementation. +static WAKER_VTABLE: task::RawWakerVTable = + task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); + +fn assert_copy() {} + +unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { + assert_copy::(); + // Since the data is `Copy`, we just copy it. + task::RawWaker::new(data, &WAKER_VTABLE) +} + +unsafe fn wake(data: *const ()) { + // SAFETY: we received the data from the `RawWaker`, which doesn't modify + // `data`. + let data = unsafe { WakerData::from_raw_data(data) }; + if let Some(shared_internals) = get(data.waker_id()).upgrade() { + shared_internals.mark_ready(data.pid()); + shared_internals.wake_workers(1); + } +} + +unsafe fn wake_by_ref(data: *const ()) { + assert_copy::(); + // SAFETY: Since `WakerData` is `Copy` `wake` doesn't actually consume any + // data, so we can just call it. + unsafe { wake(data) }; +} + +unsafe fn drop_wake_data(_: *const ()) { + assert_copy::(); + // Since `WakerData` is `Copy` we don't have to do anything. 
+} diff --git a/rt/src/waker/tests.rs b/rt/src/waker/tests.rs new file mode 100644 index 000000000..2129f56e0 --- /dev/null +++ b/rt/src/waker/tests.rs @@ -0,0 +1,173 @@ +mod shared { + use std::future::Future; + use std::pin::Pin; + use std::sync::{Arc, Weak}; + use std::task::{self, Poll}; + use std::thread::{self, sleep}; + use std::time::Duration; + + use crate::process::{FutureProcess, Process, ProcessId}; + use crate::shared::RuntimeInternals; + use crate::spawn::options::Priority; + use crate::test; + use crate::waker::shared::{init_shared_waker, new_shared_task_waker}; + + const PID1: ProcessId = ProcessId(1); + const PID2: ProcessId = ProcessId(2); + + struct TestProcess; + + impl Future for TestProcess { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<()> { + unimplemented!(); + } + } + + impl Process for TestProcess { + fn name(&self) -> &'static str { + "TestProcess" + } + } + + #[test] + fn waker() { + let shared_internals = new_internals(); + + let pid = add_process(&shared_internals); + assert!(shared_internals.has_process()); + assert!(shared_internals.has_ready_process()); + let process = shared_internals.remove_process().unwrap(); + shared_internals.add_back_process(process); + assert!(shared_internals.has_process()); + assert!(!shared_internals.has_ready_process()); + + // Create a new waker. + let waker = new_shared_task_waker(shared_internals.waker_id(), pid); + + // Waking should move the process to the ready queue. + waker.wake_by_ref(); + assert!(shared_internals.has_process()); + assert!(shared_internals.has_ready_process()); + let process = shared_internals.remove_process().unwrap(); + assert_eq!(process.as_ref().id(), pid); + + // Waking a process that isn't in the scheduler should be fine. + waker.wake(); + assert!(!shared_internals.has_process()); + assert!(!shared_internals.has_ready_process()); + shared_internals.complete(process); + assert!(!shared_internals.has_process()); + assert!(!shared_internals.has_ready_process()); + } + + #[test] + fn cloned_waker() { + let shared_internals = new_internals(); + + // Add a test process. + let pid = add_process(&shared_internals); + assert!(shared_internals.has_process()); + assert!(shared_internals.has_ready_process()); + let process = shared_internals.remove_process().unwrap(); + shared_internals.add_back_process(process); + assert!(shared_internals.has_process()); + assert!(!shared_internals.has_ready_process()); + + // Create a cloned waker. + let waker1 = new_shared_task_waker(shared_internals.waker_id(), pid); + let waker2 = waker1.clone(); + drop(waker1); + + // Waking should move the process to the ready queue. 
+ waker2.wake(); + assert!(shared_internals.has_process()); + assert!(shared_internals.has_ready_process()); + let process = shared_internals.remove_process().unwrap(); + assert_eq!(process.as_ref().id(), pid); + } + + #[test] + fn wake_from_different_thread() { + let shared_internals = new_internals(); + + let pid = add_process(&shared_internals); + assert!(shared_internals.has_process()); + assert!(shared_internals.has_ready_process()); + let process = shared_internals.remove_process().unwrap(); + shared_internals.add_back_process(process); + assert!(shared_internals.has_process()); + assert!(!shared_internals.has_ready_process()); + + let shared_internals2 = shared_internals.clone(); + let handle = thread::spawn(move || { + let waker = new_shared_task_waker(shared_internals2.waker_id(), pid); + waker.wake_by_ref(); + waker.wake(); + }); + + loop { + if let Some(process) = shared_internals.remove_process() { + assert_eq!(process.as_ref().id(), pid); + shared_internals.complete(process); + break; + } + + sleep(Duration::from_millis(1)); + } + + handle.join().unwrap(); + } + + #[test] + fn no_internals() { + let waker_id = init_shared_waker(Weak::new()); + let waker = new_shared_task_waker(waker_id, PID1); + + // This shouldn't be a problem. + waker.wake_by_ref(); + waker.wake(); + } + + #[test] + fn will_wake() { + let waker_id = init_shared_waker(Weak::new()); + let waker1a = new_shared_task_waker(waker_id, PID1); + let waker1b = new_shared_task_waker(waker_id, PID1); + let waker2a = new_shared_task_waker(waker_id, PID2); + let waker2b = waker2a.clone(); + + assert!(waker1a.will_wake(&waker1a)); + assert!(waker1a.will_wake(&waker1b)); + assert!(!waker1a.will_wake(&waker2a)); + assert!(!waker1a.will_wake(&waker2b)); + + assert!(waker1b.will_wake(&waker1a)); + assert!(waker1b.will_wake(&waker1b)); + assert!(!waker1b.will_wake(&waker2a)); + assert!(!waker1b.will_wake(&waker2b)); + + assert!(!waker2a.will_wake(&waker1a)); + assert!(!waker2a.will_wake(&waker1b)); + assert!(waker2a.will_wake(&waker2a)); + assert!(waker2a.will_wake(&waker2b)); + } + + fn new_internals() -> Arc { + let setup = RuntimeInternals::test_setup().unwrap(); + Arc::new_cyclic(|shared_internals| { + let waker_id = init_shared_waker(shared_internals.clone()); + let worker_wakers = vec![test::noop_waker()].into_boxed_slice(); + setup.complete(waker_id, worker_wakers, None) + }) + } + + fn add_process(internals: &RuntimeInternals) -> ProcessId { + internals + .add_new_process(Priority::NORMAL, |pid| { + Ok::<_, !>((FutureProcess(TestProcess), pid)) + }) + .unwrap() + } +} From 7137be5052af896d392d29bda0daf53a7717f4d7 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 13:06:30 +0200 Subject: [PATCH 095/177] Change wakers implementation Instead of exposing init_shared_waker and new_shared_task_waker using the WakerId type. Expose only the Wakers type which has the new and new_task_waker methods. The new Wakers type is easier to use and more importantly harder to use incorrectly. 
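[Editor's illustration, not part of the original message.] Going by the diffs
below, the change to the call sites is roughly:

    // Before: free functions keyed by a WakerId.
    let waker_id = init_shared_waker(shared_internals.clone());
    let waker = new_shared_task_waker(waker_id, pid);

    // After: a single Wakers value owns the id and hands out task wakers.
    let wakers = Wakers::new(shared_internals.clone());
    let waker = wakers.new_task_waker(pid);

Here `shared_internals` is the `Weak<RuntimeInternals>` handle and `pid` the
`ProcessId` of the process to wake, as in the coordinator and test setup code.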
--- rt/src/coordinator/mod.rs | 6 +- rt/src/shared/mod.rs | 17 ++---- rt/src/test.rs | 6 +- rt/src/waker/mod.rs | 2 +- rt/src/waker/shared.rs | 112 +++++++++++++++++++------------------- rt/src/waker/tests.rs | 24 ++++---- 6 files changed, 81 insertions(+), 86 deletions(-) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index a84f8c1fb..714c9d2ab 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -33,7 +33,7 @@ use mio_signals::{SignalSet, Signals}; use crate::setup::{host_id, host_info, Uuid}; use crate::thread_waker::ThreadWaker; -use crate::waker::shared::init_shared_waker; +use crate::waker::shared::Wakers; use crate::{ self as rt, cpu_usage, shared, trace, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, @@ -97,8 +97,8 @@ impl Coordinator { let setup = shared::RuntimeInternals::setup(ring.submission_queue())?; let internals = Arc::new_cyclic(|shared_internals| { - let waker_id = init_shared_waker(shared_internals.clone()); - setup.complete(waker_id, worker_wakers, trace_log) + let wakers = Wakers::new(shared_internals.clone()); + setup.complete(wakers, worker_wakers, trace_log) }); let (host_os, host_name) = host_info()?; diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 09fc85ea0..b46708ff8 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -24,7 +24,7 @@ use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; use crate::timers::shared::Timers; use crate::timers::TimerToken; -use crate::waker::shared::{new_shared_task_waker, WakerId}; +use crate::waker::shared::Wakers; use crate::{trace, ThreadSafe}; /// Setup of [`RuntimeInternals`]. @@ -46,7 +46,7 @@ impl RuntimeSetup { /// Complete the runtime setup. pub(crate) fn complete( self, - shared_id: WakerId, + wakers: Wakers, worker_wakers: Box<[&'static ThreadWaker]>, trace_log: Option>, ) -> RuntimeInternals { @@ -54,12 +54,12 @@ impl RuntimeSetup { debug_assert!(worker_wakers.len() >= 1); let sq = self.ring.submission_queue().clone(); RuntimeInternals { - shared_id, worker_wakers, wake_worker_idx: AtomicUsize::new(0), poll: Mutex::new(self.poll), ring: Mutex::new(self.ring), sq, + wakers, scheduler: Scheduler::new(), timers: Timers::new(), trace_log, @@ -70,8 +70,6 @@ impl RuntimeSetup { /// Shared internals of the runtime. #[derive(Debug)] pub(crate) struct RuntimeInternals { - /// Waker id used to create [`task::Waker`]s for thread-safe actors. - shared_id: WakerId, /// Thread wakers for all the workers. worker_wakers: Box<[&'static ThreadWaker]>, /// Index into `worker_wakers` to wake next, see @@ -84,6 +82,8 @@ pub(crate) struct RuntimeInternals { ring: Mutex, /// SubmissionQueue for the `ring`. sq: a10::SubmissionQueue, + /// Wakers used to create [`task::Waker`]s for thread-safe actors. + wakers: Wakers, /// Scheduler for thread-safe actors. scheduler: Scheduler, /// Timers for thread-safe actors. @@ -136,12 +136,7 @@ impl RuntimeInternals { /// Returns a new [`task::Waker`] for the thread-safe actor with `pid`. pub(crate) fn new_task_waker(&self, pid: ProcessId) -> task::Waker { - new_shared_task_waker(self.shared_id, pid) - } - - #[cfg(test)] - pub(crate) fn waker_id(&self) -> WakerId { - self.shared_id + self.wakers.new_task_waker(pid) } /// Register the shared [`Poll`] instance with `registry`. 
diff --git a/rt/src/test.rs b/rt/src/test.rs index ed21de617..4b5141a27 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -65,7 +65,7 @@ use heph_inbox::Manager; use crate::spawn::{ActorOptions, FutureOptions, SyncActorOptions}; use crate::sync_worker::SyncWorker; use crate::thread_waker::ThreadWaker; -use crate::waker::shared::init_shared_waker; +use crate::waker::shared::Wakers; use crate::worker::{Control, Worker}; use crate::{ self as rt, shared, ProcessId, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END, @@ -95,9 +95,9 @@ pub(crate) fn shared_internals() -> Arc { let setup = shared::RuntimeInternals::test_setup() .expect("failed to setup runtime internals for test module"); Arc::new_cyclic(|shared_internals| { - let waker_id = init_shared_waker(shared_internals.clone()); + let wakers = Wakers::new(shared_internals.clone()); let worker_wakers = vec![noop_waker()].into_boxed_slice(); - setup.complete(waker_id, worker_wakers, None) + setup.complete(wakers, worker_wakers, None) }) }) .clone() diff --git a/rt/src/waker/mod.rs b/rt/src/waker/mod.rs index 5ff67d643..9ded87494 100644 --- a/rt/src/waker/mod.rs +++ b/rt/src/waker/mod.rs @@ -1,4 +1,4 @@ -//! `task::Waker` implementation. +//! Wakers implementation. pub(crate) mod shared; #[cfg(test)] diff --git a/rt/src/waker/shared.rs b/rt/src/waker/shared.rs index 6e708f492..2d5a7130e 100644 --- a/rt/src/waker/shared.rs +++ b/rt/src/waker/shared.rs @@ -10,79 +10,79 @@ use crate::{ptr_as_usize, ProcessId}; /// Maximum number of runtimes supported. const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS; -/// Number of most significate bits used for the [`WakerId`]. +/// Number of most significate bits used for the [`WakersId`]. #[cfg(not(any(test, feature = "test")))] -const MAX_RUNTIMES_BITS: usize = 0; // 1. +const MAX_RUNTIMES_BITS: usize = 1; // 3. #[cfg(any(test, feature = "test"))] const MAX_RUNTIMES_BITS: usize = 8; // 256. const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT; const PID_MASK: usize = !WAKER_ID_MASK; -/// An id for a waker. -/// -/// Returned by [`init`] and used in [`new`] to create a new [`task::Waker`]. +/// Type to create [`task::Waker`] for thread-safe actors and futures. +#[derive(Debug)] +pub(crate) struct Wakers { + id: WakersId, +} + +impl Wakers { + /// Create a new `Wakers` waking processes in `internals`'s scheduler. + pub(crate) fn new(internals: Weak) -> Wakers { + /// Static used to determine unique indices into `RUNTIMES`. + static IDS: AtomicU8 = AtomicU8::new(0); + + let id = IDS.fetch_add(1, Ordering::SeqCst); + assert!( + (id as usize) < MAX_RUNTIMES, + "Created too many Heph `Runtime`s, maximum of {MAX_RUNTIMES}", + ); + + // SAFETY: this is safe because we are the only thread that has write access + // to the given index. See documentation of `WAKERS` for more. + unsafe { RUNTIMES[id as usize] = internals } + Wakers { id: WakersId(id) } + } + + /// Create a new [`task::Waker`] for the process with `pid`. + pub(crate) fn new_task_waker(&self, pid: ProcessId) -> task::Waker { + let data = WakerData::new(self.id, pid).into_raw_data(); + let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); + // SAFETY: we follow the contract on `RawWaker`. + unsafe { task::Waker::from_raw(raw_waker) } + } +} + +/// An id for a [`Wakers`]. /// /// This serves as index into `WAKERS`. 
#[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(transparent)] -pub(crate) struct WakerId(u8); +struct WakersId(u8); -/// Each coordinator has a unique [`WakerId`] which is used as index into this -/// array. +/// Each coordinator, i.e. runtime, has a unique [`Wakers`] with unique +/// [`WakersId`] which is used as index into this array. /// /// # Safety /// -/// Only [`init`] may write to this array. After the initial write, no more -/// writes are allowed and the array element is read only. To get a -/// [`task::Waker`] use the [`new`] function. +/// Only [`Wakers::new`] may write to this array. After the initial write, no +/// more writes are allowed and the array element is read only. /// /// Following the rules above means that there are no data races. The array can -/// only be indexed by [`WakerId`], which is only created by [`init`], which -/// ensures the waker is setup before returning the [`WakerId`]. This ensures -/// that only a single write happens to each element of the array. And because -/// after the initial write each element is read only there are no further data -/// races possible. +/// only be indexed by [`WakersId`], which is only created by [`Waker::new`], +/// which ensures the waker is setup before returning the [`WakersId`]. This +/// ensures that only a single write happens to each element of the array. And +/// because after the initial write each element is read only there are no +/// further data races possible. static mut RUNTIMES: [Weak; MAX_RUNTIMES] = [NO_RUNTIME; MAX_RUNTIMES]; // NOTE: this is only here because `NO_WAKER` is not `Copy`, thus // `[None; MAX_THREADS]` doesn't work, but explicitly using a `const` does. const NO_RUNTIME: Weak = Weak::new(); -/// Initialise a new waker. -/// -/// This returns a [`WakerId`] which can be used to create a new [`task::Waker`] -/// using [`new`]. -pub(crate) fn init_shared_waker(internals: Weak) -> WakerId { - /// Static used to determine unique indices into `RUNTIMES`. - static IDS: AtomicU8 = AtomicU8::new(0); - - let id = IDS.fetch_add(1, Ordering::SeqCst); - assert!( - (id as usize) < MAX_RUNTIMES, - "Created too many Heph `Runtime`s, maximum of {MAX_RUNTIMES}", - ); - - // SAFETY: this is safe because we are the only thread that has write access - // to the given index. See documentation of `WAKERS` for more. - unsafe { RUNTIMES[id as usize] = internals } - WakerId(id) -} - -/// Create a new [`task::Waker`]. -/// -/// [`init`] must be called before calling this function to get a [`WakerId`]. -pub(crate) fn new_shared_task_waker(waker_id: WakerId, pid: ProcessId) -> task::Waker { - let data = WakerData::new(waker_id, pid).into_raw_data(); - let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); - // SAFETY: we follow the contract on `RawWaker`. - unsafe { task::Waker::from_raw(raw_waker) } -} - /// Get the internals for `waker_id`. -fn get(waker_id: WakerId) -> &'static Weak { - // SAFETY: `WakerId` is only created by `init`, which ensures its valid. - // Furthermore `init` ensures that `RUNTIMES[waker_id]` is initialised and - // is read-only after that. See `RUNTIMES` documentation for more. +fn get(waker_id: WakersId) -> &'static Weak { + // SAFETY: `WakersId` is only created by `Wakers::new`, which ensures its + // valid. Furthermore `Wakers::new` ensures that `RUNTIMES[waker_id]` is + // initialised and is read-only after that. See `RUNTIMES` docs for more. 
unsafe { &RUNTIMES[waker_id.0 as usize] } } @@ -90,7 +90,7 @@ fn get(waker_id: WakerId) -> &'static Weak { /// /// # Layout /// -/// The [`MAX_RUNTIMES_BITS`] most significant bits are the [`WakerId`]. The +/// The [`MAX_RUNTIMES_BITS`] most significant bits are the [`WakersId`]. The /// remaining bits are the [`ProcessId`], from which at least /// `MAX_RUNTIMES_BITS` most significant bits are not used. #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -99,7 +99,7 @@ struct WakerData(usize); impl WakerData { /// Create new `WakerData`. - fn new(waker_id: WakerId, pid: ProcessId) -> WakerData { + fn new(waker_id: WakersId, pid: ProcessId) -> WakerData { let data = WakerData(pid.0 | ((waker_id.0 as usize) << WAKER_ID_SHIFT)); debug_assert!( data.pid() == pid && data.waker_id() == waker_id, @@ -109,10 +109,10 @@ impl WakerData { } /// Get the waker id. - const fn waker_id(self) -> WakerId { + const fn waker_id(self) -> WakersId { // SAFETY: we know we won't truncate the waker id as it's an u8. #[allow(clippy::cast_possible_truncation)] - WakerId(((self.0 & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8) + WakersId(((self.0 & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8) } /// Get the process id. @@ -126,8 +126,8 @@ impl WakerData { /// /// # Safety /// - /// This doesn't check if the provided `data` is valid, the caller is - /// responsible for this. + /// The caller must ensure the `data` is created using + /// [`WakerData::into_raw_data`]. const unsafe fn from_raw_data(data: *const ()) -> WakerData { WakerData(ptr_as_usize(data)) } diff --git a/rt/src/waker/tests.rs b/rt/src/waker/tests.rs index 2129f56e0..6be81487c 100644 --- a/rt/src/waker/tests.rs +++ b/rt/src/waker/tests.rs @@ -10,7 +10,7 @@ mod shared { use crate::shared::RuntimeInternals; use crate::spawn::options::Priority; use crate::test; - use crate::waker::shared::{init_shared_waker, new_shared_task_waker}; + use crate::waker::shared::Wakers; const PID1: ProcessId = ProcessId(1); const PID2: ProcessId = ProcessId(2); @@ -44,7 +44,7 @@ mod shared { assert!(!shared_internals.has_ready_process()); // Create a new waker. - let waker = new_shared_task_waker(shared_internals.waker_id(), pid); + let waker = shared_internals.new_task_waker(pid); // Waking should move the process to the ready queue. waker.wake_by_ref(); @@ -76,7 +76,7 @@ mod shared { assert!(!shared_internals.has_ready_process()); // Create a cloned waker. - let waker1 = new_shared_task_waker(shared_internals.waker_id(), pid); + let waker1 = shared_internals.new_task_waker(pid); let waker2 = waker1.clone(); drop(waker1); @@ -102,7 +102,7 @@ mod shared { let shared_internals2 = shared_internals.clone(); let handle = thread::spawn(move || { - let waker = new_shared_task_waker(shared_internals2.waker_id(), pid); + let waker = shared_internals2.new_task_waker(pid); waker.wake_by_ref(); waker.wake(); }); @@ -122,8 +122,8 @@ mod shared { #[test] fn no_internals() { - let waker_id = init_shared_waker(Weak::new()); - let waker = new_shared_task_waker(waker_id, PID1); + let wakers = Wakers::new(Weak::new()); + let waker = wakers.new_task_waker(PID1); // This shouldn't be a problem. 
waker.wake_by_ref(); @@ -132,10 +132,10 @@ mod shared { #[test] fn will_wake() { - let waker_id = init_shared_waker(Weak::new()); - let waker1a = new_shared_task_waker(waker_id, PID1); - let waker1b = new_shared_task_waker(waker_id, PID1); - let waker2a = new_shared_task_waker(waker_id, PID2); + let wakers = Wakers::new(Weak::new()); + let waker1a = wakers.new_task_waker(PID1); + let waker1b = wakers.new_task_waker(PID1); + let waker2a = wakers.new_task_waker(PID2); let waker2b = waker2a.clone(); assert!(waker1a.will_wake(&waker1a)); @@ -157,9 +157,9 @@ mod shared { fn new_internals() -> Arc { let setup = RuntimeInternals::test_setup().unwrap(); Arc::new_cyclic(|shared_internals| { - let waker_id = init_shared_waker(shared_internals.clone()); + let wakers = Wakers::new(shared_internals.clone()); let worker_wakers = vec![test::noop_waker()].into_boxed_slice(); - setup.complete(waker_id, worker_wakers, None) + setup.complete(wakers, worker_wakers, None) }) } From 0e8f1eab75c3b38913aa2cfe8819b5d055da3ba9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 13:09:16 +0200 Subject: [PATCH 096/177] Rename crate::waker to wakers To match the name of the new Wakers type. --- rt/src/coordinator/mod.rs | 2 +- rt/src/lib.rs | 2 +- rt/src/shared/mod.rs | 2 +- rt/src/test.rs | 2 +- rt/src/{waker => wakers}/mod.rs | 0 rt/src/{waker => wakers}/shared.rs | 0 rt/src/{waker => wakers}/tests.rs | 2 +- 7 files changed, 5 insertions(+), 5 deletions(-) rename rt/src/{waker => wakers}/mod.rs (100%) rename rt/src/{waker => wakers}/shared.rs (100%) rename rt/src/{waker => wakers}/tests.rs (99%) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 714c9d2ab..9b559424b 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -33,7 +33,7 @@ use mio_signals::{SignalSet, Signals}; use crate::setup::{host_id, host_info, Uuid}; use crate::thread_waker::ThreadWaker; -use crate::waker::shared::Wakers; +use crate::wakers::shared::Wakers; use crate::{ self as rt, cpu_usage, shared, trace, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, diff --git a/rt/src/lib.rs b/rt/src/lib.rs index c9b3a9072..e31c6e288 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -240,7 +240,7 @@ mod timers; pub mod trace; #[doc(hidden)] pub mod util; -mod waker; +mod wakers; mod worker; use process::ProcessId; diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index b46708ff8..ae15c88bb 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -24,7 +24,7 @@ use crate::spawn::{ActorOptions, FutureOptions}; use crate::thread_waker::ThreadWaker; use crate::timers::shared::Timers; use crate::timers::TimerToken; -use crate::waker::shared::Wakers; +use crate::wakers::shared::Wakers; use crate::{trace, ThreadSafe}; /// Setup of [`RuntimeInternals`]. 
diff --git a/rt/src/test.rs b/rt/src/test.rs index 4b5141a27..70bb9f5ac 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -65,7 +65,7 @@ use heph_inbox::Manager; use crate::spawn::{ActorOptions, FutureOptions, SyncActorOptions}; use crate::sync_worker::SyncWorker; use crate::thread_waker::ThreadWaker; -use crate::waker::shared::Wakers; +use crate::wakers::shared::Wakers; use crate::worker::{Control, Worker}; use crate::{ self as rt, shared, ProcessId, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END, diff --git a/rt/src/waker/mod.rs b/rt/src/wakers/mod.rs similarity index 100% rename from rt/src/waker/mod.rs rename to rt/src/wakers/mod.rs diff --git a/rt/src/waker/shared.rs b/rt/src/wakers/shared.rs similarity index 100% rename from rt/src/waker/shared.rs rename to rt/src/wakers/shared.rs diff --git a/rt/src/waker/tests.rs b/rt/src/wakers/tests.rs similarity index 99% rename from rt/src/waker/tests.rs rename to rt/src/wakers/tests.rs index 6be81487c..b13ba040e 100644 --- a/rt/src/waker/tests.rs +++ b/rt/src/wakers/tests.rs @@ -10,7 +10,7 @@ mod shared { use crate::shared::RuntimeInternals; use crate::spawn::options::Priority; use crate::test; - use crate::waker::shared::Wakers; + use crate::wakers::shared::Wakers; const PID1: ProcessId = ProcessId(1); const PID2: ProcessId = ProcessId(2); From 3dc89f50f629f8ce9bc7ff920cb7e196cc7ebfdd Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 13:18:34 +0200 Subject: [PATCH 097/177] Start Heph v0.5 developement --- Cargo.toml | 3 ++- README.md | 2 +- http/Cargo.toml | 2 +- remote/Cargo.toml | 2 +- rt/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4098d03a6..7bd9bd974 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "heph" description = "Heph is an actor framework based on asynchronous functions." -version = "0.4.0" +version = "0.5.0" +publish = false # In development. authors = ["Thomas de Zeeuw "] license = "MIT" documentation = "https://docs.rs/heph" diff --git a/README.md b/README.md index 2565c8f04..c21cb544b 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ Second, Heph needs to be added as a dependency. ```toml [dependencies] -heph = "0.4.0" +heph = "0.5.0" ``` Now, you're ready to starting writing your application! 
Next, you can look at diff --git a/http/Cargo.toml b/http/Cargo.toml index acdc36c3e..af7a0bdec 100644 --- a/http/Cargo.toml +++ b/http/Cargo.toml @@ -13,7 +13,7 @@ include = ["/Cargo.toml", "/src/**/*.rs", "/README.md", "/LICENSE"] edition = "2021" [dependencies] -heph = { version = "0.4.0", path = "../", default-features = false, features = ["runtime"] } +heph = { version = "0.5.0", path = "../", default-features = false, features = ["runtime"] } httparse = { version = "1.5.1", default-features = false } httpdate = { version = "1.0.0", default-features = false } log = { version = "0.4.8", default-features = false } diff --git a/remote/Cargo.toml b/remote/Cargo.toml index 8ae3e2b5e..64a169ae8 100644 --- a/remote/Cargo.toml +++ b/remote/Cargo.toml @@ -9,7 +9,7 @@ default = ["json"] json = ["serde_json"] [dependencies] -heph = { version = "0.4.0", path = "../", default-features = false } +heph = { version = "0.5.0", path = "../", default-features = false } heph-rt = { version = "0.5.0", path = "../rt", default-features = false } log = { version = "0.4.14", default-features = false } serde = { version = "1.0.130", default-features = false } diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 88078afa6..79735d3ed 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -19,7 +19,7 @@ test = ["heph/test"] [dependencies] a10 = { version = "0.1.0", default-features = false, git = "https://github.com/Thomasdezeeuw/a10" } -heph = { version = "0.4.0", path = "../", default-features = false } +heph = { version = "0.5.0", path = "../", default-features = false } heph-inbox = { version = "0.2.3", path = "../inbox", default-features = false } log = { version = "0.4.16", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } crossbeam-channel = { version = "0.5.0", default-features = false, features = ["std"] } From 737e40e0b5e396730e0d45fd019061e87425b17c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 19 Apr 2023 16:54:25 +0200 Subject: [PATCH 098/177] Use Access trait directly Not as {use crate as rt}::Access any longer. --- rt/src/io/mod.rs | 4 ++-- rt/src/net/tcp/listener.rs | 6 +++--- rt/src/net/tcp/server.rs | 7 ++++--- rt/src/net/tcp/stream.rs | 6 +++--- rt/src/net/udp.rs | 4 ++-- rt/src/net/uds/datagram.rs | 10 +++++----- rt/src/pipe.rs | 10 +++++----- rt/src/setup.rs | 4 ++-- rt/src/shared/mod.rs | 2 +- rt/src/systemd.rs | 9 +++++---- rt/src/timer.rs | 28 ++++++++++++++-------------- 11 files changed, 46 insertions(+), 44 deletions(-) diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 2bbfce624..62a5daa31 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -38,7 +38,7 @@ use a10::Extract; -use crate as rt; +use crate::access::Access; // For ease of use within the crate. pub(crate) use std::io::{Error, Result}; @@ -57,7 +57,7 @@ macro_rules! stdio { $fn: ident () -> $name: ident, $fd: expr ) => { #[doc = concat!("Create a new `", stringify!($name), "`.\n\n")] - pub fn $fn(rt: &RT) -> $name { + pub fn $fn(rt: &RT) -> $name { $name(std::mem::ManuallyDrop::new(unsafe { a10::AsyncFd::new( $fd, rt.submission_queue(), diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 4dbff270c..1b6f4881b 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -9,8 +9,8 @@ use std::{fmt, io}; use a10::AsyncFd; use socket2::{Domain, Protocol, SockRef, Socket, Type}; +use crate::access::Access; use crate::net::{convert_address, SockAddr, TcpStream}; -use crate::{self as rt}; /// A TCP socket listener. 
/// @@ -172,7 +172,7 @@ impl TcpListener { /// `address`. pub async fn bind(rt: &RT, address: SocketAddr) -> io::Result where - RT: rt::Access, + RT: Access, { TcpListener::bind_setup(rt, address, |_| Ok(())).await } @@ -183,7 +183,7 @@ impl TcpListener { setup: F, ) -> io::Result where - RT: rt::Access, + RT: Access, F: FnOnce(&Socket) -> io::Result<()>, { let fd = a10::net::socket( diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 2f2ec5ab9..6693b736c 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -282,10 +282,11 @@ use heph::supervisor::Supervisor; use log::{debug, trace}; use socket2::{Domain, Protocol, Socket, Type}; +use crate::access::Access; use crate::net::{TcpListener, TcpStream}; use crate::spawn::{ActorOptions, Spawn}; use crate::util::{either, next}; -use crate::{self as rt, Signal}; +use crate::Signal; /// Create a new [server setup]. /// @@ -401,7 +402,7 @@ impl NewActor for Setup where S: Supervisor + Clone + 'static, NA: NewActor + Clone + 'static, - NA::RuntimeAccess: rt::Access + Spawn, + NA::RuntimeAccess: Access + Spawn, { type Message = Message; type Argument = (); @@ -443,7 +444,7 @@ async fn tcp_server( where S: Supervisor + Clone + 'static, NA: NewActor + Clone + 'static, - NA::RuntimeAccess: rt::Access + Spawn, + NA::RuntimeAccess: Access + Spawn, { let listener = TcpListener::bind_setup(ctx.runtime_ref(), local, set_listener_options) .await diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index 54104c978..d8570826b 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -6,7 +6,7 @@ use std::net::{Shutdown, SocketAddr}; use a10::{AsyncFd, Extract}; use socket2::{Domain, Protocol, SockRef, Type}; -use crate as rt; +use crate::access::Access; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; use crate::net::{ convert_address, Recv, RecvN, RecvNVectored, RecvVectored, Send, SendAll, SendAllVectored, @@ -46,7 +46,7 @@ impl TcpStream { /// specified `address`. pub async fn connect(rt: &RT, address: SocketAddr) -> io::Result where - RT: rt::Access, + RT: Access, { let fd = a10::net::socket( rt.submission_queue(), @@ -76,7 +76,7 @@ impl TcpStream { /// [`TcpListener`]: crate::net::tcp::TcpListener pub fn set_auto_cpu_affinity(&self, rt: &RT) where - RT: rt::Access, + RT: Access, { #[cfg(target_os = "linux")] if let Some(cpu) = rt.cpu() { diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 9442a70f4..011b5109a 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -9,12 +9,12 @@ use std::{fmt, io}; use a10::{AsyncFd, Extract}; use socket2::{Domain, Protocol, SockRef, Type}; +use crate::access::Access; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; use crate::net::{ convert_address, Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, SendVectored, SockAddr, }; -use crate::{self as rt}; pub use crate::net::{Connected, Unconnected}; @@ -131,7 +131,7 @@ impl UdpSocket { /// Create a UDP socket binding to the `local` address. 
pub async fn bind(rt: &RT, local: SocketAddr) -> io::Result> where - RT: rt::Access, + RT: Access, { let fd = a10::net::socket( rt.submission_queue(), diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index f8c08558a..c0fc1ebf0 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -7,7 +7,7 @@ use a10::{AsyncFd, Extract}; use log::warn; use socket2::{Domain, SockRef, Type}; -use crate as rt; +use crate::access::Access; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; use crate::net::uds::UnixAddr; use crate::net::{ @@ -47,7 +47,7 @@ impl UnixDatagram { /// Creates a Unix datagram socket bound to `address`. pub async fn bind(rt: &RT, address: UnixAddr) -> io::Result> where - RT: rt::Access, + RT: Access, { let socket = UnixDatagram::unbound(rt).await?; socket.with_ref(|socket| socket.bind(&address.inner))?; @@ -57,7 +57,7 @@ impl UnixDatagram { /// Creates a Unix Datagram socket which is not bound to any address. pub async fn unbound(rt: &RT) -> io::Result> where - RT: rt::Access, + RT: Access, { let fd = a10::net::socket( rt.submission_queue(), @@ -73,7 +73,7 @@ impl UnixDatagram { /// Creates an unnamed pair of connected sockets. pub fn pair(rt: &RT) -> io::Result<(UnixDatagram, UnixDatagram)> where - RT: rt::Access, + RT: Access, { let (s1, s2) = socket2::Socket::pair(Domain::UNIX, Type::DGRAM.cloexec(), None)?; let s1 = UnixDatagram::new(rt, unsafe { @@ -90,7 +90,7 @@ impl UnixDatagram { fn new(rt: &RT, fd: AsyncFd) -> io::Result> where - RT: rt::Access, + RT: Access, { let socket = UnixDatagram { fd, diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 886b85c75..9f104728e 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -101,7 +101,7 @@ use std::process::{ChildStderr, ChildStdin, ChildStdout}; use a10::{AsyncFd, Extract}; -use crate as rt; +use crate::access::Access; use crate::io::{ Buf, BufMut, BufMutSlice, BufSlice, BufWrapper, Read, ReadN, ReadNVectored, ReadVectored, Write, WriteAll, WriteAllVectored, WriteVectored, @@ -119,7 +119,7 @@ use crate::io::{ /// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html pub fn new(rt: &RT) -> io::Result<(Sender, Receiver)> where - RT: rt::Access, + RT: Access, { let mut fds: [RawFd; 2] = [-1, -1]; let _ = syscall!(pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC))?; @@ -143,7 +143,7 @@ impl Sender { /// Convert a [`ChildStdin`] to a `Sender`. pub fn from_child_stdin(rt: &RT, stdin: ChildStdin) -> io::Result where - RT: rt::Access, + RT: Access, { // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), rt.submission_queue()) }; @@ -203,7 +203,7 @@ impl Receiver { /// Convert a [`ChildStdout`] to a `Receiver`. pub fn from_child_stdout(rt: &RT, stdout: ChildStdout) -> io::Result where - RT: rt::Access, + RT: Access, { // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), rt.submission_queue()) }; @@ -213,7 +213,7 @@ impl Receiver { /// Convert a [`ChildStderr`] to a `Receiver`. pub fn from_child_stderr(rt: &RT, stderr: ChildStderr) -> io::Result where - RT: rt::Access, + RT: Access, { // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. 
let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), rt.submission_queue()) }; diff --git a/rt/src/setup.rs b/rt/src/setup.rs index 53e061fc2..133eb19cb 100644 --- a/rt/src/setup.rs +++ b/rt/src/setup.rs @@ -400,11 +400,11 @@ pub(crate) fn set_cpu_affinity(worker_id: NonZeroUsize) -> Option { let cpu_set = cpu_set(cpu); match set_affinity(&cpu_set) { Ok(()) => { - debug!(worker_id = {}; "worker thread CPU affinity set to {cpu}"); + debug!(worker_id = log::as_display!(worker_id); "worker thread CPU affinity set to {cpu}"); Some(cpu) } Err(err) => { - warn!(worker_id = {}; "failed to set CPU affinity on thread: {err}"); + warn!(worker_id = log::as_display!(worker_id); "failed to set CPU affinity on thread: {err}"); None } } diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index ae15c88bb..2f2e5487d 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -80,7 +80,7 @@ pub(crate) struct RuntimeInternals { poll: Mutex, /// io_uring completion ring. ring: Mutex, - /// SubmissionQueue for the `ring`. + /// Submission queue for the `ring`. sq: a10::SubmissionQueue, /// Wakers used to create [`task::Waker`]s for thread-safe actors. wakers: Wakers, diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index c62b9c4ea..07e2aa6b7 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -23,10 +23,11 @@ use heph::actor; use heph::messages::Terminate; use log::{as_debug, debug, warn}; +use crate::access::Access; use crate::net::uds::{Connected, UnixAddr, UnixDatagram}; use crate::timer::Interval; use crate::util::{either, next}; -use crate::{self as rt, Signal}; +use crate::Signal; /// systemd notifier. /// @@ -57,7 +58,7 @@ impl Notify { /// [`systemd.service(5)`]: https://www.freedesktop.org/software/systemd/man/systemd.service.html#WatchdogSec= pub async fn new(rt: &RT) -> io::Result> where - RT: rt::Access, + RT: Access, { const SOCKET_ENV_VAR: &str = "NOTIFY_SOCKET"; const WATCHDOG_PID_ENV_VAR: &str = "WATCHDOG_PID"; @@ -102,7 +103,7 @@ impl Notify { /// the environment variables set by systemd. pub async fn connect(rt: &RT, path: P) -> io::Result where - RT: rt::Access, + RT: Access, P: AsRef, { let socket = UnixDatagram::unbound(rt).await?; @@ -278,7 +279,7 @@ pub async fn watchdog( mut health_check: H, ) -> io::Result<()> where - RT: rt::Access + Clone, + RT: Access + Clone, H: FnMut() -> Result<(), E>, E: ToString, { diff --git a/rt/src/timer.rs b/rt/src/timer.rs index 135947b8d..eddd76c43 100644 --- a/rt/src/timer.rs +++ b/rt/src/timer.rs @@ -16,8 +16,8 @@ use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; +use crate::access::Access; use crate::timers::TimerToken; -use crate::{self as rt}; /// Type returned when the deadline has passed. /// @@ -86,14 +86,14 @@ impl From for io::ErrorKind { /// ``` #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Timer { +pub struct Timer { deadline: Instant, rt: RT, /// If `Some` it means we've added a timer that hasn't expired yet. timer_pending: Option, } -impl Timer { +impl Timer { /// Create a new `Timer`. 
pub const fn at(rt: RT, deadline: Instant) -> Timer { Timer { @@ -129,7 +129,7 @@ impl Timer { } } -impl Future for Timer { +impl Future for Timer { type Output = DeadlinePassed; fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { @@ -144,9 +144,9 @@ impl Future for Timer { } } -impl Unpin for Timer {} +impl Unpin for Timer {} -impl Drop for Timer { +impl Drop for Timer { fn drop(&mut self) { if let Some(token) = self.timer_pending { self.rt.remove_timer(self.deadline, token); @@ -222,12 +222,12 @@ impl Drop for Timer { /// ``` #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Deadline { +pub struct Deadline { timer: Timer, future: Fut, } -impl Deadline { +impl Deadline { /// Create a new `Deadline`. pub const fn at(rt: RT, deadline: Instant, future: Fut) -> Deadline { Deadline { @@ -269,7 +269,7 @@ impl Deadline { } } -impl Future for Deadline +impl Future for Deadline where Fut: Future>, E: From, @@ -293,7 +293,7 @@ where } } -impl Unpin for Deadline {} +impl Unpin for Deadline {} /// An [`AsyncIterator`] that yields an item after an interval has passed. /// @@ -357,12 +357,12 @@ impl Unpin for Deadline {} /// ``` #[derive(Debug)] #[must_use = "AsyncIterators do nothing unless polled"] -pub struct Interval { +pub struct Interval { timer: Timer, interval: Duration, } -impl Interval { +impl Interval { /// Create a new `Interval`. pub fn every(rt: RT, interval: Duration) -> Interval { Interval { @@ -377,7 +377,7 @@ impl Interval { } } -impl AsyncIterator for Interval { +impl AsyncIterator for Interval { type Item = DeadlinePassed; fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { @@ -393,4 +393,4 @@ impl AsyncIterator for Interval { } } -impl Unpin for Interval {} +impl Unpin for Interval {} From d5e01b13fd4fdc78ccd62b13080528231a37522e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 20 Apr 2023 14:40:49 +0200 Subject: [PATCH 099/177] Move coordinator waker implementation to waker module So it can be reused. --- rt/src/coordinator/mod.rs | 8 +- rt/src/coordinator/waker.rs | 111 ---------------------- rt/src/{coordinator => wakers}/bitmap.rs | 114 ----------------------- rt/src/wakers/mod.rs | 114 +++++++++++++++++++++++ rt/src/wakers/tests.rs | 109 ++++++++++++++++++++++ 5 files changed, 225 insertions(+), 231 deletions(-) delete mode 100644 rt/src/coordinator/waker.rs rename rt/src/{coordinator => wakers}/bitmap.rs (54%) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator/mod.rs index 9b559424b..c48aaba1e 100644 --- a/rt/src/coordinator/mod.rs +++ b/rt/src/coordinator/mod.rs @@ -34,16 +34,12 @@ use mio_signals::{SignalSet, Signals}; use crate::setup::{host_id, host_info, Uuid}; use crate::thread_waker::ThreadWaker; use crate::wakers::shared::Wakers; +use crate::wakers::{self, AtomicBitMap}; use crate::{ self as rt, cpu_usage, shared, trace, worker, Signal, SyncWorker, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, }; -mod bitmap; -mod waker; - -use bitmap::AtomicBitMap; - /// Token used to receive process signals. const SIGNAL: Token = Token(usize::MAX); const RING: Token = Token(usize::MAX - 1); @@ -203,7 +199,7 @@ impl Coordinator { // Run all coordinator futures that are ready. 
while let Some(idx) = self.futures_ready.next_set() { - let waker = waker::new(self.futures_ready.clone(), idx); + let waker = wakers::new(self.futures_ready.clone(), idx); let mut ctx = task::Context::from_waker(&waker); match self.futures[idx].as_mut().poll(&mut ctx) { task::Poll::Ready(()) => { diff --git a/rt/src/coordinator/waker.rs b/rt/src/coordinator/waker.rs deleted file mode 100644 index 69ec91d21..000000000 --- a/rt/src/coordinator/waker.rs +++ /dev/null @@ -1,111 +0,0 @@ -//! Waker implementation for the coordinator. -//! -//! # Implementation -//! -//! The implementation is fairly simple. All it does is set a bit in an -//! [`AtomicBitMap`] contained in an [`Arc`]. - -use std::sync::Arc; -use std::{ptr, task}; - -use crate::coordinator::bitmap::AtomicBitMap; - -/// Maximum number of wakers this module supports. -pub(crate) const MAX_WAKERS: usize = 1 << PTR_BITS_UNUSED; -/// Number of bits we expect a 64 bit pointer to not used, leaving them for us -/// to fill with our index (into `AtomicBitMap`). -const PTR_BITS_UNUSED: usize = 16; -/// Amount of bits to shift to not overwrite the pointer address. -const PTR_DATA_SHIFT: usize = usize::BITS as usize - PTR_BITS_UNUSED; -/// Mask to get the data from a pointer. -const DATA_MASK: usize = ((1 << PTR_BITS_UNUSED) - 1) << PTR_DATA_SHIFT; -/// Mask to get the pointer to the `AtomicBitMap`. -const PTR_MASK: usize = (1 << PTR_DATA_SHIFT) - 1; - -pub(super) fn new(bitmap: Arc, id: usize) -> task::Waker { - let data = into_data_ptr(bitmap, id); - let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); - unsafe { task::Waker::from_raw(raw_waker) } -} - -/// # Panics -/// -/// This will panic if the capacity of `bitmap` is smaller than `id`. `id` must -/// be smallar then [`MAX_WAKERS`]. -fn into_data_ptr(bitmap: Arc, id: usize) -> *const () { - // Check the input is valid. - assert!(bitmap.capacity() >= id); - assert!(id <= MAX_WAKERS); - - // This is a "fat" pointer, a pointer to `AtomicBitMap` and a length. - let bitmap_ptr = Arc::into_raw(bitmap); - // This will point to the start of the `AtomicBitMap` as is "thin". - let bitmap_start = bitmap_ptr.cast::<()>(); - // Ensure we have bit to put our `id`. - assert!(bitmap_start as usize & PTR_BITS_UNUSED == 0); - // Squash the pointer and our `id` together. - ((bitmap_start as usize) & (id << PTR_DATA_SHIFT)) as *const () -} - -static WAKER_VTABLE: task::RawWakerVTable = - task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); - -unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { - let (bitmap_ptr, _) = data_as_raw_ptr(data); - Arc::increment_strong_count(bitmap_ptr); - // After we incremented the strong count we can reuse the same data. - task::RawWaker::new(data, &WAKER_VTABLE) -} - -unsafe fn wake(data: *const ()) { - let (bitmap, id) = from_data_ptr(data); - bitmap.set(id); -} - -unsafe fn wake_by_ref(data: *const ()) { - let (bitmap_ptr, id) = data_as_raw_ptr(data); - let bitmap = &*bitmap_ptr; - bitmap.set(id); -} - -unsafe fn drop_wake_data(data: *const ()) { - drop(from_data_ptr(data)); -} - -/// # Safety -/// -/// `data` MUST be created by [`into_data_ptr`]. -unsafe fn from_data_ptr(data: *const ()) -> (Arc, usize) { - let (bitmap_ptr, id) = data_as_raw_ptr(data); - (Arc::from_raw(bitmap_ptr), id) -} - -/// Returns a raw pointer to the `AtomicBitMap` inside of an `Arc`. -/// -/// # Safety -/// -/// `data` MUST be created by [`into_data_ptr`]. 
-unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { - // SAFETY: the caller must ensure that `data` is created using - // `into_data_ptr`. That guarantees us two things, 1) `id` is valid and 2) - // that the pointer is valid and the bitmap has enough capacity for the - // `id`. - // The above guarantees ensure that calling `min_bitmap_size` results in a - // bitmap that has at least enough capacity that we can set the `id`-th bit. - // The returned pointer might be a shorter than the true length of - // `AtomicBitMap`, but we can work with that. - let id = data as usize & DATA_MASK; - let bitmap_start = (data as usize & PTR_MASK) as *const (); - let bitmap_size = min_bitmap_size(id); - let bitmap_ptr = ptr::from_raw_parts(bitmap_start, bitmap_size); - (bitmap_ptr, id) -} - -/// Returns the minimum bitmap size such that `id` can be set. -fn min_bitmap_size(id: usize) -> usize { - let mut bitmap_size = id / usize::BITS as usize; - if (id % usize::BITS as usize) != 0 { - bitmap_size += 1; - } - bitmap_size -} diff --git a/rt/src/coordinator/bitmap.rs b/rt/src/wakers/bitmap.rs similarity index 54% rename from rt/src/coordinator/bitmap.rs rename to rt/src/wakers/bitmap.rs index ce86d7a55..c1982d68e 100644 --- a/rt/src/coordinator/bitmap.rs +++ b/rt/src/wakers/bitmap.rs @@ -85,117 +85,3 @@ impl fmt::Debug for AtomicBitMap { Ok(()) } } - -#[test] -fn setting_and_unsetting_one() { - setting_and_unsetting(64) -} - -#[test] -fn setting_and_unsetting_two() { - setting_and_unsetting(128) -} - -#[test] -fn setting_and_unsetting_three() { - setting_and_unsetting(192) -} - -#[test] -fn setting_and_unsetting_four() { - setting_and_unsetting(256) -} - -#[test] -fn setting_and_unsetting_eight() { - setting_and_unsetting(512) -} - -#[test] -fn setting_and_unsetting_sixteen() { - setting_and_unsetting(1024) -} - -#[cfg(test)] -fn setting_and_unsetting(entries: usize) { - let map = AtomicBitMap::new(entries); - assert_eq!(map.capacity(), entries); - - // Set all indices. - for n in 0..entries { - map.set(n); - } - - // All bits should be set. - for data in &map.data { - assert!(data.load(Ordering::Relaxed) == usize::MAX); - } - - // Unset all indices again. - for n in 0..entries { - assert_eq!(map.next_set(), Some(n)); - } - // Bitmap should be zeroed. - for data in &map.data { - assert!(data.load(Ordering::Relaxed) == 0); - } - - // Test unsetting an index not in order. - map.set(63); - map.set(0); - assert!(matches!(map.next_set(), Some(i) if i == 0)); - assert!(matches!(map.next_set(), Some(i) if i == 63)); - - // Next avaiable index should be 0 again. 
- assert_eq!(map.next_set(), None); -} - -#[test] -fn setting_and_unsetting_concurrent() { - use std::sync::{Arc, Barrier}; - use std::thread; - - const N: usize = 4; - const M: usize = 1024; - - let bitmap = Arc::new(AtomicBitMap::new(N * M)); - - for n in 0..N * M { - bitmap.set(n); - } - - let barrier = Arc::new(Barrier::new(N + 1)); - let handles = (0..N) - .map(|i| { - let bitmap = bitmap.clone(); - let barrier = barrier.clone(); - thread::spawn(move || { - let mut indices = Vec::with_capacity(M); - _ = barrier.wait(); - - if i % 2 == 0 { - for _ in 0..M { - let idx = bitmap.next_set().expect("failed to get index"); - indices.push(idx); - } - - for idx in indices { - bitmap.set(idx); - } - } else { - for _ in 0..M { - let idx = bitmap.next_set().expect("failed to get index"); - bitmap.set(idx); - } - } - }) - }) - .collect::>(); - - _ = barrier.wait(); - handles - .into_iter() - .map(|handle| handle.join()) - .collect::>() - .unwrap(); -} diff --git a/rt/src/wakers/mod.rs b/rt/src/wakers/mod.rs index 9ded87494..0bb746e06 100644 --- a/rt/src/wakers/mod.rs +++ b/rt/src/wakers/mod.rs @@ -1,5 +1,119 @@ //! Wakers implementation. +//! +//! # Implementation +//! +//! The implementation is fairly simple. All it does is set a bit in an +//! [`AtomicBitMap`] contained in an [`Arc`]. +use std::sync::Arc; +use std::{ptr, task}; + +mod bitmap; pub(crate) mod shared; #[cfg(test)] mod tests; + +pub(crate) use bitmap::AtomicBitMap; + +/// Maximum number of wakers this implementation supports. +pub(crate) const MAX_WAKERS: usize = 1 << PTR_BITS_UNUSED; +/// Number of bits we expect a 64 bit pointer to not use, leaving them for us to +/// fill with our index (into [`AtomicBitMap`]). +const PTR_BITS_UNUSED: usize = 16; +/// Amount of bits to shift to not overwrite the pointer address. +const PTR_DATA_SHIFT: usize = usize::BITS as usize - PTR_BITS_UNUSED; +/// Mask to get the data from a pointer. +const DATA_MASK: usize = ((1 << PTR_BITS_UNUSED) - 1) << PTR_DATA_SHIFT; +/// Mask to get the pointer to the `AtomicBitMap`. +const PTR_MASK: usize = (1 << PTR_DATA_SHIFT) - 1; + +/// Create a new `task::Waker`. +/// +/// `id` MUST be smaller than [`MAX_WAKERS`]. +pub(crate) fn new(bitmap: Arc, id: usize) -> task::Waker { + let data = into_data_ptr(bitmap, id); + let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); + unsafe { task::Waker::from_raw(raw_waker) } +} + +/// # Panics +/// +/// This will panic if the capacity of `bitmap` is smaller than `id`. `id` must +/// be smallar then [`MAX_WAKERS`]. +fn into_data_ptr(bitmap: Arc, id: usize) -> *const () { + // Check the input is valid. + assert!(bitmap.capacity() >= id); + assert!(id <= MAX_WAKERS); + + // This is a "fat" pointer, a pointer to `AtomicBitMap` and a length. + let bitmap_ptr = Arc::into_raw(bitmap); + // This will point to the start of the `AtomicBitMap` as is "thin". + let bitmap_start = bitmap_ptr.cast::<()>(); + // Ensure we have bit to put our `id`. + assert!(bitmap_start as usize & PTR_BITS_UNUSED == 0); + // Squash the pointer and our `id` together. + ((bitmap_start as usize) & (id << PTR_DATA_SHIFT)) as *const () +} + +static WAKER_VTABLE: task::RawWakerVTable = + task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); + +unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { + let (bitmap_ptr, _) = data_as_raw_ptr(data); + Arc::increment_strong_count(bitmap_ptr); + // After we incremented the strong count we can reuse the same data. 
+ task::RawWaker::new(data, &WAKER_VTABLE) +} + +unsafe fn wake(data: *const ()) { + let (bitmap, id) = from_data_ptr(data); + bitmap.set(id); +} + +unsafe fn wake_by_ref(data: *const ()) { + let (bitmap_ptr, id) = data_as_raw_ptr(data); + let bitmap = &*bitmap_ptr; + bitmap.set(id); +} + +unsafe fn drop_wake_data(data: *const ()) { + drop(from_data_ptr(data)); +} + +/// # Safety +/// +/// `data` MUST be created by [`into_data_ptr`]. +unsafe fn from_data_ptr(data: *const ()) -> (Arc, usize) { + let (bitmap_ptr, id) = data_as_raw_ptr(data); + (Arc::from_raw(bitmap_ptr), id) +} + +/// Returns a raw pointer to the `AtomicBitMap` inside of an `Arc`. +/// +/// # Safety +/// +/// `data` MUST be created by [`into_data_ptr`]. +unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { + // SAFETY: the caller must ensure that `data` is created using + // `into_data_ptr`. That guarantees us two things, 1) `id` is valid and 2) + // that the pointer is valid and the bitmap has enough capacity for the + // `id`. + // The above guarantees ensure that calling `min_bitmap_size` results in a + // bitmap that has at least enough capacity that we can set the `id`-th bit. + // The returned pointer might be a shorter than the true length of + // `AtomicBitMap`, but we can work with that. + let id = data as usize & DATA_MASK; + let bitmap_start = (data as usize & PTR_MASK) as *const (); + let bitmap_size = min_bitmap_size(id); + let bitmap_ptr = ptr::from_raw_parts(bitmap_start, bitmap_size); + (bitmap_ptr, id) +} + +/// Returns the minimum bitmap size such that `id` can be set. +fn min_bitmap_size(id: usize) -> usize { + let mut bitmap_size = id / usize::BITS as usize; + if (id % usize::BITS as usize) != 0 { + bitmap_size += 1; + } + bitmap_size +} diff --git a/rt/src/wakers/tests.rs b/rt/src/wakers/tests.rs index b13ba040e..f07f02560 100644 --- a/rt/src/wakers/tests.rs +++ b/rt/src/wakers/tests.rs @@ -171,3 +171,112 @@ mod shared { .unwrap() } } + +mod bitmap { + use crate::wakers::bitmap::AtomicBitMap; + + #[test] + fn setting_and_unsetting_one() { + setting_and_unsetting(64) + } + + #[test] + fn setting_and_unsetting_two() { + setting_and_unsetting(128) + } + + #[test] + fn setting_and_unsetting_three() { + setting_and_unsetting(192) + } + + #[test] + fn setting_and_unsetting_four() { + setting_and_unsetting(256) + } + + #[test] + fn setting_and_unsetting_eight() { + setting_and_unsetting(512) + } + + #[test] + fn setting_and_unsetting_sixteen() { + setting_and_unsetting(1024) + } + + #[cfg(test)] + fn setting_and_unsetting(entries: usize) { + let map = AtomicBitMap::new(entries); + assert_eq!(map.capacity(), entries); + + // Set all indices. + for n in 0..entries { + map.set(n); + } + + // All bits should be set. + for n in 0..entries { + assert_eq!(map.next_set(), Some(n)); + } + + // Test unsetting an index not in order. + map.set(63); + map.set(0); + assert!(matches!(map.next_set(), Some(i) if i == 0)); + assert!(matches!(map.next_set(), Some(i) if i == 63)); + + // Next avaiable index should be 0 again. 
+ assert_eq!(map.next_set(), None); + } + + #[test] + fn setting_and_unsetting_concurrent() { + use std::sync::{Arc, Barrier}; + use std::thread; + + const N: usize = 4; + const M: usize = 1024; + + let bitmap = Arc::new(AtomicBitMap::new(N * M)); + + for n in 0..N * M { + bitmap.set(n); + } + + let barrier = Arc::new(Barrier::new(N + 1)); + let handles = (0..N) + .map(|i| { + let bitmap = bitmap.clone(); + let barrier = barrier.clone(); + thread::spawn(move || { + let mut indices = Vec::with_capacity(M); + _ = barrier.wait(); + + if i % 2 == 0 { + for _ in 0..M { + let idx = bitmap.next_set().expect("failed to get index"); + indices.push(idx); + } + + for idx in indices { + bitmap.set(idx); + } + } else { + for _ in 0..M { + let idx = bitmap.next_set().expect("failed to get index"); + bitmap.set(idx); + } + } + }) + }) + .collect::>(); + + _ = barrier.wait(); + handles + .into_iter() + .map(|handle| handle.join()) + .collect::>() + .unwrap(); + } +} From 9b2abe69f0eac1d247037c7e7ceca844871d4110 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 20 Apr 2023 14:41:31 +0200 Subject: [PATCH 100/177] Move coordinator to src/coordinator.rs No point in have a directory with a single file. --- rt/src/{coordinator/mod.rs => coordinator.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename rt/src/{coordinator/mod.rs => coordinator.rs} (100%) diff --git a/rt/src/coordinator/mod.rs b/rt/src/coordinator.rs similarity index 100% rename from rt/src/coordinator/mod.rs rename to rt/src/coordinator.rs From 3abedc546b1da60a843904d3e4f13fad4afd281b Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 20 Apr 2023 14:51:30 +0200 Subject: [PATCH 101/177] Use io_uring based shutdown For TcpStreams and UnixDatagrams. --- rt/src/net/tcp/stream.rs | 4 ++-- rt/src/net/uds/datagram.rs | 4 ++-- rt/tests/functional/tcp/stream.rs | 6 +++--- rt/tests/functional/uds/datagram.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index d8570826b..44ef3cbd2 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -336,8 +336,8 @@ impl TcpStream { /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of [`Shutdown`]). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.with_ref(|socket| socket.shutdown(how)) + pub async fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.fd.shutdown(how).await } /// Get the value of the `SO_ERROR` option on this socket. diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index c0fc1ebf0..f6a562774 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -137,8 +137,8 @@ impl UnixDatagram { /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of [`Shutdown`]). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.with_ref(|socket| socket.shutdown(how)) + pub async fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.fd.shutdown(how).await } /// Get the value of the `SO_ERROR` option on this socket. 
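Note on call sites: because `shutdown` is now submitted to the io_uring it became an `async fn`, so every caller gains an `.await`, as the test updates below show. A minimal sketch of the new pattern (`send_once` is a made-up example function; the `heph_rt::Access` and `heph_rt::net::TcpStream` import paths are assumed re-exports, and `data` is the same kind of statically borrowed buffer the tests pass to `send`):

    use std::io;
    use std::net::{Shutdown, SocketAddr};

    use heph_rt::net::TcpStream;
    use heph_rt::Access;

    /// Connect, send one message and signal end-of-stream to the peer.
    async fn send_once<RT: Access>(rt: &RT, address: SocketAddr, data: &'static [u8]) -> io::Result<()> {
        let stream = TcpStream::connect(rt, address).await?;
        let _ = stream.send(data).await?;
        // Previously a synchronous socket call, now an awaited io_uring operation.
        stream.shutdown(Shutdown::Write).await?;
        Ok(())
    }
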
diff --git a/rt/tests/functional/tcp/stream.rs b/rt/tests/functional/tcp/stream.rs index 9cdfd72d7..c6e7c893d 100644 --- a/rt/tests/functional/tcp/stream.rs +++ b/rt/tests/functional/tcp/stream.rs @@ -942,7 +942,7 @@ fn shutdown_read() { .await .unwrap(); - stream.shutdown(Shutdown::Read).unwrap(); + stream.shutdown(Shutdown::Read).await.unwrap(); let buf = stream.recv(Vec::with_capacity(2)).await.unwrap(); assert!(buf.is_empty()); @@ -991,7 +991,7 @@ fn shutdown_write() { .await .unwrap(); - stream.shutdown(Shutdown::Write).unwrap(); + stream.shutdown(Shutdown::Write).await.unwrap(); let err = stream.send(DATA).await.unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::BrokenPipe); @@ -1039,7 +1039,7 @@ fn shutdown_both() { .await .unwrap(); - stream.shutdown(Shutdown::Both).unwrap(); + stream.shutdown(Shutdown::Both).await.unwrap(); let err = stream.send(DATA).await.unwrap_err(); assert_eq!(err.kind(), io::ErrorKind::BrokenPipe); diff --git a/rt/tests/functional/uds/datagram.rs b/rt/tests/functional/uds/datagram.rs index ac9a043fe..492601162 100644 --- a/rt/tests/functional/uds/datagram.rs +++ b/rt/tests/functional/uds/datagram.rs @@ -48,8 +48,8 @@ fn pair() { buf.clear(); // Shutdown. - s1.shutdown(Shutdown::Both)?; - s2.shutdown(Shutdown::Both)?; + s1.shutdown(Shutdown::Both).await?; + s2.shutdown(Shutdown::Both).await?; // No errors. assert!(s1.take_error()?.is_none()); From e26a1ba444fcc959425579ac8c55517593f43201 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 21 Apr 2023 14:27:56 +0200 Subject: [PATCH 102/177] Update rustc version --- rt/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index e31c6e288..239d5f86a 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -149,6 +149,7 @@ doc_auto_cfg, doc_cfg_hide, drain_filter, + impl_trait_in_assoc_type, io_slice_advance, is_sorted, maybe_uninit_array_assume_init, @@ -157,7 +158,6 @@ new_uninit, ptr_metadata, stmt_expr_attributes, - type_alias_impl_trait, waker_getters )] #![warn( From 457fafb824011e7cd518fcd5fb1c604bcc63b351 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 21 Apr 2023 14:46:26 +0200 Subject: [PATCH 103/177] Fix bitmap based waker implementation --- rt/src/wakers/mod.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/rt/src/wakers/mod.rs b/rt/src/wakers/mod.rs index 0bb746e06..3422892a0 100644 --- a/rt/src/wakers/mod.rs +++ b/rt/src/wakers/mod.rs @@ -47,12 +47,13 @@ fn into_data_ptr(bitmap: Arc, id: usize) -> *const () { // This is a "fat" pointer, a pointer to `AtomicBitMap` and a length. let bitmap_ptr = Arc::into_raw(bitmap); - // This will point to the start of the `AtomicBitMap` as is "thin". + // This will point to the start of the `AtomicBitMap` and makes the pointer + // "thin". let bitmap_start = bitmap_ptr.cast::<()>(); // Ensure we have bit to put our `id`. - assert!(bitmap_start as usize & PTR_BITS_UNUSED == 0); + assert!(bitmap_start as usize & PTR_MASK == bitmap_start as usize); // Squash the pointer and our `id` together. - ((bitmap_start as usize) & (id << PTR_DATA_SHIFT)) as *const () + ((bitmap_start as usize) | (id << PTR_DATA_SHIFT)) as *const () } static WAKER_VTABLE: task::RawWakerVTable = @@ -102,7 +103,7 @@ unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { // bitmap that has at least enough capacity that we can set the `id`-th bit. // The returned pointer might be a shorter than the true length of // `AtomicBitMap`, but we can work with that. 
- let id = data as usize & DATA_MASK; + let id = (data as usize & DATA_MASK) >> PTR_DATA_SHIFT; let bitmap_start = (data as usize & PTR_MASK) as *const (); let bitmap_size = min_bitmap_size(id); let bitmap_ptr = ptr::from_raw_parts(bitmap_start, bitmap_size); @@ -111,9 +112,5 @@ unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { /// Returns the minimum bitmap size such that `id` can be set. fn min_bitmap_size(id: usize) -> usize { - let mut bitmap_size = id / usize::BITS as usize; - if (id % usize::BITS as usize) != 0 { - bitmap_size += 1; - } - bitmap_size + (id + usize::BITS as usize) / usize::BITS as usize } From 1e2af247425c13a93b9975b6c5a0bb68a5923777 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 21 Apr 2023 17:06:36 +0200 Subject: [PATCH 104/177] Update memory stress test example --- rt/examples/99_stress_memory.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/rt/examples/99_stress_memory.rs b/rt/examples/99_stress_memory.rs index d7b3e2652..eae3fb9c7 100644 --- a/rt/examples/99_stress_memory.rs +++ b/rt/examples/99_stress_memory.rs @@ -1,10 +1,12 @@ //! This is just a memory stress test of the runtime. //! -//! Currently using 10 million "actors" this test uses 2.59 GB and takes ~5 +//! Currently using 10 million "actors" this test uses ~3 GB and takes ~3 //! seconds to spawn the actors. #![feature(never_type)] +use std::future::pending; + use log::info; use heph::actor; @@ -40,13 +42,10 @@ fn main() -> Result<(), rt::Error> { /// Our "actor", but it doesn't do much. async fn actor(_: actor::Context) { - /* Nothing. */ + pending().await } async fn control_actor(_: actor::Context) { info!("Running, check the memory usage!"); info!("Send a signal (e.g. by pressing Ctrl-C) to stop."); - // NOTE: don't do this. This is only here to prevent the other actors from - // running. - std::thread::sleep(std::time::Duration::from_secs(100)); } From 519f4dfe20c651112bf8af181e4d5abe3fe8f6a0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 21 Apr 2023 20:08:15 +0200 Subject: [PATCH 105/177] Don't use a different amount of MAX_RUNTIMES in testing --- rt/src/log.rs | 2 +- rt/src/net/mod.rs | 2 +- rt/src/wakers/shared.rs | 3 --- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/rt/src/log.rs b/rt/src/log.rs index 0339a435b..8b32a66d2 100644 --- a/rt/src/log.rs +++ b/rt/src/log.rs @@ -1,4 +1,4 @@ -//! Logging related types. +//! Logging. //! //! Logging in Heph is done via the [`log`] crate, much like the entire Rust //! ecosystem does (or should). However the log crate doesn't provide an actual diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 23d9dc7ca..50b7b84c1 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -1,4 +1,4 @@ -//! Network related types. +//! Networking primitives. //! //! The network module support two types of protocols: //! diff --git a/rt/src/wakers/shared.rs b/rt/src/wakers/shared.rs index 2d5a7130e..25f35242a 100644 --- a/rt/src/wakers/shared.rs +++ b/rt/src/wakers/shared.rs @@ -11,9 +11,6 @@ use crate::{ptr_as_usize, ProcessId}; /// Maximum number of runtimes supported. const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS; /// Number of most significate bits used for the [`WakersId`]. -#[cfg(not(any(test, feature = "test")))] -const MAX_RUNTIMES_BITS: usize = 1; // 3. -#[cfg(any(test, feature = "test"))] const MAX_RUNTIMES_BITS: usize = 8; // 256. 
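// Illustration (not part of this patch): with `MAX_RUNTIMES_BITS = 8` the
// `usize` carried by a shared waker packs the waker/runtime id into the top
// 8 bits and the process id into the remaining 56 bits, roughly:
//
//     fn pack(waker_id: u8, pid: usize) -> usize {
//         debug_assert_eq!(pid & WAKER_ID_MASK, 0);
//         (usize::from(waker_id) << WAKER_ID_SHIFT) | pid
//     }
//
//     fn unpack(data: usize) -> (u8, usize) {
//         let waker_id = ((data & WAKER_ID_MASK) >> WAKER_ID_SHIFT) as u8;
//         (waker_id, data & !WAKER_ID_MASK)
//     }
//
// `pack` and `unpack` are hypothetical helpers named here for illustration
// only; the actual runtime code uses these constants directly.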
const WAKER_ID_SHIFT: usize = usize::BITS as usize - MAX_RUNTIMES_BITS; const WAKER_ID_MASK: usize = (MAX_RUNTIMES - 1) << WAKER_ID_SHIFT; From a088b8cd3fb589845ab816d2a63aec5bc8072dfd Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 24 Apr 2023 22:47:41 +0200 Subject: [PATCH 106/177] Use actor::Context pid as tracing substream id Allows us to remove the pid from ThreadSafe and ThreadLocal. --- inbox/src/lib.rs | 7 +++++++ rt/src/access.rs | 11 ++++++++--- rt/src/lib.rs | 4 ++-- rt/src/shared/mod.rs | 4 ++-- src/actor/context.rs | 5 +++++ 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/inbox/src/lib.rs b/inbox/src/lib.rs index ae1f38ea0..4c512cf0e 100644 --- a/inbox/src/lib.rs +++ b/inbox/src/lib.rs @@ -1289,3 +1289,10 @@ impl Drop for Manager { /// preferred over using this type as they are less error-prone. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct Id(usize); + +impl Id { + #[doc(hidden)] // Not part of the stable API. + pub fn as_usize(self) -> usize { + self.0 + } +} diff --git a/rt/src/access.rs b/rt/src/access.rs index e638a351c..d18d71ffe 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -90,6 +90,7 @@ mod private { fn finish_trace( &mut self, timing: Option, + substream_id: u64, description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ); @@ -164,11 +165,12 @@ impl PrivateAccess for ThreadLocal { fn finish_trace( &mut self, timing: Option, + substream_id: u64, description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ) { self.rt - .finish_trace(timing, self.pid, description, attributes); + .finish_trace(substream_id, timing, description, attributes); } } @@ -281,11 +283,12 @@ impl PrivateAccess for ThreadSafe { fn finish_trace( &mut self, timing: Option, + substream_id: u64, description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ) { self.rt - .finish_trace(timing, self.pid, description, attributes); + .finish_trace(timing, substream_id, description, attributes); } } @@ -331,7 +334,9 @@ where description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ) { - self.runtime().finish_trace(timing, description, attributes); + let substream_id = self.pid() as u64; + self.runtime() + .finish_trace(timing, substream_id, description, attributes); } } diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 239d5f86a..bba0b1d1f 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -660,15 +660,15 @@ impl RuntimeRef { fn finish_trace( &mut self, + substream_id: u64, timing: Option, - pid: ProcessId, description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ) { trace::finish( (*self.internals.trace_log.borrow_mut()).as_mut(), timing, - pid.0 as u64, + substream_id, description, attributes, ); diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 2f2e5487d..2bbd2372c 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -345,14 +345,14 @@ impl RuntimeInternals { pub(crate) fn finish_trace( &self, timing: Option, - pid: ProcessId, + substream_id: u64, description: &str, attributes: &[(&str, &dyn trace::AttributeValue)], ) { trace::finish( self.trace_log.as_deref(), timing, - pid.0 as u64, + substream_id, description, attributes, ); diff --git a/src/actor/context.rs b/src/actor/context.rs index 8c4911956..ad1debc94 100644 --- a/src/actor/context.rs +++ b/src/actor/context.rs @@ -137,6 +137,11 @@ impl Context { pub fn register_inbox_waker(&mut self, waker: &task::Waker) { _ = self.inbox.register_waker(waker); } + + #[doc(hidden)] // Not part of the stable API. 
+ pub fn pid(&self) -> usize { + self.inbox.id().as_usize() + } } /// Error returned in case receiving a value from an actor's inbox fails. From d6557882fb9631ced7ce5de5e17b031d2ed7731f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 24 Apr 2023 22:54:11 +0200 Subject: [PATCH 107/177] Remove pid from Thread{Local,Safe} Reduce the size by 8 bytes (one pointer). Which is also the case for all timers, i.e. all Future types in the timer module, and the actor::Context. --- rt/src/access.rs | 11 ++++------- rt/src/lib.rs | 2 +- rt/src/scheduler/shared/tests.rs | 8 ++++---- rt/src/scheduler/tests.rs | 12 ++++++------ rt/src/shared/mod.rs | 2 +- rt/src/test.rs | 4 ++-- rt/tests/functional/test.rs | 2 +- rt/tests/functional/timer.rs | 12 ++++++------ 8 files changed, 25 insertions(+), 28 deletions(-) diff --git a/rt/src/access.rs b/rt/src/access.rs index d18d71ffe..8ad843ad5 100644 --- a/rt/src/access.rs +++ b/rt/src/access.rs @@ -39,7 +39,6 @@ use heph::actor::{self, NewActor, SyncContext}; use heph::actor_ref::ActorRef; use heph::supervisor::Supervisor; -use crate::process::ProcessId; use crate::spawn::{ActorOptions, FutureOptions, Spawn}; use crate::timers::TimerToken; use crate::trace::{self, Trace}; @@ -115,13 +114,12 @@ pub(crate) use private::PrivateAccess; /// [`actor::Context`]: heph::actor::Context #[derive(Clone)] pub struct ThreadLocal { - pid: ProcessId, rt: RuntimeRef, } impl ThreadLocal { - pub(crate) const fn new(pid: ProcessId, rt: RuntimeRef) -> ThreadLocal { - ThreadLocal { pid, rt } + pub(crate) const fn new(rt: RuntimeRef) -> ThreadLocal { + ThreadLocal { rt } } } @@ -237,13 +235,12 @@ impl fmt::Debug for ThreadLocal { /// [`spawn_future`]: ThreadSafe::spawn_future #[derive(Clone)] pub struct ThreadSafe { - pid: ProcessId, rt: Arc, } impl ThreadSafe { - pub(crate) const fn new(pid: ProcessId, rt: Arc) -> ThreadSafe { - ThreadSafe { pid, rt } + pub(crate) const fn new(rt: Arc) -> ThreadSafe { + ThreadSafe { rt } } /// Spawn a thread-safe [`Future`]. 
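With the `ProcessId` field gone, both access types are reduced to a single handle to the runtime internals (an `Rc` for `ThreadLocal`, an `Arc` for `ThreadSafe`), which is where the 8 bytes saved per timer and per actor context in the size tests below come from. A rough sanity check, assuming the crate-root re-exports `heph_rt::{ThreadLocal, ThreadSafe}` and that each type wraps only a reference-counted pointer:

    use std::mem::size_of;

    use heph_rt::{ThreadLocal, ThreadSafe};

    fn main() {
        // Each access type should now be pointer sized: a single
        // (reference counted) pointer to the runtime internals.
        assert_eq!(size_of::<ThreadLocal>(), size_of::<usize>());
        assert_eq!(size_of::<ThreadSafe>(), size_of::<usize>());
    }
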
diff --git a/rt/src/lib.rs b/rt/src/lib.rs index bba0b1d1f..f24d08096 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -698,7 +698,7 @@ where .add_new_process(options.priority(), |pid| { let name = NA::name(); debug!(pid = pid.0, name = name; "spawning thread-local actor"); - let rt = ThreadLocal::new(pid, self.clone()); + let rt = ThreadLocal::new(self.clone()); ActorFuture::new(supervisor, new_actor, arg, rt) }) } diff --git a/rt/src/scheduler/shared/tests.rs b/rt/src/scheduler/shared/tests.rs index 9eb37bbf0..69305e85e 100644 --- a/rt/src/scheduler/shared/tests.rs +++ b/rt/src/scheduler/shared/tests.rs @@ -10,7 +10,7 @@ use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, ProcessId}; use crate::scheduler::shared::{Priority, ProcessData, Scheduler}; -use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; +use crate::test::{self, nop_task_waker, AssertUnmoved}; use crate::ThreadSafe; fn assert_size(expected: usize) { @@ -115,7 +115,7 @@ fn scheduler_run_order() { for (id, priority) in priorities.iter().enumerate() { let pid = scheduler .add_new_process(*priority, |pid| { - let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + let rt = ThreadSafe::new(test::shared_internals()); ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) .map(|(future, _)| (future, pid)) }) @@ -162,7 +162,7 @@ fn assert_actor_process_unmoved() { let pid = scheduler .add_new_process(Priority::NORMAL, |pid| { - let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + let rt = ThreadSafe::new(test::shared_internals()); ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) .map(|(future, _)| (future, pid)) }) @@ -215,7 +215,7 @@ fn add_test_actor(scheduler: &Scheduler, priority: Priority) -> ProcessId { scheduler .add_new_process(priority, |pid| { let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadSafe::new(TEST_PID, test::shared_internals()); + let rt = ThreadSafe::new(test::shared_internals()); ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) }) .unwrap() diff --git a/rt/src/scheduler/tests.rs b/rt/src/scheduler/tests.rs index 7e8878deb..819046d53 100644 --- a/rt/src/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -14,7 +14,7 @@ use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, Process, ProcessId}; use crate::scheduler::{ProcessData, Scheduler}; use crate::spawn::options::Priority; -use crate::test::{self, nop_task_waker, AssertUnmoved, TEST_PID}; +use crate::test::{self, nop_task_waker, AssertUnmoved}; use crate::ThreadLocal; fn assert_size(expected: usize) { @@ -64,7 +64,7 @@ fn add_actor() { let _ = scheduler .add_new_process(Priority::NORMAL, |_| { let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(TEST_PID, test::runtime()); + let rt = ThreadLocal::new(test::runtime()); ActorFuture::new(NoSupervisor, new_actor, (), rt) }) .unwrap(); @@ -82,7 +82,7 @@ fn mark_ready() { let pid = scheduler .add_new_process(Priority::NORMAL, |pid| { let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(TEST_PID, test::runtime()); + let rt = ThreadLocal::new(test::runtime()); ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) }) .unwrap(); @@ -214,7 +214,7 @@ fn scheduler_run_order() { for (id, priority) in priorities.iter().enumerate() { let pid = scheduler .add_new_process(*priority, |pid| { - let rt = ThreadLocal::new(TEST_PID, test::runtime()); + let rt = ThreadLocal::new(test::runtime()); 
ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) .map(|(future, _)| (future, pid)) }) @@ -261,7 +261,7 @@ fn assert_actor_process_unmoved() { let pid = scheduler .add_new_process(Priority::NORMAL, |pid| { - let rt = ThreadLocal::new(TEST_PID, test::runtime()); + let rt = ThreadLocal::new(test::runtime()); ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) .map(|(future, _)| (future, pid)) }) @@ -312,7 +312,7 @@ fn add_test_actor(scheduler: &mut Scheduler, priority: Priority) -> ProcessId { scheduler .add_new_process(priority, |pid| { let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(TEST_PID, test::runtime()); + let rt = ThreadLocal::new(test::runtime()); ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) }) .unwrap() diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 2bbd2372c..f0ad829e3 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -240,7 +240,7 @@ impl RuntimeInternals { self.scheduler.add_new_process(options.priority(), |pid| { let name = NA::name(); debug!(pid = pid.0, name = name; "spawning thread-safe actor"); - let rt = ThreadSafe::new(pid, self.clone()); + let rt = ThreadSafe::new(self.clone()); ActorFuture::new(supervisor, new_actor, arg, rt) }) } diff --git a/rt/src/test.rs b/rt/src/test.rs index 70bb9f5ac..065c67791 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -395,7 +395,7 @@ where NA: NewActor, { let (manager, sender, receiver) = Manager::new_small_channel(); - let ctx = actor::Context::new(receiver, ThreadLocal::new(TEST_PID, runtime())); + let ctx = actor::Context::new(receiver, ThreadLocal::new(runtime())); let actor = new_actor.new(ctx, arg)?; Ok((actor, manager, ActorRef::local(sender))) } @@ -410,7 +410,7 @@ where NA: NewActor, { let (manager, sender, receiver) = Manager::new_small_channel(); - let ctx = actor::Context::new(receiver, ThreadSafe::new(TEST_PID, shared_internals())); + let ctx = actor::Context::new(receiver, ThreadSafe::new(shared_internals())); let actor = new_actor.new(ctx, arg)?; Ok((actor, manager, ActorRef::local(sender))) } diff --git a/rt/tests/functional/test.rs b/rt/tests/functional/test.rs index 55a63c711..cf8a0eea1 100644 --- a/rt/tests/functional/test.rs +++ b/rt/tests/functional/test.rs @@ -33,7 +33,7 @@ fn test_size_of_actor() { #[allow(trivial_casts)] { - assert_eq!(size_of_actor_val(&(actor1 as fn(_) -> _)), 40); + assert_eq!(size_of_actor_val(&(actor1 as fn(_) -> _)), 32); } struct Na; diff --git a/rt/tests/functional/timer.rs b/rt/tests/functional/timer.rs index 4f6e1cb02..537892178 100644 --- a/rt/tests/functional/timer.rs +++ b/rt/tests/functional/timer.rs @@ -20,12 +20,12 @@ const TIMEOUT: Duration = Duration::from_millis(100); #[test] fn size() { - assert_size::>(48); - assert_size::>(48); - assert_size::>(48); - assert_size::>(48); - assert_size::>(64); - assert_size::>(64); + assert_size::>(40); + assert_size::>(40); + assert_size::>(40); + assert_size::>(40); + assert_size::>(56); + assert_size::>(56); assert_size::(0); } From 2d7a7372beb27d94bcedc18cd0ff91ccf4748b63 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:37:51 +0200 Subject: [PATCH 108/177] Don't assume pid is equal to the pointer of the process As this will not be the case in the future. 
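Concretely: both inactive trees keep being indexed by the `ProcessId` (whose low `SKIP_BITS` must be zero, now checked by `ok_pid`), while the values stored in the slots are tagged pointers to `ProcessData`, so a process' pid can no longer be derived from the pointer value itself and has to be carried separately (hence the `unsafe` `as_pid` calls below). A standalone sketch of the tagging scheme, using the same tag values as the shared tree in this patch; `tag_ptr` and `untag_ptr` are illustrative helpers, not code from this patch:

    /// The two least significant bits of a boxed `ProcessData` are always
    /// zero (it is at least 4-byte aligned), leaving room for a 2-bit tag.
    const TAG_MASK: usize = 0b11;
    const BRANCH_TAG: usize = 0b01;
    const PROCESS_TAG: usize = 0b10;
    const READY_TO_RUN: usize = 0b11;

    fn tag_ptr(ptr: *mut (), tag: usize) -> *mut () {
        debug_assert_eq!(ptr as usize & TAG_MASK, 0);
        debug_assert!(tag != 0 && tag <= TAG_MASK);
        (ptr as usize | tag) as *mut ()
    }

    fn untag_ptr(tagged: *mut ()) -> (*mut (), usize) {
        ((tagged as usize & !TAG_MASK) as *mut (), tagged as usize & TAG_MASK)
    }
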
--- rt/src/scheduler/inactive.rs | 32 ++--- rt/src/scheduler/shared/inactive.rs | 180 +++++++++++++++------------- rt/src/scheduler/tests.rs | 2 +- 3 files changed, 118 insertions(+), 96 deletions(-) diff --git a/rt/src/scheduler/inactive.rs b/rt/src/scheduler/inactive.rs index 9628ab6a4..837e955a9 100644 --- a/rt/src/scheduler/inactive.rs +++ b/rt/src/scheduler/inactive.rs @@ -14,14 +14,14 @@ const N_BRANCHES: usize = 1 << LEVEL_SHIFT; // 16 /// Number of bits to mask per level. const LEVEL_MASK: usize = (1 << LEVEL_SHIFT) - 1; /// For alignment reasons the two least significant bits of a boxed -/// `ProcessData` are always 0, so we can safely skip them. Also see `ok_ptr` +/// `ProcessData` are always 0, so we can safely skip them. Also see `ok_pid` /// and alignment tests below. const SKIP_BITS: usize = 2; const SKIP_MASK: usize = (1 << SKIP_BITS) - 1; -/// Returns `false` if `ptr`'s `SKIP_BITS` aren't valid. -pub(super) fn ok_ptr(ptr: *const ()) -> bool { - ptr as usize & max(SKIP_MASK, POINTER_TAG_BITS) == 0 +/// Returns `false` if `pid`'s `SKIP_BITS` aren't valid. +fn ok_pid(pid: ProcessId) -> bool { + pid.0 & max(SKIP_MASK, POINTER_TAG_BITS) == 0 } /// Inactive processes. @@ -30,6 +30,9 @@ pub(super) fn ok_ptr(ptr: *const ()) -> bool { /// to a `Branch`, which again contains four pointers, or point to /// `ProcessData`. /// +/// Indexing into the structure is done using the `ProcessId` of the process, +/// however the pointer itself points to `ProcessData`. +/// /// Because processes should have short ready state times (see process states), /// but longer total lifetime they quickly move into and out from the structure. /// To ensure operations remain quick we keep the structure of tree in place @@ -41,16 +44,15 @@ pub(super) fn ok_ptr(ptr: *const ()) -> bool { /// * , /// * Ideal Hash Trees by Phil Bagwell /// * Fast And Space Efficient Trie Searches by Phil Bagwell -// `pub(super)` because its used in `AddActor`. #[derive(Debug)] -pub(super) struct Inactive { +pub(crate) struct Inactive { root: Branch, length: usize, } impl Inactive { /// Create an empty `Inactive` tree. - pub(super) const fn empty() -> Inactive { + pub(crate) const fn empty() -> Inactive { Inactive { root: Branch::empty(), length: 0, @@ -58,26 +60,26 @@ impl Inactive { } /// Returns the number of processes in the inactive list. - pub(super) const fn len(&self) -> usize { + pub(crate) const fn len(&self) -> usize { self.length } /// Returns `true` if the queue contains a process. - pub(super) const fn has_process(&self) -> bool { + pub(crate) const fn has_process(&self) -> bool { self.length != 0 } /// Add a `process`. - pub(super) fn add(&mut self, process: Pin>) { + pub(crate) fn add(&mut self, process: Pin>) { let pid = process.as_ref().id(); - // Ensure `SKIP_BITS` is correct. - debug_assert!(pid.0 & SKIP_MASK == 0); + debug_assert!(ok_pid(pid)); self.root.add(process, pid.0 >> SKIP_BITS, 0); self.length += 1; } /// Removes the process with id `pid`, if any. 
- pub(super) fn remove(&mut self, pid: ProcessId) -> Option>> { + pub(crate) fn remove(&mut self, pid: ProcessId) -> Option>> { + debug_assert!(ok_pid(pid)); self.root.remove(pid, pid.0 >> SKIP_BITS).map(|process| { debug_assert_eq!(process.as_ref().id(), pid); self.length -= 1; @@ -239,7 +241,7 @@ impl Pointer { impl From>> for Pointer { fn from(process: Pin>) -> Pointer { #[allow(trivial_casts)] - let ptr = Box::leak(Pin::into_inner(process)) as *mut _; + let ptr = Box::into_raw(Pin::into_inner(process)); let ptr = (ptr as usize | PROCESS_TAG) as *mut (); Pointer { tagged_ptr: unsafe { NonNull::new_unchecked(ptr) }, @@ -250,7 +252,7 @@ impl From>> for Pointer { impl From>> for Pointer { fn from(process: Pin>) -> Pointer { #[allow(trivial_casts)] - let ptr = Box::leak(Pin::into_inner(process)) as *mut _; + let ptr = Box::into_raw(Pin::into_inner(process)); let ptr = (ptr as usize | BRANCH_TAG) as *mut (); Pointer { tagged_ptr: unsafe { NonNull::new_unchecked(ptr) }, diff --git a/rt/src/scheduler/shared/inactive.rs b/rt/src/scheduler/shared/inactive.rs index 26ab1b063..c1776080d 100644 --- a/rt/src/scheduler/shared/inactive.rs +++ b/rt/src/scheduler/shared/inactive.rs @@ -18,9 +18,9 @@ const LEVEL_MASK: usize = (1 << LEVEL_SHIFT) - 1; const SKIP_BITS: usize = 2; const SKIP_MASK: usize = (1 << SKIP_BITS) - 1; -/// Returns `false` if `ptr`'s `SKIP_BITS` aren't valid. -pub(super) fn ok_ptr(ptr: *const ()) -> bool { - ptr as usize & SKIP_MASK == 0 +/// Returns `false` if `pid`'s `SKIP_BITS` aren't valid. +fn ok_pid(pid: ProcessId) -> bool { + pid.0 & SKIP_MASK == 0 } /// Inactive processes. @@ -32,6 +32,9 @@ pub(super) fn ok_ptr(ptr: *const ()) -> bool { /// * a marker to indicate a process was marked as ready to run, /// * or a null pointer to indicate the slot is empty. /// +/// Indexing into the structure is done using the `ProcessId` of the process, +/// however the pointer itself points to `ProcessData`. +/// /// Because processes should have short ready state times (see process states), /// but longer total lifetime they quickly move into and out from this /// structure. To ensure operations remain quick we keep the structure of tree @@ -45,9 +48,9 @@ pub(super) fn ok_ptr(ptr: *const ()) -> bool { /// * Ideal Hash Trees by Phil Bagwell /// * Fast And Space Efficient Trie Searches by Phil Bagwell #[derive(Debug)] -pub(super) struct Inactive { +pub(crate) struct Inactive { root: Branch, - /// The number of processes is the tree, **not** markers. + /// The number of processes in the tree, **not** markers. /// NOTE: do not use the value for correctness, it's highly likely to be /// outdated. length: AtomicUsize, @@ -55,7 +58,7 @@ pub(super) struct Inactive { impl Inactive { /// Create an empty `Inactive` tree. - pub(super) const fn empty() -> Inactive { + pub(crate) const fn empty() -> Inactive { Inactive { root: Branch::empty(), length: AtomicUsize::new(0), @@ -63,7 +66,7 @@ impl Inactive { } /// Returns the number of processes in the inactive list. - pub(super) fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { let len = self.length.load(Ordering::Relaxed); // The `length` can actually underflow quite easily, to not report a // clearly incorrect value we'll report zero instead. @@ -79,7 +82,7 @@ impl Inactive { /// # Notes /// /// Once this function returns the value could already be outdated. 
- pub(super) fn has_process(&self) -> bool { + pub(crate) fn has_process(&self) -> bool { // NOTE: doing anything based on this function is prone to race // conditions, so relaxed ordering is fine. self.length.load(Ordering::Relaxed) != 0 @@ -89,9 +92,9 @@ impl Inactive { /// /// It will add `process` to `run_queue` if it was marked as ready-to-run /// while it was removed from the `Inactive` tree. - pub(super) fn add(&self, process: Pin>, run_queue: &RunQueue) { + pub(crate) fn add(&self, process: Pin>, run_queue: &RunQueue) { let pid = process.as_ref().id(); - debug_assert!(ok_ptr(pid.0 as *mut ())); + debug_assert!(ok_pid(pid)); let changed = self.root.add(process, pid.0 >> SKIP_BITS, 0, run_queue); self.update_length(changed); } @@ -99,30 +102,29 @@ impl Inactive { /// Removes the process with id `pid`, if the process is currently not /// stored in the `Inactive` tree it is marked as ready and /// [`Inactive::add`] will return it once added back. - pub(super) fn mark_ready(&self, pid: ProcessId, run_queue: &RunQueue) { - debug_assert!(ok_ptr(pid.0 as *mut ())); + pub(crate) fn mark_ready(&self, pid: ProcessId, run_queue: &RunQueue) { + debug_assert!(ok_pid(pid)); let changed = self.root.mark_ready(pid, pid.0 >> SKIP_BITS, 0, run_queue); self.update_length(changed); } /// Mark `process` as complete, removing a ready marker from the tree. - pub(super) fn complete(&self, process: Pin>) { + pub(crate) fn complete(&self, process: Pin>) { let pid = process.as_ref().id(); - debug_assert!(ok_ptr(pid.0 as *mut ())); - let tagged_pid = ready_to_run(pid); + debug_assert!(ok_pid(pid)); + let ready_marker = ready_to_run(pid); let mut node = &self.root; let mut w_pid = pid.0 >> SKIP_BITS; - // Safety: this needs to sync with all possible points that can change + // SAFETY: this needs to sync with all possible points that can change // this value; all need to use `Acquire`/`Release` (or `AcqRel`). let mut old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); loop { - if old_ptr == tagged_pid { + if old_ptr == ready_marker { // Found the marker, try to remove it. - debug_assert!(is_ready_marker(old_ptr)); - // Safety: see comment for `load` above. + // SAFETY: see comment for `load` above. match node.branches[w_pid & LEVEL_MASK].compare_exchange( - tagged_pid, + old_ptr, ptr::null_mut(), Ordering::AcqRel, Ordering::Acquire, @@ -139,21 +141,19 @@ impl Inactive { // updated (`old`) pointer. Err(old) => old_ptr = old, } - } else if old_ptr.is_null() || is_process(old_ptr) || is_ready_marker(old_ptr) { - // No marker for the process in the tree. - break; - } else { - debug_assert!(is_branch(old_ptr)); + } else if is_branch(old_ptr) { // Pointer is a branch. Try at the next level. let branch_ptr: *mut Branch = as_ptr(old_ptr).cast(); w_pid >>= LEVEL_SHIFT; debug_assert!(!branch_ptr.is_null()); - // Safety: if the pointer is not null, a process or a ready - // marker it must be a branch. Non-null pointers must always be - // valid. + // SAFETY: if the pointer is a branch it must be always valid as + // per the comment on `Branch.branches`. node = unsafe { &*branch_ptr }; - // Safety: see comment for `load` above. + // SAFETY: see comment for `load` above. old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); + } else { + // No marker for the process in the tree. + break; } } @@ -167,11 +167,11 @@ impl Inactive { match n { 0 => {} n if n.is_negative() => { - // Safety: needs to sync with below. + // SAFETY: needs to sync with below. 
_ = self.length.fetch_sub(-n as usize, Ordering::AcqRel); } n => { - // Safety: needs to sync with above. + // SAFETY: needs to sync with above. _ = self.length.fetch_add(n as usize, Ordering::AcqRel); } } @@ -197,9 +197,9 @@ type TaggedPointer = *mut (); /// Tags used for the `Pointer`. const TAG_BITS: usize = 2; const TAG_MASK: usize = (1 << TAG_BITS) - 1; -const BRANCH_TAG: usize = 0b00; -const PROCESS_TAG: usize = 0b01; -const READY_TO_RUN: usize = 0b10; +const BRANCH_TAG: usize = 0b01; +const PROCESS_TAG: usize = 0b10; +const READY_TO_RUN: usize = 0b11; impl Branch { /// Create an empty `Branch`. @@ -220,13 +220,15 @@ impl Branch { depth: usize, run_queue: &RunQueue, ) -> isize { + let pid = process.as_ref().id(); let process = tag_process(process); - self._add(process, w_pid, depth, run_queue) + self._add(process, pid, w_pid, depth, run_queue) } fn _add( &self, process: TaggedPointer, + pid: ProcessId, mut w_pid: usize, mut depth: usize, run_queue: &RunQueue, @@ -240,7 +242,7 @@ impl Branch { if old_ptr.is_null() { // Empty slot, we can put the `process` into it. match node.branches[w_pid & LEVEL_MASK].compare_exchange( - ptr::null_mut(), + old_ptr, process, Ordering::AcqRel, Ordering::Acquire, @@ -256,13 +258,14 @@ impl Branch { w_pid >>= LEVEL_SHIFT; depth += 1; debug_assert!(!branch_ptr.is_null()); - // Safety: checked if the pointer is a branch above and per the + // SAFETY: checked if the pointer is a branch above and per the // docs of `Branch.branches` once it's a branch it's immutable. node = unsafe { &*branch_ptr }; old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); - } else if is_ready_marker(old_ptr) && as_pid(old_ptr) == as_pid(process) { + } else if is_ready_marker(old_ptr) && unsafe { as_pid(old_ptr) } == pid { + // SAFETY: (above) `as_pid` is safe to call on ready markers. // Found a ready marker for the process we want to add. - // Remove it and add the process to the run queue. + // Remove it and add the process to the run queue instead. match node.branches[w_pid & LEVEL_MASK].compare_exchange( old_ptr, ptr::null_mut(), @@ -271,9 +274,10 @@ impl Branch { ) { Ok(old) => { debug_assert!(is_ready_marker(old)); - debug_assert!(as_pid(old) == as_pid(process)); - debug_assert!(is_process(process)); - // Safety: caller must ensure `process` is tagged + // SAFETY: per above `old` is a ready marker, thus it is + // safe to call `as_pid`. + debug_assert!(unsafe { as_pid(old) } == pid); + // SAFETY: caller must ensure `process` is tagged // pointer to a process. let process = unsafe { process_from_tagged(process) }; run_queue.add(process); @@ -298,22 +302,24 @@ impl Branch { // Now we have to add two processes (or markers). First // we create the branch structure that can hold the two // processes, i.e. create enough branches to the point - // the two pointers differ in the branch slots. - // Required depth to go were the pointers are in - // different slots. - let req_depth = diff_branch_depth(as_pid(other_process), as_pid(process)); + // the two pointers differ in the branch slots. Required + // depth to go were the pointers are in different slots. + // SAFETY: we own `other_process` so we can safely call + // `as_pid`. + let other_pid = unsafe { as_pid(other_process) }; + let req_depth = diff_branch_depth(other_pid, pid); debug_assert!(req_depth > depth); changed += node.add_branches(req_depth, ptr::null_mut(), w_pid, depth, run_queue); // Add the other process/marker. 
changed += if is_process(other_process) { - let w_pid = wpid_for(other_process, depth); + let w_pid = wpid_for(other_pid, depth); // NOTE: `-1` because we've just removed the process // above that we're going to add again here. - node._add(other_process, w_pid, depth, run_queue) - 1 + node._add(other_process, other_pid, w_pid, depth, run_queue) - 1 } else { debug_assert!(is_ready_marker(other_process)); - let w_pid = wpid_for(other_process, depth); + let w_pid = wpid_for(other_pid, depth); node._mark_ready(other_process, w_pid, depth, run_queue) }; // Continue our own adding process. @@ -349,6 +355,8 @@ impl Branch { run_queue: &RunQueue, ) -> isize { debug_assert!(is_ready_marker(marker)); + // SAFETY: `as_pid` is safe to call with a ready marker. + let marker_pid = unsafe { as_pid(marker) }; let mut node = self; // NOTE: from this point on `self` is invalid, use `node` instead. let mut old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); @@ -373,14 +381,17 @@ impl Branch { w_pid >>= LEVEL_SHIFT; depth += 1; debug_assert!(!branch_ptr.is_null()); - // Safety: checked if the pointer is a branch above and per the + // SAFETY: checked if the pointer is a branch above and per the // docs of `Branch.branches` once it's a branch it's immutable. node = unsafe { &*branch_ptr }; old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); - } else if is_ready_marker(old_ptr) && as_pid(old_ptr) == as_pid(marker) { + } else if is_ready_marker(old_ptr) && unsafe { as_pid(old_ptr) } == marker_pid { + // SAFETY: (above) `as_pid` is safe to call on ready markers. // Already has a marker for the process. return changed; - } else if is_process(old_ptr) && as_pid(old_ptr) == as_pid(marker) { + } else if is_process(old_ptr) && unsafe { as_pid(old_ptr) } == marker_pid { + // SAFETY: (above) `as_pid` is safe to call on ready markers. + // Already has a marker for the process. // Found the process, remove it. match node.branches[w_pid & LEVEL_MASK].compare_exchange( old_ptr, @@ -391,7 +402,7 @@ impl Branch { Ok(_) => { debug_assert!(is_process(old_ptr)); debug_assert!(!as_ptr(old_ptr).is_null()); - // Safety: checked if the pointer is a process above. + // SAFETY: checked if the pointer is a process above. let process = unsafe { process_from_tagged(old_ptr) }; run_queue.add(process); return changed - 1; @@ -416,21 +427,24 @@ impl Branch { // processes, i.e. create enough branches to the point // the two pointers differ in the branch slots. debug_assert!(is_process(other_process) || is_ready_marker(other_process)); + // SAFETY: we own `other_process` so we can safely call + // `as_pid`. + let other_pid = unsafe { as_pid(other_process) }; // Required depth to go were the pointers are in different slots. - let req_depth = diff_branch_depth(as_pid(other_process), as_pid(marker)); + let req_depth = diff_branch_depth(other_pid, marker_pid); debug_assert!(req_depth > depth); changed += node.add_branches(req_depth, ptr::null_mut(), w_pid, depth, run_queue); // Add the other process/marker. changed += if is_process(other_process) { debug_assert!(is_process(other_process)); - let w_pid = wpid_for(other_process, depth); + let w_pid = wpid_for(other_pid, depth); // NOTE: `-1` because we've just removed the process // above that we're going to add again here. 
- node._add(other_process, w_pid, depth, run_queue) - 1 + node._add(other_process, other_pid, w_pid, depth, run_queue) - 1 } else { debug_assert!(is_ready_marker(other_process)); - let w_pid = wpid_for(other_process, depth); + let w_pid = wpid_for(other_pid, depth); node._mark_ready(other_process, w_pid, depth, run_queue) }; // Continue our own adding process. @@ -481,19 +495,23 @@ impl Branch { w_pid >>= LEVEL_SHIFT; depth += 1; debug_assert!(!branch_ptr.is_null()); - // Safety: create the branch pointer ourselves, so we know + // SAFETY: created the branch pointer ourselves, so we know // it's a branch. node = unsafe { &*branch_ptr.cast() }; old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); if is_process(old) { debug_assert!(is_process(old)); - let w_pid = wpid_for(old, depth); + // SAFETY: we own `old` so we can safely call `as_pid`. + let old_pid = unsafe { as_pid(old) }; + let w_pid = wpid_for(old_pid, depth); // NOTE: -1 because we've just removed the process. - changed += node._add(old, w_pid, depth, run_queue) - 1; + changed += node._add(old, old_pid, w_pid, depth, run_queue) - 1; } else if is_ready_marker(old) { debug_assert!(is_ready_marker(old)); - let w_pid = wpid_for(old, depth); + // SAFETY: `old` is a ready marker so it's safe to call. + let old_pid = unsafe { as_pid(old) }; + let w_pid = wpid_for(old_pid, depth); changed += node._mark_ready(old, w_pid, depth, run_queue); } else { debug_assert!(old_ptr.is_null()); @@ -513,14 +531,14 @@ impl Branch { w_pid >>= LEVEL_SHIFT; depth += 1; debug_assert!(!branch_ptr.is_null()); - // Safety: checked if it's a branch pointer and non-null above. + // SAFETY: checked if it's a branch pointer and non-null above. node = unsafe { &*branch_ptr.cast() }; old_ptr = node.branches[w_pid & LEVEL_MASK].load(Ordering::Acquire); } } if let Some(branch) = w_branch { - // Safety: created the pointer ourselves. + // SAFETY: created the pointer ourselves. unsafe { drop(branch_from_tagged(branch)) }; } @@ -536,7 +554,7 @@ impl fmt::Debug for Branch { _ if ptr.is_null() => &"null", BRANCH_TAG => { let branch: *mut Branch = as_ptr(ptr).cast(); - // Safety: check if it's a branch pointer. + // SAFETY: check if it's a branch pointer. unsafe { &*branch } } PROCESS_TAG => &"process", @@ -563,22 +581,19 @@ fn diff_branch_depth(pid1: ProcessId, pid2: ProcessId) -> usize { /// Converts `process` into a tagged pointer. fn tag_process(process: Pin>) -> TaggedPointer { #[allow(trivial_casts)] - let ptr = Box::into_raw(Pin::into_inner(process)).cast(); - debug_assert!(ok_ptr(ptr)); + let ptr = Box::into_raw(Pin::into_inner(process)).cast::<()>(); (ptr as usize | PROCESS_TAG) as *mut () } /// Tag a pointer as pointing to a branch. fn tag_branch(branch: Pin>) -> TaggedPointer { #[allow(trivial_casts)] - let ptr = Box::into_raw(Pin::into_inner(branch)).cast(); - debug_assert!(ok_ptr(ptr)); + let ptr = Box::into_raw(Pin::into_inner(branch)).cast::<()>(); (ptr as usize | BRANCH_TAG) as *mut () } /// Create a mark ready-to-run `Pointer`. fn ready_to_run(pid: ProcessId) -> TaggedPointer { - debug_assert!(ok_ptr(pid.0 as *mut ())); (pid.0 | READY_TO_RUN) as *mut () } @@ -623,14 +638,18 @@ fn has_tag(ptr: TaggedPointer, tag: usize) -> bool { (ptr as usize & TAG_MASK) == tag } -/// Returns this pointer `ProcessId`. +/// Returns this pointer as `ProcessId`. /// -/// # Notes +/// # Safety /// -/// This is only valid for process pointers and ready-to-run markers. 
-fn as_pid(ptr: TaggedPointer) -> ProcessId { - debug_assert!(is_process(ptr) || is_ready_marker(ptr)); - ProcessId(as_ptr(ptr) as usize) +/// This is only valid for ready-to-run markers or **owned** processes. +unsafe fn as_pid(ptr: TaggedPointer) -> ProcessId { + if is_process(ptr) { + Pin::new(unsafe { &*(as_ptr(ptr).cast::()) }).id() + } else { + debug_assert!(is_ready_marker(ptr)); + ProcessId(as_ptr(ptr) as usize) + } } /// Returns the raw pointer without its tag. @@ -639,8 +658,8 @@ fn as_ptr(ptr: TaggedPointer) -> *mut () { } /// Returns the working pid for `ptr` at `depth`. -fn wpid_for(ptr: TaggedPointer, depth: usize) -> usize { - ptr as usize >> ((depth * LEVEL_SHIFT) + SKIP_BITS) +fn wpid_for(pid: ProcessId, depth: usize) -> usize { + pid.0 >> ((depth * LEVEL_SHIFT) + SKIP_BITS) } impl Drop for Branch { @@ -663,9 +682,9 @@ unsafe fn drop_tagged_pointer(ptr: TaggedPointer) { } match ptr as usize & TAG_MASK { - // Safety: checked for non-null and that it's a branch. + // SAFETY: checked for non-null and that it's a branch. BRANCH_TAG => drop(branch_from_tagged(ptr)), - // Safety: checked for non-null and that it's a process. + // SAFETY: checked for non-null and that it's a process. PROCESS_TAG => drop(process_from_tagged(ptr)), READY_TO_RUN => { /* Just a marker, nothing to drop. */ } _ => unreachable!(), @@ -797,7 +816,8 @@ mod tests { assert!(!is_process(tagged_pid)); assert!(!is_branch(tagged_pid)); assert!(is_ready_marker(tagged_pid)); - let pid2 = as_pid(tagged_pid); + // SAFETY: `tagged_pid` is a ready marker so it's safe to call. + let pid2 = unsafe { as_pid(tagged_pid) }; assert_eq!(pid, pid2); } diff --git a/rt/src/scheduler/tests.rs b/rt/src/scheduler/tests.rs index 819046d53..0e7162ec4 100644 --- a/rt/src/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -100,7 +100,7 @@ fn mark_ready_before_run() { let mut scheduler = Scheduler::new(); // Incorrect (outdated) pid should be ok. - scheduler.mark_ready(ProcessId(1)); + scheduler.mark_ready(ProcessId(100)); let pid = add_test_actor(&mut scheduler, Priority::NORMAL); From 6677254dd23a0c4c1debeb7171247230ff2f2ec2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:39:17 +0200 Subject: [PATCH 109/177] Add pid to actor::Context --- inbox/src/lib.rs | 5 ++++ rt/src/process/mod.rs | 62 +++++++++++++++++++++++++++---------------- src/actor/future.rs | 5 ++++ 3 files changed, 49 insertions(+), 23 deletions(-) diff --git a/inbox/src/lib.rs b/inbox/src/lib.rs index 4c512cf0e..d30a1fc6a 100644 --- a/inbox/src/lib.rs +++ b/inbox/src/lib.rs @@ -1226,6 +1226,11 @@ impl Manager { } } + /// Returns the id of the channel. + pub fn id(&self) -> Id { + Id(self.channel.as_ptr() as *const () as usize) + } + fn channel(&self) -> &Channel { unsafe { self.channel.as_ref() } } diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 299405b33..5a1075c52 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -4,12 +4,13 @@ use std::any::Any; use std::cmp::Ordering; use std::fmt; use std::future::Future; +use std::mem::size_of_val; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; -use heph::actor::{self, ActorFuture, NewActor}; +use heph::actor::{ActorFuture, NewActor}; use heph::supervisor::Supervisor; use log::{as_debug, error, trace}; use mio::Token; @@ -58,11 +59,20 @@ impl fmt::Display for ProcessId { /// /// The implementation of the [`Future`] MUST catch panics. 
pub(crate) trait Process: Future { - /// Return the name of this process, used in logging. - fn name(&self) -> &'static str { - // Best we can do. - actor::name::() + /// Return the id for the process. + fn id(self: Pin<&Self>, alternative: ProcessId) -> ProcessId { + if size_of_val(&*self) == 0 { + // For zero sized types Box doesn't make an actual allocation, which + // means that the pointer will be the same for all zero sized types, + // i.e. not unique. + alternative + } else { + ProcessId((&*self as *const Self).cast::<()>() as usize) + } } + + /// Return the name of this process, used in logging. + fn name(&self) -> &'static str; } /// Wrapper around a [`Future`] to implement [`Process`]. @@ -99,7 +109,14 @@ fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str { } } -impl Process for FutureProcess where Fut: Future {} +impl Process for FutureProcess +where + Fut: Future, +{ + fn name(&self) -> &'static str { + "FutureProcess" + } +} // NOTE: `ActorFuture` already catches panics for us. impl Process for ActorFuture @@ -108,6 +125,10 @@ where NA: NewActor, RT: Clone, { + fn id(self: Pin<&Self>, _: ProcessId) -> ProcessId { + ProcessId(self.pid()) + } + fn name(&self) -> &'static str { NA::name() } @@ -142,22 +163,17 @@ impl ProcessData
<P>
{ pub(crate) fn set_fair_runtime(&mut self, fair_runtime: Duration) { self.fair_runtime = fair_runtime; } +} +impl ProcessData
<P>
{ /// Returns the process identifier, or pid for short. - pub(crate) fn id(self: Pin<&Self>) -> ProcessId { - // Since the pid only job is to be unique we just use the pointer to - // this structure as pid. This way we don't have to store any additional - // pid in the structure itself or in the scheduler. - #[allow(trivial_casts)] - let ptr = - unsafe { (Pin::into_inner_unchecked(self) as *const ProcessData
<P>
).cast::() }; - ProcessId(ptr as usize) + pub(crate) fn id(&self) -> ProcessId { + let alternative = ProcessId(&*self as *const Self as usize); + self.process.as_ref().id(alternative) } -} -impl ProcessData
<P>
{ /// Returns the name of the process. - pub(crate) fn name(self: Pin<&Self>) -> &'static str { + pub(crate) fn name(&self) -> &'static str { self.process.name() } @@ -183,15 +199,15 @@ impl ProcessData
<P>
{ } } -impl Eq for ProcessData
<P>
{} +impl Eq for ProcessData
<P>
{} -impl PartialEq for ProcessData
<P>
{
+impl PartialEq for ProcessData
<P>
{ fn eq(&self, other: &Self) -> bool { Pin::new(self).id() == Pin::new(other).id() } } -impl Ord for ProcessData
<P>
{
+impl Ord for ProcessData
<P>
{ fn cmp(&self, other: &Self) -> Ordering { (other.fair_runtime) .cmp(&(self.fair_runtime)) @@ -199,7 +215,7 @@ impl Ord for ProcessData
<P>
{ } } -impl PartialOrd for ProcessData
<P>
{
+impl PartialOrd for ProcessData
<P>
{ fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } @@ -208,8 +224,8 @@ impl PartialOrd for ProcessData
<P>
{
 impl fmt::Debug for ProcessData
<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Process") - .field("id", &Pin::new(self).id()) - .field("name", &self.process.name()) + .field("id", &self.id()) + .field("name", &self.name()) .field("priority", &self.priority) .field("fair_runtime", &self.fair_runtime) .finish() diff --git a/src/actor/future.rs b/src/actor/future.rs index 6ec9a3af0..a82ecc5b1 100644 --- a/src/actor/future.rs +++ b/src/actor/future.rs @@ -81,6 +81,11 @@ where NA::name() } + #[doc(hidden)] // Not part of the stable API. + pub fn pid(&self) -> usize { + self.inbox.id().as_usize() + } + /// Returns `Poll::Pending` if the actor was successfully restarted, /// `Poll::Ready` if the actor wasn't restarted (or failed to restart). fn handle_actor_error( From 93883b3b36817ec340bc3e83ae9359bae5825175 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:40:17 +0200 Subject: [PATCH 110/177] Simplify adding of processes Instead of accepting a function to create the process it just accepts the process now that the creation of actor processes is much simpler. --- rt/src/lib.rs | 27 +++++++------ rt/src/scheduler/mod.rs | 27 ++----------- rt/src/scheduler/shared/mod.rs | 23 ++--------- rt/src/scheduler/shared/tests.rs | 38 +++++++------------ rt/src/scheduler/tests.rs | 65 +++++++++++--------------------- rt/src/shared/mod.rs | 27 ++++++------- rt/src/wakers/tests.rs | 14 ++----- 7 files changed, 72 insertions(+), 149 deletions(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index f24d08096..2ba478063 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -585,16 +585,14 @@ impl RuntimeRef { where Fut: Future + 'static, { - _ = self + let process = FutureProcess(future); + let name = process.name(); + let pid = self .internals .scheduler .borrow_mut() - .add_new_process(options.priority(), |pid| { - let process = FutureProcess(future); - let name = process.name(); - debug!(pid = pid.0, name = name; "spawning thread-local future"); - Ok::<_, !>((process, ())) - }); + .add_new_process(options.priority(), process); + debug!(pid = pid.0, name = name; "spawning thread-local future"); } /// Spawn a thread-safe [`Future`]. @@ -692,15 +690,16 @@ where S: Supervisor, NA: NewActor, { - self.internals + let rt = ThreadLocal::new(self.clone()); + let (process, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; + let pid = self + .internals .scheduler .borrow_mut() - .add_new_process(options.priority(), |pid| { - let name = NA::name(); - debug!(pid = pid.0, name = name; "spawning thread-local actor"); - let rt = ThreadLocal::new(self.clone()); - ActorFuture::new(supervisor, new_actor, arg, rt) - }) + .add_new_process(options.priority(), process); + let name = NA::name(); + debug!(pid = pid.0, name = name; "spawning thread-local actor"); + Ok(actor_ref) } } diff --git a/rt/src/scheduler/mod.rs b/rt/src/scheduler/mod.rs index e7f563f1b..dc7773b71 100644 --- a/rt/src/scheduler/mod.rs +++ b/rt/src/scheduler/mod.rs @@ -1,14 +1,12 @@ //! Scheduler implementation. use std::collections::BinaryHeap; -use std::mem::MaybeUninit; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use log::trace; use crate::process::{self, Process, ProcessId}; -use crate::ptr_as_usize; use crate::spawn::options::Priority; mod inactive; @@ -60,31 +58,14 @@ impl Scheduler { } /// Add a new proces to the scheduler. - pub(crate) fn add_new_process( - &mut self, - priority: Priority, - setup: F, - ) -> Result + pub(crate) fn add_new_process
<P>
(&mut self, priority: Priority, process: P) -> ProcessId where - F: FnOnce(ProcessId) -> Result<(P, T), E>, P: Process + 'static, { - // Allocate some memory for the process. - let mut alloc: Box> = Box::new_uninit(); - debug_assert!(inactive::ok_ptr(alloc.as_ptr().cast()), "SKIP_BITS invalid"); - // Based on the allocation we can determine its process id. - let pid = ProcessId(ptr_as_usize(alloc.as_ptr())); - // Let the caller create the actual process (using the pid). - let (process, ret) = setup(pid)?; - let process = ProcessData::new(priority, Box::pin(process)); - // SAFETY: we write the processes and then safetly assume it's initialised. - let process = unsafe { - _ = alloc.write(process); - Pin::from(alloc.assume_init()) - }; - // Finally add it to ready queue. + let process = Box::pin(ProcessData::new(priority, Box::pin(process))); + let pid = process.as_ref().id(); self.ready.push(process); - Ok(ret) + pid } /// Mark the process, with `pid`, as ready to run. diff --git a/rt/src/scheduler/shared/mod.rs b/rt/src/scheduler/shared/mod.rs index d2eb157f5..939bb6f5f 100644 --- a/rt/src/scheduler/shared/mod.rs +++ b/rt/src/scheduler/shared/mod.rs @@ -1,12 +1,10 @@ //! Thread-safe version of `Scheduler`. -use std::mem::MaybeUninit; use std::pin::Pin; use log::trace; use crate::process::{Process, ProcessId}; -use crate::ptr_as_usize; use crate::spawn::options::Priority; mod inactive; @@ -129,27 +127,14 @@ impl Scheduler { } /// Add a new proces to the scheduler. - pub(crate) fn add_new_process(&self, priority: Priority, setup: F) -> Result + pub(crate) fn add_new_process
<P>
(&self, priority: Priority, process: P) -> ProcessId where - F: FnOnce(ProcessId) -> Result<(P, T), E>, P: Process + Send + Sync + 'static, { - // Allocate some memory for the process. - let mut alloc: Box> = Box::new_uninit(); - debug_assert!(inactive::ok_ptr(alloc.as_ptr().cast()), "SKIP_BITS invalid"); - // Based on the allocation we can determine its process id. - let pid = ProcessId(ptr_as_usize(alloc.as_ptr())); - // Let the caller create the actual process (using the pid). - let (process, ret) = setup(pid)?; - let process = ProcessData::new(priority, Box::pin(process)); - // SAFETY: we write the processes and then safetly assume it's initialised. - let process = unsafe { - _ = alloc.write(process); - Pin::from(alloc.assume_init()) - }; - // Finally add it to ready queue. + let process = Box::pin(ProcessData::new(priority, Box::pin(process))); + let pid = process.as_ref().id(); self.ready.add(process); - Ok(ret) + pid } /// Mark the process, with `pid`, as ready to run. diff --git a/rt/src/scheduler/shared/tests.rs b/rt/src/scheduler/shared/tests.rs index 69305e85e..d80414c84 100644 --- a/rt/src/scheduler/shared/tests.rs +++ b/rt/src/scheduler/shared/tests.rs @@ -113,13 +113,10 @@ fn scheduler_run_order() { let priorities = [Priority::LOW, Priority::NORMAL, Priority::HIGH]; let mut pids = vec![]; for (id, priority) in priorities.iter().enumerate() { - let pid = scheduler - .add_new_process(*priority, |pid| { - let rt = ThreadSafe::new(test::shared_internals()); - ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) - .map(|(future, _)| (future, pid)) - }) - .unwrap(); + let rt = ThreadSafe::new(test::shared_internals()); + let (process, _) = + ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt).unwrap(); + let pid = scheduler.add_new_process(*priority, process); pids.push(pid); } @@ -160,13 +157,9 @@ fn assert_actor_process_unmoved() { let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); - let pid = scheduler - .add_new_process(Priority::NORMAL, |pid| { - let rt = ThreadSafe::new(test::shared_internals()); - ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) - .map(|(future, _)| (future, pid)) - }) - .unwrap(); + let rt = ThreadSafe::new(test::shared_internals()); + let (process, _) = ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt).unwrap(); + let pid = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the // process. @@ -190,14 +183,12 @@ fn assert_future_process_unmoved() { let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); - let _ = scheduler.add_new_process(Priority::NORMAL, |_| { - Ok::<_, !>((FutureProcess(AssertUnmoved::new(pending())), ())) - }); + let process = FutureProcess(AssertUnmoved::new(pending())); + let pid = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the // process. 
let mut process = scheduler.remove().unwrap(); - let pid = process.as_ref().id(); assert_eq!(process.as_mut().run(&mut ctx), Poll::Pending); scheduler.add_back_process(process); @@ -212,11 +203,8 @@ fn assert_future_process_unmoved() { } fn add_test_actor(scheduler: &Scheduler, priority: Priority) -> ProcessId { - scheduler - .add_new_process(priority, |pid| { - let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadSafe::new(test::shared_internals()); - ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) - }) - .unwrap() + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadSafe::new(test::shared_internals()); + let (process, _) = ActorFuture::new(NoSupervisor, new_actor, (), rt).unwrap(); + scheduler.add_new_process(priority, process) } diff --git a/rt/src/scheduler/tests.rs b/rt/src/scheduler/tests.rs index 0e7162ec4..011c70a2b 100644 --- a/rt/src/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -49,9 +49,7 @@ fn has_process() { assert!(!scheduler.has_process()); assert!(!scheduler.has_ready_process()); - let _ = scheduler.add_new_process(Priority::NORMAL, |_| { - Ok::<_, !>((FutureProcess(NopTestProcess), ())) - }); + let _ = scheduler.add_new_process(Priority::NORMAL, FutureProcess(NopTestProcess)); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); } @@ -61,13 +59,10 @@ async fn simple_actor(_: actor::Context) {} #[test] fn add_actor() { let mut scheduler = Scheduler::new(); - let _ = scheduler - .add_new_process(Priority::NORMAL, |_| { - let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(test::runtime()); - ActorFuture::new(NoSupervisor, new_actor, (), rt) - }) - .unwrap(); + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(test::runtime()); + let (process, _) = ActorFuture::new(NoSupervisor, new_actor, (), rt).unwrap(); + let _ = scheduler.add_new_process(Priority::NORMAL, process); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); } @@ -77,15 +72,12 @@ fn mark_ready() { let mut scheduler = Scheduler::new(); // Incorrect (outdated) pid should be ok. 
- scheduler.mark_ready(ProcessId(1)); + scheduler.mark_ready(ProcessId(100)); - let pid = scheduler - .add_new_process(Priority::NORMAL, |pid| { - let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(test::runtime()); - ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) - }) - .unwrap(); + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(test::runtime()); + let (process, _) = ActorFuture::new(NoSupervisor, new_actor, (), rt).unwrap(); + let pid = scheduler.add_new_process(Priority::NORMAL, process); assert!(scheduler.has_process()); assert!(scheduler.has_ready_process()); @@ -212,13 +204,10 @@ fn scheduler_run_order() { let priorities = [Priority::LOW, Priority::NORMAL, Priority::HIGH]; let mut pids = vec![]; for (id, priority) in priorities.iter().enumerate() { - let pid = scheduler - .add_new_process(*priority, |pid| { - let rt = ThreadLocal::new(test::runtime()); - ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt) - .map(|(future, _)| (future, pid)) - }) - .unwrap(); + let rt = ThreadLocal::new(test::runtime()); + let (process, _) = + ActorFuture::new(NoSupervisor, new_actor, (id, run_order.clone()), rt).unwrap(); + let pid = scheduler.add_new_process(*priority, process); pids.push(pid); } @@ -259,13 +248,9 @@ fn assert_actor_process_unmoved() { let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); - let pid = scheduler - .add_new_process(Priority::NORMAL, |pid| { - let rt = ThreadLocal::new(test::runtime()); - ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt) - .map(|(future, _)| (future, pid)) - }) - .unwrap(); + let rt = ThreadLocal::new(test::runtime()); + let (process, _) = ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt).unwrap(); + let pid = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the process. let mut process = scheduler.next_process().unwrap(); @@ -288,9 +273,8 @@ fn assert_future_process_unmoved() { let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); - let _ = scheduler.add_new_process(Priority::NORMAL, |_| { - Ok::<_, !>((FutureProcess(AssertUnmoved::new(pending())), ())) - }); + let process = FutureProcess(AssertUnmoved::new(pending())); + let _ = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the process. 
let mut process = scheduler.next_process().unwrap(); @@ -309,11 +293,8 @@ fn assert_future_process_unmoved() { } fn add_test_actor(scheduler: &mut Scheduler, priority: Priority) -> ProcessId { - scheduler - .add_new_process(priority, |pid| { - let new_actor = simple_actor as fn(_) -> _; - let rt = ThreadLocal::new(test::runtime()); - ActorFuture::new(NoSupervisor, new_actor, (), rt).map(|(future, _)| (future, pid)) - }) - .unwrap() + let new_actor = simple_actor as fn(_) -> _; + let rt = ThreadLocal::new(test::runtime()); + let (process, _) = ActorFuture::new(NoSupervisor, new_actor, (), rt).unwrap(); + scheduler.add_new_process(priority, process) } diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index f0ad829e3..873288868 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -237,12 +237,12 @@ impl RuntimeInternals { NA::Actor: Send + Sync + 'static, NA::Message: Send, { - self.scheduler.add_new_process(options.priority(), |pid| { - let name = NA::name(); - debug!(pid = pid.0, name = name; "spawning thread-safe actor"); - let rt = ThreadSafe::new(self.clone()); - ActorFuture::new(supervisor, new_actor, arg, rt) - }) + let rt = ThreadSafe::new(self.clone()); + let (process, actor_ref) = ActorFuture::new(supervisor, new_actor, arg, rt)?; + let pid = self.scheduler.add_new_process(options.priority(), process); + let name = NA::name(); + debug!(pid = pid.0, name = name; "spawning thread-safe actor"); + Ok(actor_ref) } /// Spawn a thread-safe `future`. @@ -251,22 +251,19 @@ impl RuntimeInternals { where Fut: Future + Send + Sync + 'static, { - _ = self.scheduler.add_new_process(options.priority(), |pid| { - let process = FutureProcess(future); - let name = process.name(); - debug!(pid = pid.0, name = name; "spawning thread-safe future"); - Ok::<_, !>((process, ())) - }); + let process = FutureProcess(future); + let name = process.name(); + let pid = self.scheduler.add_new_process(options.priority(), process); + debug!(pid = pid.0, name = name; "spawning thread-safe future"); } /// Add a new proces to the scheduler. #[cfg(test)] - pub(crate) fn add_new_process(&self, priority: Priority, setup: F) -> Result + pub(crate) fn add_new_process
<P>
(&self, priority: Priority, process: P) -> ProcessId where - F: FnOnce(ProcessId) -> Result<(P, T), E>, P: Process + Send + Sync + 'static, { - self.scheduler.add_new_process(priority, setup) + self.scheduler.add_new_process(priority, process) } /// See [`Scheduler::mark_ready`]. diff --git a/rt/src/wakers/tests.rs b/rt/src/wakers/tests.rs index f07f02560..332c3be17 100644 --- a/rt/src/wakers/tests.rs +++ b/rt/src/wakers/tests.rs @@ -35,7 +35,7 @@ mod shared { fn waker() { let shared_internals = new_internals(); - let pid = add_process(&shared_internals); + let pid = shared_internals.add_new_process(Priority::NORMAL, FutureProcess(TestProcess)); assert!(shared_internals.has_process()); assert!(shared_internals.has_ready_process()); let process = shared_internals.remove_process().unwrap(); @@ -67,7 +67,7 @@ mod shared { let shared_internals = new_internals(); // Add a test process. - let pid = add_process(&shared_internals); + let pid = shared_internals.add_new_process(Priority::NORMAL, FutureProcess(TestProcess)); assert!(shared_internals.has_process()); assert!(shared_internals.has_ready_process()); let process = shared_internals.remove_process().unwrap(); @@ -92,7 +92,7 @@ mod shared { fn wake_from_different_thread() { let shared_internals = new_internals(); - let pid = add_process(&shared_internals); + let pid = shared_internals.add_new_process(Priority::NORMAL, FutureProcess(TestProcess)); assert!(shared_internals.has_process()); assert!(shared_internals.has_ready_process()); let process = shared_internals.remove_process().unwrap(); @@ -162,14 +162,6 @@ mod shared { setup.complete(wakers, worker_wakers, None) }) } - - fn add_process(internals: &RuntimeInternals) -> ProcessId { - internals - .add_new_process(Priority::NORMAL, |pid| { - Ok::<_, !>((FutureProcess(TestProcess), pid)) - }) - .unwrap() - } } mod bitmap { From b674869b092d9f86830463faebee9e5d7468e007 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:43:14 +0200 Subject: [PATCH 111/177] Fix some Clippy lints --- rt/src/process/mod.rs | 6 +++--- rt/src/scheduler/shared/runqueue.rs | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 5a1075c52..3bea266f7 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -2,13 +2,13 @@ use std::any::Any; use std::cmp::Ordering; -use std::fmt; use std::future::Future; use std::mem::size_of_val; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::task::{self, Poll}; use std::time::{Duration, Instant}; +use std::{fmt, ptr}; use heph::actor::{ActorFuture, NewActor}; use heph::supervisor::Supervisor; @@ -67,7 +67,7 @@ pub(crate) trait Process: Future { // i.e. not unique. alternative } else { - ProcessId((&*self as *const Self).cast::<()>() as usize) + ProcessId(ptr::addr_of!(*self).cast::<()>() as usize) } } @@ -168,7 +168,7 @@ impl ProcessData
<P>
{
 impl ProcessData
<P>
{ /// Returns the process identifier, or pid for short. pub(crate) fn id(&self) -> ProcessId { - let alternative = ProcessId(&*self as *const Self as usize); + let alternative = ProcessId(ptr::addr_of!(*self) as usize); self.process.as_ref().id(alternative) } diff --git a/rt/src/scheduler/shared/runqueue.rs b/rt/src/scheduler/shared/runqueue.rs index 9df09aafb..8b306651d 100644 --- a/rt/src/scheduler/shared/runqueue.rs +++ b/rt/src/scheduler/shared/runqueue.rs @@ -66,7 +66,7 @@ impl RunQueue { } None => { // Last node in the branch add our process to it. - *next_node = Some(Node::new(process)); + *next_node = Some(Box::new(Node::new(process))); return; } } @@ -98,12 +98,12 @@ impl RunQueue { impl Node { /// Returns a new `Node`. - fn new(process: Pin>) -> Box { - Box::new(Node { + const fn new(process: Pin>) -> Node { + Node { process, left: None, right: None, - }) + } } /// Returns the number of processes in this node and it's descendants. From 6d0a63739c6a74c08bcbe9c4b0f8a9b7057ef97f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:45:57 +0200 Subject: [PATCH 112/177] Define a single assert_size function --- rt/src/process/tests.rs | 7 +------ rt/src/scheduler/shared/tests.rs | 7 +------ rt/src/scheduler/tests.rs | 7 +------ rt/src/test.rs | 6 ++++++ 4 files changed, 9 insertions(+), 18 deletions(-) diff --git a/rt/src/process/tests.rs b/rt/src/process/tests.rs index 7f041f76d..3af3cc0be 100644 --- a/rt/src/process/tests.rs +++ b/rt/src/process/tests.rs @@ -2,7 +2,6 @@ use std::cmp::Ordering; use std::future::{pending, Future}; -use std::mem::size_of; use std::pin::Pin; use std::task::{self, Poll}; use std::thread::sleep; @@ -12,7 +11,7 @@ use mio::Token; use crate::process::{FutureProcess, Process, ProcessData, ProcessId}; use crate::spawn::options::Priority; -use crate::test::{nop_task_waker, AssertUnmoved}; +use crate::test::{assert_size, nop_task_waker, AssertUnmoved}; #[test] fn pid() { @@ -37,10 +36,6 @@ fn pid_and_evented_id() { assert_eq!(pid, ProcessId(0)); } -fn assert_size(expected: usize) { - assert_eq!(size_of::(), expected); -} - #[test] fn size_assertions() { assert_size::(8); diff --git a/rt/src/scheduler/shared/tests.rs b/rt/src/scheduler/shared/tests.rs index d80414c84..ecc6f2fc0 100644 --- a/rt/src/scheduler/shared/tests.rs +++ b/rt/src/scheduler/shared/tests.rs @@ -1,7 +1,6 @@ //! Tests for the shared scheduler. 
use std::future::{pending, Pending}; -use std::mem::size_of; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -10,13 +9,9 @@ use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, ProcessId}; use crate::scheduler::shared::{Priority, ProcessData, Scheduler}; -use crate::test::{self, nop_task_waker, AssertUnmoved}; +use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved}; use crate::ThreadSafe; -fn assert_size(expected: usize) { - assert_eq!(size_of::(), expected); -} - #[test] fn size_assertions() { assert_size::(40); diff --git a/rt/src/scheduler/tests.rs b/rt/src/scheduler/tests.rs index 011c70a2b..1ed6e21ae 100644 --- a/rt/src/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -3,7 +3,6 @@ use std::cell::RefCell; use std::future::Future; use std::future::{pending, Pending}; -use std::mem; use std::pin::Pin; use std::rc::Rc; use std::task::{self, Poll}; @@ -14,13 +13,9 @@ use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, Process, ProcessId}; use crate::scheduler::{ProcessData, Scheduler}; use crate::spawn::options::Priority; -use crate::test::{self, nop_task_waker, AssertUnmoved}; +use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved}; use crate::ThreadLocal; -fn assert_size(expected: usize) { - assert_eq!(mem::size_of::(), expected); -} - #[test] fn size_assertions() { assert_size::(40); diff --git a/rt/src/test.rs b/rt/src/test.rs index 065c67791..39e10bcd7 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -557,3 +557,9 @@ pub(crate) fn nop_task_waker() -> task::Waker { ); unsafe { task::Waker::from_raw(RawWaker::new(std::ptr::null(), &WAKER_VTABLE)) } } + +#[cfg(test)] +#[track_caller] +pub(crate) fn assert_size(expected: usize) { + assert_eq!(std::mem::size_of::(), expected); +} From 682b223164d3df22acfbaec34aa83ab706a8fc7f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 15:56:32 +0200 Subject: [PATCH 113/177] Fix tracing example instructions --- rt/examples/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rt/examples/README.md b/rt/examples/README.md index 0f986bad9..423c8fa06 100644 --- a/rt/examples/README.md +++ b/rt/examples/README.md @@ -65,9 +65,9 @@ Trace Event Format] so it can be opened by [Catapult trace view]. ```bash $ cargo run --example 8_tracing # Run the example, to generate the trace. - $ cd tools # Got into the tools directory. + $ cd ../tools # Got into the tools directory. # Convert the trace to Chrome's format. - $ cargo run --bin convert_trace ../heph_tracing_example.bin.log + $ cargo run --bin convert_trace ../rt/heph_tracing_example.bin.log # Make the trace viewable in HTML. $ $(CATAPULT_REPO)/tracing/bin/trace2html ../heph_tracing_example.json $ open ../heph_tracing_example.html # Finally open the trace in your browser. From 5b533c2f97642c8cfc3e449a8e7963cf230dca6e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 17:59:12 +0200 Subject: [PATCH 114/177] Add UnixAddr::as_pathname --- rt/src/net/uds/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index 42a783a7c..7109bfa00 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -20,6 +20,7 @@ pub use datagram::UnixDatagram; /// Unix socket address. #[derive(Clone, Debug, Eq, PartialEq)] pub struct UnixAddr { + /// NOTE: must always be of type `AF_UNIX`. 
inner: SockAddr, } @@ -31,6 +32,11 @@ impl UnixAddr { { SockAddr::unix(path.as_ref()).map(|a| UnixAddr { inner: a }) } + + /// Returns the contents of this address if it is a pathname address. + pub fn as_pathname(&self) -> Option<&Path> { + self.inner.as_pathname() + } } /// **Not part of the API, do not use**. @@ -53,6 +59,7 @@ impl a10::net::SocketAddress for UnixAddr { // SAFETY: caller must initialise the address. let mut this = this.assume_init(); this.inner.set_length(length); + debug_assert!(this.inner.is_unix()); this } } From 1dc2605a6e0cacb9298246e10f56c1e2fcabeeb3 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 17:16:30 +0200 Subject: [PATCH 115/177] Add UnixStream --- rt/src/net/mod.rs | 2 +- rt/src/net/uds/datagram.rs | 5 +- rt/src/net/uds/mod.rs | 4 +- rt/src/net/uds/stream.rs | 298 ++++++++++++++++++++++++++++++ rt/src/wakers/tests.rs | 1 - rt/tests/functional/uds/mod.rs | 1 + rt/tests/functional/uds/stream.rs | 101 ++++++++++ 7 files changed, 407 insertions(+), 5 deletions(-) create mode 100644 rt/src/net/uds/stream.rs create mode 100644 rt/tests/functional/uds/stream.rs diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 50b7b84c1..3ea76dcd5 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -29,7 +29,7 @@ pub use tcp::{TcpListener, TcpStream}; #[doc(no_inline)] pub use udp::UdpSocket; #[doc(no_inline)] -pub use uds::UnixDatagram; +pub use uds::{UnixDatagram, UnixStream}; pub(crate) use futures::{ Recv, RecvFrom, RecvFromVectored, RecvN, RecvNVectored, RecvVectored, Send, SendAll, diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index f6a562774..739e5732d 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -1,10 +1,11 @@ +//! Module with [`UnixDatagram`]. + use std::marker::PhantomData; use std::net::Shutdown; use std::os::fd::IntoRawFd; use std::{fmt, io}; use a10::{AsyncFd, Extract}; -use log::warn; use socket2::{Domain, SockRef, Type}; use crate::access::Access; @@ -101,7 +102,7 @@ impl UnixDatagram { socket.with_ref(|socket| { if let Some(cpu) = rt.cpu() { if let Err(err) = socket.set_cpu_affinity(cpu) { - warn!("failed to set CPU affinity on UnixDatagram: {err}"); + log::warn!("failed to set CPU affinity on UnixDatagram: {err}"); } } Ok(()) diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index 7109bfa00..2e9b7656a 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -3,7 +3,7 @@ //! Three main types are provided: //! //! * `UnixListener` listens for incoming Unix connections. -//! * `UnixStream` represents a Unix stream socket. +//! * [`UnixStream`] represents a Unix stream socket. //! * [`UnixDatagram`] represents a Unix datagram socket. use std::mem::{size_of, MaybeUninit}; @@ -13,9 +13,11 @@ use std::{io, ptr}; use socket2::SockAddr; mod datagram; +mod stream; pub use crate::net::{Connected, Unconnected}; pub use datagram::UnixDatagram; +pub use stream::UnixStream; /// Unix socket address. #[derive(Clone, Debug, Eq, PartialEq)] diff --git a/rt/src/net/uds/stream.rs b/rt/src/net/uds/stream.rs new file mode 100644 index 000000000..7f6f13ece --- /dev/null +++ b/rt/src/net/uds/stream.rs @@ -0,0 +1,298 @@ +//! Module with [`UnixStream`] and related types. + +#![allow(unused_imports)] // FIXME: remove. 
+ +use std::io; +use std::net::{Shutdown, SocketAddr}; +use std::os::fd::IntoRawFd; + +use a10::{AsyncFd, Extract}; +use socket2::{Domain, Protocol, SockRef, Type}; + +use crate::access::Access; +use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; +use crate::net::uds::UnixAddr; +use crate::net::{ + convert_address, Recv, RecvN, RecvNVectored, RecvVectored, Send, SendAll, SendAllVectored, + SendVectored, SockAddr, +}; + +/// A non-blocking Unix stream. +/// +/// # Examples +/// +/// Sending `Hello world!` to a peer. +/// +/// ``` +/// #![feature(never_type)] +/// +/// use std::io; +/// +/// use heph::actor; +/// use heph_rt::ThreadLocal; +/// use heph_rt::net::uds::{UnixStream, UnixAddr}; +/// +/// async fn actor(ctx: actor::Context) -> io::Result<()> { +/// let address = UnixAddr::from_pathname("/path/to/my/socket").unwrap(); +/// let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; +/// stream.send_all("Hello world!").await?; +/// Ok(()) +/// } +/// # drop(actor); // Silent dead code warnings. +/// ``` +#[derive(Debug)] +pub struct UnixStream { + pub(in crate::net) fd: AsyncFd, +} + +impl UnixStream { + /// Create a new Unix stream and issues a non-blocking connect to the + /// specified `address`. + pub async fn connect(rt: &RT, address: UnixAddr) -> io::Result + where + RT: Access, + { + let fd = a10::net::socket( + rt.submission_queue(), + Domain::UNIX.into(), + Type::STREAM.cloexec().into(), + 0, + 0, + ) + .await?; + let socket = UnixStream::new(rt, fd); + socket.fd.connect(address).await?; + Ok(socket) + } + + /// Creates an unnamed pair of connected sockets. + pub fn pair(rt: &RT) -> io::Result<(UnixStream, UnixStream)> + where + RT: Access, + { + let (s1, s2) = socket2::Socket::pair(Domain::UNIX, Type::STREAM.cloexec(), None)?; + let s1 = UnixStream::new(rt, unsafe { + // SAFETY: the call to `pair` above ensures the file descriptors are + // valid. + AsyncFd::new(s1.into_raw_fd(), rt.submission_queue()) + }); + let s2 = UnixStream::new(rt, unsafe { + // SAFETY: Same as above. + AsyncFd::new(s2.into_raw_fd(), rt.submission_queue()) + }); + Ok((s1, s2)) + } + + fn new(rt: &RT, fd: AsyncFd) -> UnixStream + where + RT: Access, + { + let socket = UnixStream { fd }; + socket.set_auto_cpu_affinity(rt); + socket + } + + /// Automatically set the CPU affinity based on the runtime access `rt`. + /// + /// For non-Linux OSs this is a no-op. If `rt` is not local this is also a + /// no-op. + /// + /// # Notes + /// + /// This is already called when the `UnixStream` is created using + /// [`UnixStream::connect`], this is mostly useful when accepting a + /// connection from [`UnixListener`]. + /// + /// [`UnixListener`]: crate::net::uds::UnixListener + pub fn set_auto_cpu_affinity(&self, rt: &RT) + where + RT: Access, + { + #[cfg(target_os = "linux")] + if let Some(cpu) = rt.cpu() { + if let Err(err) = self.set_cpu_affinity(cpu) { + log::warn!("failed to set CPU affinity on UnixStream: {err}"); + } + } + } + + /// Set the CPU affinity to `cpu`. + /// + /// On Linux this uses `SO_INCOMING_CPU`. + #[cfg(target_os = "linux")] + pub(crate) fn set_cpu_affinity(&self, cpu: usize) -> io::Result<()> { + self.with_ref(|socket| socket.set_cpu_affinity(cpu)) + } + + /// Returns the socket address of the remote peer of this Unix connection. + pub fn peer_addr(&self) -> io::Result { + self.with_ref(|socket| socket.peer_addr().map(|a| UnixAddr { inner: a })) + } + + /// Returns the socket address of the local half of this Unix connection. 
+ pub fn local_addr(&self) -> io::Result { + self.with_ref(|socket| socket.local_addr().map(|a| UnixAddr { inner: a })) + } + + /// Send the bytes in `buf` to the peer. + /// + /// Return the number of bytes written. This may we fewer then the length of + /// `buf`. To ensure that all bytes are written use + /// [`UnixStream::send_all`]. + pub async fn send(&self, buf: B) -> io::Result<(B, usize)> { + Send(self.fd.send(BufWrapper(buf), 0).extract()).await + } + + /// Send the all bytes in `buf` to the peer. + /// + /// If this fails to send all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + pub async fn send_all(&self, buf: B) -> io::Result { + SendAll(self.fd.send_all(BufWrapper(buf)).extract()).await + } + + /// Sends data on the socket to the connected socket, using vectored I/O. + pub async fn send_vectored, const N: usize>( + &self, + bufs: B, + ) -> io::Result<(B, usize)> { + SendVectored(self.fd.send_vectored(BufWrapper(bufs), 0).extract()).await + } + + /// Send the all bytes in `bufs` to the peer. + /// + /// If this fails to send all bytes (this happens if a write returns + /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`]. + pub async fn send_vectored_all, const N: usize>( + &self, + bufs: B, + ) -> io::Result { + SendAllVectored(self.fd.send_all_vectored(BufWrapper(bufs)).extract()).await + } + + /// Receive messages from the stream. + /// + /// # Examples + /// + /// ``` + /// #![feature(never_type)] + /// + /// use std::io; + /// + /// use heph::actor; + /// use heph_rt::ThreadLocal; + /// use heph_rt::net::uds::{UnixStream, UnixAddr}; + /// + /// async fn actor(ctx: actor::Context) -> io::Result<()> { + /// let address = UnixAddr::from_pathname("/path/to/my/socket").unwrap(); + /// let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; + /// + /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. + /// let buf = stream.recv(buf).await?; + /// println!("read {} bytes: {buf:?}", buf.len()); + /// + /// Ok(()) + /// } + /// # + /// # drop(actor); // Silent dead code warnings. + /// ``` + pub async fn recv(&self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), 0)).await + } + + /// Receive at least `n` bytes from the stream. + /// + /// This returns [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. + /// + /// # Examples + /// + /// ``` + /// #![feature(never_type)] + /// + /// use std::io; + /// + /// use heph::actor; + /// use heph_rt::ThreadLocal; + /// use heph_rt::net::uds::{UnixStream, UnixAddr}; + /// + /// async fn actor(ctx: actor::Context) -> io::Result<()> { + /// let address = UnixAddr::from_pathname("/path/to/my/socket").unwrap(); + /// let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; + /// + /// let buf = Vec::with_capacity(4 * 1024); // 4 KB. + /// // NOTE: this will return an error if the peer sends less than 1 KB + /// // of data before shutting down or closing the connection. + /// let n = 1024; + /// let buf = stream.recv_n(buf, n).await?; + /// println!("read {} bytes: {buf:?}", buf.len()); + /// + /// Ok(()) + /// } + /// # + /// # drop(actor); // Silent dead code warnings. + /// ``` + pub async fn recv_n(&self, buf: B, n: usize) -> io::Result { + debug_assert!( + buf.spare_capacity() >= n, + "called `UnixStream::recv_n` with a buffer smaller then `n`" + ); + RecvN(self.fd.recv_n(BufWrapper(buf), n)).await + } + + /// Receive messages from the stream, using vectored I/O. 
+ pub async fn recv_vectored, const N: usize>(&self, bufs: B) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), 0)).await + } + + /// Receive at least `n` bytes from the stream, using vectored I/O. + /// + /// This returns [`io::ErrorKind::UnexpectedEof`] if less then `n` bytes could be read. + pub async fn recv_n_vectored, const N: usize>( + &self, + bufs: B, + n: usize, + ) -> io::Result { + debug_assert!( + bufs.total_spare_capacity() >= n, + "called `UnixStream::recv_n_vectored` with a buffer smaller then `n`" + ); + RecvNVectored(self.fd.recv_n_vectored(BufWrapper(bufs), n)).await + } + + /// Receive messages from the stream, without removing that data from the + /// queue. + pub async fn peek(&self, buf: B) -> io::Result { + Recv(self.fd.recv(BufWrapper(buf), libc::MSG_PEEK)).await + } + + /// Receive messages from the stream, without removing it from the input + /// queue, using vectored I/O. + pub async fn peek_vectored, const N: usize>(&self, bufs: B) -> io::Result { + RecvVectored(self.fd.recv_vectored(BufWrapper(bufs), libc::MSG_PEEK)).await + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O on the specified + /// portions to return immediately with an appropriate value (see the + /// documentation of [`Shutdown`]). + pub async fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.fd.shutdown(how).await + } + + /// Get the value of the `SO_ERROR` option on this socket. + /// + /// This will retrieve the stored error in the underlying socket, clearing + /// the field in the process. This can be useful for checking errors between + /// calls. + pub fn take_error(&self) -> io::Result> { + self.with_ref(|socket| socket.take_error()) + } + + fn with_ref(&self, f: F) -> io::Result + where + F: FnOnce(SockRef<'_>) -> io::Result, + { + f(SockRef::from(&self.fd)) + } +} diff --git a/rt/src/wakers/tests.rs b/rt/src/wakers/tests.rs index 332c3be17..53b051d59 100644 --- a/rt/src/wakers/tests.rs +++ b/rt/src/wakers/tests.rs @@ -197,7 +197,6 @@ mod bitmap { setting_and_unsetting(1024) } - #[cfg(test)] fn setting_and_unsetting(entries: usize) { let map = AtomicBitMap::new(entries); assert_eq!(map.capacity(), entries); diff --git a/rt/tests/functional/uds/mod.rs b/rt/tests/functional/uds/mod.rs index abdd34f0f..cad1e4947 100644 --- a/rt/tests/functional/uds/mod.rs +++ b/rt/tests/functional/uds/mod.rs @@ -1,3 +1,4 @@ //! Tests for the UDS types. mod datagram; +mod stream; diff --git a/rt/tests/functional/uds/stream.rs b/rt/tests/functional/uds/stream.rs new file mode 100644 index 000000000..6e2a41351 --- /dev/null +++ b/rt/tests/functional/uds/stream.rs @@ -0,0 +1,101 @@ +//! Tests for `UnixStream`. + +use std::io::{self, Read}; +use std::net::Shutdown; +use std::os::unix::net; +use std::time::Duration; + +use heph::actor; +use heph_rt::net::uds::{UnixAddr, UnixStream}; +use heph_rt::spawn::ActorOptions; +use heph_rt::test::{join, try_spawn_local, PanicSupervisor}; +use heph_rt::{self as rt}; + +use crate::util::temp_file; + +const DATA: &[u8] = b"Hello world"; +const DATA2: &[u8] = b"Hello mars"; + +#[test] +fn pair() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let (s1, s2) = UnixStream::pair(ctx.runtime_ref())?; + + // Addresses must point to each other. 
+ let s1_local = s1.local_addr()?; + let s1_peer = s1.peer_addr()?; + let s2_local = s2.local_addr()?; + let s2_peer = s2.peer_addr()?; + assert_eq!(s1_local, s2_peer); + assert_eq!(s1_peer, s2_local); + + // Send to one arrives at the other. + let (_, n) = s1.send(DATA).await?; + assert_eq!(n, DATA.len()); + let mut buf = s2.recv(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); + assert_eq!(buf, DATA); + buf.clear(); + + // Same as above, but then in the other direction. + let (_, n) = s2.send(DATA2).await?; + assert_eq!(n, DATA2.len()); + let mut buf = s1.recv(buf).await?; + assert_eq!(buf.len(), DATA2.len()); + assert_eq!(buf, DATA2); + buf.clear(); + + // Shutdown. + s1.shutdown(Shutdown::Both).await?; + s2.shutdown(Shutdown::Both).await?; + + // No errors. + assert!(s1.take_error()?.is_none()); + assert!(s2.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} + +#[test] +fn connect() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let path = temp_file("uds_stream_bound"); + let listener = net::UnixListener::bind(&path)?; + + let address = UnixAddr::from_pathname(path)?; + let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; + + let (mut client, _) = listener.accept()?; + + let (_, n) = stream.send(DATA).await?; + assert_eq!(n, DATA.len()); + let mut buf = vec![0; DATA.len() + 1]; + let n = client.read(&mut buf)?; + assert_eq!(n, DATA.len()); + assert_eq!(&buf[..n], DATA); + + // No errors. + assert!(listener.take_error()?.is_none()); + assert!(client.take_error()?.is_none()); + assert!(stream.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} From 3a4ee711b178fd15a5aad62118f0403cf8c0fa22 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 17:18:14 +0200 Subject: [PATCH 116/177] Add UnixListener --- rt/src/net/mod.rs | 2 +- rt/src/net/uds/listener.rs | 209 ++++++++++++++++++++++++++++ rt/src/net/uds/mod.rs | 4 +- rt/tests/functional/uds/listener.rs | 85 +++++++++++ rt/tests/functional/uds/mod.rs | 1 + 5 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 rt/src/net/uds/listener.rs create mode 100644 rt/tests/functional/uds/listener.rs diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index 3ea76dcd5..be11984d2 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -29,7 +29,7 @@ pub use tcp::{TcpListener, TcpStream}; #[doc(no_inline)] pub use udp::UdpSocket; #[doc(no_inline)] -pub use uds::{UnixDatagram, UnixStream}; +pub use uds::{UnixDatagram, UnixListener, UnixStream}; pub(crate) use futures::{ Recv, RecvFrom, RecvFromVectored, RecvN, RecvNVectored, RecvVectored, Send, SendAll, diff --git a/rt/src/net/uds/listener.rs b/rt/src/net/uds/listener.rs new file mode 100644 index 000000000..705fd2ce3 --- /dev/null +++ b/rt/src/net/uds/listener.rs @@ -0,0 +1,209 @@ +//! Module with [`UnixListener`] and related types. 
+ +use std::async_iter::AsyncIterator; +use std::pin::Pin; +use std::task::{self, Poll}; +use std::{fmt, io}; + +use a10::AsyncFd; +use socket2::{Domain, SockRef, Type}; + +use crate::access::Access; +use crate::net::uds::{UnixAddr, UnixStream}; + +/// A Unix socket listener. +/// +/// A listener can be created using [`UnixListener::bind`]. After it is created +/// there are two ways to accept incoming [`UnixStream`]s: +/// +/// * [`accept`] accepts a single connection, or +/// * [`incoming`] which returns stream of incoming connections. +/// +/// [`accept`]: UnixListener::accept +/// [`incoming`]: UnixListener::incoming +/// +/// # Examples +/// +/// Accepting a single [`UnixStream`], using [`UnixListener::accept`]. +/// +/// ``` +/// #![feature(never_type)] +/// +/// use std::io; +/// +/// use heph::actor; +/// use heph_rt::net::uds::{UnixListener, UnixAddr}; +/// use heph_rt::ThreadLocal; +/// use log::info; +/// +/// async fn actor(ctx: actor::Context, address: UnixAddr) -> io::Result<()> { +/// // Create a new listener. +/// let listener = UnixListener::bind(ctx.runtime_ref(), address).await?; +/// +/// // Accept a connection. +/// let (stream, peer_address) = listener.accept().await?; +/// info!("accepted connection from: {:?}", peer_address.as_pathname()); +/// +/// // Next we write the path to the connection. +/// if let Some(path) = peer_address.as_pathname() { +/// stream.send_all(path.display().to_string()).await?; +/// } +/// Ok(()) +/// } +/// # drop(actor); // Silent dead code warnings. +/// ``` +/// +/// Accepting multiple [`UnixStream`]s, using [`UnixListener::incoming`]. +/// +/// ``` +/// #![feature(never_type)] +/// +/// use std::io; +/// +/// use log::info; +/// +/// use heph::actor; +/// use heph_rt::net::uds::{UnixListener, UnixAddr}; +/// use heph_rt::ThreadLocal; +/// use heph_rt::util::next; +/// +/// async fn actor(ctx: actor::Context, address: UnixAddr) -> io::Result<()> { +/// // Create a new listener. +/// let listener = UnixListener::bind(ctx.runtime_ref(), address).await?; +/// let mut incoming = listener.incoming(); +/// loop { +/// let stream = match next(&mut incoming).await { +/// Some(Ok(stream)) => stream, +/// Some(Err(err)) => return Err(err), +/// None => return Ok(()), +/// }; +/// +/// // Optionally set the CPU affinity as that's not done automatically +/// // (in case the stream is send to another thread). +/// stream.set_auto_cpu_affinity(ctx.runtime_ref()); +/// +/// let peer_address = stream.peer_addr()?; +/// info!("accepted connection from: {:?}", peer_address.as_pathname()); +/// +/// // Next we write the path to the connection. +/// if let Some(path) = peer_address.as_pathname() { +/// stream.send_all(path.display().to_string()).await?; +/// } +/// } +/// } +/// # drop(actor); // Silent dead code warnings. +/// ``` +pub struct UnixListener { + fd: AsyncFd, +} + +impl UnixListener { + /// Creates a Unix socket bound to `address`. 
+ pub async fn bind(rt: &RT, address: UnixAddr) -> io::Result + where + RT: Access, + { + let fd = a10::net::socket( + rt.submission_queue(), + Domain::UNIX.into(), + Type::STREAM.cloexec().into(), + 0, + 0, + ) + .await?; + + let socket = UnixListener { fd }; + + #[cfg(target_os = "linux")] + socket.with_ref(|socket| { + if let Some(cpu) = rt.cpu() { + if let Err(err) = socket.set_cpu_affinity(cpu) { + log::warn!("failed to set CPU affinity on UnixListener: {err}"); + } + } + + socket.bind(&address.inner)?; + socket.listen(1024)?; + + Ok(()) + })?; + + Ok(socket) + } + + /// Returns the socket address of the local half of this socket. + pub fn local_addr(&self) -> io::Result { + self.with_ref(|socket| socket.local_addr().map(|a| UnixAddr { inner: a })) + } + + /// Accept a new incoming [`UnixStream`]. + /// + /// Returns the Unix stream and the remote address of the peer. See the + /// [`UnixListener`] documentation for an example. + /// + /// # Notes + /// + /// The CPU affinity is **not** set on the returned Unix stream. To set that + /// use [`UnixStream::set_auto_cpu_affinity`]. + pub async fn accept(&self) -> io::Result<(UnixStream, UnixAddr)> { + self.fd + .accept() + .await + .map(|(fd, addr)| (UnixStream { fd }, addr)) + } + + /// Returns a stream of incoming [`UnixStream`]s. + /// + /// Note that unlike [`accept`] this doesn't return the address because it + /// uses io_uring's multishot accept (making it faster then calling `accept` + /// in a loop). See the [`UnixListener`] documentation for an example. + /// + /// [`accept`]: UnixListener::accept + /// + /// # Notes + /// + /// The CPU affinity is **not** set on the returned Unix stream. To set that + /// use [`UnixStream::set_auto_cpu_affinity`]. + #[allow(clippy::doc_markdown)] // For "io_uring". + pub fn incoming(&self) -> Incoming<'_> { + Incoming(self.fd.multishot_accept()) + } + + /// Get the value of the `SO_ERROR` option on this socket. + /// + /// This will retrieve the stored error in the underlying socket, clearing + /// the field in the process. This can be useful for checking errors between + /// calls. + pub fn take_error(&self) -> io::Result> { + self.with_ref(|socket| socket.take_error()) + } + + fn with_ref(&self, f: F) -> io::Result + where + F: FnOnce(SockRef<'_>) -> io::Result, + { + f(SockRef::from(&self.fd)) + } +} + +/// The [`AsyncIterator`] behind [`UnixListener::incoming`]. +#[derive(Debug)] +#[must_use = "AsyncIterators do nothing unless polled"] +pub struct Incoming<'a>(a10::net::MultishotAccept<'a>); + +impl<'a> AsyncIterator for Incoming<'a> { + type Item = io::Result; + + fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { + // SAFETY: not moving the `Future`. + unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) } + .poll_next(ctx) + .map_ok(|fd| UnixStream { fd }) + } +} + +impl fmt::Debug for UnixListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fd.fmt(f) + } +} diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index 2e9b7656a..7bb3fba9a 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -2,7 +2,7 @@ //! //! Three main types are provided: //! -//! * `UnixListener` listens for incoming Unix connections. +//! * [`UnixListener`] listens for incoming Unix connections. //! * [`UnixStream`] represents a Unix stream socket. //! * [`UnixDatagram`] represents a Unix datagram socket. 
@@ -13,10 +13,12 @@ use std::{io, ptr}; use socket2::SockAddr; mod datagram; +mod listener; mod stream; pub use crate::net::{Connected, Unconnected}; pub use datagram::UnixDatagram; +pub use listener::UnixListener; pub use stream::UnixStream; /// Unix socket address. diff --git a/rt/tests/functional/uds/listener.rs b/rt/tests/functional/uds/listener.rs new file mode 100644 index 000000000..7de426478 --- /dev/null +++ b/rt/tests/functional/uds/listener.rs @@ -0,0 +1,85 @@ +//! Tests for `UnixListener`. + +use std::io; +use std::time::Duration; + +use heph::actor; +use heph_rt::net::uds::{UnixAddr, UnixListener, UnixStream}; +use heph_rt::spawn::ActorOptions; +use heph_rt::test::{join, try_spawn_local, PanicSupervisor}; +use heph_rt::util::next; +use heph_rt::{self as rt}; + +use crate::util::temp_file; + +const DATA: &[u8] = b"Hello world"; + +#[test] +fn accept() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let path = temp_file("uds_listener_accept"); + let address = UnixAddr::from_pathname(path)?; + let listener = UnixListener::bind(ctx.runtime_ref(), address.clone()).await?; + + let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; + + let (client, _) = listener.accept().await?; + + let (_, n) = stream.send(DATA).await?; + assert_eq!(n, DATA.len()); + let buf = client.recv(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); + assert_eq!(buf, DATA); + + // No errors. + assert!(listener.take_error()?.is_none()); + assert!(client.take_error()?.is_none()); + assert!(stream.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} + +#[test] +fn incoming() { + async fn actor(ctx: actor::Context) -> io::Result<()> + where + RT: rt::Access, + { + let path = temp_file("uds_listener_incoming"); + let address = UnixAddr::from_pathname(path)?; + let listener = UnixListener::bind(ctx.runtime_ref(), address.clone()).await?; + + let mut incoming = listener.incoming(); + + let stream = UnixStream::connect(ctx.runtime_ref(), address).await?; + + let client = next(&mut incoming).await.unwrap()?; + + let (_, n) = stream.send(DATA).await?; + assert_eq!(n, DATA.len()); + let buf = client.recv(Vec::with_capacity(DATA.len() + 1)).await?; + assert_eq!(buf.len(), DATA.len()); + assert_eq!(buf, DATA); + + // No errors. + assert!(listener.take_error()?.is_none()); + assert!(client.take_error()?.is_none()); + assert!(stream.take_error()?.is_none()); + + Ok(()) + } + + #[allow(trivial_casts)] + let actor = actor as fn(_) -> _; + let actor_ref = try_spawn_local(PanicSupervisor, actor, (), ActorOptions::default()).unwrap(); + join(&actor_ref, Duration::from_secs(1)).unwrap(); +} diff --git a/rt/tests/functional/uds/mod.rs b/rt/tests/functional/uds/mod.rs index cad1e4947..b2fdec4ca 100644 --- a/rt/tests/functional/uds/mod.rs +++ b/rt/tests/functional/uds/mod.rs @@ -1,4 +1,5 @@ //! Tests for the UDS types. mod datagram; +mod listener; mod stream; From 87ee7500f80f31e951cf45308ee236c60c342a6f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 17:21:02 +0200 Subject: [PATCH 117/177] Cleanup net::uds module and expand docs Now the net::uds module structure matches that of net::tcp and it gets mentioned in the net module. 
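As an illustration (not part of this patch): with the layout below, both the re-exported and the submodule paths are expected to resolve, mirroring how net::tcp is laid out. A rough sketch, assuming the re-exports land as shown in the diff and compiled against this branch:

    // Sketch of the import paths implied by the new uds layout.
    #![allow(unused_imports)]
    // Top-level re-exports, same as before this patch:
    use heph_rt::net::uds::{UnixDatagram, UnixListener, UnixStream};
    // The submodules are now public as well, e.g. the datagram
    // connected-state marker types:
    use heph_rt::net::uds::datagram::{Connected, Unconnected};
    fn main() {}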
--- rt/src/net/mod.rs | 8 ++++++++ rt/src/net/uds/datagram.rs | 6 ++++-- rt/src/net/uds/mod.rs | 10 ++++++---- rt/src/systemd.rs | 3 ++- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/rt/src/net/mod.rs b/rt/src/net/mod.rs index be11984d2..a3e89b3b1 100644 --- a/rt/src/net/mod.rs +++ b/rt/src/net/mod.rs @@ -8,12 +8,20 @@ //! * A [TCP server], listens for connections and starts a new actor for each. //! * [User Datagram Protocol] (UDP) only provides a single socket type: //! * [`UdpSocket`]. +//! * [Unix Domain Socket] (UDS) module provides three types: +//! * A [Unix stream] between two socket. +//! * A [Unix listening socket], a socket used to listen for connections. +//! * A [Unix datagram socket]. //! //! [Transmission Control Protocol]: crate::net::tcp //! [TCP stream]: crate::net::TcpStream //! [TCP listening socket]: crate::net::TcpListener //! [TCP server]: crate::net::tcp::server //! [User Datagram Protocol]: crate::net::udp +//! [Unix Domain Socket]: crate::net::uds +//! [Unix stream]: crate::net::UnixStream +//! [Unix listening socket]: crate::net::UnixListener +//! [Unix datagram socket]: crate::net::UnixDatagram use std::mem::{size_of, MaybeUninit}; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index 739e5732d..c17095e79 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -12,10 +12,12 @@ use crate::access::Access; use crate::io::{Buf, BufMut, BufMutSlice, BufSlice, BufWrapper}; use crate::net::uds::UnixAddr; use crate::net::{ - Connected, Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, - SendVectored, Unconnected, + Recv, RecvFrom, RecvFromVectored, RecvVectored, Send, SendTo, SendToVectored, SendVectored, }; +#[doc(no_inline)] +pub use crate::net::{Connected, Unconnected}; + /// A Unix datagram socket. /// /// To create a socket [`UnixDatagram::bind`] or [`UnixDatagram::unbound`] can diff --git a/rt/src/net/uds/mod.rs b/rt/src/net/uds/mod.rs index 7bb3fba9a..62b3f006e 100644 --- a/rt/src/net/uds/mod.rs +++ b/rt/src/net/uds/mod.rs @@ -12,13 +12,15 @@ use std::{io, ptr}; use socket2::SockAddr; -mod datagram; -mod listener; -mod stream; +pub mod datagram; +pub mod listener; +pub mod stream; -pub use crate::net::{Connected, Unconnected}; +#[doc(no_inline)] pub use datagram::UnixDatagram; +#[doc(no_inline)] pub use listener::UnixListener; +#[doc(no_inline)] pub use stream::UnixStream; /// Unix socket address. 
diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 07e2aa6b7..62d3853b1 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -24,7 +24,8 @@ use heph::messages::Terminate; use log::{as_debug, debug, warn}; use crate::access::Access; -use crate::net::uds::{Connected, UnixAddr, UnixDatagram}; +use crate::net::uds::datagram::{Connected, UnixDatagram}; +use crate::net::uds::UnixAddr; use crate::timer::Interval; use crate::util::{either, next}; use crate::Signal; From b84a1a4db7dfa00d6284885bf01f9adfc6e22125 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:25:58 +0200 Subject: [PATCH 118/177] Fix example tests --- rt/tests/examples.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/rt/tests/examples.rs b/rt/tests/examples.rs index d1d8350f3..c7a2a21ea 100644 --- a/rt/tests/examples.rs +++ b/rt/tests/examples.rs @@ -64,7 +64,6 @@ fn test_4_sync_actor() { } #[test] -#[ignore] fn test_6_process_signals() { let mut child = run_example("6_process_signals"); @@ -120,7 +119,6 @@ fn test_6_process_signals() { } #[test] -#[ignore] fn test_7_restart_supervisor() { // Index of the "?" in the string below. const LEFT_INDEX: usize = 51; @@ -128,7 +126,7 @@ fn test_7_restart_supervisor() { let output = run_example_output("7_restart_supervisor"); let mut lines = output.lines(); - let mut expected = "lvl=\"WARN\" msg=\"print actor failed, restarting it (?/5 restarts left): can't print message synchronously 'Hello world!': actor message 'Hello world!'\" target=\"restart_supervisor\" module=\"restart_supervisor\"".to_owned(); + let mut expected = "lvl=\"WARN\" msg=\"print actor failed, restarting it (?/5 restarts left): can't print message synchronously 'Hello world!': actor message 'Hello world!'\" target=\"7_restart_supervisor\" module=\"7_restart_supervisor\"".to_owned(); for left in (0..5).rev() { let line = lines.next().unwrap(); @@ -138,11 +136,11 @@ fn test_7_restart_supervisor() { assert_eq!(line, expected); } - let expected = "lvl=\"WARN\" msg=\"print actor failed, stopping it (no restarts left): can't print message synchronously 'Hello world!': actor message 'Hello world!'\" target=\"restart_supervisor\" module=\"restart_supervisor\""; + let expected = "lvl=\"WARN\" msg=\"print actor failed, stopping it (no restarts left): can't print message synchronously 'Hello world!': actor message 'Hello world!'\" target=\"7_restart_supervisor\" module=\"7_restart_supervisor\""; let last_line = lines.next().unwrap(); assert_eq!(last_line, expected); - let mut expected = "lvl=\"WARN\" msg=\"print actor failed, restarting it (?/5 restarts left): can't print message 'Hello world!': actor message 'Hello world!'\" target=\"restart_supervisor\" module=\"restart_supervisor\"".to_owned(); + let mut expected = "lvl=\"WARN\" msg=\"print actor failed, restarting it (?/5 restarts left): can't print message 'Hello world!': actor message 'Hello world!'\" target=\"7_restart_supervisor\" module=\"7_restart_supervisor\"".to_owned(); for left in (0..5).rev() { let line = lines.next().unwrap(); @@ -152,7 +150,7 @@ fn test_7_restart_supervisor() { assert_eq!(line, expected); } - let expected = "lvl=\"WARN\" msg=\"print actor failed, stopping it (no restarts left): can't print message 'Hello world!': actor message 'Hello world!'\" target=\"restart_supervisor\" module=\"restart_supervisor\""; + let expected = "lvl=\"WARN\" msg=\"print actor failed, stopping it (no restarts left): can't print message 'Hello world!': actor message 'Hello world!'\" 
target=\"7_restart_supervisor\" module=\"7_restart_supervisor\""; let last_line = lines.next().unwrap(); assert_eq!(last_line, expected); From 3a29ac87080d0e492113bf5a64b030b9d6639632 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:26:47 +0200 Subject: [PATCH 119/177] Enable running_actors test again --- rt/tests/functional/runtime.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/rt/tests/functional/runtime.rs b/rt/tests/functional/runtime.rs index ac151d513..b237bc658 100644 --- a/rt/tests/functional/runtime.rs +++ b/rt/tests/functional/runtime.rs @@ -149,7 +149,6 @@ fn auto_cpu_affinity() { #[test] #[allow(clippy::type_complexity, clippy::too_many_arguments)] -#[ignore] fn running_actors() { use SupervisorStrategy::*; From ab3cb7134075b331ced59c39f19094ddf5cc64ad Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:35:25 +0200 Subject: [PATCH 120/177] Remove ptr_as_usize Use `as usize` instead. --- rt/src/lib.rs | 12 +----------- rt/src/local/waker.rs | 6 +++--- rt/src/setup.rs | 7 ++----- rt/src/wakers/shared.rs | 6 +++--- 4 files changed, 9 insertions(+), 22 deletions(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 2ba478063..1418cdf9b 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -261,16 +261,6 @@ use timers::TimerToken; const SYNC_WORKER_ID_START: usize = 10000; const SYNC_WORKER_ID_END: usize = SYNC_WORKER_ID_START + 10000; -/// Returns `ptr` as `usize`. -const fn ptr_as_usize(ptr: *const T) -> usize { - union Pointer { - ptr: *const T, - int: usize, - } - let ptr = Pointer { ptr }; - unsafe { ptr.int } -} - #[test] #[allow(clippy::assertions_on_constants)] // This is the point of the test. fn sync_worker_id() { @@ -391,7 +381,7 @@ impl Runtime { } #[allow(clippy::cast_possible_truncation)] - // Safety: MAX_THREADS always fits in u32. + // SAFETY: MAX_THREADS always fits in u32. let trace_log = self .trace_log .as_ref() diff --git a/rt/src/local/waker.rs b/rt/src/local/waker.rs index 150fb865b..47528f97f 100644 --- a/rt/src/local/waker.rs +++ b/rt/src/local/waker.rs @@ -7,8 +7,8 @@ use std::task; use crossbeam_channel::Sender; use log::{error, trace}; +use crate::process::ProcessId; use crate::thread_waker::ThreadWaker; -use crate::{ptr_as_usize, ProcessId}; /// Maximum number of threads currently supported by this `Waker` /// implementation. @@ -174,8 +174,8 @@ impl WakerData { /// /// This doesn't check if the provided `data` is valid, the caller is /// responsible for this. - const unsafe fn from_raw_data(data: *const ()) -> WakerData { - WakerData(ptr_as_usize(data)) + unsafe fn from_raw_data(data: *const ()) -> WakerData { + WakerData(data as usize) } /// Convert `WakerData` into raw data for `RawWaker`. diff --git a/rt/src/setup.rs b/rt/src/setup.rs index 133eb19cb..bbb6ca4de 100644 --- a/rt/src/setup.rs +++ b/rt/src/setup.rs @@ -34,7 +34,7 @@ pub struct Setup { impl Setup { /// See [`Runtime::setup`]. - pub(super) const fn new() -> Setup { + pub(crate) const fn new() -> Setup { Setup { name: None, threads: 1, @@ -63,10 +63,7 @@ impl Setup { /// Most applications would want to use [`Setup::use_all_cores`] which sets /// the number of threads equal to the number of CPU cores. 
pub fn num_threads(mut self, n: usize) -> Self { - assert!( - n != 0, - "Can't create zero worker threads, one is the minimum" - ); + assert!(n != 0, "Can't create zero worker threads, 1 is the minimum"); assert!( n < MAX_THREADS, "Can't create {n} worker threads, {MAX_THREADS} is the maximum", diff --git a/rt/src/wakers/shared.rs b/rt/src/wakers/shared.rs index 25f35242a..8580e7a49 100644 --- a/rt/src/wakers/shared.rs +++ b/rt/src/wakers/shared.rs @@ -5,8 +5,8 @@ use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::Weak; use std::task; +use crate::process::ProcessId; use crate::shared::RuntimeInternals; -use crate::{ptr_as_usize, ProcessId}; /// Maximum number of runtimes supported. const MAX_RUNTIMES: usize = 1 << MAX_RUNTIMES_BITS; @@ -125,8 +125,8 @@ impl WakerData { /// /// The caller must ensure the `data` is created using /// [`WakerData::into_raw_data`]. - const unsafe fn from_raw_data(data: *const ()) -> WakerData { - WakerData(ptr_as_usize(data)) + unsafe fn from_raw_data(data: *const ()) -> WakerData { + WakerData(data as usize) } /// Convert [`WakerData`] into raw data for [`task::RawWaker`]. From 2831b3bed686f47083c7d585cf8e7c55c9f2cbe9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:37:23 +0200 Subject: [PATCH 121/177] Small improvement to Signal docs --- rt/src/signal.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/rt/src/signal.rs b/rt/src/signal.rs index 2943ac027..b96226419 100644 --- a/rt/src/signal.rs +++ b/rt/src/signal.rs @@ -14,9 +14,9 @@ use std::fmt; /// /// What happens to threads spawned outside of Heph's control, i.e. manually /// spawned, before calling [`rt::Setup::build`] is unspecified. They may still -/// receive a process signal or they may not. This is due to platform -/// limitations and differences. Any manually spawned threads spawned after -/// calling build should not get a process signal. +/// receive a process signal or they may not. This is due to OS limitations and +/// differences. Any manually spawned threads spawned after calling build should +/// not get a process signal. /// /// The runtime will only attempt to send the process signal to the actor once. /// If the message can't be send it's **not** retried. Ensure that the inbox of @@ -67,7 +67,7 @@ pub enum Signal { impl Signal { /// Convert a [`mio_signals::Signal`] into our own `Signal`. - pub(super) const fn from_mio(signal: mio_signals::Signal) -> Signal { + pub(crate) const fn from_mio(signal: mio_signals::Signal) -> Signal { match signal { mio_signals::Signal::Interrupt => Signal::Interrupt, mio_signals::Signal::Terminate => Signal::Terminate, @@ -78,7 +78,7 @@ impl Signal { } /// Whether or not the `Signal` is considered a "stopping" signal. - pub(super) const fn should_stop(self) -> bool { + pub(crate) const fn should_stop(self) -> bool { match self { Signal::Interrupt | Signal::Terminate | Signal::Quit => true, Signal::User1 | Signal::User2 => false, @@ -86,7 +86,7 @@ impl Signal { } /// Returns a human readable name for the signal. - pub(super) const fn as_str(self) -> &'static str { + pub(crate) const fn as_str(self) -> &'static str { match self { Signal::Interrupt => "interrupt", Signal::Terminate => "terminate", From 0a05bab22cc29f77c12256d911dc8fbcd7ecfe74 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:39:57 +0200 Subject: [PATCH 122/177] Use upper case SAFETY convention This code was written before the convention was commonplace, but it is let's switch to it everywhere. 
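For reference, the convention in question is an uppercase `SAFETY:` comment directly above each `unsafe` block stating why its invariants hold. A minimal illustrative sketch (not taken from this patch):

    fn first_byte(data: &[u8]) -> u8 {
        assert!(!data.is_empty());
        // SAFETY: the assert above guarantees `data` is non-empty, so
        // index 0 is in bounds.
        unsafe { *data.get_unchecked(0) }
    }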
--- rt/src/io/buf.rs | 2 +- rt/src/local/waker.rs | 10 +++++----- rt/src/pipe.rs | 6 +++--- rt/src/shared/mod.rs | 2 +- rt/src/thread_waker.rs | 6 +++--- rt/src/timers/mod.rs | 2 +- rt/src/util.rs | 6 +++--- rt/tests/functional/io.rs | 4 ++-- rt/tests/functional/tcp/server.rs | 2 +- rt/tests/util/mod.rs | 4 ++-- 10 files changed, 22 insertions(+), 22 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 983912c98..57aeddad2 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -114,7 +114,7 @@ pub unsafe trait BufMut: 'static { /// // Writes `src` to `buf`. /// # let (dst, len) = unsafe { buf.parts_mut() }; /// # let len = std::cmp::min(src.len(), len); -/// # // Safety: both the src and dst pointers are good. And we've ensured +/// # // SAFETY: both the src and dst pointers are good. And we've ensured /// # // that the length is correct, not overwriting data we don't own or /// # // reading data we don't own. /// # unsafe { diff --git a/rt/src/local/waker.rs b/rt/src/local/waker.rs index 47528f97f..dd45ac0d1 100644 --- a/rt/src/local/waker.rs +++ b/rt/src/local/waker.rs @@ -39,7 +39,7 @@ pub(crate) fn init(waker: mio::Waker, notifications: Sender) -> Waker "Created too many Heph worker threads" ); - // Safety: this is safe because we are the only thread that has write access + // SAFETY: this is safe because we are the only thread that has write access // to the given index. See documentation of `THREAD_WAKERS` for more. unsafe { THREAD_WAKERS[thread_id as usize] = Some(Waker { @@ -56,7 +56,7 @@ pub(crate) fn init(waker: mio::Waker, notifications: Sender) -> Waker pub(crate) fn new(waker_id: WakerId, pid: ProcessId) -> task::Waker { let data = WakerData::new(waker_id, pid).into_raw_data(); let raw_waker = task::RawWaker::new(data, &WAKER_VTABLE); - // Safety: we follow the contract on `RawWaker`. + // SAFETY: we follow the contract on `RawWaker`. unsafe { task::Waker::from_raw(raw_waker) } } @@ -90,7 +90,7 @@ const NO_WAKER: Option = None; /// Get waker data for `waker_id` fn get(waker_id: WakerId) -> &'static Waker { - // Safety: `WakerId` is only created by `init`, which ensures its valid. + // SAFETY: `WakerId` is only created by `init`, which ensures its valid. // Furthermore `init` ensures that `THREAD_WAKER[waker_id]` is initialised // and is read-only after that. See `THREAD_WAKERS` documentation for more. unsafe { @@ -156,14 +156,14 @@ impl WakerData { /// Get the thread id of from the waker data. const fn waker_id(self) -> WakerId { - // Safety: `WakerId` is u8, so no truncating. + // SAFETY: `WakerId` is u8, so no truncating. #[allow(clippy::cast_possible_truncation)] WakerId((self.0 >> THREAD_SHIFT) as u8) } /// Get the process id from the waker data. const fn pid(self) -> ProcessId { - // Safety: checked pid in `WakerData::new`, so no truncation. + // SAFETY: checked pid in `WakerData::new`, so no truncation. #[allow(clippy::cast_possible_truncation)] ProcessId(self.0 & !THREAD_MASK) } diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 9f104728e..14e8f6fea 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -145,7 +145,7 @@ impl Sender { where RT: Access, { - // Safety: `ChildStdin` is guaranteed to be a valid file descriptor. + // SAFETY: `ChildStdin` is guaranteed to be a valid file descriptor. let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), rt.submission_queue()) }; Ok(Sender { fd }) } @@ -205,7 +205,7 @@ impl Receiver { where RT: Access, { - // Safety: `ChildStdout` is guaranteed to be a valid file descriptor. 
+ // SAFETY: `ChildStdout` is guaranteed to be a valid file descriptor. let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } @@ -215,7 +215,7 @@ impl Receiver { where RT: Access, { - // Safety: `ChildStderr` is guaranteed to be a valid file descriptor. + // SAFETY: `ChildStderr` is guaranteed to be a valid file descriptor. let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 873288868..1d557a25c 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -282,7 +282,7 @@ impl RuntimeInternals { // [1]: https://en.wikipedia.org/wiki/Thundering_herd_problem // [2]: https://en.wikipedia.org/wiki/Round-robin_scheduling let n = min(n, self.worker_wakers.len()); - // Safety: needs to sync with itself. + // SAFETY: needs to sync with itself. let wake_worker_idx = self.wake_worker_idx.fetch_add(n, Ordering::AcqRel) % self.worker_wakers.len(); let (wake_second, wake_first) = self.worker_wakers.split_at(wake_worker_idx); diff --git a/rt/src/thread_waker.rs b/rt/src/thread_waker.rs index e050de67a..e893f6a63 100644 --- a/rt/src/thread_waker.rs +++ b/rt/src/thread_waker.rs @@ -36,13 +36,13 @@ impl ThreadWaker { /// Wake up the thread if it's not currently polling. Returns `true` if the /// thread is awoken, `false` otherwise. pub(crate) fn wake(&self) -> io::Result { - // Safety: this needs to sync with the `store(Release)` in + // SAFETY: this needs to sync with the `store(Release)` in // `mark_polling`, hence `Acquire` is needed. if self.polling_status.load(Ordering::Acquire) != IS_POLLING { return Ok(false); } - // Safety: this needs to sync with the `store(Release)` in + // SAFETY: this needs to sync with the `store(Release)` in // `mark_polling`, hence `AcqRel` is needed. if self.polling_status.fetch_add(WAKING, Ordering::AcqRel) == IS_POLLING { self.waker.wake().map(|()| true) @@ -54,7 +54,7 @@ impl ThreadWaker { /// Mark the thread as currently polling (or not). pub(crate) fn mark_polling(&self, is_polling: bool) { let status = if is_polling { IS_POLLING } else { NOT_POLLING }; - // Safety: this needs to sync with the `load` and `fetch_add` in `wake`, + // SAFETY: this needs to sync with the `load` and `fetch_add` in `wake`, // thus `Release` is needed. // NOTE: don't lower the strength of this ordering as will not generate // the correct assembly. diff --git a/rt/src/timers/mod.rs b/rt/src/timers/mod.rs index 8671c7922..07879b72d 100644 --- a/rt/src/timers/mod.rs +++ b/rt/src/timers/mod.rs @@ -307,7 +307,7 @@ impl Timers { } fn current_slot(&mut self) -> &mut Vec> { - // Safety: `self.index` is always valid. + // SAFETY: `self.index` is always valid. &mut self.slots[self.index as usize] } } diff --git a/rt/src/util.rs b/rt/src/util.rs index e24c2e031..086d5f824 100644 --- a/rt/src/util.rs +++ b/rt/src/util.rs @@ -27,12 +27,12 @@ where type Output = Result; fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // Safety: not moving `future1`. + // SAFETY: not moving `future1`. let future1 = unsafe { Pin::map_unchecked_mut(self.as_mut(), |s| &mut s.future1) }; match future1.poll(ctx) { Poll::Ready(value) => Poll::Ready(Ok(value)), Poll::Pending => { - // Safety: not moving `future2`. + // SAFETY: not moving `future2`. 
let future2 = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.future2) }; match future2.poll(ctx) { Poll::Ready(value) => Poll::Ready(Err(value)), @@ -62,7 +62,7 @@ where type Output = Option; fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // Safety: not moving `iter`. + // SAFETY: not moving `iter`. unsafe { Pin::map_unchecked_mut(self, |s| &mut s.iter).poll_next(ctx) } } } diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 6637e42a7..25545157c 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -14,7 +14,7 @@ fn write_bytes(src: &[u8], buf: &mut B) -> usize { let (dst, len) = unsafe { buf.parts_mut() }; assert_eq!(len, spare_capacity); let len = min(src.len(), len); - // Safety: both the `src` and `dst` pointers are good. And we've ensured + // SAFETY: both the `src` and `dst` pointers are good. And we've ensured // that the length is correct, not overwriting data we don't own or reading // data we don't own. unsafe { @@ -29,7 +29,7 @@ fn write_bytes_vectored, const N: usize>(src: &[u8], bufs: &mu let mut left = src; for iovec in unsafe { bufs.as_iovecs_mut() } { let len = min(left.len(), iovec.iov_len); - // Safety: both the `left` and `dst` pointers are good. And we've + // SAFETY: both the `left` and `dst` pointers are good. And we've // ensured that the length is correct, not overwriting data we don't own // or reading data we don't own. unsafe { diff --git a/rt/tests/functional/tcp/server.rs b/rt/tests/functional/tcp/server.rs index df7c76d4b..c6930e179 100644 --- a/rt/tests/functional/tcp/server.rs +++ b/rt/tests/functional/tcp/server.rs @@ -158,7 +158,7 @@ fn new_actor_error() { ctx: &mut task::Context<'_>, ) -> Poll> { let res = Actor::try_poll( - // Safety: not moving. + // SAFETY: not moving. unsafe { Pin::new_unchecked(&mut Pin::into_inner_unchecked(self).0) }, ctx, ); diff --git a/rt/tests/util/mod.rs b/rt/tests/util/mod.rs index ca2e29245..03300786a 100644 --- a/rt/tests/util/mod.rs +++ b/rt/tests/util/mod.rs @@ -165,7 +165,7 @@ where type Output = (Fut::Output, usize); fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // Safety: this is safe because we're not moving the future. + // SAFETY: this is safe because we're not moving the future. let this = unsafe { Pin::into_inner_unchecked(self) }; this.count += 1; let future = unsafe { Pin::new_unchecked(&mut this.inner) }; @@ -180,7 +180,7 @@ where type Item = (I::Item, usize); fn poll_next(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll> { - // Safety: this is safe because we're not moving the future. + // SAFETY: this is safe because we're not moving the future. let this = unsafe { Pin::into_inner_unchecked(self) }; this.count += 1; let iter = unsafe { Pin::new_unchecked(&mut this.inner) }; From ffda1721aafa53f680ef6e710165238976be9b92 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:40:34 +0200 Subject: [PATCH 123/177] Switch to use pub(crate) everywhere Instead of pub(super). --- rt/src/channel.rs | 6 +++--- rt/src/coordinator.rs | 10 +++++----- rt/src/error.rs | 16 ++++++++-------- rt/src/local/mod.rs | 24 ++++++++++++------------ rt/src/sync_worker.rs | 8 ++++---- rt/src/worker.rs | 24 ++++++++++++------------ 6 files changed, 44 insertions(+), 44 deletions(-) diff --git a/rt/src/channel.rs b/rt/src/channel.rs index 94c6d8455..18626044f 100644 --- a/rt/src/channel.rs +++ b/rt/src/channel.rs @@ -57,7 +57,7 @@ impl Sender { } /// Register the sending end of the Unix pipe of this channel. 
- pub(super) fn register(&mut self, registry: &Registry, token: Token) -> io::Result<()> { + pub(crate) fn register(&mut self, registry: &Registry, token: Token) -> io::Result<()> { registry.register(&mut self.pipe, token, Interest::WRITABLE) } } @@ -71,7 +71,7 @@ pub(crate) struct Receiver { impl Receiver { /// Try to receive a message from the channel. - pub(super) fn try_recv(&mut self) -> io::Result> { + pub(crate) fn try_recv(&mut self) -> io::Result> { if let Ok(msg) = self.channel.try_recv() { Ok(Some(msg)) } else { @@ -98,7 +98,7 @@ impl Receiver { } /// Register the receiving end of the Unix pipe of this channel. - pub(super) fn register(&mut self, registry: &Registry, token: Token) -> io::Result<()> { + pub(crate) fn register(&mut self, registry: &Registry, token: Token) -> io::Result<()> { registry.register(&mut self.pipe, token, Interest::READABLE) } } diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index c48aaba1e..e27195d48 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -45,7 +45,7 @@ const SIGNAL: Token = Token(usize::MAX); const RING: Token = Token(usize::MAX - 1); /// Coordinator responsible for coordinating the Heph runtime. -pub(super) struct Coordinator { +pub(crate) struct Coordinator { /// io_uring completion ring. ring: a10::Ring, /// OS poll, used to poll the status of the (sync) worker threads and @@ -80,7 +80,7 @@ impl Coordinator { /// /// This must be called before creating the worker threads to properly catch /// process signals. - pub(super) fn init( + pub(crate) fn init( ring: a10::Ring, app_name: Box, worker_wakers: Box<[&'static ThreadWaker]>, @@ -115,7 +115,7 @@ impl Coordinator { } /// Get access to the shared runtime internals. - pub(super) const fn shared_internals(&self) -> &Arc { + pub(crate) const fn shared_internals(&self) -> &Arc { &self.internals } @@ -124,7 +124,7 @@ impl Coordinator { /// # Notes /// /// `workers` and `sync_workers` must be sorted based on `id`. - pub(super) fn run( + pub(crate) fn run( mut self, mut workers: Vec, mut sync_workers: Vec, @@ -460,7 +460,7 @@ fn handle_sync_worker_event( /// Error running the [`Coordinator`]. #[derive(Debug)] -pub(super) enum Error { +pub(crate) enum Error { /// Error in starting up the Coordinator. Startup(io::Error), /// Error in [`register_workers`]. 
diff --git a/rt/src/error.rs b/rt/src/error.rs index 0e03449b7..297f03a02 100644 --- a/rt/src/error.rs +++ b/rt/src/error.rs @@ -52,49 +52,49 @@ impl Error { } } - pub(super) const fn setup_trace(err: io::Error) -> Error { + pub(crate) const fn setup_trace(err: io::Error) -> Error { Error { inner: ErrorInner::SetupTrace(err), } } - pub(super) const fn init_coordinator(err: io::Error) -> Error { + pub(crate) const fn init_coordinator(err: io::Error) -> Error { Error { inner: ErrorInner::InitCoordinator(err), } } - pub(super) const fn coordinator(err: coordinator::Error) -> Error { + pub(crate) const fn coordinator(err: coordinator::Error) -> Error { Error { inner: ErrorInner::Coordinator(err), } } - pub(super) const fn start_worker(err: io::Error) -> Error { + pub(crate) const fn start_worker(err: io::Error) -> Error { Error { inner: ErrorInner::StartWorker(err), } } - pub(super) const fn worker(err: worker::Error) -> Error { + pub(crate) const fn worker(err: worker::Error) -> Error { Error { inner: ErrorInner::Worker(err), } } - pub(super) fn worker_panic(err: Box) -> Error { + pub(crate) fn worker_panic(err: Box) -> Error { Error { inner: ErrorInner::WorkerPanic(convert_panic(err)), } } - pub(super) const fn start_sync_actor(err: io::Error) -> Error { + pub(crate) const fn start_sync_actor(err: io::Error) -> Error { Error { inner: ErrorInner::StartSyncActor(err), } } - pub(super) fn sync_actor_panic(err: Box) -> Error { + pub(crate) fn sync_actor_panic(err: Box) -> Error { Error { inner: ErrorInner::SyncActorPanic(convert_panic(err)), } diff --git a/rt/src/local/mod.rs b/rt/src/local/mod.rs index 4f75118c9..85d71679b 100644 --- a/rt/src/local/mod.rs +++ b/rt/src/local/mod.rs @@ -9,7 +9,7 @@ use mio::Poll; use crate::{shared, trace, Signal}; -pub(super) mod waker; +pub(crate) mod waker; use crate::scheduler::Scheduler; use crate::timers::Timers; @@ -17,32 +17,32 @@ use waker::WakerId; /// Internals of the runtime, to which `RuntimeRef`s have a reference. #[derive(Debug)] -pub(super) struct RuntimeInternals { +pub(crate) struct RuntimeInternals { /// Unique id among the worker threads. - pub(super) id: NonZeroUsize, + pub(crate) id: NonZeroUsize, /// Runtime internals shared between coordinator and worker threads. - pub(super) shared: Arc, + pub(crate) shared: Arc, /// Waker id used to create a `Waker` for thread-local actors. - pub(super) waker_id: WakerId, + pub(crate) waker_id: WakerId, /// Scheduler for thread-local actors. - pub(super) scheduler: RefCell, + pub(crate) scheduler: RefCell, /// OS poll, used for event notifications to support non-blocking I/O. - pub(super) poll: RefCell, + pub(crate) poll: RefCell, /// io_uring completion ring. - pub(super) ring: RefCell, + pub(crate) ring: RefCell, /// Timers, deadlines and timeouts. pub(crate) timers: RefCell, /// Actor references to relay received `Signal`s to. - pub(super) signal_receivers: RefCell>, + pub(crate) signal_receivers: RefCell>, /// CPU affinity of the worker thread, or `None` if not set. - pub(super) cpu: Option, + pub(crate) cpu: Option, /// Log used for tracing, `None` is tracing is disabled. - pub(super) trace_log: RefCell>, + pub(crate) trace_log: RefCell>, } impl RuntimeInternals { /// Create a local runtime internals. 
- pub(super) fn new( + pub(crate) fn new( id: NonZeroUsize, shared_internals: Arc, waker_id: WakerId, diff --git a/rt/src/sync_worker.rs b/rt/src/sync_worker.rs index a5fe6be9c..f45fda16e 100644 --- a/rt/src/sync_worker.rs +++ b/rt/src/sync_worker.rs @@ -67,7 +67,7 @@ impl SyncWorker { } /// Return the worker's id. - pub(super) const fn id(&self) -> usize { + pub(crate) const fn id(&self) -> usize { self.id } @@ -75,12 +75,12 @@ impl SyncWorker { /// thread. Uses the [`id`] as [`Token`]. /// /// [`id`]: SyncWorker::id - pub(super) fn register(&mut self, registry: &Registry) -> io::Result<()> { + pub(crate) fn register(&mut self, registry: &Registry) -> io::Result<()> { registry.register(&mut self.sender, Token(self.id), Interest::WRITABLE) } /// Checks if the `SyncWorker` is alive. - pub(super) fn is_alive(&self) -> bool { + pub(crate) fn is_alive(&self) -> bool { match (&self.sender).write(&[]) { Ok(..) => true, Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => true, @@ -89,7 +89,7 @@ impl SyncWorker { } /// See [`thread::JoinHandle::join`]. - pub(super) fn join(self) -> thread::Result<()> { + pub(crate) fn join(self) -> thread::Result<()> { self.handle.join() } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index e46f66dd2..8e7fdb8c3 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -45,7 +45,7 @@ use crate::{self as rt, cpu_usage, shared, trace, RuntimeRef, Signal}; const RUN_POLL_RATIO: usize = 32; /// Token used to indicate user space events have happened. -pub(super) const WAKER: Token = Token(usize::MAX); +pub(crate) const WAKER: Token = Token(usize::MAX); /// Token used to indicate a message was received on the communication channel. const COMMS: Token = Token(usize::MAX - 1); /// Token used to indicate the shared [`Poll`] (in [`shared::RuntimeInternals`]) @@ -59,7 +59,7 @@ const SHARED_RING: Token = Token(usize::MAX - 4); /// Setup a new worker thread. /// /// Use [`WorkerSetup::start`] to spawn the worker thread. -pub(super) fn setup( +pub(crate) fn setup( id: NonZeroUsize, coordinator_sq: &a10::SubmissionQueue, ) -> io::Result<(WorkerSetup, &'static ThreadWaker)> { @@ -85,7 +85,7 @@ pub(super) fn setup( } /// Setup work required before starting a worker thread, see [`setup`]. -pub(super) struct WorkerSetup { +pub(crate) struct WorkerSetup { /// See [`WorkerSetup::id`]. id: NonZeroUsize, /// Poll instance for the worker thread. This is needed before starting the @@ -101,7 +101,7 @@ pub(super) struct WorkerSetup { impl WorkerSetup { /// Start a new worker thread. - pub(super) fn start( + pub(crate) fn start( self, shared_internals: Arc, auto_cpu_affinity: bool, @@ -131,14 +131,14 @@ impl WorkerSetup { } /// Return the worker's id. - pub(super) const fn id(&self) -> usize { + pub(crate) const fn id(&self) -> usize { self.id.get() } } /// Handle to a worker thread. #[derive(Debug)] -pub(super) struct Handle { +pub(crate) struct Handle { /// Unique id (among all threads in the [`rt::Runtime`]). id: NonZeroUsize, /// Two-way communication channel to share messages with the worker thread. @@ -149,27 +149,27 @@ pub(super) struct Handle { impl Handle { /// Return the worker's id. - pub(super) const fn id(&self) -> usize { + pub(crate) const fn id(&self) -> usize { self.id.get() } /// Registers the channel used to communicate with the thread. 
- pub(super) fn register(&mut self, registry: &Registry) -> io::Result<()> { + pub(crate) fn register(&mut self, registry: &Registry) -> io::Result<()> { self.channel.register(registry, Token(self.id())) } /// Send the worker thread a signal that the runtime has started. - pub(super) fn send_runtime_started(&mut self) -> io::Result<()> { + pub(crate) fn send_runtime_started(&mut self) -> io::Result<()> { self.channel.try_send(Control::Started) } /// Send the worker thread a `signal`. - pub(super) fn send_signal(&mut self, signal: Signal) -> io::Result<()> { + pub(crate) fn send_signal(&mut self, signal: Signal) -> io::Result<()> { self.channel.try_send(Control::Signal(signal)) } /// Send the worker thread the function `f` to run. - pub(super) fn send_function( + pub(crate) fn send_function( &mut self, f: Box Result<(), String> + Send + 'static>, ) -> io::Result<()> { @@ -177,7 +177,7 @@ impl Handle { } /// See [`thread::JoinHandle::join`]. - pub(super) fn join(self) -> thread::Result> { + pub(crate) fn join(self) -> thread::Result> { self.handle.join() } } From 52d52ac84679bf1d1976b735d532ca3a0c768e7e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 18:57:20 +0200 Subject: [PATCH 124/177] Use SOMAXCONN in calls to listen --- rt/src/net/tcp/listener.rs | 2 +- rt/src/net/uds/listener.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 1b6f4881b..26cd95f81 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -207,7 +207,7 @@ impl TcpListener { setup(&socket)?; socket.bind(&address.into())?; - socket.listen(1024)?; + socket.listen(libc::SOMAXCONN)?; Ok(()) })?; diff --git a/rt/src/net/uds/listener.rs b/rt/src/net/uds/listener.rs index 705fd2ce3..a68fe4c8a 100644 --- a/rt/src/net/uds/listener.rs +++ b/rt/src/net/uds/listener.rs @@ -123,7 +123,7 @@ impl UnixListener { } socket.bind(&address.inner)?; - socket.listen(1024)?; + socket.listen(libc::SOMAXCONN)?; Ok(()) })?; From 5ab3cc31f5cb6c93dcc3b230e629489895a38cc4 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 22:31:48 +0200 Subject: [PATCH 125/177] Use no-op task::Waker in test::poll_* functions No point in creating it via runtime that isn't used. --- rt/src/lib.rs | 10 ---------- rt/src/test.rs | 17 +++++------------ 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index 1418cdf9b..60207c707 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -610,16 +610,6 @@ impl RuntimeRef { .add_unique(actor_ref); } - /// Get a clone of the sending end of the notification channel. - /// - /// # Notes - /// - /// Prefer `new_waker` if possible, only use `task::Waker` for `Future`s. - #[cfg(any(test, feature = "test"))] - fn new_local_task_waker(&self, pid: ProcessId) -> task::Waker { - local::waker::new(self.internals.waker_id, pid) - } - /// Add a timer. 
pub(crate) fn add_timer(&self, deadline: Instant, waker: task::Waker) -> TimerToken { ::log::trace!(deadline = as_debug!(deadline); "adding timer"); diff --git a/rt/src/test.rs b/rt/src/test.rs index 39e10bcd7..19ccfa3e7 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -68,7 +68,7 @@ use crate::thread_waker::ThreadWaker; use crate::wakers::shared::Wakers; use crate::worker::{Control, Worker}; use crate::{ - self as rt, shared, ProcessId, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END, + self as rt, shared, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END, SYNC_WORKER_ID_START, }; @@ -76,8 +76,6 @@ use crate::{ #[cfg(feature = "test")] pub use heph::test::*; -pub(crate) const TEST_PID: ProcessId = ProcessId(0); - pub(crate) fn noop_waker() -> &'static ThreadWaker { static NOOP_WAKER: OnceLock = OnceLock::new(); NOOP_WAKER.get_or_init(|| { @@ -449,8 +447,6 @@ where /// Poll a future. /// -/// The [`task::Context`] will be provided by the *test* runtime. -/// /// # Notes /// /// Wake notifications will be ignored, if this is required run an end to end @@ -459,15 +455,13 @@ pub fn poll_future(future: Pin<&mut Fut>) -> Poll where Fut: Future + ?Sized, { - let waker = runtime().new_local_task_waker(TEST_PID); + let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); Future::poll(future, &mut ctx) } /// Poll a [`AsyncIterator`]. /// -/// The [`task::Context`] will be provided by the *test* runtime. -/// /// # Notes /// /// Wake notifications will be ignored, if this is required run an end to end @@ -476,7 +470,7 @@ pub fn poll_next(iter: Pin<&mut I>) -> Poll> where I: AsyncIterator + ?Sized, { - let waker = runtime().new_local_task_waker(TEST_PID); + let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); AsyncIterator::poll_next(iter, &mut ctx) } @@ -484,7 +478,7 @@ where /// Poll an actor. /// /// This is effectively the same function as [`poll_future`], but instead polls -/// an actor. The [`task::Context`] will be provided by the *test* runtime. +/// an actor. /// /// # Notes /// @@ -494,7 +488,7 @@ pub fn poll_actor(actor: Pin<&mut A>) -> Poll> where A: Actor + ?Sized, { - let waker = runtime().new_local_task_waker(TEST_PID); + let waker = nop_task_waker(); let mut ctx = task::Context::from_waker(&waker); Actor::try_poll(actor, &mut ctx) } @@ -546,7 +540,6 @@ unsafe impl Send for AssertUnmoved {} unsafe impl std::marker::Sync for AssertUnmoved {} /// Returns a no-op [`task::Waker`]. -#[cfg(test)] pub(crate) fn nop_task_waker() -> task::Waker { use std::task::{RawWaker, RawWakerVTable}; static WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( From 007c1204ab8a957bbe638df531855875f4cd2023 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 23:00:00 +0200 Subject: [PATCH 126/177] Don't expose test::runtime Encourages bad practices. --- rt/src/test.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 19ccfa3e7..4168df3d5 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -6,8 +6,6 @@ //! (properly). //! //! Available utilities: -//! * Blocking on [`Future`]s: -//! * [`block_on`]: spawns a `Future` and waits for the result. //! * Spawning: //! * [`try_spawn_local`]: attempt to spawn a thread-local [actor]. //! * [`try_spawn`]: attempt to spawn a thread-safe [actor]. @@ -17,20 +15,22 @@ //! * Waiting on spawned actors: //! * [`join`], [`join_many`]: wait for the actor(s) to finish running. //! 
* [`join_all`]: wait all actors in a group to finish running. +//! * Blocking on [`Future`]s: +//! * [`block_on`]: spawns a `Future` and waits for the result. //! * Initialising actors: //! * [`init_local_actor`]: initialise a thread-local actor. //! * [`init_actor`]: initialise a thread-safe actor. //! * Polling: //! * [`poll_actor`]: poll an [`Actor`]. //! * [`poll_future`]: poll a [`Future`]. -//! * [`poll_next`]: poll a [`AsyncIterator`]. +//! * [`poll_next`]: poll an [`AsyncIterator`]. //! * Miscellaneous: //! * [`size_of_actor`], [`size_of_actor_val`]: returns the size of an actor. //! * [`set_message_loss`]: set the percentage of messages lost on purpose. //! * [`PanicSupervisor`]: supervisor that panics when it receives an actor's //! error. //! -//! [actor]: actor +//! [actor]: heph::actor //! [synchronous actor]: SyncActor //! //! # Notes @@ -107,7 +107,7 @@ pub(crate) fn shared_internals() -> Arc { /// /// The returned runtime reference is **not** a reference to the *test* runtime /// as described in the module documentation. -pub fn runtime() -> RuntimeRef { +pub(crate) fn runtime() -> RuntimeRef { thread_local! { /// Per thread runtime. static TEST_RT: Worker = { From bc72bd448a3be1171557e03751e555a4fb7d37ab Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 23:03:42 +0200 Subject: [PATCH 127/177] Simplify test::init_(local_)actor --- rt/src/test.rs | 42 +++++++++--------------------------------- 1 file changed, 9 insertions(+), 33 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 4168df3d5..41fa82907 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -59,8 +59,8 @@ use std::{io, slice, thread}; use heph::actor::{self, Actor, NewActor, SyncActor, SyncWaker}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; +use heph_inbox as inbox; use heph_inbox::oneshot::new_oneshot; -use heph_inbox::Manager; use crate::spawn::{ActorOptions, FutureOptions, SyncActorOptions}; use crate::sync_worker::SyncWorker; @@ -362,55 +362,31 @@ pub fn join_all(actors: &ActorGroup, timeout: Duration) -> JoinResult { /// Initialise a thread-local actor. #[allow(clippy::type_complexity)] pub fn init_local_actor( - new_actor: NA, - arg: NA::Argument, -) -> Result<(NA::Actor, ActorRef), NA::Error> -where - NA: NewActor, -{ - init_local_actor_with_inbox(new_actor, arg).map(|(actor, _, actor_ref)| (actor, actor_ref)) -} - -/// Initialise a thread-safe actor. -#[allow(clippy::type_complexity)] -pub fn init_actor( - new_actor: NA, - arg: NA::Argument, -) -> Result<(NA::Actor, ActorRef), NA::Error> -where - NA: NewActor, -{ - init_actor_with_inbox(new_actor, arg).map(|(actor, _, actor_ref)| (actor, actor_ref)) -} - -/// Initialise a thread-local actor with access to it's inbox. -#[allow(clippy::type_complexity)] -pub(crate) fn init_local_actor_with_inbox( mut new_actor: NA, arg: NA::Argument, -) -> Result<(NA::Actor, Manager, ActorRef), NA::Error> +) -> Result<(NA::Actor, ActorRef), NA::Error> where NA: NewActor, { - let (manager, sender, receiver) = Manager::new_small_channel(); + let (sender, receiver) = inbox::new_small(); let ctx = actor::Context::new(receiver, ThreadLocal::new(runtime())); let actor = new_actor.new(ctx, arg)?; - Ok((actor, manager, ActorRef::local(sender))) + Ok((actor, ActorRef::local(sender))) } -/// Initialise a thread-safe actor with access to it's inbox. +/// Initialise a thread-safe actor. 
#[allow(clippy::type_complexity)] -pub(crate) fn init_actor_with_inbox( +pub fn init_actor( mut new_actor: NA, arg: NA::Argument, -) -> Result<(NA::Actor, Manager, ActorRef), NA::Error> +) -> Result<(NA::Actor, ActorRef), NA::Error> where NA: NewActor, { - let (manager, sender, receiver) = Manager::new_small_channel(); + let (sender, receiver) = inbox::new_small(); let ctx = actor::Context::new(receiver, ThreadSafe::new(shared_internals())); let actor = new_actor.new(ctx, arg)?; - Ok((actor, manager, ActorRef::local(sender))) + Ok((actor, ActorRef::local(sender))) } /// Spawn a synchronous actor. From a26c3dfe745127c5382be6c8c0445248db1d5303 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 27 Apr 2023 23:10:03 +0200 Subject: [PATCH 128/177] Add TestAssertUnmovedNewActor to test module --- rt/src/scheduler/shared/tests.rs | 27 +++++---------------------- rt/src/scheduler/tests.rs | 27 +++++---------------------- rt/src/test.rs | 27 +++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 44 deletions(-) diff --git a/rt/src/scheduler/shared/tests.rs b/rt/src/scheduler/shared/tests.rs index ecc6f2fc0..b4e4a11fa 100644 --- a/rt/src/scheduler/shared/tests.rs +++ b/rt/src/scheduler/shared/tests.rs @@ -1,15 +1,15 @@ //! Tests for the shared scheduler. -use std::future::{pending, Pending}; +use std::future::pending; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; -use heph::actor::{self, ActorFuture, NewActor}; +use heph::actor::{self, ActorFuture}; use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, ProcessId}; use crate::scheduler::shared::{Priority, ProcessData, Scheduler}; -use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved}; +use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved, TestAssertUnmovedNewActor}; use crate::ThreadSafe; #[test] @@ -128,24 +128,6 @@ fn scheduler_run_order() { assert_eq!(*run_order.lock().unwrap(), vec![2_usize, 1, 0]); } -struct TestAssertUnmovedNewActor; - -impl NewActor for TestAssertUnmovedNewActor { - type Message = (); - type Argument = (); - type Actor = AssertUnmoved>>; - type Error = !; - type RuntimeAccess = ThreadSafe; - - fn new( - &mut self, - _: actor::Context, - _: Self::Argument, - ) -> Result { - Ok(AssertUnmoved::new(pending())) - } -} - #[test] fn assert_actor_process_unmoved() { let scheduler = Scheduler::new(); @@ -153,7 +135,8 @@ fn assert_actor_process_unmoved() { let mut ctx = task::Context::from_waker(&waker); let rt = ThreadSafe::new(test::shared_internals()); - let (process, _) = ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt).unwrap(); + let (process, _) = + ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor::new(), (), rt).unwrap(); let pid = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the diff --git a/rt/src/scheduler/tests.rs b/rt/src/scheduler/tests.rs index 1ed6e21ae..aee92a145 100644 --- a/rt/src/scheduler/tests.rs +++ b/rt/src/scheduler/tests.rs @@ -1,19 +1,19 @@ //! Tests for the local scheduler. 
use std::cell::RefCell; +use std::future::pending; use std::future::Future; -use std::future::{pending, Pending}; use std::pin::Pin; use std::rc::Rc; use std::task::{self, Poll}; -use heph::actor::{self, ActorFuture, NewActor}; +use heph::actor::{self, ActorFuture}; use heph::supervisor::NoSupervisor; use crate::process::{FutureProcess, Process, ProcessId}; use crate::scheduler::{ProcessData, Scheduler}; use crate::spawn::options::Priority; -use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved}; +use crate::test::{self, assert_size, nop_task_waker, AssertUnmoved, TestAssertUnmovedNewActor}; use crate::ThreadLocal; #[test] @@ -219,24 +219,6 @@ fn scheduler_run_order() { assert_eq!(*run_order.borrow(), vec![2_usize, 1, 0]); } -struct TestAssertUnmovedNewActor; - -impl NewActor for TestAssertUnmovedNewActor { - type Message = (); - type Argument = (); - type Actor = AssertUnmoved>>; - type Error = !; - type RuntimeAccess = ThreadLocal; - - fn new( - &mut self, - _: actor::Context, - _: Self::Argument, - ) -> Result { - Ok(AssertUnmoved::new(pending())) - } -} - #[test] fn assert_actor_process_unmoved() { let mut scheduler = Scheduler::new(); @@ -244,7 +226,8 @@ fn assert_actor_process_unmoved() { let mut ctx = task::Context::from_waker(&waker); let rt = ThreadLocal::new(test::runtime()); - let (process, _) = ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor, (), rt).unwrap(); + let (process, _) = + ActorFuture::new(NoSupervisor, TestAssertUnmovedNewActor::new(), (), rt).unwrap(); let pid = scheduler.add_new_process(Priority::NORMAL, process); // Run the process multiple times, ensure it's not moved in the process. diff --git a/rt/src/test.rs b/rt/src/test.rs index 41fa82907..0fc9fc130 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -515,6 +515,33 @@ unsafe impl Send for AssertUnmoved {} #[cfg(test)] unsafe impl std::marker::Sync for AssertUnmoved {} +#[cfg(test)] +pub(crate) struct TestAssertUnmovedNewActor(std::marker::PhantomData); + +#[cfg(test)] +impl TestAssertUnmovedNewActor { + pub(crate) const fn new() -> TestAssertUnmovedNewActor { + TestAssertUnmovedNewActor(std::marker::PhantomData) + } +} + +#[cfg(test)] +impl NewActor for TestAssertUnmovedNewActor { + type Message = (); + type Argument = (); + type Actor = AssertUnmoved>>; + type Error = !; + type RuntimeAccess = RT; + + fn new( + &mut self, + _: actor::Context, + _: Self::Argument, + ) -> Result { + Ok(AssertUnmoved::new(std::future::pending())) + } +} + /// Returns a no-op [`task::Waker`]. pub(crate) fn nop_task_waker() -> task::Waker { use std::task::{RawWaker, RawWakerVTable}; From 60a955522aa163a895eea44336849497c6b20175 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 28 Apr 2023 17:11:29 +0200 Subject: [PATCH 129/177] Update rustc version for heph-remote --- remote/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/remote/src/lib.rs b/remote/src/lib.rs index fc105e9c4..e3fae03c5 100644 --- a/remote/src/lib.rs +++ b/remote/src/lib.rs @@ -1,6 +1,6 @@ //! Remote messaging for Heph. -#![feature(never_type, type_alias_impl_trait)] +#![feature(never_type, impl_trait_in_assoc_type)] #![warn( anonymous_parameters, bare_trait_objects, From 6733c1f21db22402ec45d1f8ab00c4389ce79d71 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 28 Apr 2023 17:14:13 +0200 Subject: [PATCH 130/177] Fix size in example Our actor are now smaller! 
--- src/test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test.rs b/src/test.rs index afe14c148..f495a7d9b 100644 --- a/src/test.rs +++ b/src/test.rs @@ -91,7 +91,7 @@ where /// } /// } /// -/// assert_eq!(size_of_actor_val(&(actor as fn(_) -> _)), 88); +/// assert_eq!(size_of_actor_val(&(actor as fn(_) -> _)), 72); /// ``` pub const fn size_of_actor_val(_: &NA) -> usize where From a56c79b0216d41c1ae81ce79277f220a9fec9503 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 28 Apr 2023 17:47:59 +0200 Subject: [PATCH 131/177] Reduce size of example Remove or hiding a bunch of setup code that is not really relevant to what the examples are trying to show. --- rt/src/log.rs | 23 +---- rt/src/net/tcp/listener.rs | 83 ++-------------- rt/src/net/tcp/server.rs | 187 ++++++++++++++----------------------- rt/src/net/tcp/stream.rs | 3 + rt/src/net/udp.rs | 47 ++-------- rt/src/pipe.rs | 4 +- 6 files changed, 92 insertions(+), 255 deletions(-) diff --git a/rt/src/log.rs b/rt/src/log.rs index 8b32a66d2..e714fc1d5 100644 --- a/rt/src/log.rs +++ b/rt/src/log.rs @@ -17,29 +17,16 @@ //! ``` //! #![feature(never_type)] //! -//! use heph::actor; -//! use heph::supervisor::NoSupervisor; -//! use heph_rt::spawn::ActorOptions; -//! use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; +//! use heph_rt::Runtime; //! use log::info; //! -//! fn main() -> Result<(), rt::Error> { +//! fn main() -> Result<(), heph_rt::Error> { //! // Enable logging. //! std_logger::Config::logfmt().init(); //! -//! let mut runtime = Runtime::new()?; -//! runtime.run_on_workers(add_greeter_actor)?; +//! let runtime = Runtime::new()?; +//! // Runtime setup etc. +//! info!("starting runtime"); //! runtime.start() //! } -//! -//! fn add_greeter_actor(mut system_ref: RuntimeRef) -> Result<(), !> { -//! let actor = greeter_actor as fn(_) -> _; -//! system_ref.spawn_local(NoSupervisor, actor, (), ActorOptions::default()); -//! Ok(()) -//! } -//! -//! async fn greeter_actor(_: actor::Context) { -//! // Log an informational message. -//! info!("Hello world"); -//! } //! 
``` diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 26cd95f81..37f82c510 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -33,47 +33,11 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// use std::io; /// use std::net::SocketAddr; /// -/// use log::error; -/// -/// use heph::{actor, SupervisorStrategy}; -/// # use heph_rt::net::TcpStream; +/// use heph::actor; +/// use heph_rt::ThreadLocal; /// use heph_rt::net::TcpListener; -/// use heph_rt::spawn::ActorOptions; -/// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; /// use log::info; /// -/// fn main() -> Result<(), rt::Error> { -/// std_logger::Config::logfmt().init(); -/// -/// let mut runtime = Runtime::new()?; -/// runtime.run_on_workers(setup)?; -/// runtime.start() -/// } -/// -/// fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { -/// let address = "127.0.0.1:8000".parse().unwrap(); -/// -/// runtime_ref.spawn_local(supervisor, actor as fn(_, _) -> _, address, ActorOptions::default()); -/// # runtime_ref.spawn_local(supervisor, client as fn(_, _) -> _, address, ActorOptions::default()); -/// -/// Ok(()) -/// } -/// # -/// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; -/// # let local_address = stream.local_addr()?.to_string(); -/// # let buf = Vec::with_capacity(local_address.len() + 1); -/// # let buf = stream.recv_n(buf, local_address.len()).await?; -/// # assert_eq!(buf, local_address.as_bytes()); -/// # Ok(()) -/// # } -/// -/// // Simple supervisor that logs the error and stops the actor. -/// fn supervisor(err: io::Error) -> SupervisorStrategy { -/// error!("Encountered an error: {err}"); -/// SupervisorStrategy::Stop -/// } -/// /// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. /// let listener = TcpListener::bind(ctx.runtime_ref(), address).await?; @@ -87,6 +51,7 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// stream.send_all(ip).await?; /// Ok(()) /// } +/// # drop(actor); // Silence unused item warning. /// ``` /// /// Accepting multiple [`TcpStream`]s, using [`TcpListener::incoming`]. 
@@ -97,46 +62,11 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// use std::io; /// use std::net::SocketAddr; /// -/// use log::{error, info}; -/// -/// use heph::{actor, SupervisorStrategy}; +/// use heph::actor; +/// use heph_rt::ThreadLocal; /// use heph_rt::net::TcpListener; -/// # use heph_rt::net::TcpStream; -/// use heph_rt::spawn::ActorOptions; -/// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; /// use heph_rt::util::next; -/// -/// fn main() -> Result<(), rt::Error> { -/// std_logger::Config::logfmt().init(); -/// -/// let mut runtime = Runtime::new()?; -/// runtime.run_on_workers(setup)?; -/// runtime.start() -/// } -/// -/// fn setup(mut runtime_ref: RuntimeRef) -> Result<(), !> { -/// let address = "127.0.0.1:8000".parse().unwrap(); -/// -/// runtime_ref.spawn_local(supervisor, actor as fn(_, _) -> _, address, ActorOptions::default()); -/// # runtime_ref.spawn_local(supervisor, client as fn(_, _) -> _, address, ActorOptions::default()); -/// -/// Ok(()) -/// } -/// # -/// # async fn client(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { -/// # let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; -/// # let local_address = stream.local_addr()?.to_string(); -/// # let buf = Vec::with_capacity(local_address.len() + 1); -/// # let buf = stream.recv_n(buf, local_address.len()).await?; -/// # assert_eq!(buf, local_address.as_bytes()); -/// # Ok(()) -/// # } -/// -/// // Simple supervisor that logs the error and stops the actor. -/// fn supervisor(err: io::Error) -> SupervisorStrategy { -/// error!("Encountered an error: {err}"); -/// SupervisorStrategy::Stop -/// } +/// use log::info; /// /// async fn actor(ctx: actor::Context, address: SocketAddr) -> io::Result<()> { /// // Create a new listener. @@ -162,6 +92,7 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// # return Ok(()); /// } /// } +/// # drop(actor); // Silence unused warning. /// ``` pub struct TcpListener { fd: AsyncFd, diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 6693b736c..8965ca444 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -28,16 +28,16 @@ //! //! use std::io; //! +//! use heph::actor; //! # use heph::messages::Terminate; -//! use heph::actor::{self, NewActor}; -//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph::supervisor::SupervisorStrategy; //! use heph_rt::net::{tcp, TcpStream}; //! use heph_rt::spawn::ActorOptions; //! use heph_rt::spawn::options::Priority; -//! use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; +//! use heph_rt::{Runtime, RuntimeRef, ThreadLocal}; //! use log::error; //! -//! fn main() -> Result<(), rt::Error> { +//! fn main() -> Result<(), heph_rt::Error> { //! // Create and start the Heph runtime. //! let mut runtime = Runtime::new()?; //! runtime.run_on_workers(setup)?; @@ -57,41 +57,23 @@ //! // overloading the system. //! let options = ActorOptions::default().with_priority(Priority::LOW); //! # let actor_ref = -//! runtime_ref.spawn_local(ServerSupervisor, server, (), options); +//! runtime_ref.spawn_local(server_supervisor, server, (), options); //! # actor_ref.try_send(Terminate).unwrap(); //! //! Ok(()) //! } //! //! /// Our supervisor for the TCP server. -//! #[derive(Copy, Clone, Debug)] -//! struct ServerSupervisor; -//! -//! impl Supervisor> for ServerSupervisor -//! where -//! // Trait bounds needed by `tcp::server::setup`. -//! S: Supervisor + Clone + 'static, -//! NA: NewActor + Clone + 'static, -//! { -//! 
fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { -//! match err { -//! // When we hit an error accepting a connection we'll drop the old -//! // server and create a new one. -//! tcp::server::Error::Accept(err) => { -//! error!("error accepting new connection: {err}"); -//! SupervisorStrategy::Restart(()) -//! } -//! // Async function never return an error creating a new actor. -//! tcp::server::Error::NewActor(_) => unreachable!(), +//! fn server_supervisor(err: tcp::server::Error) -> SupervisorStrategy<()> { +//! match err { +//! // When we hit an error accepting a connection we'll drop the old +//! // server and create a new one. +//! tcp::server::Error::Accept(err) => { +//! error!("error accepting new connection: {err}"); +//! SupervisorStrategy::Restart(()) //! } -//! } -//! -//! fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { -//! err -//! } -//! -//! fn second_restart_error(&mut self, err: !) { -//! err +//! // Async function never return an error creating a new actor. +//! tcp::server::Error::NewActor(_) => unreachable!(), //! } //! } //! @@ -109,27 +91,29 @@ //! ``` //! //! The following example shows how the actor can gracefully be shutdown by -//! sending it a [`Terminate`] message. +//! sending it a [`Terminate`] message. We'll use the same structure as we did +//! for the previous example, but change the `setup` function. //! //! ``` -//! #![feature(never_type)] -//! +//! # #![feature(never_type)] +//! # //! use std::io; //! +//! use heph::actor; //! use heph::messages::Terminate; -//! use heph::actor::{self, NewActor}; -//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! # use heph::supervisor::SupervisorStrategy; //! use heph_rt::net::{tcp, TcpStream}; //! use heph_rt::spawn::options::{ActorOptions, Priority}; -//! use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; +//! use heph_rt::RuntimeRef; +//! # use heph_rt::{Runtime, ThreadLocal}; //! use log::error; //! -//! fn main() -> Result<(), rt::Error> { -//! let mut runtime = Runtime::new()?; -//! runtime.run_on_workers(setup)?; -//! runtime.start() -//! } -//! +//! # fn main() -> Result<(), heph_rt::Error> { +//! # let mut runtime = Runtime::new()?; +//! # runtime.run_on_workers(setup)?; +//! # runtime.start() +//! # } +//! # //! fn setup(mut runtime_ref: RuntimeRef) -> io::Result<()> { //! // This uses the same supervisors as in the previous example, not shown here. //! @@ -138,7 +122,7 @@ //! let address = "127.0.0.1:7890".parse().unwrap(); //! let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; //! let options = ActorOptions::default().with_priority(Priority::LOW); -//! let server_ref = runtime_ref.spawn_local(ServerSupervisor, server, (), options); +//! let server_ref = runtime_ref.spawn_local(server_supervisor, server, (), options); //! //! // Because the server is just another actor we can send it messages. Here //! // we'll send it a terminate message so it will gracefully shutdown. @@ -147,45 +131,31 @@ //! Ok(()) //! } //! -//! # /// # Our supervisor for the TCP server. -//! # #[derive(Copy, Clone, Debug)] -//! # struct ServerSupervisor; +//! // NOTE: `main`, `server_supervisor`, `conn_supervisor` and `conn_actor` are the same as +//! // in the previous example. //! # -//! # impl Supervisor> for ServerSupervisor -//! # where -//! # S: Supervisor + Clone + 'static, -//! # NA: NewActor + Clone + 'static, -//! # { -//! # fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { -//! 
# match err { -//! # tcp::server::Error::Accept(err) => { -//! # error!("error accepting new connection: {err}"); -//! # SupervisorStrategy::Restart(()) -//! # } -//! # tcp::server::Error::NewActor(_) => unreachable!(), +//! # fn server_supervisor(err: tcp::server::Error) -> SupervisorStrategy<()> { +//! # match err { +//! # // When we hit an error accepting a connection we'll drop the old +//! # // server and create a new one. +//! # tcp::server::Error::Accept(err) => { +//! # error!("error accepting new connection: {err}"); +//! # SupervisorStrategy::Restart(()) //! # } -//! # } -//! # -//! # fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { -//! # err -//! # } -//! # -//! # fn second_restart_error(&mut self, err: !) { -//! # err +//! # // Async function never return an error creating a new actor. +//! # tcp::server::Error::NewActor(_) => unreachable!(), //! # } //! # } //! # -//! # /// # `conn_actor`'s supervisor. //! # fn conn_supervisor(err: io::Error) -> SupervisorStrategy { //! # error!("error handling connection: {err}"); //! # SupervisorStrategy::Stop //! # } //! # -//! /// The actor responsible for a single TCP stream. -//! async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { -//! stream.send_all("Hello World").await?; -//! Ok(()) -//! } +//! # async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { +//! # stream.send_all("Hello World").await?; +//! # Ok(()) +//! # } //! ``` //! //! This example is similar to the first example, but runs the TCP server actor @@ -197,9 +167,9 @@ //! //! use std::io; //! -//! use heph::actor::{self, NewActor}; +//! use heph::actor; //! # use heph::messages::Terminate; -//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph::supervisor::{SupervisorStrategy}; //! use heph_rt::net::{tcp, TcpStream}; //! use heph_rt::spawn::options::{ActorOptions, Priority}; //! use heph_rt::{self as rt, Runtime, ThreadSafe}; @@ -217,58 +187,39 @@ //! //! let options = ActorOptions::default().with_priority(Priority::LOW); //! # let actor_ref = -//! runtime.try_spawn(ServerSupervisor, server, (), options) +//! runtime.try_spawn(server_supervisor, server, (), options) //! .map_err(rt::Error::setup)?; //! # actor_ref.try_send(Terminate).unwrap(); //! //! runtime.start() //! } -//! -//! /// Our supervisor for the TCP server. -//! #[derive(Copy, Clone, Debug)] -//! struct ServerSupervisor; -//! -//! impl Supervisor> for ServerSupervisor -//! where -//! // Trait bounds needed by `tcp::server::setup` using a thread-safe actor. -//! S: Supervisor + Send + Sync + Clone + 'static, -//! NA: NewActor + Send + Sync + Clone + 'static, -//! NA::Actor: Send + Sync + 'static, -//! NA::Message: Send, -//! { -//! fn decide(&mut self, err: tcp::server::Error) -> SupervisorStrategy<()> { -//! match err { -//! // When we hit an error accepting a connection we'll drop the old -//! // server and create a new one. -//! tcp::server::Error::Accept(err) => { -//! error!("error accepting new connection: {err}"); -//! SupervisorStrategy::Restart(()) -//! } -//! // Async function never return an error creating a new actor. -//! tcp::server::Error::NewActor(_) => unreachable!(), -//! } -//! } -//! -//! fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { -//! err -//! } -//! -//! fn second_restart_error(&mut self, err: !) { -//! err -//! } -//! } -//! -//! /// `conn_actor`'s supervisor. -//! fn conn_supervisor(err: io::Error) -> SupervisorStrategy { -//! 
error!("error handling connection: {err}"); -//! SupervisorStrategy::Stop -//! } -//! +//! # +//! # /// Our supervisor for the TCP server. +//! # fn server_supervisor(err: tcp::server::Error) -> SupervisorStrategy<()> { +//! # match err { +//! # // When we hit an error accepting a connection we'll drop the old +//! # // server and create a new one. +//! # tcp::server::Error::Accept(err) => { +//! # error!("error accepting new connection: {err}"); +//! # SupervisorStrategy::Restart(()) +//! # } +//! # // Async function never return an error creating a new actor. +//! # tcp::server::Error::NewActor(_) => unreachable!(), +//! # } +//! # } +//! # +//! # /// `conn_actor`'s supervisor. +//! # fn conn_supervisor(err: io::Error) -> SupervisorStrategy { +//! # error!("error handling connection: {err}"); +//! # SupervisorStrategy::Stop +//! # } +//! # //! /// The actor responsible for a single TCP stream. //! async fn conn_actor(_: actor::Context, stream: TcpStream) -> io::Result<()> { //! stream.send_all("Hello World").await?; //! Ok(()) //! } +//! ``` use std::convert::TryFrom; use std::future::Future; diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index 44ef3cbd2..e24aaac22 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -29,8 +29,11 @@ use crate::net::{ /// use heph_rt::ThreadLocal; /// /// async fn actor(ctx: actor::Context) -> io::Result<()> { +/// // Connect to an IP address. /// let address = "127.0.0.1:12345".parse().unwrap(); /// let stream = TcpStream::connect(ctx.runtime_ref(), address).await?; +/// +/// // Send them a nice greeting. /// stream.send_all("Hello world!").await?; /// Ok(()) /// } diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index 011b5109a..b65e82cf3 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -48,37 +48,12 @@ pub use crate::net::{Connected, Unconnected}; /// use std::net::SocketAddr; /// use std::{io, str}; /// -/// use log::error; -/// +/// use heph::actor; /// use heph::messages::Terminate; -/// use heph::{actor, SupervisorStrategy}; +/// use heph_rt::ThreadLocal; /// use heph_rt::net::UdpSocket; -/// use heph_rt::spawn::ActorOptions; /// use heph_rt::util::either; -/// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; -/// -/// fn main() -> Result<(), rt::Error> { -/// std_logger::Config::logfmt().init(); -/// -/// let mut runtime = Runtime::new()?; -/// runtime.run_on_workers(setup)?; -/// runtime.start() -/// } -/// -/// fn setup(mut runtime: RuntimeRef) -> Result<(), !> { -/// let address = "127.0.0.1:7000".parse().unwrap(); -/// // Add our server actor. -/// runtime.spawn_local(supervisor, echo_server as fn(_, _) -> _, address, ActorOptions::default()); -/// // Add our client actor. -/// runtime.spawn_local(supervisor, client as fn(_, _) -> _, address, ActorOptions::default()); -/// Ok(()) -/// } -/// -/// /// Simple supervisor that logs the error and stops the actor. -/// fn supervisor(err: io::Error) -> SupervisorStrategy { -/// error!("Encountered an error: {err}"); -/// SupervisorStrategy::Stop -/// } +/// use log::info; /// /// /// Actor that will bind a UDP socket and waits for incoming packets and /// /// echos the message to standard out. 
@@ -102,23 +77,13 @@ pub use crate::net::{Connected, Unconnected}; /// }; /// /// match str::from_utf8(&buf) { -/// Ok(str) => println!("Got the following message: `{str}`, from {address}"), -/// Err(_) => println!("Got data: {buf:?}, from {address}"), +/// Ok(str) => info!("Got the following message: `{str}`, from {address}"), +/// Err(_) => info!("Got data: {buf:?}, from {address}"), /// } /// # return Ok(()); /// } /// } -/// -/// /// The client that will send a message to the server. -/// async fn client(ctx: actor::Context, server_address: SocketAddr) -> io::Result<()> { -/// let local_address = "127.0.0.1:7001".parse().unwrap(); -/// let socket = UdpSocket::bind(ctx.runtime_ref(), local_address).await? -/// .connect(server_address).await?; -/// -/// let (msg, n) = socket.send("Hello world").await?; -/// assert_eq!(n, msg.len()); -/// Ok(()) -/// } +/// # drop(echo_server); // Silence unused warnings. /// ``` pub struct UdpSocket { fd: AsyncFd, diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 14e8f6fea..43eed33d1 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -23,7 +23,7 @@ //! //! const DATA: &[u8] = b"Hello, world!"; //! -//! async fn process_handler(ctx: actor::Context) -> io::Result<()> +//! async fn actor(ctx: actor::Context) -> io::Result<()> //! where RT: rt::Access, //! { //! let (sender, receiver) = pipe::new(ctx.runtime_ref())?; @@ -40,7 +40,7 @@ //! # //! # let actor_ref = heph_rt::test::try_spawn( //! # heph_rt::test::PanicSupervisor, -//! # process_handler as fn(_) -> _, +//! # actor as fn(_) -> _, //! # (), //! # heph_rt::spawn::ActorOptions::default(), //! # ).unwrap(); From ec14e0c8ba775bafeb572dde44d6a195a82b4598 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 8 May 2023 13:59:07 +0200 Subject: [PATCH 132/177] Update A10 --- rt/src/io/mod.rs | 2 +- rt/src/net/uds/datagram.rs | 4 ++-- rt/src/net/uds/stream.rs | 4 ++-- rt/src/pipe.rs | 10 +++++----- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/rt/src/io/mod.rs b/rt/src/io/mod.rs index 62a5daa31..b550e60fb 100644 --- a/rt/src/io/mod.rs +++ b/rt/src/io/mod.rs @@ -58,7 +58,7 @@ macro_rules! stdio { ) => { #[doc = concat!("Create a new `", stringify!($name), "`.\n\n")] pub fn $fn(rt: &RT) -> $name { - $name(std::mem::ManuallyDrop::new(unsafe { a10::AsyncFd::new( + $name(std::mem::ManuallyDrop::new(unsafe { a10::AsyncFd::from_raw_fd( $fd, rt.submission_queue(), )})) diff --git a/rt/src/net/uds/datagram.rs b/rt/src/net/uds/datagram.rs index c17095e79..7d926d54e 100644 --- a/rt/src/net/uds/datagram.rs +++ b/rt/src/net/uds/datagram.rs @@ -82,11 +82,11 @@ impl UnixDatagram { let s1 = UnixDatagram::new(rt, unsafe { // SAFETY: the call to `pair` above ensures the file descriptors are // valid. - AsyncFd::new(s1.into_raw_fd(), rt.submission_queue()) + AsyncFd::from_raw_fd(s1.into_raw_fd(), rt.submission_queue()) })?; let s2 = UnixDatagram::new(rt, unsafe { // SAFETY: Same as above. - AsyncFd::new(s2.into_raw_fd(), rt.submission_queue()) + AsyncFd::from_raw_fd(s2.into_raw_fd(), rt.submission_queue()) })?; Ok((s1, s2)) } diff --git a/rt/src/net/uds/stream.rs b/rt/src/net/uds/stream.rs index 7f6f13ece..c329c1ddb 100644 --- a/rt/src/net/uds/stream.rs +++ b/rt/src/net/uds/stream.rs @@ -74,11 +74,11 @@ impl UnixStream { let s1 = UnixStream::new(rt, unsafe { // SAFETY: the call to `pair` above ensures the file descriptors are // valid. 
- AsyncFd::new(s1.into_raw_fd(), rt.submission_queue()) + AsyncFd::from_raw_fd(s1.into_raw_fd(), rt.submission_queue()) }); let s2 = UnixStream::new(rt, unsafe { // SAFETY: Same as above. - AsyncFd::new(s2.into_raw_fd(), rt.submission_queue()) + AsyncFd::from_raw_fd(s2.into_raw_fd(), rt.submission_queue()) }); Ok((s1, s2)) } diff --git a/rt/src/pipe.rs b/rt/src/pipe.rs index 43eed33d1..b6878308a 100644 --- a/rt/src/pipe.rs +++ b/rt/src/pipe.rs @@ -126,8 +126,8 @@ where let sq = rt.submission_queue(); // SAFETY: we just initialised the `fds` above. - let r = unsafe { AsyncFd::new(fds[0], sq.clone()) }; - let w = unsafe { AsyncFd::new(fds[1], sq) }; + let r = unsafe { AsyncFd::from_raw_fd(fds[0], sq.clone()) }; + let w = unsafe { AsyncFd::from_raw_fd(fds[1], sq) }; Ok((Sender { fd: w }, Receiver { fd: r })) } @@ -146,7 +146,7 @@ impl Sender { RT: Access, { // SAFETY: `ChildStdin` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stdin.into_raw_fd(), rt.submission_queue()) }; + let fd = unsafe { AsyncFd::from_raw_fd(stdin.into_raw_fd(), rt.submission_queue()) }; Ok(Sender { fd }) } @@ -206,7 +206,7 @@ impl Receiver { RT: Access, { // SAFETY: `ChildStdout` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stdout.into_raw_fd(), rt.submission_queue()) }; + let fd = unsafe { AsyncFd::from_raw_fd(stdout.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } @@ -216,7 +216,7 @@ impl Receiver { RT: Access, { // SAFETY: `ChildStderr` is guaranteed to be a valid file descriptor. - let fd = unsafe { AsyncFd::new(stderr.into_raw_fd(), rt.submission_queue()) }; + let fd = unsafe { AsyncFd::from_raw_fd(stderr.into_raw_fd(), rt.submission_queue()) }; Ok(Receiver { fd }) } From 9a74c23920b0eaf958f31ce0b69f95700f3af374 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 14 May 2023 14:03:04 +0200 Subject: [PATCH 133/177] Don't drop copy types Instead assign _ to ignore the unused variables warning. --- rt/src/local/waker.rs | 3 +-- rt/src/net/tcp/listener.rs | 4 ++-- rt/src/net/tcp/stream.rs | 6 +++--- rt/src/net/udp.rs | 2 +- rt/src/net/uds/listener.rs | 4 ++-- rt/src/net/uds/stream.rs | 6 +++--- rt/src/spawn/options.rs | 12 ++++++------ rt/src/trace.rs | 2 +- 8 files changed, 19 insertions(+), 20 deletions(-) diff --git a/rt/src/local/waker.rs b/rt/src/local/waker.rs index dd45ac0d1..dafceaed4 100644 --- a/rt/src/local/waker.rs +++ b/rt/src/local/waker.rs @@ -213,8 +213,7 @@ unsafe fn wake_by_ref(data: *const ()) { unsafe fn drop_wake_data(data: *const ()) { assert_copy::(); // Since the data is `Copy` we don't have to anything. - #[allow(clippy::drop_copy)] - drop(data); + _ = data; } #[cfg(test)] diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index 37f82c510..dd47973a3 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -51,7 +51,7 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// stream.send_all(ip).await?; /// Ok(()) /// } -/// # drop(actor); // Silence unused item warning. +/// # _ = actor; // Silence unused item warning. /// ``` /// /// Accepting multiple [`TcpStream`]s, using [`TcpListener::incoming`]. @@ -92,7 +92,7 @@ use crate::net::{convert_address, SockAddr, TcpStream}; /// # return Ok(()); /// } /// } -/// # drop(actor); // Silence unused warning. +/// # _ = actor; // Silence unused warning. 
/// ``` pub struct TcpListener { fd: AsyncFd, diff --git a/rt/src/net/tcp/stream.rs b/rt/src/net/tcp/stream.rs index e24aaac22..50bd7ef4e 100644 --- a/rt/src/net/tcp/stream.rs +++ b/rt/src/net/tcp/stream.rs @@ -37,7 +37,7 @@ use crate::net::{ /// stream.send_all("Hello world!").await?; /// Ok(()) /// } -/// # drop(actor); // Silent dead code warnings. +/// # _ = actor; // Silent dead code warnings. /// ``` #[derive(Debug)] pub struct TcpStream { @@ -196,7 +196,7 @@ impl TcpStream { /// Ok(()) /// } /// # - /// # drop(actor); // Silent dead code warnings. + /// # _ = actor; // Silent dead code warnings. /// ``` pub async fn recv(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), 0)).await @@ -231,7 +231,7 @@ impl TcpStream { /// Ok(()) /// } /// # - /// # drop(actor); // Silent dead code warnings. + /// # _ = actor; // Silent dead code warnings. /// ``` pub async fn recv_n(&self, buf: B, n: usize) -> io::Result { debug_assert!( diff --git a/rt/src/net/udp.rs b/rt/src/net/udp.rs index b65e82cf3..8f0a34403 100644 --- a/rt/src/net/udp.rs +++ b/rt/src/net/udp.rs @@ -83,7 +83,7 @@ pub use crate::net::{Connected, Unconnected}; /// # return Ok(()); /// } /// } -/// # drop(echo_server); // Silence unused warnings. +/// # _ = echo_server; // Silence unused warnings. /// ``` pub struct UdpSocket { fd: AsyncFd, diff --git a/rt/src/net/uds/listener.rs b/rt/src/net/uds/listener.rs index a68fe4c8a..c63bb96bf 100644 --- a/rt/src/net/uds/listener.rs +++ b/rt/src/net/uds/listener.rs @@ -50,7 +50,7 @@ use crate::net::uds::{UnixAddr, UnixStream}; /// } /// Ok(()) /// } -/// # drop(actor); // Silent dead code warnings. +/// # _ = actor; // Silent dead code warnings. /// ``` /// /// Accepting multiple [`UnixStream`]s, using [`UnixListener::incoming`]. @@ -91,7 +91,7 @@ use crate::net::uds::{UnixAddr, UnixStream}; /// } /// } /// } -/// # drop(actor); // Silent dead code warnings. +/// # _ = actor; // Silent dead code warnings. /// ``` pub struct UnixListener { fd: AsyncFd, diff --git a/rt/src/net/uds/stream.rs b/rt/src/net/uds/stream.rs index c329c1ddb..72a7c0bc7 100644 --- a/rt/src/net/uds/stream.rs +++ b/rt/src/net/uds/stream.rs @@ -38,7 +38,7 @@ use crate::net::{ /// stream.send_all("Hello world!").await?; /// Ok(()) /// } -/// # drop(actor); // Silent dead code warnings. +/// # _ = actor; // Silent dead code warnings. /// ``` #[derive(Debug)] pub struct UnixStream { @@ -194,7 +194,7 @@ impl UnixStream { /// Ok(()) /// } /// # - /// # drop(actor); // Silent dead code warnings. + /// # _ = actor; // Silent dead code warnings. /// ``` pub async fn recv(&self, buf: B) -> io::Result { Recv(self.fd.recv(BufWrapper(buf), 0)).await @@ -229,7 +229,7 @@ impl UnixStream { /// Ok(()) /// } /// # - /// # drop(actor); // Silent dead code warnings. + /// # _ = actor; // Silent dead code warnings. /// ``` pub async fn recv_n(&self, buf: B, n: usize) -> io::Result { debug_assert!( diff --git a/rt/src/spawn/options.rs b/rt/src/spawn/options.rs index afcd34de4..c82ccedd7 100644 --- a/rt/src/spawn/options.rs +++ b/rt/src/spawn/options.rs @@ -23,7 +23,7 @@ use std::time::Duration; /// use heph_rt::spawn::ActorOptions; /// /// let opts = ActorOptions::default(); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` /// /// Giving an actor a high priority. 
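
For context on the substitution this patch makes throughout the code base: calling `drop` on a `Copy` value is a no-op (the value is copied into `drop` and the original stays usable), which is exactly what the `clippy::drop_copy` lint flags; assigning to `_` discards the value and silences the unused warning without suggesting that any destruction happens. A standalone illustration, not part of the diff (the `Token` type here is made up):

    #[derive(Clone, Copy)]
    struct Token(u32);

    fn example() {
        let token = Token(1);
        // drop(token); // no-op for a `Copy` type; triggers clippy::drop_copy
        _ = token; // explicitly discard the value, silencing the unused warning
    }
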
@@ -32,7 +32,7 @@ use std::time::Duration; /// use heph_rt::spawn::options::{ActorOptions, Priority}; /// /// let opts = ActorOptions::default().with_priority(Priority::HIGH); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` #[derive(Clone, Debug, Default)] #[must_use] @@ -150,7 +150,7 @@ fn priority_duration_multiplication() { /// use heph_rt::spawn::SyncActorOptions; /// /// let opts = SyncActorOptions::default(); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` /// /// Setting the name of the thread that runs the synchronous actor. @@ -159,7 +159,7 @@ fn priority_duration_multiplication() { /// use heph_rt::spawn::SyncActorOptions; /// /// let opts = SyncActorOptions::default().with_name("My sync actor".to_owned()); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` #[derive(Debug, Default)] #[must_use] @@ -200,7 +200,7 @@ impl SyncActorOptions { /// use heph_rt::spawn::FutureOptions; /// /// let opts = FutureOptions::default(); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` /// /// Giving the future a high priority. @@ -209,7 +209,7 @@ impl SyncActorOptions { /// use heph_rt::spawn::options::{FutureOptions, Priority}; /// /// let opts = FutureOptions::default().with_priority(Priority::HIGH); -/// # drop(opts); // Silence unused variable warning. +/// # _ = opts; // Silence unused variable warning. /// ``` #[derive(Clone, Debug, Default)] #[must_use] diff --git a/rt/src/trace.rs b/rt/src/trace.rs index e0acb9b2f..8fb4d9d8d 100644 --- a/rt/src/trace.rs +++ b/rt/src/trace.rs @@ -121,7 +121,7 @@ const RT_SUBSTREAM_ID: u64 = 0; /// } /// } /// -/// # drop(actor); +/// # _ = actor; /// ``` pub trait Trace { /// Start timing an event if tracing is enabled. From e1941f8cfed7cd7b98a6c04f9f5f1ad7574b654a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 14 May 2023 14:12:22 +0200 Subject: [PATCH 134/177] Reduce a10::Ring sizes The previous sizes we chosen somewhat arbitrarily. For the coordinator we don't need to many events, so 32 should be enough, but this might become too small with a large number of worker threads, so maybe in the future it should be based on the amount of workers. For the worker we should poll the ring frequently enough that 64 is sufficient. Finally for the shared ring we use 128, which should also be polled frequently enough. --- rt/src/setup.rs | 2 +- rt/src/shared/mod.rs | 4 ++-- rt/src/worker.rs | 6 ++---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/rt/src/setup.rs b/rt/src/setup.rs index bbb6ca4de..4e00852f4 100644 --- a/rt/src/setup.rs +++ b/rt/src/setup.rs @@ -148,7 +148,7 @@ impl Setup { let name = name.unwrap_or_else(default_app_name).into_boxed_str(); debug!(name = name, workers = threads; "building Heph runtime"); - let coordinator_ring = a10::Ring::new(512).map_err(Error::init_coordinator)?; + let coordinator_ring = a10::Ring::new(32).map_err(Error::init_coordinator)?; // Setup the worker threads. let timing = trace::start(&trace_log); diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 1d557a25c..2a1e4012a 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -110,7 +110,7 @@ impl RuntimeInternals { /// Setup new runtime internals. 
pub(crate) fn setup(coordinator_sq: &a10::SubmissionQueue) -> io::Result { let poll = Poll::new()?; - let ring = a10::Ring::config(512) + let ring = a10::Ring::config(128) .attach_queue(coordinator_sq) .build()?; Ok(RuntimeSetup { poll, ring }) @@ -120,7 +120,7 @@ impl RuntimeInternals { #[cfg(any(test, feature = "test"))] pub(crate) fn test_setup() -> io::Result { let poll = Poll::new()?; - let ring = a10::Ring::new(512)?; + let ring = a10::Ring::new(128)?; Ok(RuntimeSetup { poll, ring }) } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 8e7fdb8c3..04e53cdd3 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -64,9 +64,7 @@ pub(crate) fn setup( coordinator_sq: &a10::SubmissionQueue, ) -> io::Result<(WorkerSetup, &'static ThreadWaker)> { let poll = Poll::new()?; - let ring = a10::Ring::config(512) - .attach_queue(coordinator_sq) - .build()?; + let ring = a10::Ring::config(64).attach_queue(coordinator_sq).build()?; // Setup the waking mechanism. let (waker_sender, waker_events) = crossbeam_channel::unbounded(); @@ -285,7 +283,7 @@ impl Worker { mut receiver: rt::channel::Receiver, ) -> io::Result { let poll = Poll::new()?; - let ring = a10::Ring::config(512) + let ring = a10::Ring::config(64) .attach_queue(shared_internals.submission_queue()) .build()?; From 465846d2486daef74f337e0ddc26cb08f64e66a2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 23 May 2023 19:32:00 +0200 Subject: [PATCH 135/177] Add BufMut::extend_from_slice Convenience method to copy some bytes to the buffer. --- rt/src/io/buf.rs | 14 ++++++++++++++ rt/tests/functional/io.rs | 11 +++++++++++ 2 files changed, 25 insertions(+) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 57aeddad2..7cf484db6 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -68,6 +68,20 @@ pub unsafe trait BufMut: 'static { /// [`TcpStream::recv_n`]: crate::net::TcpStream::recv_n unsafe fn update_length(&mut self, n: usize); + /// Extend the buffer with `bytes`, returns the number of bytes copied. + fn extend_from_slice(&mut self, bytes: &[u8]) -> usize { + let (ptr, capacity) = unsafe { self.parts_mut() }; + let len = min(bytes.len(), capacity); + // SAFETY: since we have mutable access to `self` we know that `bytes` + // can point to (part of) the same buffer as that would be UB already. + // Furthermore we checked that the length doesn't overrun the buffer and + // `parts_mut` impl must ensure that the `ptr` is valid. + unsafe { ptr.copy_from_nonoverlapping(bytes.as_ptr(), len) }; + // SAFETY: just written the bytes in the call above. + unsafe { self.update_length(len) }; + len + } + /// Returns the length of the buffer as returned by [`parts_mut`]. 
/// /// [`parts_mut`]: BufMut::parts_mut diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 25545157c..760fb4af4 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -81,6 +81,17 @@ fn test_buf_mut(mut buf: B) { assert!(!buf.has_spare_capacity()); } +#[test] +fn buf_mut_extend_from_slice() { + let mut buf = Vec::with_capacity(DATA.len() + DATA2.len() + 2); + + BufMut::extend_from_slice(&mut buf, DATA); + assert_eq!(buf, DATA); + + BufMut::extend_from_slice(&mut buf, DATA2); + assert_eq!(&buf[DATA.len()..], DATA2); +} + #[test] fn buf_for_vec() { test_buf(Vec::from(DATA)) From 27ecdbe97443481fda9a4b7f0df25efed6bd48b1 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 24 May 2023 14:57:27 +0200 Subject: [PATCH 136/177] Add BufMutSlice::extend_from_slice Convenience method to copy some bytes to the buffers, similar to BufMut::extend_from_slice. --- rt/src/io/buf.rs | 46 ++++++++++++++++++++++++++++++++------- rt/tests/functional/io.rs | 21 ++++++++++++++++-- 2 files changed, 57 insertions(+), 10 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 7cf484db6..2aa1b8ae2 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -71,15 +71,11 @@ pub unsafe trait BufMut: 'static { /// Extend the buffer with `bytes`, returns the number of bytes copied. fn extend_from_slice(&mut self, bytes: &[u8]) -> usize { let (ptr, capacity) = unsafe { self.parts_mut() }; - let len = min(bytes.len(), capacity); - // SAFETY: since we have mutable access to `self` we know that `bytes` - // can point to (part of) the same buffer as that would be UB already. - // Furthermore we checked that the length doesn't overrun the buffer and - // `parts_mut` impl must ensure that the `ptr` is valid. - unsafe { ptr.copy_from_nonoverlapping(bytes.as_ptr(), len) }; + // SAFETY: `parts_mut` requirements are the same for `copy_bytes`. + let written = unsafe { copy_bytes(ptr, capacity, bytes) }; // SAFETY: just written the bytes in the call above. - unsafe { self.update_length(len) }; - len + unsafe { self.update_length(written) }; + written } /// Returns the length of the buffer as returned by [`parts_mut`]. @@ -106,6 +102,23 @@ pub unsafe trait BufMut: 'static { } } +/// Copies bytes from `src` to `dst`, copies up to `min(dst_len, src.len())`, +/// i.e. it won't write beyond `dst` or read beyond `src` bounds. Returns the +/// number of bytes copied. +/// +/// # Safety +/// +/// Caller must ensure that `dst` and `dst_len` are valid for writing. +unsafe fn copy_bytes(dst: *mut u8, dst_len: usize, src: &[u8]) -> usize { + let len = min(src.len(), dst_len); + // SAFETY: since we have mutable access to `self` we know that `bytes` + // can point to (part of) the same buffer as that would be UB already. + // Furthermore we checked that the length doesn't overrun the buffer and + // `parts_mut` impl must ensure that the `ptr` is valid. + unsafe { dst.copy_from_nonoverlapping(src.as_ptr(), len) }; + len +} + /// The implementation for `Vec` only uses the uninitialised capacity of the /// vector. In other words the bytes currently in the vector remain untouched. /// @@ -183,6 +196,23 @@ pub trait BufMutSlice: private::BufMutSlice + 'static { fn has_spare_capacity(&self) -> bool { self.total_spare_capacity() != 0 } + + /// Extend the buffer with `bytes`, returns the number of bytes copied. 
+ fn extend_from_slice(&mut self, bytes: &[u8]) -> usize { + let mut left = bytes; + for iovec in unsafe { self.as_iovecs_mut() } { + // SAFETY: `as_iovecs_mut` requirements are the same for `copy_bytes`. + let n = unsafe { copy_bytes(iovec.iov_base.cast(), iovec.iov_len, left) }; + left = &left[n..]; + if left.is_empty() { + break; + } + } + let written = bytes.len() - left.len(); + // SAFETY: just written the bytes above. + unsafe { self.update_length(written) }; + written + } } // NOTE: see the `private` module below for the actual trait. diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 760fb4af4..2a6ce0e86 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -85,10 +85,12 @@ fn test_buf_mut(mut buf: B) { fn buf_mut_extend_from_slice() { let mut buf = Vec::with_capacity(DATA.len() + DATA2.len() + 2); - BufMut::extend_from_slice(&mut buf, DATA); + let n = BufMut::extend_from_slice(&mut buf, DATA); + assert_eq!(n, DATA.len()); assert_eq!(buf, DATA); - BufMut::extend_from_slice(&mut buf, DATA2); + let n = BufMut::extend_from_slice(&mut buf, DATA2); + assert_eq!(n, DATA2.len()); assert_eq!(&buf[DATA.len()..], DATA2); } @@ -179,3 +181,18 @@ fn buf_mut_slice_for_tuple() { assert_eq!(bufs.total_spare_capacity(), 0); assert!(!bufs.has_spare_capacity()); } + +#[test] +fn buf_mut_slice_extend_from_slice() { + let buf1 = Vec::with_capacity(DATA.len()); + let buf2 = Vec::with_capacity(DATA2.len() + 1); + let mut bufs = [buf1, buf2]; + + let n = BufMutSlice::extend_from_slice(&mut bufs, DATA); + assert_eq!(n, DATA.len()); + assert_eq!(bufs[0], DATA); + + let n = BufMutSlice::extend_from_slice(&mut bufs, DATA2); + assert_eq!(n, DATA2.len()); + assert_eq!(bufs[1], DATA2); +} From 7d50fbe1190405de1f4b2a76a9bc379bf44c3dc5 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 24 May 2023 15:26:47 +0200 Subject: [PATCH 137/177] Add BufSlice::as_io_slices Returns the buffers as IoSlice. --- rt/src/io/buf.rs | 14 +++++++++++++- rt/tests/functional/io.rs | 27 ++++++++++++++++++++------- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 2aa1b8ae2..9e235996b 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -1,7 +1,9 @@ //! Buffers. use std::cmp::min; +use std::io::IoSlice; use std::mem::MaybeUninit; +use std::slice; use std::sync::Arc; /// Trait that defines the behaviour of buffers used in reading, which requires @@ -379,7 +381,17 @@ unsafe impl Buf for &'static str { /// /// This has the same safety requirements as [`Buf`], but then for all buffers /// used. -pub trait BufSlice: private::BufSlice + 'static {} +pub trait BufSlice: private::BufSlice + 'static { + /// Returns the reabable buffer as `IoSlice` structures. + fn as_io_slices<'a>(&'a self) -> [IoSlice<'a>; N] { + // SAFETY: `as_iovecs` requires the returned iovec to be valid. + unsafe { + self.as_iovecs().map(|iovec| { + IoSlice::new(slice::from_raw_parts(iovec.iov_base.cast(), iovec.iov_len)) + }) + } + } +} // NOTE: see the `private` module below for the actual trait. 
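
The `[IoSlice; N]` array returned by `as_io_slices` can be handed directly to standard vectored-write APIs. A small sketch of that interop (the buffers and the use of `stdout` are arbitrary; only the `as_io_slices` method added above is new in this patch, the `heph_rt::io::BufSlice` trait already existed):

    use std::io::{self, Write};

    use heph_rt::io::BufSlice;

    fn example() -> io::Result<()> {
        // Two 'static byte slices together implement `BufSlice<2>`.
        let bufs: [&'static [u8]; 2] = [b"Hello ", b"world!\n"];
        let slices = bufs.as_io_slices();
        // `write_vectored` takes `&[IoSlice<'_>]`, so the array coerces directly.
        let _ = io::stdout().write_vectored(&slices)?;
        Ok(())
    }
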
diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 2a6ce0e86..720ad5d14 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -4,7 +4,7 @@ use std::cmp::min; use std::ptr; use std::sync::Arc; -use heph_rt::io::{Buf, BufMut, BufMutSlice}; +use heph_rt::io::{Buf, BufMut, BufMutSlice, BufSlice}; const DATA: &[u8] = b"Hello world!"; const DATA2: &[u8] = b"Hello mars."; @@ -124,12 +124,6 @@ fn buf_for_static_str() { test_buf(DATA) } -fn test_buf(buf: B) { - let (ptr, len) = unsafe { buf.parts() }; - let got = unsafe { std::slice::from_raw_parts(ptr, len) }; - assert_eq!(got, DATA); -} - #[test] fn buf_for_limited() { test_buf(DATA.limit(DATA.len())); // Same length. @@ -140,6 +134,25 @@ fn buf_for_limited() { test_buf(DATA.limit(DATA.len())); // Smaller. } +fn test_buf(buf: B) { + let (ptr, len) = unsafe { buf.parts() }; + let got = unsafe { std::slice::from_raw_parts(ptr, len) }; + assert_eq!(got, DATA); +} + +#[test] +fn buf_slice_as_io_slices() { + test_buf_slice([DATA, DATA2]); + test_buf_slice([Vec::from(DATA), Vec::from(DATA2)]); + test_buf_slice((DATA, Vec::from(DATA2))); +} + +fn test_buf_slice>(buf: B) { + let [got0, got1] = buf.as_io_slices(); + assert_eq!(&*got0, DATA); + assert_eq!(&*got1, DATA2); +} + #[test] fn buf_mut_slice_for_array() { let mut bufs = [Vec::with_capacity(1), Vec::with_capacity(DATA.len())]; From 631f0f764564b23ec03ebdf35bc80bc85b3215ed Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 24 May 2023 15:36:40 +0200 Subject: [PATCH 138/177] Add BufSlice::limit Limits the amount of bytes returned. --- rt/src/io/buf.rs | 32 ++++++++++++++++++++++++++++++++ rt/tests/functional/io.rs | 11 +++++++++++ 2 files changed, 43 insertions(+) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 9e235996b..e49ecb5f9 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -391,6 +391,19 @@ pub trait BufSlice: private::BufSlice + 'static { }) } } + + /// Wrap the buffer in `Limited`, which limits the amount of bytes used to + /// `limit`. + /// + /// [`Limited::into_inner`] can be used to retrieve the buffer again, + /// or a mutable reference to the buffer can be used and the limited buffer + /// be dropped after usage. + fn limit(self, limit: usize) -> Limited + where + Self: Sized, + { + Limited { buf: self, limit } + } } // NOTE: see the `private` module below for the actual trait. @@ -684,3 +697,22 @@ unsafe impl Buf for Limited { (ptr, min(size, self.limit)) } } + +impl, const N: usize> BufSlice for Limited {} + +unsafe impl, const N: usize> private::BufSlice for Limited { + unsafe fn as_iovecs(&self) -> [libc::iovec; N] { + let mut total_len = 0; + let mut iovecs = unsafe { self.buf.as_iovecs() }; + for iovec in &mut iovecs { + let n = total_len + iovec.iov_len; + if n > self.limit { + iovec.iov_len = self.limit - total_len; + total_len = self.limit; + } else { + total_len = n; + } + } + iovecs + } +} diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 720ad5d14..61216f4af 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -147,6 +147,17 @@ fn buf_slice_as_io_slices() { test_buf_slice((DATA, Vec::from(DATA2))); } +#[test] +fn buf_slice_for_limited() { + test_buf_slice([DATA, DATA2].limit(DATA.len() + DATA2.len())); // Same length. + test_buf_slice([DATA, DATA2].limit(DATA.len() + DATA2.len() + 1)); // Larger. 
+ let buf0 = Vec::from(DATA); + let mut buf1 = Vec::with_capacity(30); + buf1.extend_from_slice(DATA2); + buf1.resize(DATA2.len() * 2, 0); + test_buf_slice([buf0, buf1].limit(DATA.len() + DATA2.len())); // Smaller. +} + fn test_buf_slice>(buf: B) { let [got0, got1] = buf.as_io_slices(); assert_eq!(&*got0, DATA); From 56868fe040649f126f71638d114323a7fb6f739a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 24 May 2023 16:00:06 +0200 Subject: [PATCH 139/177] Add BufMutSlice::limit Limits the amount of bytes used. --- rt/src/io/buf.rs | 45 +++++++++++++++++++++++++++++++++++ rt/tests/functional/io.rs | 50 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index e49ecb5f9..1c48a31d9 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -215,6 +215,19 @@ pub trait BufMutSlice: private::BufMutSlice + 'static { unsafe { self.update_length(written) }; written } + + /// Wrap the buffer in `Limited`, which limits the amount of bytes used to + /// `limit`. + /// + /// [`Limited::into_inner`] can be used to retrieve the buffer again, + /// or a mutable reference to the buffer can be used and the limited buffer + /// be dropped after usage. + fn limit(self, limit: usize) -> Limited + where + Self: Sized, + { + Limited { buf: self, limit } + } } // NOTE: see the `private` module below for the actual trait. @@ -698,6 +711,38 @@ unsafe impl Buf for Limited { } } +impl, const N: usize> BufMutSlice for Limited { + fn total_spare_capacity(&self) -> usize { + min(self.limit, self.buf.total_spare_capacity()) + } + + fn has_spare_capacity(&self) -> bool { + self.limit != 0 && self.buf.has_spare_capacity() + } +} + +unsafe impl, const N: usize> private::BufMutSlice for Limited { + unsafe fn as_iovecs_mut(&mut self) -> [libc::iovec; N] { + let mut total_len = 0; + let mut iovecs = unsafe { self.buf.as_iovecs_mut() }; + for iovec in &mut iovecs { + let n = total_len + iovec.iov_len; + if n > self.limit { + iovec.iov_len = self.limit - total_len; + total_len = self.limit; + } else { + total_len = n; + } + } + iovecs + } + + unsafe fn update_length(&mut self, n: usize) { + self.limit -= n; // For use in read N bytes kind of calls. + self.buf.update_length(n); + } +} + impl, const N: usize> BufSlice for Limited {} unsafe impl, const N: usize> private::BufSlice for Limited { diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index 61216f4af..e56fa3c89 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -155,7 +155,7 @@ fn buf_slice_for_limited() { let mut buf1 = Vec::with_capacity(30); buf1.extend_from_slice(DATA2); buf1.resize(DATA2.len() * 2, 0); - test_buf_slice([buf0, buf1].limit(DATA.len() + DATA2.len())); // Smaller. + test_buf_slice(BufSlice::limit([buf0, buf1], DATA.len() + DATA2.len())); // Smaller. } fn test_buf_slice>(buf: B) { @@ -220,3 +220,51 @@ fn buf_mut_slice_extend_from_slice() { assert_eq!(n, DATA2.len()); assert_eq!(bufs[1], DATA2); } + +#[test] +fn buf_mut_slice_for_limited() { + const TARGET_LENGTH: usize = DATA.len() + DATA2.len(); + // Same length. + test_buf_mut_slice( + BufMutSlice::limit( + [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA2.len()), + ], + TARGET_LENGTH, + ), + TARGET_LENGTH, + ); + // Larger. + test_buf_mut_slice( + BufMutSlice::limit( + [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA2.len()), + ], + TARGET_LENGTH + 1, + ), + TARGET_LENGTH, + ); + // Smaller. 
+ test_buf_mut_slice( + BufMutSlice::limit( + [ + Vec::with_capacity(DATA.len()), + Vec::with_capacity(DATA2.len()), + ], + TARGET_LENGTH - 1, + ), + TARGET_LENGTH - 1, + ); +} + +fn test_buf_mut_slice>(mut bufs: B, expected_limit: usize) { + let total_capacity = bufs.total_spare_capacity(); + assert_eq!(total_capacity, expected_limit); + let n = bufs.extend_from_slice(DATA); + let m = bufs.extend_from_slice(DATA2); + assert!(total_capacity <= n + m); + assert!(!bufs.has_spare_capacity()); + assert!(bufs.total_spare_capacity() == 0); +} From 86b30a1d016e835adc128750925da495be846bbc Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 25 May 2023 01:06:32 +0200 Subject: [PATCH 140/177] Implement Buf for Cow For both byte slices and strings. --- rt/src/io/buf.rs | 17 +++++++++++++++++ rt/tests/functional/io.rs | 13 +++++++++++++ 2 files changed, 30 insertions(+) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 1c48a31d9..3b3a20439 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -1,5 +1,6 @@ //! Buffers. +use std::borrow::Cow; use std::cmp::min; use std::io::IoSlice; use std::mem::MaybeUninit; @@ -379,6 +380,22 @@ unsafe impl Buf for &'static str { } } +// SAFETY: mix of `Vec` and `&'static [u8]`, see those Buf implementations +// for safety reasoning. +unsafe impl Buf for Cow<'static, [u8]> { + unsafe fn parts(&self) -> (*const u8, usize) { + (self.as_ptr(), self.len()) + } +} + +// SAFETY: mix of `String` and `&'static str`, see those Buf implementations for +// safety reasoning. +unsafe impl Buf for Cow<'static, str> { + unsafe fn parts(&self) -> (*const u8, usize) { + (self.as_ptr(), self.len()) + } +} + /// Trait that defines the behaviour of buffers used in writing using vectored /// I/O, which requires read only access. /// diff --git a/rt/tests/functional/io.rs b/rt/tests/functional/io.rs index e56fa3c89..92a8529bf 100644 --- a/rt/tests/functional/io.rs +++ b/rt/tests/functional/io.rs @@ -1,5 +1,6 @@ //! Tests for the io module. +use std::borrow::Cow; use std::cmp::min; use std::ptr; use std::sync::Arc; @@ -124,6 +125,18 @@ fn buf_for_static_str() { test_buf(DATA) } +#[test] +fn buf_for_static_cow() { + test_buf::>(Cow::Borrowed(DATA)); + test_buf::>(Cow::Owned(Vec::from(DATA))); +} + +#[test] +fn buf_for_static_cow_str() { + test_buf::>(Cow::Borrowed("Hello world!")); + test_buf::>(Cow::Owned(String::from("Hello world!"))); +} + #[test] fn buf_for_limited() { test_buf(DATA.limit(DATA.len())); // Same length. From 2cec4e9ddf3ef65f76996b9fa7cb8e5dd10e05ed Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 25 May 2023 15:04:15 +0200 Subject: [PATCH 141/177] Update Heph-http This updates the entire heph-http code base to be up to date with the v0.5 API and makes it able to compile with the latest nightly. It updates all dependencies and adds heph-rt as that used be part of the heph crate, but no longer. This makes a number of big changes to make everything compile again. But since they're mostly interconnected it's hard to split them up into small commits, so I'm just not going to bother (sorry future me). Changes to the body module ========================== This changes the design of the Body trait to take ownership of the HTTP head buffer passed by the caller. This is required by io_uring, but also makes the lifetime much simpler, which is a nice benefit. The OneshotBody now accepts a generic type B that must implement the Buf trait. So we can use owned or (statically) borrowed buffers. 
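
To illustrate that last point, both `&'static str` and `String` implement heph-rt's `Buf`, so either can back a one-shot body. Roughly (a sketch only: the `OneshotBody::new` constructor name is an assumption, the point is merely that any `B: Buf` is accepted):

    use heph_http::body::OneshotBody; // path as per this crate's body module

    fn _example() {
        // Borrowed 'static data: no copying or allocation needed.
        let _borrowed = OneshotBody::new("pong");
        // An owned buffer goes through the same constructor.
        let _owned = OneshotBody::new(String::from("pong"));
    }
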
The StreamingBody API is also changed, from Stream to AsyncIterator (to update rustc version), but the item is changed from io::Result to B: Buf. The ChunkedBody API saw the same changes as StreamingBody. Removed the FileBody type as sendfile isn't supported by io_uring. Changes to the client module ============================ The Client::connect method now accept a &RT instead of an actor::Context, matching the changes made to TcpStream::connect and similar API. Furthermore the method is now an asynchronous function instead of return the Connect future (which is removed). The Client::{get, request, read_response} methods now return a single Result and the I/O errors are moved into the new ResponseError::IO variant. Adds Body::chunk_size_hint and is_chunked API. Removes the Body::read_all method and replaces them with Body::recv and recv_vectored, which are similar to the TcpStream::recv(_vectored) methods. Removes the Copy, Clone and Eq implementations from ResponseError because I added the IO variant to hold I/O errors. This was required to allow the Client methods to return a single result. Changes to the server module ============================ The Server type is now a type alias for heph_rt::nettcp::server::Setup. The ArgMap struct has been renamed HttpNewActor, which is slighly better (but still not great). The actor start by the HTTP server will no longer received the socket address as that is no longer returned by the underlying TCP server. The Connection::next_request method now returns a single error, the I/O error is moved into a RequestError::IO variant, same as we did for client::ResponseError. The Body::{recv, recv_vectored} methods are now async functions. --- Cargo.toml | 4 +- http/Cargo.toml | 17 +- http/src/body.rs | 597 +++---------- http/src/client.rs | 630 +++++++------ http/src/head/method.rs | 23 +- http/src/head/mod.rs | 4 +- http/src/lib.rs | 15 +- http/src/route.rs | 1 + http/src/server.rs | 1323 ++++++++++------------------ http/tests/functional.rs | 2 +- http/tests/functional/body.rs | 61 +- http/tests/functional/client.rs | 209 +++-- http/tests/functional/message.rs | 16 +- http/tests/functional/route.rs | 54 +- http/tests/functional/server.rs | 115 ++- http/tests/functional/transform.rs | 16 +- remote/Cargo.toml | 2 +- rt/Cargo.toml | 2 +- 18 files changed, 1159 insertions(+), 1932 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7bd9bd974..e76fc1298 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ test = ["getrandom"] [dependencies] heph-inbox = { version = "0.2.3", path = "./inbox", default-features = false } -log = { version = "0.4.16", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } +log = { version = "0.4.17", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } # Optional dependencies, enabled by features. # Required by the `test` feature. @@ -43,7 +43,7 @@ required-features = ["test"] [workspace] members = [ - #"http", # Stuck on 2021-11-01, also enable in Makefile. + "http", "inbox", "remote", "rt", diff --git a/http/Cargo.toml b/http/Cargo.toml index af7a0bdec..42cc12331 100644 --- a/http/Cargo.toml +++ b/http/Cargo.toml @@ -2,6 +2,7 @@ name = "heph-http" description = "Heph-HTTP is a HTTP library build on top of Heph." version = "0.1.0" +publish = false # In development. 
authors = ["Thomas de Zeeuw "] license = "MIT" documentation = "https://docs.rs/heph-http" @@ -13,11 +14,12 @@ include = ["/Cargo.toml", "/src/**/*.rs", "/README.md", "/LICENSE"] edition = "2021" [dependencies] -heph = { version = "0.5.0", path = "../", default-features = false, features = ["runtime"] } -httparse = { version = "1.5.1", default-features = false } -httpdate = { version = "1.0.0", default-features = false } -log = { version = "0.4.8", default-features = false } -itoa = { version = "0.4.7", default-features = false } +heph = { version = "0.5.0", default-features = false, path = "../" } +heph-rt = { version = "0.5.0", default-features = false, path = "../rt" } +httparse = { version = "1.8.0", default-features = false } +httpdate = { version = "1.0.2", default-features = false } +log = { version = "0.4.17", default-features = false } +itoa = { version = "1.0.6", default-features = false } [dev-dependencies] std-logger = { version = "0.5.0", default-features = false, features = ["log-panic", "nightly"] } @@ -25,3 +27,8 @@ std-logger = { version = "0.5.0", default-features = false, features = ["log-pan [dev-dependencies.heph] path = "../" features = ["test"] + + +[dev-dependencies.heph-rt] +path = "../rt" +features = ["test"] diff --git a/http/src/body.rs b/http/src/body.rs index 0502b6b23..fa15401a8 100644 --- a/http/src/body.rs +++ b/http/src/body.rs @@ -2,12 +2,16 @@ //! //! See the [`Body`] trait. -use std::io::{self, IoSlice}; -use std::marker::PhantomData; -use std::num::NonZeroUsize; -use std::stream::Stream; +use std::async_iter::AsyncIterator; +use std::future::Future; +use std::io; -use heph::net::tcp::stream::{FileSend, SendAll, TcpStream}; +use heph_rt::io::Buf; +use heph_rt::net::TcpStream; +use heph_rt::util::next; + +/// Last chunk of a body in a chunked response. +const LAST_CHUNK: &[u8] = b"0\r\n\r\n"; /// Trait that defines a HTTP body. /// @@ -15,13 +19,11 @@ use heph::net::tcp::stream::{FileSend, SendAll, TcpStream}; /// the following types: /// /// * [`EmptyBody`]: no/empty body. -/// * [`OneshotBody`]: body consisting of a single slice of bytes (`&[u8]`). +/// * [`OneshotBody`]: body consisting of a single chunk of bytes. /// * [`StreamingBody`]: body that is streaming, with a known length. /// * [`ChunkedBody`]: body that is streaming, with a *un*known length. This /// uses HTTP chunked encoding to transfer the body. -/// * [`FileBody`]: uses a file as body, sending it's content using the -/// `sendfile(2)` system call. -pub trait Body<'a>: PrivateBody<'a> { +pub trait Body: PrivateBody { /// Length of the body, or the body will be chunked. fn length(&self) -> BodyLength; } @@ -39,595 +41,198 @@ pub enum BodyLength { mod private { use std::future::Future; - use std::io::{self, IoSlice}; - use std::num::NonZeroUsize; - use std::pin::Pin; - use std::stream::Stream; - use std::task::{self, Poll}; - - use heph::net::tcp::stream::FileSend; - use heph::net::TcpStream; + use std::io; - const LAST_CHUNK: &[u8] = b"0\r\n\r\n"; + use heph_rt::net::TcpStream; /// Private extention of [`Body`]. /// /// [`Body`]: super::Body - pub trait PrivateBody<'body> { - type WriteBody<'stream, 'head>: Future>; + pub trait PrivateBody { + /// [`Future`] behind [`PrivateBody::write_message`]. + type WriteFuture<'stream>: Future>> + 'stream; - /// Write a HTTP message to `stream`. - /// - /// The `http_head` buffer contains the HTTP header (i.e. request/status - /// line and all headers), this must still be written to the `stream` - /// also. 
- fn write_message<'stream, 'head>( + /// Write an HTTP message to `stream` using the `http_head` as head. + /// Expects the `http_head` buffer to be returned. + fn write_message<'stream>( self, stream: &'stream mut TcpStream, - http_head: &'head [u8], - ) -> Self::WriteBody<'stream, 'head> - where - 'body: 'head; - } - - /// See [`super::OneshotBody`]. - #[derive(Debug)] - pub struct SendOneshotBody<'s, 'b> { - pub(super) stream: &'s mut TcpStream, - // HTTP head and body. - pub(super) bufs: [IoSlice<'b>; 2], - } - - impl<'s, 'b> Future for SendOneshotBody<'s, 'b> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendOneshotBody { stream, bufs } = Pin::into_inner(self); - loop { - match stream.try_send_vectored(bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => { - let head_len = bufs[0].len(); - let body_len = bufs[1].len(); - if n >= head_len + body_len { - // Written everything. - return Poll::Ready(Ok(())); - } else if n <= head_len { - // Only written part of the head, advance the head - // buffer. - bufs[0].advance(n); - } else { - // Written entire head. - bufs[0] = IoSlice::new(&[]); - bufs[1].advance(n - head_len); - } - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - } - } - - /// See [`super::StreamingBody`]. - #[derive(Debug)] - pub struct SendStreamingBody<'s, 'h, 'b, B> { - pub(super) stream: &'s mut TcpStream, - pub(super) head: &'h [u8], - /// Bytes left to write from `body`, not counting the HTTP head. - pub(super) left: usize, - pub(super) body: B, - /// Slice of bytes from `body`. - pub(super) body_bytes: Option<&'b [u8]>, - } - - impl<'s, 'h, 'b, B> Future for SendStreamingBody<'s, 'h, 'b, B> - where - B: Stream>, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // SAFETY: not moving `body: B`, ensuring it's still pinned. - #[rustfmt::skip] - let SendStreamingBody { stream, head, left, body, body_bytes } = unsafe { Pin::into_inner_unchecked(self) }; - let mut body = unsafe { Pin::new_unchecked(body) }; - - // Send the HTTP head first. - // TODO: try to use vectored I/O on first call. - while !head.is_empty() { - match stream.try_send(*head) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => *head = &head[n..], - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - while *left != 0 { - // We have bytes we need to send. - if let Some(bytes) = body_bytes.as_mut() { - // TODO: check `bytes.len()` <= `left`. - match stream.try_send(*bytes) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => { - *left -= n; - if n >= bytes.len() { - *body_bytes = None; - } else { - *bytes = &bytes[n..]; - continue; - } - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - // Read some bytes from the `body` stream. 
- match body.as_mut().poll_next(ctx) { - Poll::Ready(Some(Ok(bytes))) => *body_bytes = Some(bytes), - Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)), - Poll::Ready(None) => { - // NOTE: this shouldn't happend. - debug_assert!(*left == 0, "short body provided to `StreamingBody`"); - return Poll::Ready(Ok(())); - } - Poll::Pending => return Poll::Pending, - } - } - - Poll::Ready(Ok(())) - } - } - - /// See [`super::ChunkedBody`]. - #[derive(Debug)] - pub struct SendChunkedBody<'s, 'h, 'b, B> { - pub(super) stream: &'s mut TcpStream, - pub(super) head: &'h [u8], - pub(super) body: B, - /// Slice of bytes from `body`. - pub(super) body_bytes: Option<&'b [u8]>, - pub(super) written_chunk_size: bool, - } - - impl<'s, 'h, 'b, B> Future for SendChunkedBody<'s, 'h, 'b, B> - where - B: Stream>, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - // SAFETY: not moving `body: B`, ensuring it's still pinned. - #[rustfmt::skip] - let SendChunkedBody { stream, head, body, body_bytes, written_chunk_size } = unsafe { Pin::into_inner_unchecked(self) }; - let mut body = unsafe { Pin::new_unchecked(body) }; - - // Send the HTTP head first. - // TODO: try to use vectored I/O on first call. - while !head.is_empty() { - match stream.try_send(*head) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => *head = &head[n..], - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - loop { - // We have bytes we need to send. - if let Some(bytes) = body_bytes.as_mut() { - let mut size_buf = itoa::Buffer::new(); - let (b1, b2) = if *written_chunk_size { - // Already written the chunk size. - ("", "") - } else { - (size_buf.format(bytes.len()), "\r\n") - }; - - let mut bufs = [ - // Chunk size. - IoSlice::new(b1.as_bytes()), - IoSlice::new(b2.as_bytes()), - IoSlice::new(bytes), // User's bytes. - IoSlice::new(b"\r\n"), // End of chunk. - ]; - loop { - match stream.try_send_vectored(&bufs) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(mut n) => { - // FIXME: deal with `n` < `b1.len() + b2.len()`. - n -= b1.len() + b2.len(); - if n >= bytes.len() { - *body_bytes = None; - break; - } - *bytes = &bytes[n..]; - bufs[2] = IoSlice::new(bytes); - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - } - - // Read some bytes from the `body` stream. - match body.as_mut().poll_next(ctx) { - Poll::Ready(Some(Ok(bytes))) => { - *body_bytes = Some(bytes); - *written_chunk_size = false; - } - Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)), - Poll::Ready(None) => loop { - match stream.try_send(LAST_CHUNK) { - // FIXME: properly deal with small write here. - Ok(n) if n < LAST_CHUNK.len() => { - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())) - } - Ok(_) => return Poll::Ready(Ok(())), - // FIXME: properly deal with this error; can't poll - // anymore. - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - }, - Poll::Pending => return Poll::Pending, - } - } - } - } - - /// See [`super::FileBody`]. 
- #[derive(Debug)] - pub struct SendFileBody<'s, 'h, 'f, F> { - pub(super) stream: &'s mut TcpStream, - pub(super) head: &'h [u8], - pub(super) file: &'f F, - pub(super) offset: usize, - pub(super) end: NonZeroUsize, - } - - impl<'s, 'h, 'f, F> Future for SendFileBody<'s, 'h, 'f, F> - where - F: FileSend, - { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - #[rustfmt::skip] - let SendFileBody { stream, head, file, offset, end } = Pin::into_inner(self); - - // Send the HTTP head first. - while !head.is_empty() { - match stream.try_send(head) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => *head = &head[n..], - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - while end.get() > *offset { - let length = NonZeroUsize::new(end.get() - *offset); - match stream.try_send_file(*file, *offset, length) { - // All bytes were send. - Ok(0) => return Poll::Ready(Ok(())), - Ok(n) => *offset += n, - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - Poll::Ready(Ok(())) - } + http_head: Vec, + ) -> Self::WriteFuture<'stream>; } } -pub(crate) use private::{PrivateBody, SendChunkedBody, SendStreamingBody}; -use private::{SendFileBody, SendOneshotBody}; +pub(crate) use private::PrivateBody; /// An empty body. #[derive(Copy, Clone, Debug)] pub struct EmptyBody; -impl<'b> Body<'b> for EmptyBody { +impl Body for EmptyBody { fn length(&self) -> BodyLength { BodyLength::Known(0) } } -impl<'b> PrivateBody<'b> for EmptyBody { - type WriteBody<'s, 'h> = SendAll<'s, 'h>; +impl PrivateBody for EmptyBody { + type WriteFuture<'stream> = impl Future>> + 'stream; - fn write_message<'s, 'h>( + fn write_message<'stream>( self, - stream: &'s mut TcpStream, - http_head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'b: 'h, - { - // Just need to write the HTTP head as we don't have a body. + stream: &'stream mut TcpStream, + http_head: Vec, + ) -> Self::WriteFuture<'stream> { stream.send_all(http_head) } } /// Body length and content is known in advance. Send in a single payload (i.e. /// not chunked). -#[derive(Debug, Clone)] -pub struct OneshotBody<'b> { - bytes: &'b [u8], +#[derive(Copy, Clone, Debug)] +pub struct OneshotBody { + bytes: B, } -impl<'b> OneshotBody<'b> { +impl OneshotBody { /// Create a new one-shot body. - pub const fn new(body: &'b [u8]) -> OneshotBody<'b> { + pub const fn new(body: B) -> OneshotBody { OneshotBody { bytes: body } } - /// Returns the bytes that make up the body. - pub const fn bytes(&'b self) -> &'b [u8] { + /// Returns the underlying buffer. + pub fn into_inner(self) -> B { self.bytes } } -impl<'b> Body<'b> for OneshotBody<'b> { +impl Body for OneshotBody { fn length(&self) -> BodyLength { - BodyLength::Known(self.bytes.len()) + // SAFETY: only using the length, nothing unsafe about that. 
+ BodyLength::Known(unsafe { self.bytes.parts().1 }) } } -impl<'b> PrivateBody<'b> for OneshotBody<'b> { - type WriteBody<'s, 'h> = SendOneshotBody<'s, 'h>; +impl PrivateBody for OneshotBody { + type WriteFuture<'stream> = impl Future>> + 'stream; - fn write_message<'s, 'h>( + fn write_message<'stream>( self, - stream: &'s mut TcpStream, - http_head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'b: 'h, - { - let head = IoSlice::new(http_head); - let body = IoSlice::new(self.bytes); - SendOneshotBody { - stream, - bufs: [head, body], + stream: &'stream mut TcpStream, + http_head: Vec, + ) -> Self::WriteFuture<'stream> { + let bufs = (http_head, self.bytes); + async move { + let (http_head, _) = stream.send_vectored_all(bufs).await?; + Ok(http_head) } } } -impl<'b> From<&'b [u8]> for OneshotBody<'b> { - fn from(body: &'b [u8]) -> Self { - OneshotBody::new(body) - } -} - -impl<'b> From<&'b str> for OneshotBody<'b> { - fn from(body: &'b str) -> Self { - OneshotBody::new(body.as_bytes()) - } -} - -impl<'b> PartialEq<[u8]> for OneshotBody<'b> { - fn eq(&self, other: &[u8]) -> bool { - self.bytes.eq(other) - } -} - -impl<'b> PartialEq<&[u8]> for OneshotBody<'b> { - fn eq(&self, other: &&[u8]) -> bool { - self.bytes.eq(*other) - } -} - -impl<'b> PartialEq for OneshotBody<'b> { - fn eq(&self, other: &str) -> bool { - self.bytes.eq(other.as_bytes()) - } -} - -impl<'b> PartialEq<&str> for OneshotBody<'b> { - fn eq(&self, other: &&str) -> bool { - self.bytes.eq(other.as_bytes()) - } -} - /// Streaming body with a known length. Send in a single payload (i.e. not /// chunked). #[derive(Debug)] -pub struct StreamingBody<'b, B> { +pub struct StreamingBody { length: usize, - body: B, - _body_lifetime: PhantomData<&'b [u8]>, + body: S, } -impl<'b, B> StreamingBody<'b, B> +impl StreamingBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { - /// Use a [`Stream`] as HTTP body with a known length. - pub const fn new(length: usize, stream: B) -> StreamingBody<'b, B> { + /// Use a [`AsyncIterator`] as HTTP body with a known length. + pub const fn new(length: usize, stream: S) -> StreamingBody { StreamingBody { length, body: stream, - _body_lifetime: PhantomData, } } } -impl<'b, B> Body<'b> for StreamingBody<'b, B> +impl Body for StreamingBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { fn length(&self) -> BodyLength { BodyLength::Known(self.length) } } -impl<'b, B> PrivateBody<'b> for StreamingBody<'b, B> +impl PrivateBody for StreamingBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { - type WriteBody<'s, 'h> = SendStreamingBody<'s, 'h, 'b, B>; + type WriteFuture<'stream> = impl Future>> + 'stream; - fn write_message<'s, 'h>( + fn write_message<'stream>( self, - stream: &'s mut TcpStream, - head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'b: 'h, - { - SendStreamingBody { - stream, - body: self.body, - head, - left: self.length, - body_bytes: None, + stream: &'stream mut TcpStream, + http_head: Vec, + ) -> Self::WriteFuture<'stream> { + let mut body = self.body; + async move { + let http_head = stream.send_all(http_head).await?; + while let Some(chunk) = next(&mut body).await { + _ = stream.send_all(chunk).await?; + } + Ok(http_head) } } } /// Streaming body with an unknown length. Send in multiple chunks. 
#[derive(Debug)] -pub struct ChunkedBody<'b, B> { - body: B, - _body_lifetime: PhantomData<&'b [u8]>, +pub struct ChunkedBody { + body: S, } -impl<'b, B> ChunkedBody<'b, B> +impl ChunkedBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { - /// Use a [`Stream`] as HTTP body with a unknown length. + /// Use a [`AsyncIterator`] as HTTP body with a unknown length. /// /// If the total length of `stream` is known prefer to use /// [`StreamingBody`]. - pub const fn new(stream: B) -> ChunkedBody<'b, B> { - ChunkedBody { - body: stream, - _body_lifetime: PhantomData, - } + pub const fn new(stream: S) -> ChunkedBody { + ChunkedBody { body: stream } } } -impl<'b, B> Body<'b> for ChunkedBody<'b, B> +impl Body for ChunkedBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { fn length(&self) -> BodyLength { BodyLength::Chunked } } -impl<'b, B> PrivateBody<'b> for ChunkedBody<'b, B> +impl PrivateBody for ChunkedBody where - B: Stream>, + S: AsyncIterator + Unpin + 'static, + B: Buf, { - type WriteBody<'s, 'h> = SendChunkedBody<'s, 'h, 'b, B>; + type WriteFuture<'stream> = impl Future>> + 'stream; - fn write_message<'s, 'h>( + fn write_message<'stream>( self, - stream: &'s mut TcpStream, - head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'b: 'h, - { - SendChunkedBody { - stream, - body: self.body, - head, - body_bytes: None, - written_chunk_size: false, - } - } -} - -/// Body that sends the entire file `F`. -#[derive(Debug)] -pub struct FileBody<'f, F> { - file: &'f F, - /// Start offset into the `file`. - offset: usize, - /// Length of the file, or the maximum number of bytes to send (minus - /// `offset`). - /// Always: `end >= offset`. - end: NonZeroUsize, -} - -impl<'f, F> FileBody<'f, F> -where - F: FileSend, -{ - /// Use a file as HTTP body. - /// - /// This uses the bytes `offset..end` from `file` as HTTP body and sends - /// them using `sendfile(2)` (using [`TcpStream::send_file`]). - pub const fn new(file: &'f F, offset: usize, end: NonZeroUsize) -> FileBody<'f, F> { - debug_assert!(end.get() >= offset); - FileBody { file, offset, end } - } -} - -impl<'f, F> Body<'f> for FileBody<'f, F> -where - F: FileSend, -{ - fn length(&self) -> BodyLength { - // NOTE: per the comment on `end`: `end >= offset`, so this can't - // underflow. - BodyLength::Known(self.end.get() - self.offset) - } -} - -impl<'f, F> PrivateBody<'f> for FileBody<'f, F> -where - F: FileSend, -{ - type WriteBody<'s, 'h> = SendFileBody<'s, 'h, 'f, F>; - - fn write_message<'s, 'h>( - self, - stream: &'s mut TcpStream, - head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'f: 'h, - { - SendFileBody { - stream, - head, - file: self.file, - offset: self.offset, - end: self.end, + stream: &'stream mut TcpStream, + http_head: Vec, + ) -> Self::WriteFuture<'stream> { + let mut body = self.body; + async move { + let http_head = stream.send_all(http_head).await?; + while let Some(chunk) = next(&mut body).await { + _ = stream.send_all(chunk).await?; + } + _ = stream.send_all(LAST_CHUNK).await?; + Ok(http_head) } } } diff --git a/http/src/client.rs b/http/src/client.rs index 8d4265077..477c7c14e 100644 --- a/http/src/client.rs +++ b/http/src/client.rs @@ -1,20 +1,18 @@ //! Module with the HTTP client implementation. 
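A rough sketch (illustration only, not part of this patch) of how the
reworked client API fits together, using just the signatures introduced
in this file: `connect` is now an async function taking runtime access,
`request` returns a single `Result` with `ResponseError`, and the new
`Io` variant converts back into an `io::Error` via the `From` impl
further down. The helper name `ping` and its shape are hypothetical.

```rust
use std::io;
use std::net::SocketAddr;

use heph_http::body::EmptyBody;
use heph_http::client::Client;
use heph_http::{Headers, Method};
use heph_rt::Access;

// Sketch only: connect and issue a single GET request. `RT` is whatever
// runtime access the calling actor has available.
async fn ping<RT: Access>(rt: &RT, address: SocketAddr) -> io::Result<()> {
    // `connect` now takes `&RT` instead of an `actor::Context` and is an
    // async function; the `Connect` future no longer exists.
    let mut client = Client::connect(rt, address).await?;
    // `request` now returns `Result<Response<Body>, ResponseError>`; the `?`
    // below relies on the `From<ResponseError> for io::Error` impl, which
    // returns the wrapped I/O error as-is and maps the parse errors to
    // `io::ErrorKind::InvalidData`.
    let _response = client
        .request(Method::Get, "/", &Headers::EMPTY, EmptyBody)
        .await?;
    Ok(())
}
```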
-use std::cmp::min; -use std::future::Future; +use std::mem::take; use std::net::SocketAddr; -use std::pin::Pin; -use std::task::{self, Poll}; use std::{fmt, io}; -use heph::net::tcp::stream::{self, TcpStream}; -use heph::{actor, rt}; +use heph_rt::io::{BufMut, BufMutSlice}; +use heph_rt::net::TcpStream; +use heph_rt::Access; use crate::body::{BodyLength, EmptyBody}; use crate::head::header::{FromHeaderValue, HeaderName, Headers}; use crate::{ - map_version_byte, trim_ws, Method, Response, StatusCode, BUF_SIZE, MAX_HEADERS, MAX_HEAD_SIZE, - MIN_READ_SIZE, + map_version_byte, trim_ws, Method, Response, StatusCode, BUF_SIZE, INIT_HEAD_SIZE, MAX_HEADERS, + MAX_HEAD_SIZE, MIN_READ_SIZE, }; /// HTTP/1.1 client. @@ -30,14 +28,17 @@ pub struct Client { impl Client { /// Create a new HTTP client, connected to `address`. - pub fn connect( - ctx: &mut actor::Context, - address: SocketAddr, - ) -> io::Result + pub async fn connect(rt: &RT, address: SocketAddr) -> io::Result where - RT: rt::Access, + RT: Access, { - TcpStream::connect(ctx, address).map(|connect| Connect { connect }) + let stream = TcpStream::connect(rt, address).await?; + stream.set_nodelay(true)?; + Ok(Client { + stream, + buf: Vec::with_capacity(BUF_SIZE), + parsed_bytes: 0, + }) } /// Send a GET request. @@ -46,15 +47,12 @@ impl Client { /// /// Any [`ResponseError`] are turned into [`io::Error`]. If you want to /// handle the `ResponseError`s separately use [`Client::request`]. - pub async fn get<'c, 'p>(&'c mut self, path: &'p str) -> io::Result>> { - let res = self - .request(Method::Get, path, &Headers::EMPTY, EmptyBody) - .await; - match res { - Ok(Ok(response)) => Ok(response), - Ok(Err(err)) => Err(err.into()), - Err(err) => Err(err), - } + pub async fn get<'c, 'p>( + &'c mut self, + path: &'p str, + ) -> Result>, ResponseError> { + self.request(Method::Get, path, &Headers::EMPTY, EmptyBody) + .await } /// Make a [`Request`] and wait (non-blocking) for a [`Response`]. @@ -64,27 +62,20 @@ impl Client { /// # Notes /// /// This always uses HTTP/1.1 to make the requests. - /// - /// If the server doesn't respond this return an [`io::Error`] with - /// [`io::ErrorKind::UnexpectedEof`]. pub async fn request<'c, 'b, B>( &'c mut self, method: Method, path: &str, headers: &Headers, body: B, - ) -> io::Result>, ResponseError>> + ) -> Result>, ResponseError> where - B: crate::Body<'b>, + B: crate::Body, { self.send_request(method, path, headers, body).await?; match self.read_response(method).await { - Ok(Ok(Some(request))) => Ok(Ok(request)), - Ok(Ok(None)) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "no HTTP response", - )), - Ok(Err(err)) => Ok(Err(err)), + Ok(Some(request)) => Ok(request), + Ok(None) => Err(ResponseError::IncompleteResponse), Err(err) => Err(err), } } @@ -106,18 +97,25 @@ impl Client { body: B, ) -> io::Result<()> where - B: crate::Body<'b>, + B: crate::Body, { // Clear bytes from the previous request, keeping the bytes of the // response. self.clear_buffer(); - let ignore_end = self.buf.len(); + + // If the read buffer is empty we can use, otherwise we need to create a + // new buffer to ensure we don't lose bytes. + let mut http_head = if self.buf.is_empty() { + take(&mut self.buf) + } else { + Vec::with_capacity(INIT_HEAD_SIZE) + }; // Request line. 
- self.buf.extend_from_slice(method.as_str().as_bytes()); - self.buf.push(b' '); - self.buf.extend_from_slice(path.as_bytes()); - self.buf.extend_from_slice(b" HTTP/1.1\r\n"); + http_head.extend_from_slice(method.as_str().as_bytes()); + http_head.push(b' '); + http_head.extend_from_slice(path.as_bytes()); + http_head.extend_from_slice(b" HTTP/1.1\r\n"); // Headers. let mut set_user_agent_header = false; @@ -126,13 +124,13 @@ impl Client { for header in headers.iter() { let name = header.name(); // Field-name. - self.buf.extend_from_slice(name.as_ref().as_bytes()); + http_head.extend_from_slice(name.as_ref().as_bytes()); // NOTE: spacing after the colon (`:`) is optional. - self.buf.extend_from_slice(b": "); + http_head.extend_from_slice(b": "); // Append the header's value. // NOTE: `header.value` shouldn't contain CRLF (`\r\n`). - self.buf.extend_from_slice(header.value()); - self.buf.extend_from_slice(b"\r\n"); + http_head.extend_from_slice(header.value()); + http_head.extend_from_slice(b"\r\n"); if name == &HeaderName::USER_AGENT { set_user_agent_header = true; @@ -146,13 +144,13 @@ impl Client { /* TODO: set "Host" header. // Provide the "Host" header if the user didn't. if !set_host_header { - write!(&mut self.buf, "Host: {}\r\n", self.host).unwrap(); + write!(&mut http_head, "Host: {}\r\n", self.host).unwrap(); } */ // Provide the "User-Agent" header if the user didn't. if !set_user_agent_header { - self.buf.extend_from_slice( + http_head.extend_from_slice( concat!("User-Agent: Heph-HTTP/", env!("CARGO_PKG_VERSION"), "\r\n").as_bytes(), ); } @@ -162,27 +160,27 @@ impl Client { BodyLength::Known(0) => {} // No need for a "Content-Length" header. BodyLength::Known(length) => { let mut itoa_buf = itoa::Buffer::new(); - self.buf.extend_from_slice(b"Content-Length: "); - self.buf - .extend_from_slice(itoa_buf.format(length).as_bytes()); - self.buf.extend_from_slice(b"\r\n"); + http_head.extend_from_slice(b"Content-Length: "); + http_head.extend_from_slice(itoa_buf.format(length).as_bytes()); + http_head.extend_from_slice(b"\r\n"); } BodyLength::Chunked => { - self.buf - .extend_from_slice(b"Transfer-Encoding: chunked\r\n"); + http_head.extend_from_slice(b"Transfer-Encoding: chunked\r\n"); } } } // End of the HTTP head. - self.buf.extend_from_slice(b"\r\n"); + http_head.extend_from_slice(b"\r\n"); // Write the request to the stream. - let http_head = &self.buf[ignore_end..]; - body.write_message(&mut self.stream, http_head).await?; + let mut http_head = body.write_message(&mut self.stream, http_head).await?; + if self.buf.is_empty() { + // We used the read buffer so let's put it back. + http_head.clear(); + self.buf = http_head; + } - // Remove the request from the buffer. - self.buf.truncate(ignore_end); Ok(()) } @@ -198,7 +196,7 @@ impl Client { pub async fn read_response<'a>( &'a mut self, request_method: Method, - ) -> io::Result>>, ResponseError>> { + ) -> Result>>, ResponseError> { let mut too_short = 0; loop { // In case of pipelined responses it could be that while reading a @@ -211,17 +209,15 @@ impl Client { // while we have less than `too_short` bytes we try to receive // some more bytes. - self.clear_buffer(); - self.buf.reserve(MIN_READ_SIZE); - if self.stream.recv(&mut self.buf).await? == 0 { + if self.recv().await? { return if self.buf.is_empty() { // Read the entire stream, so we're done. - Ok(Ok(None)) + Ok(None) } else { // Couldn't read any more bytes, but we still have bytes // in the buffer. This means it contains a partial // response. 
- Ok(Err(ResponseError::IncompleteResponse)) + Err(ResponseError::IncompleteResponse) }; } } @@ -242,90 +238,94 @@ impl Client { // RFC 7230 section 3.3.3 Message Body Length. let mut body_length: Option = None; - let res = Headers::from_httparse_headers(response.headers, |name, value| { - if *name == HeaderName::CONTENT_LENGTH { - // RFC 7230 section 3.3.3 point 4: - // > If a message is received without - // > Transfer-Encoding and with either multiple - // > Content-Length header fields having differing - // > field-values or a single Content-Length header - // > field having an invalid value, then the message - // > framing is invalid and the recipient MUST treat - // > it as an unrecoverable error. [..] If this is a - // > response message received by a user agent, the - // > user agent MUST close the connection to the - // > server and discard the received response. - if let Ok(length) = FromHeaderValue::from_bytes(value) { - match body_length.as_mut() { - Some(ResponseBodyLength::Known(body_length)) - if *body_length == length => {} - Some(ResponseBodyLength::Known(_)) => { - return Err(ResponseError::DifferentContentLengths) - } - Some( - ResponseBodyLength::Chunked | ResponseBodyLength::ReadToEnd, - ) => { - return Err(ResponseError::ContentLengthAndTransferEncoding) - } - // RFC 7230 section 3.3.3 point 5: - // > If a valid Content-Length header field - // > is present without Transfer-Encoding, - // > its decimal value defines the expected - // > message body length in octets. - None => body_length = Some(ResponseBodyLength::Known(length)), - } - } else { - return Err(ResponseError::InvalidContentLength); - } - } else if *name == HeaderName::TRANSFER_ENCODING { - let mut encodings = value.split(|b| *b == b',').peekable(); - while let Some(encoding) = encodings.next() { - match trim_ws(encoding) { - b"chunked" => { - // RFC 7230 section 3.3.3 point 3: - // > If a message is received with both - // > a Transfer-Encoding and a - // > Content-Length header field, the - // > Transfer-Encoding overrides the - // > Content-Length. Such a message - // > might indicate an attempt to - // > perform request smuggling (Section - // > 9.5) or response splitting (Section - // > 9.4) and ought to be handled as an - // > error. - if body_length.is_some() { + let headers = + Headers::from_httparse_headers(response.headers, |name, value| { + if *name == HeaderName::CONTENT_LENGTH { + // RFC 7230 section 3.3.3 point 4: + // > If a message is received without + // > Transfer-Encoding and with either multiple + // > Content-Length header fields having differing + // > field-values or a single Content-Length header + // > field having an invalid value, then the message + // > framing is invalid and the recipient MUST treat + // > it as an unrecoverable error. [..] If this is a + // > response message received by a user agent, the + // > user agent MUST close the connection to the + // > server and discard the received response. 
+ if let Ok(length) = FromHeaderValue::from_bytes(value) { + match body_length.as_mut() { + Some(ResponseBodyLength::Known(body_length)) + if *body_length == length => {} + Some(ResponseBodyLength::Known(_)) => { + return Err(ResponseError::DifferentContentLengths) + } + Some( + ResponseBodyLength::Chunked + | ResponseBodyLength::ReadToEnd, + ) => { return Err( ResponseError::ContentLengthAndTransferEncoding, - ); + ) } - - // RFC 7230 section 3.3.3 point 3: - // > If a Transfer-Encoding header field - // > is present in a response and the - // > chunked transfer coding is not the - // > final encoding, the message body - // > length is determined by reading the - // > connection until it is closed by - // > the server. - if encodings.peek().is_some() { - body_length = Some(ResponseBodyLength::ReadToEnd) - } else { - body_length = Some(ResponseBodyLength::Chunked); + // RFC 7230 section 3.3.3 point 5: + // > If a valid Content-Length header field + // > is present without Transfer-Encoding, + // > its decimal value defines the expected + // > message body length in octets. + None => { + body_length = Some(ResponseBodyLength::Known(length)) + } + } + } else { + return Err(ResponseError::InvalidContentLength); + } + } else if *name == HeaderName::TRANSFER_ENCODING { + let mut encodings = value.split(|b| *b == b',').peekable(); + while let Some(encoding) = encodings.next() { + match trim_ws(encoding) { + b"chunked" => { + // RFC 7230 section 3.3.3 point 3: + // > If a message is received with both + // > a Transfer-Encoding and a + // > Content-Length header field, the + // > Transfer-Encoding overrides the + // > Content-Length. Such a message + // > might indicate an attempt to + // > perform request smuggling (Section + // > 9.5) or response splitting (Section + // > 9.4) and ought to be handled as an + // > error. + if body_length.is_some() { + return Err( + ResponseError::ContentLengthAndTransferEncoding, + ); + } + + // RFC 7230 section 3.3.3 point 3: + // > If a Transfer-Encoding header field + // > is present in a response and the + // > chunked transfer coding is not the + // > final encoding, the message body + // > length is determined by reading the + // > connection until it is closed by + // > the server. + if encodings.peek().is_some() { + body_length = Some(ResponseBodyLength::ReadToEnd) + } else { + body_length = Some(ResponseBodyLength::Chunked); + } + } + b"identity" => {} // No changes. + // TODO: support "compress", "deflate" and + // "gzip". + _ => { + return Err(ResponseError::UnsupportedTransferEncoding) } } - b"identity" => {} // No changes. - // TODO: support "compress", "deflate" and - // "gzip". 
- _ => return Err(ResponseError::UnsupportedTransferEncoding), } } - } - Ok(()) - }); - let headers = match res { - Ok(headers) => headers, - Err(err) => return Ok(Err(err)), - }; + Ok(()) + })?; let kind = match body_length { // RFC 7230 section 3.3.3 point 2: @@ -357,10 +357,12 @@ impl Client { left_in_chunk: 0, read_complete: false, }, - Err(_) => return Ok(Err(ResponseError::InvalidChunkSize)), + Err(_) => return Err(ResponseError::InvalidChunkSize), } } - Some(ResponseBodyLength::ReadToEnd) => BodyKind::Unknown, + Some(ResponseBodyLength::ReadToEnd) => BodyKind::Unknown { + read_complete: false, + }, // RFC 7230 section 3.3.3 point 1: // > Any response to a HEAD request and any response // > with a 1xx (Informational), 204 (No Content), or @@ -381,22 +383,24 @@ impl Client { // > length is determined by the number of octets // > received prior to the server closing the // > connection. - None => BodyKind::Unknown, + None => BodyKind::Unknown { + read_complete: false, + }, }; let body = Body { client: self, kind }; - return Ok(Ok(Some(Response::new(version, status, headers, body)))); + return Ok(Some(Response::new(version, status, headers, body))); } Ok(httparse::Status::Partial) => { // Buffer doesn't include the entire response head, try // reading more bytes (in the next iteration). too_short = self.buf.len(); if too_short >= MAX_HEAD_SIZE { - return Ok(Err(ResponseError::HeadTooLarge)); + return Err(ResponseError::HeadTooLarge); } continue; } - Err(err) => return Ok(Err(ResponseError::from_httparse(err))), + Err(err) => return Err(ResponseError::from_httparse(err)), } } } @@ -406,7 +410,7 @@ impl Client { // Fields of `BodyKind::Chunked`: left_in_chunk: &mut usize, read_complete: &mut bool, - ) -> io::Result<()> { + ) -> Result<(), ResponseError> { loop { match httparse::parse_chunk_size(&self.buf[self.parsed_bytes..]) { #[allow(clippy::cast_possible_truncation)] // For truncate below. @@ -421,31 +425,33 @@ impl Client { return Ok(()); } Ok(httparse::Status::Partial) => {} // Read some more data below. - Err(_) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk size", - )) - } + Err(_) => return Err(ResponseError::InvalidChunkSize), } - // Ensure we have space in the buffer to read into. - self.clear_buffer(); - self.buf.reserve(MIN_READ_SIZE); - - if self.stream.recv(&mut self.buf).await? == 0 { - return Err(io::ErrorKind::UnexpectedEof.into()); + if self.recv().await? { + return Err(ResponseError::IncompleteResponse); } } } + /// Returns true if we read all bytes (i.e. we read 0 bytes). + async fn recv(&mut self) -> io::Result { + // Ensure we have space in the buffer to read into. + self.clear_buffer(); + self.buf.reserve(MIN_READ_SIZE); + + let buf_len = self.buf.len(); + self.buf = self.stream.recv(take(&mut self.buf)).await?; + Ok(self.buf.len() == buf_len) + } + /// Clear parsed request(s) from the buffer. fn clear_buffer(&mut self) { let buf_len = self.buf.len(); if self.parsed_bytes >= buf_len { // Parsed all bytes in the buffer, so we can clear it. self.buf.clear(); - self.parsed_bytes -= buf_len; + self.parsed_bytes = 0; } // TODO: move bytes to the start. @@ -463,33 +469,7 @@ enum ResponseBodyLength { ReadToEnd, } -/// [`Future`] behind [`Client::connect`]. 
-#[derive(Debug)] -pub struct Connect { - connect: stream::Connect, -} - -impl Future for Connect { - type Output = io::Result; - - #[track_caller] - fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { - match Pin::new(&mut self.connect).poll(ctx) { - Poll::Ready(Ok(mut stream)) => { - stream.set_nodelay(true)?; - Poll::Ready(Ok(Client { - stream, - buf: Vec::with_capacity(BUF_SIZE), - parsed_bytes: 0, - })) - } - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - Poll::Pending => Poll::Pending, - } - } -} - -/// Body returned used by [`Client`]. +/// Body used by the [`Client`] for response for responsess. #[derive(Debug)] pub struct Body<'c> { client: &'c mut Client, @@ -512,11 +492,13 @@ enum BodyKind { }, /// Body length is not known, read the body until the server closes the /// connection. - Unknown, + Unknown { + /// Last read call returned 0. + read_complete: bool, + }, } impl<'c> Body<'c> { - /* /// Returns `true` if the body is completely read (or was empty to begin /// with). /// @@ -530,6 +512,10 @@ impl<'c> Body<'c> { /// unknown and thus not empty. However if the body would then send a single /// empty chunk (signaling the end of the body), this would return `true` as /// it turns out the body is indeed empty. + /// + /// This can also incorrectly return `false` for cases where the server + /// doesn't return a Content-Length header, but instead closes the + /// connection after the entire response is send, common before HTTP/1.1. pub fn is_empty(&self) -> bool { match self.kind { BodyKind::Known { left } => left == 0, @@ -537,6 +523,21 @@ impl<'c> Body<'c> { left_in_chunk, read_complete, } => read_complete && left_in_chunk == 0, + BodyKind::Unknown { read_complete } => read_complete, + } + } + + /// Returns the size of the next chunk in the body, or the entire body if + /// not chunked. + /// + /// However note that this is based on the server's information and thus + /// should not be relied opun as it be not be accurate or even possible to + /// determine. + pub fn chunk_size_hint(&self) -> Option { + match self.kind { + BodyKind::Known { left } => Some(left), + BodyKind::Chunked { left_in_chunk, .. } => Some(left_in_chunk), + BodyKind::Unknown { .. } => None, } } @@ -544,7 +545,6 @@ impl<'c> Body<'c> { pub fn is_chunked(&self) -> bool { matches!(self.kind, BodyKind::Chunked { .. }) } - */ /* TODO: RFC 7230 section 3.3.3 point 5: @@ -553,76 +553,107 @@ impl<'c> Body<'c> { consider the message to be incomplete and close the connection. */ - /// Read the entire body into `buf`. - /// - /// Reads up to `limit` bytes. - pub async fn read_all(&mut self, buf: &mut Vec, limit: usize) -> io::Result<()> { - let mut total = 0; + /// Receive bytes from the request body, writing them into `buf`. + pub async fn recv(&mut self, mut buf: B) -> io::Result { loop { - // Copy bytes in our buffer. - let bytes = self.buf_bytes(); - let len = bytes.len(); - if limit < total + len { - return Err(io::Error::new(io::ErrorKind::Other, "body too large")); + // Quick return for if we read all bytes in the body already. + if self.is_empty() { + return Ok(buf); } - buf.extend_from_slice(bytes); - self.processed(len); - total += len; + // First try to copy already buffered bytes. + let buf_bytes = self.buf_bytes(); + if !buf_bytes.is_empty() { + let written = buf.extend_from_slice(buf_bytes); + self.processed(written); + return Ok(buf); + } - match &mut self.kind { - // Read all the bytes from the body. 
- BodyKind::Known { left: 0 } => return Ok(()), - // Read all the bytes in the chunk, so need to read another - // chunk. + // We need to ensure that we don't read another response head or + // chunk head into `buf`. So we need to determine a limit on the + // amount of bytes we can safely read. We only can't determine that + // for the case were we read an entire chunk, but don't know + // anything about the next chunk. In this case we need our own + // buffer to ensure we don't lose not-body bytes to the user's + // `buf`fer. + let limit = match &mut self.kind { + BodyKind::Known { left } => *left, BodyKind::Chunked { left_in_chunk, read_complete, - } if *left_in_chunk == 0 => { - if *read_complete { - return Ok(()); + } => { + if *left_in_chunk != 0 { + *left_in_chunk + } else { + self.client.read_chunk(left_in_chunk, read_complete).await?; + // Read from the client's buffer again. + continue; } - - self.client.read_chunk(left_in_chunk, read_complete).await?; - // Copy read bytes again. - continue; } - // Continue to reading below. - BodyKind::Known { .. } | BodyKind::Chunked { .. } | BodyKind::Unknown => break, - } + // We don't have an actual limit, but all the remaining bytes + // make up the response body, so we can safely read them all. + BodyKind::Unknown { .. } => usize::MAX, + }; + + let len_before = buf.spare_capacity(); + let limited_buf = self.client.stream.recv(buf.limit(limit)).await?; + let buf = limited_buf.into_inner(); + self.processed(buf.spare_capacity() - len_before); + return Ok(buf); } + } + /// Receive bytes from the request body, writing them into `bufs` using + /// vectored I/O. + pub async fn recv_vectored, const N: usize>( + &mut self, + mut bufs: B, + ) -> io::Result { loop { - // Limit the read until the end of the chunk/body. - let chunk_len = match self.kind { - BodyKind::Known { left } => Some(left), - BodyKind::Chunked { left_in_chunk, .. } => Some(left_in_chunk), - BodyKind::Unknown => None, - }; - - if let Some(chunk_len) = chunk_len { - if chunk_len == 0 { - return Ok(()); - } else if total + chunk_len > limit { - return Err(io::Error::new(io::ErrorKind::Other, "body too large")); - } + // Quick return for if we read all bytes in the body already. + if self.is_empty() { + return Ok(bufs); } - let capacity = chunk_len - .unwrap_or_else(|| min(MIN_READ_SIZE, limit.saturating_sub(buf.capacity()))); - (&mut *buf).reserve(capacity); - if let Some(chunk_len) = chunk_len { - // FIXME: doesn't deal with chunked bodies. - return self.client.stream.recv_n(&mut *buf, chunk_len).await; - } - let n = self.client.stream.recv(&mut *buf).await?; - if n == 0 { - return Ok(()); - } - total += n; - if total > limit { - return Err(io::Error::new(io::ErrorKind::Other, "body too large")); + // First try to copy already buffered bytes. + let buf_bytes = self.buf_bytes(); + if !buf_bytes.is_empty() { + let written = bufs.extend_from_slice(buf_bytes); + self.processed(written); + return Ok(bufs); } + + // We need to ensure that we don't read another response head or + // chunk head into `buf`. So we need to determine a limit on the + // amount of bytes we can safely read. We only can't determine that + // for the case were we read an entire chunk, but don't know + // anything about the next chunk. In this case we need our own + // buffer to ensure we don't lose not-body bytes to the user's + // `buf`fer. 
+ let limit = match &mut self.kind { + BodyKind::Known { left } => *left, + BodyKind::Chunked { + left_in_chunk, + read_complete, + } => { + if *left_in_chunk != 0 { + *left_in_chunk + } else { + self.client.read_chunk(left_in_chunk, read_complete).await?; + // Read from the client's buffer again. + continue; + } + } + // We don't have an actual limit, but all the remaining bytes + // make up the response body, so we can safely read them all. + BodyKind::Unknown { .. } => usize::MAX, + }; + + let len_before = bufs.total_spare_capacity(); + let limited_bufs = self.client.stream.recv_vectored(bufs.limit(limit)).await?; + let bufs = limited_bufs.into_inner(); + self.processed(bufs.total_spare_capacity() - len_before); + return Ok(bufs); } } @@ -648,7 +679,7 @@ impl<'c> Body<'c> { match &mut self.kind { BodyKind::Known { left } => *left -= n, BodyKind::Chunked { left_in_chunk, .. } => *left_in_chunk -= n, - BodyKind::Unknown => {} + BodyKind::Unknown { .. } => {} } self.client.parsed_bytes += n; } @@ -658,9 +689,9 @@ impl<'c> Body<'c> { /// Error parsing HTTP response. #[non_exhaustive] -#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Debug)] pub enum ResponseError { - /// Missing part of response. + /// Missing the entire (or part of) a response. IncompleteResponse, /// HTTP Head (start line and headers) is too large. /// @@ -694,60 +725,107 @@ pub enum ResponseError { InvalidStatus, /// Chunk size is invalid. InvalidChunkSize, + /// I/O error. + Io(io::Error), } impl ResponseError { /// Returns `true` if the connection should be closed based on the error /// (after sending a error response). #[allow(clippy::unused_self)] - pub const fn should_close(self) -> bool { + pub const fn should_close(&self) -> bool { // Currently all errors are fatal for the connection. true } fn from_httparse(err: httparse::Error) -> ResponseError { - use httparse::Error::*; match err { - HeaderName => ResponseError::InvalidHeaderName, - HeaderValue => ResponseError::InvalidHeaderValue, - Token => unreachable!(), - NewLine => ResponseError::InvalidNewLine, - Version => ResponseError::InvalidVersion, - TooManyHeaders => ResponseError::TooManyHeaders, - Status => ResponseError::InvalidStatus, + httparse::Error::HeaderName => ResponseError::InvalidHeaderName, + httparse::Error::HeaderValue => ResponseError::InvalidHeaderValue, + // Actually unreachable, but don't want to create a panic branch. 
+ httparse::Error::Token => ResponseError::IncompleteResponse, + httparse::Error::NewLine => ResponseError::InvalidNewLine, + httparse::Error::Version => ResponseError::InvalidVersion, + httparse::Error::TooManyHeaders => ResponseError::TooManyHeaders, + httparse::Error::Status => ResponseError::InvalidStatus, } } - fn as_str(self) -> &'static str { - use ResponseError::*; + #[rustfmt::skip] + fn as_str(&self) -> &'static str { match self { - IncompleteResponse => "incomplete response", - HeadTooLarge => "response head too large", - InvalidContentLength => "invalid response Content-Length header", - DifferentContentLengths => "response has different Content-Length headers", - InvalidHeaderName => "invalid response header name", - InvalidHeaderValue => "invalid response header value", - TooManyHeaders => "too many response headers", - UnsupportedTransferEncoding => "response has unsupported Transfer-Encoding header", - ContentLengthAndTransferEncoding => { - "response contained both Content-Length and Transfer-Encoding headers" - } - InvalidNewLine => "invalid response syntax", - InvalidVersion => "invalid HTTP response version", - InvalidStatus => "invalid HTTP response status", - InvalidChunkSize => "invalid response chunk size", + ResponseError::IncompleteResponse => "incomplete response", + ResponseError::HeadTooLarge => "response head too large", + ResponseError::InvalidContentLength => "invalid response Content-Length header", + ResponseError::DifferentContentLengths => "response has different Content-Length headers", + ResponseError::InvalidHeaderName => "invalid response header name", + ResponseError::InvalidHeaderValue => "invalid response header value", + ResponseError::TooManyHeaders => "too many response headers", + ResponseError::UnsupportedTransferEncoding => "response has unsupported Transfer-Encoding header", + ResponseError::ContentLengthAndTransferEncoding => "response contained both Content-Length and Transfer-Encoding headers", + ResponseError::InvalidNewLine => "invalid response syntax", + ResponseError::InvalidVersion => "invalid HTTP response version", + ResponseError::InvalidStatus => "invalid HTTP response status", + ResponseError::InvalidChunkSize => "invalid response chunk size", + ResponseError::Io(_) => "I/O error", + } + } +} + +impl From for ResponseError { + fn from(err: io::Error) -> ResponseError { + if let io::ErrorKind::UnexpectedEof = err.kind() { + ResponseError::IncompleteResponse + } else { + ResponseError::Io(err) } } } impl From for io::Error { fn from(err: ResponseError) -> io::Error { - io::Error::new(io::ErrorKind::InvalidData, err.as_str()) + match err { + ResponseError::Io(err) => err, + err => io::Error::new(io::ErrorKind::InvalidData, err.as_str()), + } + } +} + +impl PartialEq for ResponseError { + fn eq(&self, other: &ResponseError) -> bool { + use ResponseError::*; + match (self, other) { + (IncompleteResponse, IncompleteResponse) => true, + (HeadTooLarge, HeadTooLarge) => true, + (InvalidContentLength, InvalidContentLength) => true, + (DifferentContentLengths, DifferentContentLengths) => true, + (InvalidHeaderName, InvalidHeaderName) => true, + (InvalidHeaderValue, InvalidHeaderValue) => true, + (TooManyHeaders, TooManyHeaders) => true, + (UnsupportedTransferEncoding, UnsupportedTransferEncoding) => true, + (ContentLengthAndTransferEncoding, ContentLengthAndTransferEncoding) => true, + (InvalidNewLine, InvalidNewLine) => true, + (InvalidVersion, InvalidVersion) => true, + (InvalidStatus, InvalidStatus) => true, + (InvalidChunkSize, 
InvalidChunkSize) => true, + (Io(err1), Io(err2)) => { + if let (Some(errno1), Some(errno2)) = (err1.raw_os_error(), err2.raw_os_error()) { + errno1 == errno2 + } else { + // Not always accurate, but good enough for our testing. + false + } + } + (_, _) => false, + } } } impl fmt::Display for ResponseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.as_str()) + match self { + ResponseError::Io(err) => err.fmt(f), + err => err.as_str().fmt(f), + } } } diff --git a/http/src/head/method.rs b/http/src/head/method.rs index c9c61ae97..dda73e274 100644 --- a/http/src/head/method.rs +++ b/http/src/head/method.rs @@ -53,9 +53,9 @@ impl Method { /// Returns `true` if the method is safe. /// /// RFC 7321 section 4.2.1. + #[rustfmt::skip] pub const fn is_safe(self) -> bool { - use Method::*; - matches!(self, Get | Head | Options | Trace) + matches!(self, Method::Get | Method::Head | Method::Options | Method::Trace) } /// Returns `true` if the method is idempotent. @@ -80,17 +80,16 @@ impl Method { /// Returns the method as string. pub const fn as_str(self) -> &'static str { - use Method::*; match self { - Options => "OPTIONS", - Get => "GET", - Post => "POST", - Put => "PUT", - Delete => "DELETE", - Head => "HEAD", - Trace => "TRACE", - Connect => "CONNECT", - Patch => "PATCH", + Method::Options => "OPTIONS", + Method::Get => "GET", + Method::Post => "POST", + Method::Put => "PUT", + Method::Delete => "DELETE", + Method::Head => "HEAD", + Method::Trace => "TRACE", + Method::Connect => "CONNECT", + Method::Patch => "PATCH", } } } diff --git a/http/src/head/mod.rs b/http/src/head/mod.rs index 967656a99..1b05cfaaa 100644 --- a/http/src/head/mod.rs +++ b/http/src/head/mod.rs @@ -61,12 +61,10 @@ impl RequestHead { /// /// # Notes /// - /// Requests from the [`HttpServer`] will return the highest version it + /// Requests from the HTTP server will return the highest version it /// understands, e.g. if a client used HTTP/1.2 (which doesn't exists) the /// version would be set to HTTP/1.1 (the highest version this crate /// understands) per RFC 7230 section 2.6. - /// - /// [`HttpServer`]: crate::HttpServer pub const fn version(&self) -> Version { self.version } diff --git a/http/src/lib.rs b/http/src/lib.rs index 4634ed1bd..18e287914 100644 --- a/http/src/lib.rs +++ b/http/src/lib.rs @@ -1,16 +1,12 @@ //! HTTP/1.1 implementation for Heph. #![feature( - async_stream, - const_fn_trait_bound, + async_iterator, const_mut_refs, - const_slice_from_raw_parts, drain_filter, - generic_associated_types, - io_slice_advance, + impl_trait_in_assoc_type, maybe_uninit_uninit_array, - maybe_uninit_write_slice, - ready_macro + maybe_uninit_write_slice )] #![warn( anonymous_parameters, @@ -52,7 +48,7 @@ pub use head::StatusCode; pub use request::Request; pub use response::Response; #[doc(no_inline)] -pub use server::{Connection, HttpServer}; +pub use server::Connection; /// Maximum size of the HTTP head (the start line and the headers). /// @@ -70,6 +66,9 @@ const MIN_READ_SIZE: usize = 4096; /// Size of the buffer used in [`server::Connection`] and [`Client`]. const BUF_SIZE: usize = 8192; +/// Initial size of a buffer holding just a request/response head. +const INIT_HEAD_SIZE: usize = 2048; + /// Map a `version` byte to a [`Version`]. const fn map_version_byte(version: u8) -> Version { match version { diff --git a/http/src/route.rs b/http/src/route.rs index c61046190..cc465fb0d 100644 --- a/http/src/route.rs +++ b/http/src/route.rs @@ -99,6 +99,7 @@ macro_rules! 
route { $( $method $(, $method2 )* $path => $handler, )+ _ => $not_found ), + _ => $not_found(request).await, } }}; } diff --git a/http/src/server.rs b/http/src/server.rs index 0384ba0e0..3f9da6bf8 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -1,6 +1,3 @@ -// TODO: `S: Supervisor` currently uses `TcpStream` as argument due to `ArgMap`. -// Maybe disconnect `S` from `NA`? -// // TODO: Continue reading RFC 7230 section 4 Transfer Codings. // // TODO: RFC 7230 section 3.3.3 point 5: @@ -9,307 +6,207 @@ // > received, the recipient MUST consider the message to be // > incomplete and close the connection. -//! Module with the HTTP server implementation. +//! HTTP server. +//! +//! The HTTP server is an actor that starts a new actor for each accepted HTTP +//! connection. This actor can start as a thread-local or thread-safe actor. +//! When using the thread-local variant one actor runs per worker thread which +//! spawns thread-local actors to handle the [`Connection`]s, from which HTTP +//! [`Request`]s can be read and HTTP [`Response`]s can be written. +//! +//! [`Response`]: crate::Response +//! +//! # Graceful shutdown +//! +//! Graceful shutdown is done by sending it a [`Terminate`] message. The HTTP +//! server can also handle (shutdown) process signals, see below for an example. +//! +//! [`Terminate`]: heph::messages::Terminate +//! +//! # Examples +//! +//! ```rust +//! # #![feature(never_type)] +//! use std::borrow::Cow; +//! use std::io; +//! use std::net::SocketAddr; +//! use std::time::Duration; +//! +//! use heph::actor::{self, Actor, NewActor}; +//! use heph::net::TcpStream; +//! use heph::rt::{self, Runtime, ThreadLocal}; +//! use heph::supervisor::{Supervisor, SupervisorStrategy}; +//! use heph::timer::Deadline; +//! use heph_http::body::OneshotBody; +//! use heph_http::{self as http, Header, HeaderName, Headers, HttpServer, Method, StatusCode}; +//! use heph_rt::spawn::options::{ActorOptions, Priority}; +//! use log::error; +//! +//! fn main() -> Result<(), rt::Error> { +//! // Setup the HTTP server. +//! let actor = http_actor as fn(_, _, _) -> _; +//! let address = "127.0.0.1:7890".parse().unwrap(); +//! let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) +//! .map_err(rt::Error::setup)?; +//! +//! // Build the runtime. +//! let mut runtime = Runtime::setup().use_all_cores().build()?; +//! // On each worker thread start our HTTP server. +//! runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { +//! let options = ActorOptions::default().with_priority(Priority::LOW); +//! let server_ref = runtime_ref.try_spawn_local(server_supervisor, server, (), options)?; +//! +//! # server_ref.try_send(heph::messages::Terminate).unwrap(); +//! +//! // Allow graceful shutdown by responding to process signals. +//! runtime_ref.receive_signals(server_ref.try_map()); +//! Ok(()) +//! })?; +//! runtime.start() +//! } +//! +//! /// Our supervisor for the HTTP server. +//! fn server_supervisor(err: http::server::Error) -> SupervisorStrategy<()> { +//! match err { +//! // When we hit an error accepting a connection we'll drop the old +//! // server and create a new one. +//! tcp::server::Error::Accept(err) => { +//! error!("error accepting new connection: {err}"); +//! SupervisorStrategy::Restart(()) +//! } +//! // Async function never return an error creating a new actor. +//! tcp::server::Error::NewActor(_) => unreachable!(), +//! } +//! } +//! +//! fn conn_supervisor(err: io::Error) -> SupervisorStrategy { +//! 
error!("error handling connection: {err}"); +//! SupervisorStrategy::Stop +//! } +//! +//! /// Our actor that handles a single HTTP connection. +//! async fn http_actor( +//! mut ctx: actor::Context, +//! mut connection: http::Connection, +//! ) -> io::Result<()> { +//! // Set `TCP_NODELAY` on the underlying `TcpStream`. +//! connection.set_nodelay(true)?; +//! +//! let mut headers = Headers::EMPTY; +//! loop { +//! // Read the next request. +//! let (code, body, should_close) = match connection.next_request().await? { +//! Ok(Some(request)) => { +//! // Only support GET/HEAD to "/", with an empty body. +//! if request.path() != "/" { +//! (StatusCode::NOT_FOUND, "Not found".into(), false) +//! } else if !matches!(request.method(), Method::Get | Method::Head) { +//! // Add the "Allow" header to show the HTTP methods we do +//! // support. +//! headers.append(Header::new(HeaderName::ALLOW, b"GET, HEAD")); +//! (StatusCode::METHOD_NOT_ALLOWED, "Method not allowed".into(), false) +//! } else if !request.body().is_empty() { +//! (StatusCode::PAYLOAD_TOO_LARGE, "Not expecting a body".into(), true) +//! } else { +//! (StatusCode::OK, "Hello world".into(), false) +//! } +//! } +//! // No more requests. +//! Ok(None) => return Ok(()), +//! // Error parsing request. +//! Err(err) => { +//! // Determine the correct status code to return. +//! let code = err.proper_status_code(); +//! // Create a useful error message as body. +//! let body = Cow::from(format!("Bad request: {err}")); +//! (code, body, err.should_close()) +//! } +//! }; +//! +//! // If we want to close the connection add the "Connection: close" +//! // header. +//! if should_close { +//! headers.append(Header::new(HeaderName::CONNECTION, b"close")); +//! } +//! +//! // Send the body as a single payload. +//! let body = OneshotBody::new(body.as_bytes()); +//! // Respond to the request. +//! connection.respond(code, &headers, body).await?; +//! +//! if should_close { +//! return Ok(()); +//! } +//! headers.clear(); +//! } +//! } +//! ``` -use std::cmp::min; use std::fmt; -use std::future::Future; use std::io::{self, Write}; -use std::mem::MaybeUninit; +use std::mem::{take, MaybeUninit}; use std::net::SocketAddr; -use std::pin::Pin; -use std::task::ready; -use std::task::{self, Poll}; use std::time::SystemTime; -use heph::bytes::{Bytes, BytesVectored}; -use heph::net::{tcp, TcpServer, TcpStream}; -use heph::{actor, rt, Actor, NewActor, Supervisor}; -use heph_rt::spawn::{ActorOptions, Spawn}; +use heph::{actor, NewActor, Supervisor}; +use heph_rt::io::{BufMut, BufMutSlice}; +use heph_rt::net::{tcp, TcpStream}; +use heph_rt::spawn::ActorOptions; use httpdate::HttpDate; use crate::body::{BodyLength, EmptyBody}; use crate::head::header::{FromHeaderValue, Header, HeaderName, Headers}; use crate::{ map_version_byte, trim_ws, Method, Request, Response, StatusCode, Version, BUF_SIZE, - MAX_HEADERS, MAX_HEAD_SIZE, MIN_READ_SIZE, + INIT_HEAD_SIZE, MAX_HEADERS, MAX_HEAD_SIZE, MIN_READ_SIZE, }; -/// A intermediate structure that implements [`NewActor`], creating -/// [`HttpServer`]. -/// -/// See [`HttpServer::setup`] to create this and [`HttpServer`] for examples. -#[derive(Debug)] -pub struct Setup { - inner: tcp::server::Setup>, -} - -impl Setup { - /// Returns the address the server is bound to. 
- pub fn local_addr(&self) -> SocketAddr { - self.inner.local_addr() - } -} - -impl NewActor for Setup -where - S: Supervisor> + Clone + 'static, - NA: NewActor + Clone + 'static, - NA::RuntimeAccess: rt::Access + Spawn, NA::RuntimeAccess>, -{ - type Message = Message; - type Argument = (); - type Actor = HttpServer; - type Error = io::Error; - type RuntimeAccess = NA::RuntimeAccess; - - fn new( - &mut self, - ctx: actor::Context, - arg: Self::Argument, - ) -> Result { - self.inner.new(ctx, arg).map(|inner| HttpServer { inner }) - } -} - -impl Clone for Setup { - fn clone(&self) -> Setup { - Setup { - inner: self.inner.clone(), - } - } -} - -/// An actor that starts a new actor for each accepted HTTP [`Connection`]. -/// -/// `HttpServer` has the same design as [`TcpServer`]. It accept `TcpStream`s -/// and converts those into HTTP [`Connection`]s, from which HTTP [`Request`]s -/// can be read and HTTP [`Response`]s can be written. -/// -/// Similar to `TcpServer` this type works with thread-safe and thread-local -/// actors. -/// -/// [`Response`]: crate::Response -/// -/// # Graceful shutdown -/// -/// Graceful shutdown is done by sending it a [`Terminate`] message. The HTTP -/// server can also handle (shutdown) process signals, see below for an example. -/// -/// [`Terminate`]: heph::messages::Terminate -/// -/// # Examples -/// -/// ```rust -/// # #![feature(never_type)] -/// use std::borrow::Cow; -/// use std::io; -/// use std::net::SocketAddr; -/// use std::time::Duration; -/// -/// use heph::actor::{self, Actor, NewActor}; -/// use heph::net::TcpStream; -/// use heph::rt::{self, Runtime, ThreadLocal}; -/// use heph::supervisor::{Supervisor, SupervisorStrategy}; -/// use heph::timer::Deadline; -/// use heph_http::body::OneshotBody; -/// use heph_http::{self as http, Header, HeaderName, Headers, HttpServer, Method, StatusCode}; -/// use heph_rt::spawn::options::{ActorOptions, Priority}; -/// use log::error; -/// -/// fn main() -> Result<(), rt::Error> { -/// // Setup the HTTP server. -/// let actor = http_actor as fn(_, _, _) -> _; -/// let address = "127.0.0.1:7890".parse().unwrap(); -/// let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) -/// .map_err(rt::Error::setup)?; -/// -/// // Build the runtime. -/// let mut runtime = Runtime::setup().use_all_cores().build()?; -/// // On each worker thread start our HTTP server. -/// runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { -/// let options = ActorOptions::default().with_priority(Priority::LOW); -/// let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; -/// -/// # server_ref.try_send(heph::messages::Terminate).unwrap(); -/// -/// // Allow graceful shutdown by responding to process signals. -/// runtime_ref.receive_signals(server_ref.try_map()); -/// Ok(()) -/// })?; -/// runtime.start() -/// } -/// -/// /// Our supervisor for the TCP server. -/// #[derive(Copy, Clone, Debug)] -/// struct ServerSupervisor; -/// -/// impl Supervisor for ServerSupervisor -/// where -/// NA: NewActor, -/// NA::Actor: Actor>, -/// { -/// fn decide(&mut self, err: http::server::Error) -> SupervisorStrategy<()> { -/// use http::server::Error::*; -/// match err { -/// Accept(err) => { -/// error!("error accepting new connection: {err}"); -/// SupervisorStrategy::Restart(()) -/// } -/// NewActor(_) => unreachable!(), -/// } -/// } +/// Create a new [server setup]. 
/// -/// fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { -/// error!("error restarting the TCP server: {err}"); -/// SupervisorStrategy::Stop -/// } +/// Arguments: +/// * `address`: the address to listen on. +/// * `supervisor`: the [`Supervisor`] used to supervise each started actor, +/// * `new_actor`: the [`NewActor`] implementation to start each actor, and +/// * `options`: the actor options used to spawn the new actors. /// -/// fn second_restart_error(&mut self, err: io::Error) { -/// error!("error restarting the actor a second time: {err}"); -/// } -/// } +/// See the [module documentation] for examples. /// -/// fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { -/// error!("error handling connection: {err}"); -/// SupervisorStrategy::Stop -/// } -/// -/// /// Our actor that handles a single HTTP connection. -/// async fn http_actor( -/// mut ctx: actor::Context, -/// mut connection: http::Connection, -/// address: SocketAddr, -/// ) -> io::Result<()> { -/// // Set `TCP_NODELAY` on the `TcpStream`. -/// connection.set_nodelay(true)?; -/// -/// let mut headers = Headers::EMPTY; -/// loop { -/// // Read the next request. -/// let (code, body, should_close) = match connection.next_request().await? { -/// Ok(Some(request)) => { -/// // Only support GET/HEAD to "/", with an empty body. -/// if request.path() != "/" { -/// (StatusCode::NOT_FOUND, "Not found".into(), false) -/// } else if !matches!(request.method(), Method::Get | Method::Head) { -/// // Add the "Allow" header to show the HTTP methods we do -/// // support. -/// headers.append(Header::new(HeaderName::ALLOW, b"GET, HEAD")); -/// let body = "Method not allowed".into(); -/// (StatusCode::METHOD_NOT_ALLOWED, body, false) -/// } else if !request.body().is_empty() { -/// (StatusCode::PAYLOAD_TOO_LARGE, "Not expecting a body".into(), true) -/// } else { -/// // Use the IP address as body. -/// let body = Cow::from(address.ip().to_string()); -/// (StatusCode::OK, body, false) -/// } -/// } -/// // No more requests. -/// Ok(None) => return Ok(()), -/// // Error parsing request. -/// Err(err) => { -/// // Determine the correct status code to return. -/// let code = err.proper_status_code(); -/// // Create a useful error message as body. -/// let body = Cow::from(format!("Bad request: {err}")); -/// (code, body, err.should_close()) -/// } -/// }; -/// -/// // If we want to close the connection add the "Connection: close" -/// // header. -/// if should_close { -/// headers.append(Header::new(HeaderName::CONNECTION, b"close")); -/// } -/// -/// // Send the body as a single payload. -/// let body = OneshotBody::new(body.as_bytes()); -/// // Respond to the request. -/// connection.respond(code, &headers, body).await?; -/// -/// if should_close { -/// return Ok(()); -/// } -/// headers.clear(); -/// } -/// } -/// ``` -pub struct HttpServer> { - inner: TcpServer>, -} - -impl HttpServer -where - S: Supervisor> + Clone + 'static, - NA: NewActor + Clone + 'static, -{ - /// Create a new [server setup]. - /// - /// Arguments: - /// * `address`: the address to listen on. - /// * `supervisor`: the [`Supervisor`] used to supervise each started actor, - /// * `new_actor`: the [`NewActor`] implementation to start each actor, - /// and - /// * `options`: the actor options used to spawn the new actors. 
-    ///
-    /// [server setup]: Setup
-    pub fn setup(
-        address: SocketAddr,
-        supervisor: S,
-        new_actor: NA,
-        options: ActorOptions,
-    ) -> io::Result> {
-        let new_actor = ArgMap { new_actor };
-        TcpServer::setup(address, supervisor, new_actor, options).map(|inner| Setup { inner })
-    }
-}
-
-impl Actor for HttpServer
+/// [server setup]: Setup
+/// [module documentation]: crate::server
+pub fn setup(
+    address: SocketAddr,
+    supervisor: S,
+    new_actor: NA,
+    options: ActorOptions,
+) -> io::Result>
 where
-    S: Supervisor> + Clone + 'static,
-    NA: NewActor + Clone + 'static,
-    NA::RuntimeAccess: rt::Access + Spawn, NA::RuntimeAccess>,
+    S: Supervisor> + Clone + 'static,
+    NA: NewActor + Clone + 'static,
 {
-    type Error = Error;
-
-    fn try_poll(
-        self: Pin<&mut Self>,
-        ctx: &mut task::Context<'_>,
-    ) -> Poll> {
-        let this = unsafe { self.map_unchecked_mut(|s| &mut s.inner) };
-        this.try_poll(ctx)
-    }
+    let new_actor = HttpNewActor { new_actor };
+    tcp::server::setup(address, supervisor, new_actor, options)
 }

-impl fmt::Debug for HttpServer
-where
-    S: fmt::Debug,
-    NA: NewActor + fmt::Debug,
-    NA::RuntimeAccess: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("HttpServer")
-            .field("inner", &self.inner)
-            .finish()
-    }
-}
+/// An intermediate structure that implements [`NewActor`], creating an actor
+/// that spawns a new actor for each incoming HTTP connection.
+///
+/// See [`setup`] to create this and the [module documentation] for examples.
+///
+/// [module documentation]: crate::server
+pub type Setup = tcp::server::Setup>;

-// TODO: better name. Like `TcpStreamToConnection`?
-/// Maps `NA` to accept `(TcpStream, SocketAddr)` as argument, creating a
-/// [`Connection`].
+/// Maps `NA` to accept `TcpStream` as argument, creating a [`Connection`].
 #[derive(Debug, Clone)]
-pub struct ArgMap {
+pub struct HttpNewActor {
     new_actor: NA,
 }

-impl NewActor for ArgMap
+impl NewActor for HttpNewActor
 where
-    NA: NewActor,
+    NA: NewActor,
 {
     type Message = NA::Message;
-    type Argument = (TcpStream, SocketAddr);
+    type Argument = TcpStream;
     type Actor = NA::Actor;
     type Error = NA::Error;
     type RuntimeAccess = NA::RuntimeAccess;
@@ -317,13 +214,13 @@ where
     fn new(
         &mut self,
         ctx: actor::Context,
-        (stream, address): Self::Argument,
+        stream: Self::Argument,
     ) -> Result {
         let conn = Connection::new(stream);
-        self.new_actor.new(ctx, (conn, address))
+        self.new_actor.new(ctx, conn)
     }

-    fn name(&self) -> &'static str {
+    fn name() -> &'static str {
         NA::name()
     }
 }
@@ -367,19 +264,9 @@ impl Connection {

     /// Parse the next request from the connection.
     ///
-    /// The return is a bit complex so let's break it down. The outer type is an
-    /// [`io::Result`], which often needs to be handled seperately from errors
-    /// in the request, e.g. by using `?`.
-    ///
-    /// Next is a `Result, `[`RequestError`]`>`.
-    /// `Ok(None)` is returned if the connection contains no more requests, i.e.
-    /// when all bytes are read. If the connection contains a request it will
-    /// return `Ok(Some(`[`Request`]`)`. If the request is somehow invalid it
-    /// will return an `Err(`[`RequestError`]`)`.
-    ///
     /// # Notes
     ///
-    /// Most [`RequestError`]s can't be receover from and the connection should
+    /// Most [`RequestError`]s can't be recovered from and the connection should
     /// be closed when hitting them, see [`RequestError::should_close`]. If the
     /// connection is not closed and `next_request` is called again it will
     /// likely return the same error (but this is not guaranteed).
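The hunks that follow change `next_request` to return a flat `Result` of an optional `Request` instead of the old nested `io::Result`. As a quick illustration of how the new shape is consumed together with the `RequestError` helpers, here is a minimal, hypothetical handler sketch; the function name `serve`, the empty 200 response, and the choice to bubble `Io` errors up are illustrative only and not part of the patch (paths are assumed from the module layout shown above):

```rust
use std::io;

use heph_http::body::EmptyBody;
use heph_http::server::{Connection, RequestError};
use heph_http::{Headers, StatusCode};

async fn serve(connection: &mut Connection) -> io::Result<()> {
    loop {
        // Decide on a response without holding on to the parsed `Request`.
        let (status, close) = match connection.next_request().await {
            // A request was parsed; a real handler would inspect it (and read
            // or reject its body) here.
            Ok(Some(_request)) => (StatusCode::OK, false),
            // The client sent no more requests, so we're done.
            Ok(None) => return Ok(()),
            // I/O failures can be bubbled up as `io::Error` via the
            // `From<RequestError> for io::Error` impl added later in this patch.
            Err(err @ RequestError::Io(_)) => return Err(err.into()),
            // Parse errors map to a status code and a close-or-not decision.
            Err(err) => (err.proper_status_code(), err.should_close()),
        };

        connection.respond(status, &Headers::EMPTY, EmptyBody).await?;
        if close {
            return Ok(());
        }
    }
}
```

A real handler would also handle the request body and set headers, as the module example above does, before responding.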
@@ -388,9 +275,7 @@ impl Connection { /// [`Connection::last_request_method`] functions to properly respond to /// request errors. #[allow(clippy::too_many_lines)] // TODO. - pub async fn next_request<'a>( - &'a mut self, - ) -> io::Result>>, RequestError>> { + pub async fn next_request<'a>(&'a mut self) -> Result>>, RequestError> { // NOTE: not resetting the version as that doesn't change between // requests. self.last_method = None; @@ -407,17 +292,15 @@ impl Connection { // while we have less than `too_short` bytes we try to receive // some more bytes. - self.clear_buffer(); - self.buf.reserve(MIN_READ_SIZE); - if self.stream.recv(&mut self.buf).await? == 0 { + if self.recv().await? { return if self.buf.is_empty() { // Read the entire stream, so we're done. - Ok(Ok(None)) + Ok(None) } else { // Couldn't read any more bytes, but we still have bytes // in the buffer. This means it contains a partial // request. - Ok(Err(RequestError::IncompleteRequest)) + Err(RequestError::IncompleteRequest) }; } } @@ -434,7 +317,7 @@ impl Connection { // ensures there all `Some`. let method = match request.method.unwrap().parse() { Ok(method) => method, - Err(_) => return Ok(Err(RequestError::UnknownMethod)), + Err(_) => return Err(RequestError::UnknownMethod), }; self.last_method = Some(method); let path = request.path.unwrap().to_string(); @@ -443,91 +326,90 @@ impl Connection { // RFC 7230 section 3.3.3 Message Body Length. let mut body_length: Option = None; - let res = Headers::from_httparse_headers(request.headers, |name, value| { - if *name == HeaderName::CONTENT_LENGTH { - // RFC 7230 section 3.3.3 point 4: - // > If a message is received without - // > Transfer-Encoding and with either multiple - // > Content-Length header fields having differing - // > field-values or a single Content-Length header - // > field having an invalid value, then the message - // > framing is invalid and the recipient MUST treat - // > it as an unrecoverable error. If this is a - // > request message, the server MUST respond with a - // > 400 (Bad Request) status code and then close - // > the connection. - if let Ok(length) = FromHeaderValue::from_bytes(value) { - match body_length.as_mut() { - Some(BodyLength::Known(body_length)) - if *body_length == length => {} - Some(BodyLength::Known(_)) => { - return Err(RequestError::DifferentContentLengths) - } - Some(BodyLength::Chunked) => { - return Err(RequestError::ContentLengthAndTransferEncoding) - } - // RFC 7230 section 3.3.3 point 5: - // > If a valid Content-Length header field - // > is present without Transfer-Encoding, - // > its decimal value defines the expected - // > message body length in octets. - None => body_length = Some(BodyLength::Known(length)), - } - } else { - return Err(RequestError::InvalidContentLength); - } - } else if *name == HeaderName::TRANSFER_ENCODING { - let mut encodings = value.split(|b| *b == b',').peekable(); - while let Some(encoding) = encodings.next() { - match trim_ws(encoding) { - b"chunked" => { - // RFC 7230 section 3.3.3 point 3: - // > If a Transfer-Encoding header field - // > is present in a request and the - // > chunked transfer coding is not the - // > final encoding, the message body - // > length cannot be determined - // > reliably; the server MUST respond - // > with the 400 (Bad Request) status - // > code and then close the connection. 
- if encodings.peek().is_some() { - return Err( - RequestError::ChunkedNotLastTransferEncoding, - ); + let headers = + Headers::from_httparse_headers(request.headers, |name, value| { + if *name == HeaderName::CONTENT_LENGTH { + // RFC 7230 section 3.3.3 point 4: + // > If a message is received without + // > Transfer-Encoding and with either multiple + // > Content-Length header fields having differing + // > field-values or a single Content-Length header + // > field having an invalid value, then the message + // > framing is invalid and the recipient MUST treat + // > it as an unrecoverable error. If this is a + // > request message, the server MUST respond with a + // > 400 (Bad Request) status code and then close + // > the connection. + if let Ok(length) = FromHeaderValue::from_bytes(value) { + match body_length.as_mut() { + Some(BodyLength::Known(body_length)) + if *body_length == length => {} + Some(BodyLength::Known(_)) => { + return Err(RequestError::DifferentContentLengths) } - - // RFC 7230 section 3.3.3 point 3: - // > If a message is received with both - // > a Transfer-Encoding and a - // > Content-Length header field, the - // > Transfer-Encoding overrides the - // > Content-Length. Such a message - // > might indicate an attempt to - // > perform request smuggling (Section - // > 9.5) or response splitting (Section - // > 9.4) and ought to be handled as an - // > error. - if body_length.is_some() { + Some(BodyLength::Chunked) => { return Err( RequestError::ContentLengthAndTransferEncoding, - ); + ) } - - body_length = Some(BodyLength::Chunked); + // RFC 7230 section 3.3.3 point 5: + // > If a valid Content-Length header field + // > is present without Transfer-Encoding, + // > its decimal value defines the expected + // > message body length in octets. + None => body_length = Some(BodyLength::Known(length)), + } + } else { + return Err(RequestError::InvalidContentLength); + } + } else if *name == HeaderName::TRANSFER_ENCODING { + let mut encodings = value.split(|b| *b == b',').peekable(); + while let Some(encoding) = encodings.next() { + match trim_ws(encoding) { + b"chunked" => { + // RFC 7230 section 3.3.3 point 3: + // > If a Transfer-Encoding header field + // > is present in a request and the + // > chunked transfer coding is not the + // > final encoding, the message body + // > length cannot be determined + // > reliably; the server MUST respond + // > with the 400 (Bad Request) status + // > code and then close the connection. + if encodings.peek().is_some() { + return Err( + RequestError::ChunkedNotLastTransferEncoding, + ); + } + + // RFC 7230 section 3.3.3 point 3: + // > If a message is received with both + // > a Transfer-Encoding and a + // > Content-Length header field, the + // > Transfer-Encoding overrides the + // > Content-Length. Such a message + // > might indicate an attempt to + // > perform request smuggling (Section + // > 9.5) or response splitting (Section + // > 9.4) and ought to be handled as an + // > error. + if body_length.is_some() { + return Err( + RequestError::ContentLengthAndTransferEncoding, + ); + } + + body_length = Some(BodyLength::Chunked); + } + b"identity" => {} // No changes. + // TODO: support "compress", "deflate" and + // "gzip". + _ => return Err(RequestError::UnsupportedTransferEncoding), } - b"identity" => {} // No changes. - // TODO: support "compress", "deflate" and - // "gzip". 
- _ => return Err(RequestError::UnsupportedTransferEncoding), } } - } - Ok(()) - }); - let headers = match res { - Ok(headers) => headers, - Err(err) => return Ok(Err(err)), - }; + Ok(()) + })?; let kind = match body_length { Some(BodyLength::Known(left)) => BodyKind::Oneshot { left }, @@ -547,7 +429,7 @@ impl Connection { left_in_chunk: 0, read_complete: false, }, - Err(_) => return Ok(Err(RequestError::InvalidChunkSize)), + Err(_) => return Err(RequestError::InvalidChunkSize), } } // RFC 7230 section 3.3.3 point 6: @@ -557,7 +439,7 @@ impl Connection { None => BodyKind::Oneshot { left: 0 }, }; let body = Body { conn: self, kind }; - return Ok(Ok(Some(Request::new(method, path, version, headers, body)))); + return Ok(Some(Request::new(method, path, version, headers, body))); } Ok(httparse::Status::Partial) => { // Buffer doesn't include the entire request head, try @@ -569,12 +451,12 @@ impl Connection { } if too_short >= MAX_HEAD_SIZE { - return Ok(Err(RequestError::HeadTooLarge)); + return Err(RequestError::HeadTooLarge); } continue; } - Err(err) => return Ok(Err(RequestError::from_httparse(err))), + Err(err) => return Err(RequestError::from_httparse(err)), } } } @@ -652,14 +534,14 @@ impl Connection { /// /// See the notes for [`Connection::send_response`], they apply to this /// function also. - pub async fn respond<'b, B>( + pub async fn respond( &mut self, status: StatusCode, headers: &Headers, body: B, ) -> io::Result<()> where - B: crate::Body<'b>, + B: crate::Body, { let req_method = self.last_method.unwrap_or(Method::Get); let version = self.last_version.unwrap_or(Version::Http11).highest_minor(); @@ -670,9 +552,9 @@ impl Connection { /// Respond to the last parsed request with `response`. /// /// See [`Connection::respond`] for more documentation. - pub async fn respond_with<'b, B>(&mut self, response: Response) -> io::Result<()> + pub async fn respond_with(&mut self, response: Response) -> io::Result<()> where - B: crate::Body<'b>, + B: crate::Body, { let (head, body) = response.split(); self.respond(head.status(), head.headers(), body).await @@ -701,7 +583,7 @@ impl Connection { /// /// [`expects_body()`]: Method::expects_body /// [`includes_body()`]: StatusCode::includes_body - pub async fn send_response<'b, B>( + pub async fn send_response( &mut self, request_method: Method, // Response data: @@ -711,23 +593,29 @@ impl Connection { body: B, ) -> io::Result<()> where - B: crate::Body<'b>, + B: crate::Body, { let mut itoa_buf = itoa::Buffer::new(); - // Clear bytes from the previous request, keeping the bytes of the - // request. + // Clear bytes from the previous request, keeping the bytes of any + // unprocessed request(s). self.clear_buffer(); - let ignore_end = self.buf.len(); + + // If the read buffer is empty we can use, otherwise we need to create a + // new buffer to ensure we don't lose bytes. + let mut http_head = if self.buf.is_empty() { + take(&mut self.buf) + } else { + Vec::with_capacity(INIT_HEAD_SIZE) + }; // Format the status-line (RFC 7230 section 3.1.2). - self.buf.extend_from_slice(version.as_str().as_bytes()); - self.buf.push(b' '); - self.buf - .extend_from_slice(itoa_buf.format(status.0).as_bytes()); + http_head.extend_from_slice(version.as_str().as_bytes()); + http_head.push(b' '); + http_head.extend_from_slice(itoa_buf.format(status.0).as_bytes()); // NOTE: we're not sending a reason-phrase, but the space is required // before \r\n. 
- self.buf.extend_from_slice(b" \r\n"); + http_head.extend_from_slice(b" \r\n"); // Format the headers (RFC 7230 section 3.2). let mut set_connection_header = false; @@ -737,13 +625,13 @@ impl Connection { for header in headers.iter() { let name = header.name(); // Field-name: - self.buf.extend_from_slice(name.as_ref().as_bytes()); + http_head.extend_from_slice(name.as_ref().as_bytes()); // NOTE: spacing after the colon (`:`) is optional. - self.buf.extend_from_slice(b": "); + http_head.extend_from_slice(b": "); // Append the header's value. // NOTE: `header.value` shouldn't contain CRLF (`\r\n`). - self.buf.extend_from_slice(header.value()); - self.buf.extend_from_slice(b"\r\n"); + http_head.extend_from_slice(header.value()); + http_head.extend_from_slice(b"\r\n"); if name == &HeaderName::CONNECTION { set_connection_header = true; @@ -761,13 +649,13 @@ impl Connection { // Per RFC 7230 section 6.3, HTTP/1.0 needs the "Connection: // keep-alive" header to persistent the connection. Connections // using HTTP/1.1 persistent by default. - self.buf.extend_from_slice(b"Connection: keep-alive\r\n"); + http_head.extend_from_slice(b"Connection: keep-alive\r\n"); } // Provide the "Date" header if the user didn't. if !set_date_header { let now = HttpDate::from(SystemTime::now()); - write!(&mut self.buf, "Date: {now}\r\n").unwrap(); + write!(&mut http_head, "Date: {now}\r\n").unwrap(); } // Provide the "Conent-Length" or "Transfer-Encoding" header if the user @@ -777,31 +665,33 @@ impl Connection { match body.length() { _ if !request_method.expects_body() || !status.includes_body() => { send_body = false; - extend_content_length_header(&mut self.buf, &mut itoa_buf, 0) + extend_content_length_header(&mut http_head, &mut itoa_buf, 0) } BodyLength::Known(length) => { - extend_content_length_header(&mut self.buf, &mut itoa_buf, length) + extend_content_length_header(&mut http_head, &mut itoa_buf, length) } BodyLength::Chunked => { - self.buf - .extend_from_slice(b"Transfer-Encoding: chunked\r\n"); + http_head.extend_from_slice(b"Transfer-Encoding: chunked\r\n"); } } } // End of the HTTP head. - self.buf.extend_from_slice(b"\r\n"); + http_head.extend_from_slice(b"\r\n"); // Write the response to the stream. - let http_head = &self.buf[ignore_end..]; - if send_body { - body.write_message(&mut self.stream, http_head).await?; + let mut http_head = if send_body { + body.write_message(&mut self.stream, http_head).await? } else { - self.stream.send_all(http_head).await?; + self.stream.send_all(http_head).await? + }; + + if self.buf.is_empty() { + // We used the read buffer so let's put it back. + http_head.clear(); + self.buf = http_head; } - // Remove the response head from the buffer. - self.buf.truncate(ignore_end); Ok(()) } @@ -815,16 +705,6 @@ impl Connection { self.stream.local_addr() } - /// See [`TcpStream::set_ttl`]. - pub fn set_ttl(&mut self, ttl: u32) -> io::Result<()> { - self.stream.set_ttl(ttl) - } - - /// See [`TcpStream::ttl`]. - pub fn ttl(&mut self) -> io::Result { - self.stream.ttl() - } - /// See [`TcpStream::set_nodelay`]. pub fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { self.stream.set_nodelay(nodelay) @@ -835,57 +715,12 @@ impl Connection { self.stream.nodelay() } - /// See [`TcpStream::keepalive`]. - pub fn keepalive(&self) -> io::Result { - self.stream.keepalive() - } - - /// See [`TcpStream::set_keepalive`]. - pub fn set_keepalive(&self, enable: bool) -> io::Result<()> { - self.stream.set_keepalive(enable) - } - - /// Clear parsed request(s) from the buffer. 
- fn clear_buffer(&mut self) { - let buf_len = self.buf.len(); - if self.parsed_bytes >= buf_len { - // Parsed all bytes in the buffer, so we can clear it. - self.buf.clear(); - self.parsed_bytes -= buf_len; - } - - // TODO: move bytes to the start. - } - - /// Recv bytes from the underlying stream, reading into `self.buf`. - /// - /// Returns an `UnexpectedEof` error if zero bytes are received. - fn try_recv(&mut self) -> Poll> { - // Ensure we have space in the buffer to read into. - self.clear_buffer(); - self.buf.reserve(MIN_READ_SIZE); - - loop { - match self.stream.try_recv(&mut self.buf) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into())), - Ok(n) => return Poll::Ready(Ok(n)), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Poll::Pending, - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - } - - /// Read a HTTP body chunk. - /// - /// Returns an I/O error, or an `InvalidData` error if the chunk size is - /// invalid. - fn try_read_chunk( + async fn read_chunk( &mut self, // Fields of `BodyKind::Chunked`: left_in_chunk: &mut usize, read_complete: &mut bool, - ) -> Poll> { + ) -> Result<(), RequestError> { loop { match httparse::parse_chunk_size(&self.buf[self.parsed_bytes..]) { #[allow(clippy::cast_possible_truncation)] // For truncate below. @@ -897,57 +732,39 @@ impl Connection { // FIXME: add check here. It's fine on 64 bit (only currently // supported). *left_in_chunk = chunk_size as usize; - return Poll::Ready(Ok(())); + return Ok(()); } Ok(httparse::Status::Partial) => {} // Read some more data below. - Err(_) => { - return Poll::Ready(Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk size", - ))) - } + Err(_) => return Err(RequestError::InvalidChunkSize), } - let _ = ready!(self.try_recv())?; + if self.recv().await? { + return Err(RequestError::IncompleteRequest); + } } } - async fn read_chunk( - &mut self, - // Fields of `BodyKind::Chunked`: - left_in_chunk: &mut usize, - read_complete: &mut bool, - ) -> io::Result<()> { - loop { - match httparse::parse_chunk_size(&self.buf[self.parsed_bytes..]) { - #[allow(clippy::cast_possible_truncation)] // For truncate below. - Ok(httparse::Status::Complete((idx, chunk_size))) => { - self.parsed_bytes += idx; - if chunk_size == 0 { - *read_complete = true; - } - // FIXME: add check here. It's fine on 64 bit (only currently - // supported). - *left_in_chunk = chunk_size as usize; - return Ok(()); - } - Ok(httparse::Status::Partial) => {} // Read some more data below. - Err(_) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid chunk size", - )) - } - } + /// Returns true if we read all bytes (i.e. we read 0 bytes). + async fn recv(&mut self) -> io::Result { + // Ensure we have space in the buffer to read into. + self.clear_buffer(); + self.buf.reserve(MIN_READ_SIZE); - // Ensure we have space in the buffer to read into. - self.clear_buffer(); - self.buf.reserve(MIN_READ_SIZE); + let buf_len = self.buf.len(); + self.buf = self.stream.recv(take(&mut self.buf)).await?; + Ok(self.buf.len() == buf_len) + } - if self.stream.recv(&mut self.buf).await? == 0 { - return Err(io::ErrorKind::UnexpectedEof.into()); - } + /// Clear parsed request(s) from the buffer. + fn clear_buffer(&mut self) { + let buf_len = self.buf.len(); + if self.parsed_bytes >= buf_len { + // Parsed all bytes in the buffer, so we can clear it. 
+ self.buf.clear(); + self.parsed_bytes = 0; } + + // TODO: move bytes to the start. } } @@ -991,7 +808,7 @@ enum BodyKind { } impl<'a> Body<'a> { - /// Returns the length of the body (in bytes) *left*, or a + /// Returns the length of the body (in bytes) *left*. /// /// Calling this before [`recv`] or [`recv_vectored`] will return the /// original body length, after removing bytes from the body this will @@ -1009,15 +826,6 @@ impl<'a> Body<'a> { } } - /// Return the length of this chunk *left*, or the entire body in case of a - /// oneshot body. - fn chunk_len(&self) -> usize { - match self.kind { - BodyKind::Oneshot { left } => left, - BodyKind::Chunked { left_in_chunk, .. } => left_in_chunk, - } - } - /// Returns `true` if the body is completely read (or was empty to begin /// with). /// @@ -1047,77 +855,100 @@ impl<'a> Body<'a> { } /// Receive bytes from the request body, writing them into `buf`. - pub const fn recv(&'a mut self, buf: B) -> Recv<'a, B> - where - B: Bytes, - { - Recv { body: self, buf } - } - - /// Receive bytes from the request body, writing them into `bufs`. - pub const fn recv_vectored(&'a mut self, bufs: B) -> RecvVectored<'a, B> - where - B: BytesVectored, - { - RecvVectored { body: self, bufs } - } - - /// Read the entire body into `buf`, up to `limit` bytes. - /// - /// If the body is larger then `limit` bytes it return an `io::Error`. - pub async fn read_all(&mut self, buf: &mut Vec, limit: usize) -> io::Result<()> { - let mut total = 0; + pub async fn recv(&mut self, mut buf: B) -> io::Result { loop { - // Copy bytes in our buffer. - let bytes = self.buf_bytes(); - let len = bytes.len(); - if limit < total + len { - return Err(io::Error::new(io::ErrorKind::Other, "body too large")); + // Quick return for if we read all bytes in the body already. + if self.is_empty() { + return Ok(buf); } - buf.extend_from_slice(bytes); - self.processed(len); - total += len; - - let chunk_len = self.chunk_len(); - if chunk_len == 0 { - match &mut self.kind { - // Read all the bytes from the oneshot body. - BodyKind::Oneshot { .. } => return Ok(()), - // Read all the bytes in the chunk, so need to read another - // chunk. - BodyKind::Chunked { - left_in_chunk, - read_complete, - } => { - if *read_complete { - return Ok(()); - } + // First try to copy already buffered bytes. + let buf_bytes = self.buf_bytes(); + if !buf_bytes.is_empty() { + let written = buf.extend_from_slice(buf_bytes); + self.processed(written); + return Ok(buf); + } + // We need to ensure that we don't read another response head or + // chunk head into `buf`. So we need to determine a limit on the + // amount of bytes we can safely read. We only can't determine that + // for the case were we read an entire chunk, but don't know + // anything about the next chunk. In this case we need our own + // buffer to ensure we don't lose not-body bytes to the user's + // `buf`fer. + let limit = match &mut self.kind { + BodyKind::Oneshot { left } => *left, + BodyKind::Chunked { + left_in_chunk, + read_complete, + } => { + if *left_in_chunk != 0 { + *left_in_chunk + } else { self.conn.read_chunk(left_in_chunk, read_complete).await?; - // Copy read bytes again. + // Read from the client's buffer again. continue; } } - } - // Continue to reading below. 
- break; + }; + + let len_before = buf.spare_capacity(); + let limited_buf = self.conn.stream.recv(buf.limit(limit)).await?; + let buf = limited_buf.into_inner(); + self.processed(buf.spare_capacity() - len_before); + return Ok(buf); } + } + /// Receive bytes from the request body, writing them into `bufs` using + /// vectored I/O. + pub async fn recv_vectored, const N: usize>( + &mut self, + mut bufs: B, + ) -> io::Result { loop { - // Limit the read until the end of the chunk/body. - let chunk_len = self.chunk_len(); - if chunk_len == 0 { - return Ok(()); - } else if total + chunk_len > limit { - return Err(io::Error::new(io::ErrorKind::Other, "body too large")); + // Quick return for if we read all bytes in the body already. + if self.is_empty() { + return Ok(bufs); } - (&mut *buf).reserve(chunk_len); - self.conn.stream.recv_n(&mut *buf, chunk_len).await?; - total += chunk_len; + // First try to copy already buffered bytes. + let buf_bytes = self.buf_bytes(); + if !buf_bytes.is_empty() { + let written = bufs.extend_from_slice(buf_bytes); + self.processed(written); + return Ok(bufs); + } + + // We need to ensure that we don't read another response head or + // chunk head into `buf`. So we need to determine a limit on the + // amount of bytes we can safely read. We only can't determine that + // for the case were we read an entire chunk, but don't know + // anything about the next chunk. In this case we need our own + // buffer to ensure we don't lose not-body bytes to the user's + // `buf`fer. + let limit = match &mut self.kind { + BodyKind::Oneshot { left } => *left, + BodyKind::Chunked { + left_in_chunk, + read_complete, + } => { + if *left_in_chunk != 0 { + *left_in_chunk + } else { + self.conn.read_chunk(left_in_chunk, read_complete).await?; + // Read from the client's buffer again. + continue; + } + } + }; - // FIXME: doesn't deal with chunked bodies. + let len_before = bufs.total_spare_capacity(); + let limited_bufs = self.conn.stream.recv_vectored(bufs.limit(limit)).await?; + let bufs = limited_bufs.into_inner(); + self.processed(bufs.total_spare_capacity() - len_before); + return Ok(bufs); } } @@ -1138,20 +969,6 @@ impl<'a> Body<'a> { } } - /// Copy already read bytes. - /// - /// Same as [`Body::buf_bytes`] this is limited to the bytes of this - /// request/chunk, i.e. it doesn't contain the next request/chunk. - fn copy_buf_bytes(&mut self, dst: &mut [MaybeUninit]) -> usize { - let bytes = self.buf_bytes(); - let len = min(bytes.len(), dst.len()); - if len != 0 { - let _ = MaybeUninit::write_slice(&mut dst[..len], &bytes[..len]); - self.processed(len); - } - len - } - /// Mark `n` bytes are processed. fn processed(&mut self, n: usize) { // TODO: should this be `unsafe`? We don't do underflow checks... @@ -1163,264 +980,6 @@ impl<'a> Body<'a> { } } -/// The [`Future`] behind [`Body::recv`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Recv<'b, B> { - body: &'b mut Body<'b>, - buf: B, -} - -impl<'b, B> Future for Recv<'b, B> -where - B: Bytes + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let Recv { body, buf } = Pin::into_inner(self); - - let mut len = 0; - loop { - // Copy bytes in our buffer. - len += body.copy_buf_bytes(buf.as_bytes()); - if len != 0 { - unsafe { buf.update_length(len) }; - } - - let limit = body.chunk_len(); - if limit == 0 { - match &mut body.kind { - // Read all the bytes from the oneshot body. - BodyKind::Oneshot { .. 
} => return Poll::Ready(Ok(len)), - // Read all the bytes in the chunk, so need to read another - // chunk. - BodyKind::Chunked { - left_in_chunk, - read_complete, - } => { - ready!(body.conn.try_read_chunk(left_in_chunk, read_complete))?; - // Copy read bytes again. - continue; - } - } - } - // Continue to reading below. - break; - } - - // Read from the stream if there is space left. - if buf.has_spare_capacity() { - // Limit the read until the end of the chunk/body. - let limit = body.chunk_len(); - loop { - match body.conn.stream.try_recv(buf.limit(limit)) { - Ok(n) => return Poll::Ready(Ok(len + n)), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return if len == 0 { - Poll::Pending - } else { - Poll::Ready(Ok(len)) - } - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - } else { - Poll::Ready(Ok(len)) - } - } -} - -/// The [`Future`] behind [`Body::recv_vectored`]. -#[derive(Debug)] -#[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct RecvVectored<'b, B> { - body: &'b mut Body<'b>, - bufs: B, -} - -impl<'b, B> Future for RecvVectored<'b, B> -where - B: BytesVectored + Unpin, -{ - type Output = io::Result; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let RecvVectored { body, bufs } = Pin::into_inner(self); - - let mut len = 0; - loop { - // Copy bytes in our buffer. - for buf in bufs.as_bufs().as_mut() { - match body.copy_buf_bytes(buf) { - 0 => break, - n => len += n, - } - } - if len != 0 { - unsafe { bufs.update_lengths(len) }; - } - - let limit = body.chunk_len(); - if limit == 0 { - match &mut body.kind { - // Read all the bytes from the oneshot body. - BodyKind::Oneshot { .. } => return Poll::Ready(Ok(len)), - // Read all the bytes in the chunk, so need to read another - // chunk. - BodyKind::Chunked { - left_in_chunk, - read_complete, - } => { - ready!(body.conn.try_read_chunk(left_in_chunk, read_complete))?; - // Copy read bytes again. - continue; - } - } - } - // Continue to reading below. - break; - } - - // Read from the stream if there is space left. - if bufs.has_spare_capacity() { - // Limit the read until the end of the chunk/body. - let limit = body.chunk_len(); - loop { - match body.conn.stream.try_recv_vectored(bufs.limit(limit)) { - Ok(n) => return Poll::Ready(Ok(len + n)), - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return if len == 0 { - Poll::Pending - } else { - Poll::Ready(Ok(len)) - } - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - } else { - Poll::Ready(Ok(len)) - } - } -} - -impl<'a> crate::Body<'a> for Body<'a> { - fn length(&self) -> BodyLength { - self.len() - } -} - -mod private { - use std::future::Future; - use std::io; - use std::pin::Pin; - use std::task::{self, ready, Poll}; - - use heph::net::TcpStream; - - use super::{Body, BodyKind}; - - #[derive(Debug)] - pub struct SendBody<'c, 's, 'h> { - pub(super) body: Body<'c>, - /// Stream we're writing the body to. - pub(super) stream: &'s mut TcpStream, - /// HTTP head for the response. - pub(super) head: &'h [u8], - } - - impl<'c, 's, 'h> Future for SendBody<'c, 's, 'h> { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll { - let SendBody { body, stream, head } = Pin::into_inner(self); - - // Send the HTTP head first. - // TODO: try to use vectored I/O on first call. 
- while !head.is_empty() { - match stream.try_send(*head) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => *head = &head[n..], - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - } - - while !body.is_empty() { - let limit = body.chunk_len(); - let bytes = body.buf_bytes(); - let bytes = if bytes.len() > limit { - &bytes[..limit] - } else { - bytes - }; - // TODO: maybe read first if we have less then N bytes? - if !bytes.is_empty() { - match stream.try_send(bytes) { - Ok(0) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Ok(n) => { - body.processed(n); - continue; - } - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { - return Poll::Pending - } - Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue, - Err(err) => return Poll::Ready(Err(err)), - } - // NOTE: we don't continue here, we always return on start - // the next iteration of the loop. - } - - // Read some more data, or the next chunk. - match &mut body.kind { - BodyKind::Oneshot { .. } => { - let _ = ready!(body.conn.try_recv())?; - } - BodyKind::Chunked { - left_in_chunk, - read_complete, - } => { - if *left_in_chunk == 0 { - ready!(body.conn.try_read_chunk(left_in_chunk, read_complete))?; - } else { - let _ = ready!(body.conn.try_recv())?; - } - } - } - } - - Poll::Ready(Ok(())) - } - } -} - -impl<'c> crate::body::PrivateBody<'c> for Body<'c> { - type WriteBody<'s, 'h> = private::SendBody<'c, 's, 'h>; - - fn write_message<'s, 'h>( - self, - stream: &'s mut TcpStream, - head: &'h [u8], - ) -> Self::WriteBody<'s, 'h> - where - 'c: 'h, - { - private::SendBody { - body: self, - stream, - head, - } - } -} - impl<'a> Drop for Body<'a> { fn drop(&mut self) { if self.is_empty() { @@ -1451,7 +1010,7 @@ impl<'a> Drop for Body<'a> { /// Error parsing HTTP request. #[non_exhaustive] -#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Debug)] pub enum RequestError { /// Missing part of request. IncompleteRequest, @@ -1495,11 +1054,13 @@ pub enum RequestError { UnknownMethod, /// Chunk size is invalid. InvalidChunkSize, + /// I/O error. + Io(io::Error), } impl RequestError { /// Returns the proper status code for a given error. - pub const fn proper_status_code(self) -> StatusCode { + pub const fn proper_status_code(&self) -> StatusCode { use RequestError::*; // See the parsing code for various references to the RFC(s) that // determine the values here. @@ -1516,7 +1077,7 @@ impl RequestError { | InvalidToken | InvalidNewLine | InvalidVersion - | InvalidChunkSize=> StatusCode::BAD_REQUEST, + | InvalidChunkSize => StatusCode::BAD_REQUEST, // RFC 7230 section 3.3.1: // > A server that receives a request message with a transfer coding // > it does not understand SHOULD respond with 501 (Not @@ -1527,12 +1088,13 @@ impl RequestError { // > implemented by an origin server, the origin server SHOULD // > respond with the 501 (Not Implemented) status code. | UnknownMethod => StatusCode::NOT_IMPLEMENTED, + Io(_) => StatusCode::INTERNAL_SERVER_ERROR, } } /// Returns `true` if the connection should be closed based on the error /// (after sending a error response). - pub const fn should_close(self) -> bool { + pub const fn should_close(&self) -> bool { use RequestError::*; // See the parsing code for various references to the RFC(s) that // determine the values here. 
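Ahead of the `From` conversions added a little further down in this file, a short hedged sketch of the ergonomics they buy: an actor or helper that returns `io::Result` can use `?` directly on `next_request`, because `RequestError::Io` unwraps back to the original `io::Error` and parse errors surface as `InvalidData`. The helper name `first_request_path` is illustrative only:

```rust
use std::io;

use heph_http::server::Connection;

/// Returns the path of the next request, or `None` once the client is done.
/// Any `RequestError` is converted into an `io::Error` by the `?` operator.
async fn first_request_path(conn: &mut Connection) -> io::Result<Option<String>> {
    match conn.next_request().await? {
        Some(request) => Ok(Some(request.path().to_owned())),
        None => Ok(None),
    }
}
```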
@@ -1550,7 +1112,8 @@ impl RequestError { | InvalidToken | InvalidNewLine | InvalidVersion - | InvalidChunkSize => true, + | InvalidChunkSize + | Io(_) => true, UnknownMethod => false, } } @@ -1561,7 +1124,7 @@ impl RequestError { /// [proper status code]: RequestError::proper_status_code /// [Connection]: HeaderName::CONNECTION /// [connection should be closed]: RequestError::should_close - pub fn response(self) -> Response { + pub fn response(&self) -> Response { let mut response = Response::build_new(self.proper_status_code()); if self.should_close() { response @@ -1580,16 +1143,17 @@ impl RequestError { NewLine => RequestError::InvalidNewLine, Version => RequestError::InvalidVersion, TooManyHeaders => RequestError::TooManyHeaders, - // SAFETY: request never contain a status, only responses do. - Status => unreachable!(), + // Requests never contain a status, only responses do, but we don't + // want a panic branch (from `unreachable!`) here. + Status => RequestError::IncompleteRequest, } } -} -impl fmt::Display for RequestError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + + #[rustfmt::skip] + fn as_str(&self) -> &'static str { use RequestError::*; - f.write_str(match self { + match self { IncompleteRequest => "incomplete request", HeadTooLarge => "head too large", InvalidContentLength => "invalid Content-Length header", @@ -1606,16 +1170,45 @@ impl fmt::Display for RequestError { InvalidVersion => "invalid version", UnknownMethod => "unknown method", InvalidChunkSize => "invalid chunk size", - }) + Io(_) => "I/O error", + } + } +} + +impl From for RequestError { + fn from(err: io::Error) -> RequestError { + if let io::ErrorKind::UnexpectedEof = err.kind() { + RequestError::IncompleteRequest + } else { + RequestError::Io(err) + } + } +} + +impl From for io::Error { + fn from(err: RequestError) -> io::Error { + match err { + RequestError::Io(err) => err, + err => io::Error::new(io::ErrorKind::InvalidData, err.as_str()), + } + } +} + +impl fmt::Display for RequestError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RequestError::Io(err) => err.fmt(f), + err => err.as_str().fmt(f), + } } } -/// The message type used by [`HttpServer`] (and [`TcpServer`]). +/// The message type used by the HTTP server. /// #[doc(inline)] -pub use heph::net::tcp::server::Message; +pub use heph_rt::net::tcp::server::Message; -/// Error returned by [`HttpServer`] (and [`TcpServer`]). +/// Error returned by the HTTP server. /// #[doc(inline)] -pub use heph::net::tcp::server::Error; +pub use heph_rt::net::tcp::server::Error; diff --git a/http/tests/functional.rs b/http/tests/functional.rs index 09640ad3a..956459353 100644 --- a/http/tests/functional.rs +++ b/http/tests/functional.rs @@ -1,6 +1,6 @@ //! Functional tests. 
-#![feature(async_stream, never_type, const_weak_new)] +#![feature(async_iterator, never_type, const_weak_new)] use std::mem::size_of; diff --git a/http/tests/functional/body.rs b/http/tests/functional/body.rs index 4f8af7c4c..ac54197b8 100644 --- a/http/tests/functional/body.rs +++ b/http/tests/functional/body.rs @@ -1,12 +1,9 @@ -use std::fs::File; -use std::io; +use std::async_iter::AsyncIterator; use std::mem::replace; -use std::num::NonZeroUsize; use std::pin::Pin; -use std::stream::Stream; use std::task::{self, Poll}; -use heph_http::body::*; +use heph_http::body::{Body, BodyLength, ChunkedBody, EmptyBody, OneshotBody, StreamingBody}; use crate::{assert_send, assert_size, assert_sync}; @@ -15,8 +12,8 @@ const BODY1: &[u8] = b"Hello world!"; struct EmptyStream; -impl Stream for EmptyStream { - type Item = io::Result<&'static [u8]>; +impl AsyncIterator for EmptyStream { + type Item = &'static [u8]; fn poll_next(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(None) @@ -25,12 +22,12 @@ impl Stream for EmptyStream { struct SingleStream<'a>(&'a [u8]); -impl<'a> Stream for SingleStream<'a> { - type Item = io::Result<&'a [u8]>; +impl<'a> AsyncIterator for SingleStream<'a> { + type Item = &'a [u8]; fn poll_next(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll> { if !self.0.is_empty() { - Poll::Ready(Some(Ok(replace(&mut self.0, &[])))) + Poll::Ready(Some(replace(&mut self.0, &[]))) } else { Poll::Ready(None) } @@ -41,8 +38,7 @@ impl<'a> Stream for SingleStream<'a> { fn size() { assert_size::>(0); assert_size::(0); - assert_size::>(24); - assert_size::(16); + assert_size::>(16); assert_size::>(8); } @@ -50,8 +46,7 @@ fn size() { fn send() { assert_send::>(); assert_send::(); - assert_send::>(); - assert_send::(); + assert_send::>(); assert_send::>(); } @@ -59,8 +54,7 @@ fn send() { fn sync() { assert_sync::>(); assert_sync::(); - assert_sync::>(); - assert_sync::(); + assert_sync::>(); assert_sync::>(); } @@ -69,24 +63,6 @@ fn empty_body() { assert_eq!(EmptyBody.length(), BodyLength::Known(0)); } -#[test] -fn oneshot_bytes() { - let body = OneshotBody::new(BODY1); - assert_eq!(body.bytes(), BODY1); -} - -#[test] -fn oneshot_cmp_bytes() { - let body = OneshotBody::new(BODY1); - assert_eq!(body, BODY1); -} - -#[test] -fn oneshot_cmp_string() { - let body = OneshotBody::new(BODY1); - assert_eq!(body, "Hello world!"); -} - #[test] fn oneshot_body() { assert_eq!( @@ -94,10 +70,10 @@ fn oneshot_body() { BodyLength::Known(BODY0.len()) ); assert_eq!( - OneshotBody::from(BODY1).length(), + OneshotBody::new(BODY1).length(), BodyLength::Known(BODY1.len()) ); - assert_eq!(OneshotBody::from("abc").length(), BodyLength::Known(3)); + assert_eq!(OneshotBody::new("abc").length(), BodyLength::Known(3)); } #[test] @@ -117,19 +93,6 @@ fn streaming_body() { ); } -#[test] -fn file_body() { - let file = File::open("Cargo.toml").unwrap(); - assert_eq!( - FileBody::new(&file, 0, NonZeroUsize::new(10).unwrap()).length(), - BodyLength::Known(10) - ); - assert_eq!( - FileBody::new(&file, 5, NonZeroUsize::new(10).unwrap()).length(), - BodyLength::Known(5) - ); -} - #[test] fn chunked_body() { assert_eq!(ChunkedBody::new(EmptyStream).length(), BodyLength::Chunked); diff --git a/http/tests/functional/client.rs b/http/tests/functional/client.rs index 12a31438e..280bbe98f 100644 --- a/http/tests/functional/client.rs +++ b/http/tests/functional/client.rs @@ -3,7 +3,6 @@ use std::borrow::Cow; use std::io::{self, Read, Write}; use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream}; -use 
std::sync::LazyLock; use std::sync::{Arc, Condvar, Mutex, Weak}; use std::task::Poll; use std::thread::{self, sleep}; @@ -11,14 +10,14 @@ use std::time::{Duration, SystemTime}; use std::{fmt, str}; use heph::messages::Terminate; -use heph::rt::{self, Runtime, ThreadSafe}; -use heph::test::{init_actor, poll_actor}; use heph::{actor, Actor, ActorRef, NewActor, Supervisor, SupervisorStrategy}; use heph_http::body::{EmptyBody, OneshotBody}; use heph_http::client::{Client, ResponseError}; -use heph_http::server::{HttpServer, RequestError}; +use heph_http::server::RequestError; use heph_http::{self as http, Header, HeaderName, Headers, Method, Response, StatusCode, Version}; use heph_rt::spawn::options::{ActorOptions, Priority}; +use heph_rt::test::{init_actor, poll_actor}; +use heph_rt::{Runtime, ThreadSafe}; use httpdate::fmt_http_date; const USER_AGENT: &[u8] = b"Heph-HTTP/0.1.0"; @@ -36,10 +35,10 @@ macro_rules! with_test_server { fn get() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -74,13 +73,12 @@ fn get() { fn get_no_response() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client.get("/").await.unwrap_err(); - assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof); - assert_eq!(err.to_string(), "no HTTP response"); + assert_eq!(err, ResponseError::IncompleteResponse); Ok(()) } @@ -110,13 +108,12 @@ fn get_no_response() { fn get_invalid_response() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client.get("/").await.unwrap_err(); - assert_eq!(err.kind(), io::ErrorKind::InvalidData); - assert_eq!(err.to_string(), "invalid HTTP response status"); + assert_eq!(err, ResponseError::InvalidStatus); Ok(()) } @@ -148,14 +145,14 @@ fn get_invalid_response() { fn request_with_headers() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let headers = Headers::from([Header::new(HeaderName::HOST, b"localhost")]); let response = client .request(Method::Get, "/", &headers, EmptyBody) - .await? 
+ .await .unwrap(); let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -193,14 +190,14 @@ fn request_with_headers() { fn request_with_user_agent_header() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let headers = Headers::from([Header::new(HeaderName::USER_AGENT, b"my-user-agent")]); let response = client .request(Method::Get, "/", &headers, EmptyBody) - .await? + .await .unwrap(); let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -232,29 +229,22 @@ fn request_with_user_agent_header() { } /* FIXME: The following tests have the following problem: -error: implementation of `body::private::PrivateBody` is not general enough - --> http/tests/functional/client.rs:255:48 - | -255 | let (mut stream, handle) = test_server.accept(|address| { - | ^^^^^^ implementation of `body::private::PrivateBody` is not general enough - | - = note: `body::private::PrivateBody<'1>` would have to be implemented for the type `OneshotBody<'0>`, for any two lifetimes `'0` and `'1`... - = note: ...but `body::private::PrivateBody<'2>` is actually implemented for the type `OneshotBody<'2>`, for some specific lifetime `'2` - + * error: higher-ranked lifetime error + * = note: could not prove `impl Future>: Send` #[test] fn request_with_content_length_header() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; - let body = OneshotBody::new(b"Hi"); + let mut client = Client::connect(ctx.runtime_ref(), address).await?; + let body = OneshotBody::new("Hi"); // NOTE: Content-Length is incorrect for this test! let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"3")]); let response = client .request(Method::Get, "/", &headers, body) - .await? + .await .unwrap(); let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -292,15 +282,15 @@ fn request_with_content_length_header() { fn request_with_transfer_encoding_header() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let headers = Headers::from([Header::new(HeaderName::TRANSFER_ENCODING, b"identify")]); - let body = OneshotBody::new(b"Hi"); + let body = OneshotBody::new("Hi"); let response = client .request(Method::Get, "/", &headers, body) - .await? 
+ .await .unwrap(); let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -338,14 +328,14 @@ fn request_with_transfer_encoding_header() { fn request_sets_content_length_header() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; - let body = OneshotBody::new(b"Hello"); + let mut client = Client::connect(ctx.runtime_ref(), address).await?; + let body = OneshotBody::new("Ok"); let response = client .request(Method::Get, "/", &Headers::EMPTY, body) - .await? + .await .unwrap(); let headers = Headers::from([Header::new(HeaderName::CONTENT_LENGTH, b"2")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -386,13 +376,13 @@ fn request_sets_content_length_header() { fn partial_response() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::IncompleteResponse); Ok(()) @@ -427,10 +417,10 @@ fn partial_response() { fn same_content_length() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([ Header::new(HeaderName::CONTENT_LENGTH, b"2"), @@ -467,13 +457,13 @@ fn same_content_length() { fn different_content_length() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::DifferentContentLengths); Ok(()) @@ -506,13 +496,13 @@ fn different_content_length() { fn transfer_encoding_and_content_length_and() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::ContentLengthAndTransferEncoding); Ok(()) @@ -545,13 +535,13 @@ fn transfer_encoding_and_content_length_and() { fn invalid_content_length() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? 
+ .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidContentLength); Ok(()) @@ -584,10 +574,10 @@ fn invalid_content_length() { fn chunked_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([Header::new(HeaderName::TRANSFER_ENCODING, b"chunked")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -621,10 +611,10 @@ fn chunked_transfer_encoding() { fn slow_chunked_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([Header::new(HeaderName::TRANSFER_ENCODING, b"chunked")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -660,10 +650,10 @@ fn slow_chunked_transfer_encoding() { fn empty_chunked_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([Header::new(HeaderName::TRANSFER_ENCODING, b"chunked")]); expect_response(response, Version::Http11, StatusCode::OK, &headers, b"").await; @@ -697,10 +687,10 @@ fn empty_chunked_transfer_encoding() { fn content_length_and_identity_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([ Header::new(HeaderName::CONTENT_LENGTH, b"2"), @@ -739,13 +729,13 @@ fn content_length_and_identity_transfer_encoding() { fn unsupported_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? 
+ .await .unwrap_err(); assert_eq!(err, ResponseError::UnsupportedTransferEncoding); Ok(()) @@ -778,10 +768,10 @@ fn unsupported_transfer_encoding() { fn chunked_not_last_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client.get("/").await?; let headers = Headers::from([Header::new( HeaderName::TRANSFER_ENCODING, @@ -819,13 +809,13 @@ fn chunked_not_last_transfer_encoding() { fn content_length_and_transfer_encoding() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::ContentLengthAndTransferEncoding); Ok(()) @@ -858,13 +848,13 @@ fn content_length_and_transfer_encoding() { fn invalid_chunk_size() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidChunkSize); Ok(()) @@ -897,13 +887,13 @@ fn invalid_chunk_size() { fn connect() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client .request(Method::Connect, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap(); let headers = Headers::EMPTY; expect_response(response, Version::Http11, StatusCode::OK, &headers, b"").await; @@ -936,13 +926,13 @@ fn connect() { fn head() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client .request(Method::Head, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap(); let headers = Headers::EMPTY; expect_response(response, Version::Http11, StatusCode::OK, &headers, b"").await; @@ -975,13 +965,13 @@ fn head() { fn response_status_204() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? 
+ .await .unwrap(); let headers = Headers::EMPTY; let status = StatusCode::NO_CONTENT; @@ -1015,13 +1005,13 @@ fn response_status_204() { fn no_content_length_no_transfer_encoding_response() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let response = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap(); let headers = Headers::EMPTY; expect_response(response, Version::Http11, StatusCode::OK, &headers, b"Ok").await; @@ -1055,13 +1045,13 @@ fn no_content_length_no_transfer_encoding_response() { fn response_head_too_large() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::HeadTooLarge); Ok(()) @@ -1096,13 +1086,13 @@ fn response_head_too_large() { fn invalid_header_name() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidHeaderName); Ok(()) @@ -1134,13 +1124,13 @@ fn invalid_header_name() { fn invalid_header_value() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidHeaderValue); Ok(()) @@ -1174,13 +1164,13 @@ fn invalid_header_value() { fn invalid_new_line() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidNewLine); Ok(()) @@ -1212,13 +1202,13 @@ fn invalid_new_line() { fn invalid_version() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? 
+ .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidVersion); Ok(()) @@ -1250,13 +1240,13 @@ fn invalid_version() { fn invalid_status() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::InvalidStatus); Ok(()) @@ -1288,13 +1278,13 @@ fn invalid_status() { fn too_many_headers() { with_test_server!(|test_server| { async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, address: SocketAddr, ) -> io::Result<()> { - let mut client = Client::connect(&mut ctx, address)?.await?; + let mut client = Client::connect(ctx.runtime_ref(), address).await?; let err = client .request(Method::Get, "/", &Headers::EMPTY, EmptyBody) - .await? + .await .unwrap_err(); assert_eq!(err, ResponseError::TooManyHeaders); Ok(()) @@ -1399,10 +1389,9 @@ async fn expect_response( str::from_utf8(expected) ); } - let mut got_body = Vec::new(); - response + let got_body = response .body_mut() - .read_all(&mut got_body, 1024) + .recv(Vec::with_capacity(1024)) .await .unwrap(); assert_eq!(got_body, body, "different bodies"); diff --git a/http/tests/functional/message.rs b/http/tests/functional/message.rs index 63ea9f81b..a30a12e77 100644 --- a/http/tests/functional/message.rs +++ b/http/tests/functional/message.rs @@ -111,7 +111,7 @@ fn request_map_body() { EmptyBody, ) .map_body(|_body: EmptyBody| OneshotBody::new(BODY1)); - assert_eq!(request.body(), BODY1); + assert_eq!(request.body().into_inner(), BODY1); } #[test] @@ -125,12 +125,12 @@ fn request_builder() { ]; for (create, expected) in tests { - let request = create("/".to_owned()).with_body::(BODY1.into()); + let request = create("/".to_owned()).with_body(OneshotBody::new(BODY1)); assert_eq!(request.method(), expected); assert_eq!(request.path(), "/"); assert_eq!(request.version(), Version::Http11); assert!(request.headers().is_empty()); - assert_eq!(request.body(), BODY1); + assert_eq!(request.body().into_inner(), BODY1); } } @@ -213,7 +213,7 @@ fn response_header_or_else() { fn response_map_body() { let response = Response::new(Version::Http10, StatusCode::OK, Headers::EMPTY, EmptyBody) .map_body(|_body: EmptyBody| OneshotBody::new(BODY1)); - assert_eq!(response.body(), BODY1); + assert_eq!(response.body().into_inner(), BODY1); } #[test] @@ -238,11 +238,11 @@ fn response_builder() { ]; for (create, expected) in tests { - let response = create().with_body::(BODY1.into()); + let response = create().with_body(OneshotBody::new(BODY1)); assert_eq!(response.version(), Version::Http11); assert_eq!(response.status(), expected); assert!(response.headers().is_empty()); - assert_eq!(response.body(), BODY1); + assert_eq!(response.body().into_inner(), BODY1); } let tests: [(fn(&str) -> Response, StatusCode); 3] = [ @@ -253,7 +253,7 @@ fn response_builder() { for (create, expected) in tests { let uri = "/other_location"; - let response = create(uri).with_body::(BODY1.into()); + let response = create(uri).with_body(OneshotBody::new(BODY1)); assert_eq!(response.version(), Version::Http11); assert_eq!(response.status(), expected); assert!(!response.headers().is_empty()); @@ -262,6 +262,6 @@ fn response_builder() { response.headers().get_bytes(&HeaderName::LOCATION), Some(uri.as_bytes()) ); - 
assert_eq!(response.body(), BODY1); + assert_eq!(response.body().into_inner(), BODY1); } } diff --git a/http/tests/functional/route.rs b/http/tests/functional/route.rs index 7a0d48bed..e62372c92 100644 --- a/http/tests/functional/route.rs +++ b/http/tests/functional/route.rs @@ -1,10 +1,10 @@ //! Tests for the [`route!`] macro. -use heph::test::block_on; use heph_http::body::{EmptyBody, OneshotBody}; use heph_http::{route, Headers, Method, Request, Response, Version}; +use heph_rt::test::block_on; -async fn route(request: Request) -> Response> { +async fn route(request: Request) -> Response> { route!(match request { GET | HEAD "/" => index, GET "/test1" => handlers::get, @@ -22,72 +22,72 @@ async fn route(request: Request) -> Response> { }) } -async fn index(request: Request) -> Response> { +async fn index(request: Request) -> Response> { assert!(matches!(request.method(), Method::Get | Method::Head)); assert_eq!(request.path(), "/"); - Response::ok().with_body("index".into()) + Response::ok().with_body(OneshotBody::new("index")) } mod handlers { use heph_http::body::OneshotBody; use heph_http::{Method, Request, Response}; - pub async fn get(request: Request) -> Response> { + pub async fn get(request: Request) -> Response> { assert!(matches!(request.method(), Method::Get)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("GET".into()) + Response::ok().with_body(OneshotBody::new("GET")) } - pub async fn head(request: Request) -> Response> { + pub async fn head(request: Request) -> Response> { assert!(matches!(request.method(), Method::Head)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("HEAD".into()) + Response::ok().with_body(OneshotBody::new("HEAD")) } - pub async fn post(request: Request) -> Response> { + pub async fn post(request: Request) -> Response> { assert!(matches!(request.method(), Method::Post)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("POST".into()) + Response::ok().with_body(OneshotBody::new("POST")) } - pub async fn put(request: Request) -> Response> { + pub async fn put(request: Request) -> Response> { assert!(matches!(request.method(), Method::Put)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("PUT".into()) + Response::ok().with_body(OneshotBody::new("PUT")) } - pub async fn delete(request: Request) -> Response> { + pub async fn delete(request: Request) -> Response> { assert!(matches!(request.method(), Method::Delete)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("DELETE".into()) + Response::ok().with_body(OneshotBody::new("DELETE")) } - pub async fn connect(request: Request) -> Response> { + pub async fn connect(request: Request) -> Response> { assert!(matches!(request.method(), Method::Connect)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("CONNECT".into()) + Response::ok().with_body(OneshotBody::new("CONNECT")) } - pub async fn options(request: Request) -> Response> { + pub async fn options(request: Request) -> Response> { assert!(matches!(request.method(), Method::Options)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("OPTIONS".into()) + Response::ok().with_body(OneshotBody::new("OPTIONS")) } - pub async fn trace(request: Request) -> Response> { + pub async fn trace(request: Request) -> Response> { assert!(matches!(request.method(), Method::Trace)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("TRACE".into()) + Response::ok().with_body(OneshotBody::new("TRACE")) } - pub async fn patch(request: Request) -> 
Response> { + pub async fn patch(request: Request) -> Response> { assert!(matches!(request.method(), Method::Patch)); assert_eq!(request.path(), "/test1"); - Response::ok().with_body("PATCH".into()) + Response::ok().with_body(OneshotBody::new("PATCH")) } - pub async fn not_found(_: Request) -> Response> { - Response::not_found().with_body("not found".into()) + pub async fn not_found(_: Request) -> Response> { + Response::not_found().with_body(OneshotBody::new("not found")) } } @@ -97,7 +97,7 @@ fn multiple_methods_same_route() { let tests = [Request::get("/".to_owned()), Request::head("/".to_owned())]; for test_request in tests { let response = route(test_request).await; - assert_eq!(response.body(), "index") + assert_eq!(response.body().into_inner(), "index") } }); } @@ -125,7 +125,7 @@ fn correct_routing_based_on_method() { EmptyBody, ); let response = route(request).await; - assert_eq!(response.body(), method.as_str()) + assert_eq!(response.body().into_inner(), method.as_str()) } }); } @@ -141,7 +141,7 @@ fn not_found_fallback() { ]; for test_request in tests { let response = route(test_request).await; - assert_eq!(response.body(), "not found") + assert_eq!(response.body().into_inner(), "not found") } }); } diff --git a/http/tests/functional/server.rs b/http/tests/functional/server.rs index 7f561f460..d42ff2d7f 100644 --- a/http/tests/functional/server.rs +++ b/http/tests/functional/server.rs @@ -1,18 +1,19 @@ use std::borrow::Cow; use std::io::{self, Read, Write}; -use std::net::{Shutdown, SocketAddr, TcpStream}; +use std::net::{self, Shutdown, SocketAddr}; use std::str; use std::sync::{Arc, Condvar, Mutex, Weak}; use std::thread::{self, sleep}; use std::time::{Duration, SystemTime}; use heph::messages::Terminate; -use heph::rt::{self, Runtime, ThreadLocal}; -use heph::{actor, Actor, ActorRef, NewActor, Supervisor, SupervisorStrategy}; +use heph::{actor, ActorRef, SupervisorStrategy}; use heph_http::body::OneshotBody; -use heph_http::server::{HttpServer, RequestError}; +use heph_http::server::{self, RequestError}; use heph_http::{self as http, Header, HeaderName, Headers, Method, StatusCode, Version}; +use heph_rt::net::TcpStream; use heph_rt::spawn::options::{ActorOptions, Priority}; +use heph_rt::{self, Runtime, ThreadLocal}; use httpdate::fmt_http_date; /// Macro to run with a test server. @@ -23,7 +24,17 @@ macro_rules! with_test_server { // server are dropped before we call `test_server.join()` below (which // would block a shutdown. { - let mut $stream = TcpStream::connect(test_server.address).unwrap(); + let mut $stream = loop { + match net::TcpStream::connect(test_server.address) { + Ok(stream) => break stream, + Err(err) if err.kind() == io::ErrorKind::ConnectionRefused => { + // Give the server some time to start up. 
+ sleep(Duration::from_millis(1)); + continue; + } + Err(err) => panic!("failed to connect to {}: {err}", test_server.address), + } + }; $stream.set_nodelay(true).unwrap(); $stream .set_read_timeout(Some(Duration::from_secs(1))) @@ -499,7 +510,7 @@ fn too_many_header() { } fn expect_response( - stream: &mut TcpStream, + stream: &mut net::TcpStream, // Expected values: version: Version, status: StatusCode, @@ -566,36 +577,38 @@ impl TestServer { fn new() -> TestServer { const TIMEOUT: Duration = Duration::from_secs(1); - let actor = http_actor as fn(_, _, _) -> _; + let server_ref = Arc::new((Mutex::new(None), Condvar::new())); + let set_ref = server_ref.clone(); + + let actor = http_actor as fn(_, _) -> _; let address = "127.0.0.1:0".parse().unwrap(); - let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) - .map_err(rt::Error::setup) + let server = server::setup(address, conn_supervisor, actor, ActorOptions::default()) + .map_err(heph_rt::Error::setup) .unwrap(); let address = server.local_addr(); - let mut runtime = Runtime::setup().num_threads(1).build().unwrap(); - let server_ref = Arc::new(Mutex::new(None)); - let set_ref = Arc::new(Condvar::new()); - let srv_ref = server_ref.clone(); - let set_ref2 = set_ref.clone(); - runtime - .run_on_workers(move |mut runtime_ref| -> Result<(), !> { - let mut server_ref = srv_ref.lock().unwrap(); - let options = ActorOptions::default().with_priority(Priority::LOW); - *server_ref = Some( - runtime_ref - .try_spawn_local(ServerSupervisor, server, (), options) - .unwrap() - .map(), - ); - set_ref2.notify_all(); - Ok(()) - }) - .unwrap(); + let handle = thread::spawn(move || { + let mut runtime = Runtime::setup().num_threads(1).build().unwrap(); + runtime + .run_on_workers(move |mut runtime_ref| -> Result<(), !> { + let mut server_ref = set_ref.0.lock().unwrap(); + let options = ActorOptions::default().with_priority(Priority::LOW); + *server_ref = Some( + runtime_ref + .try_spawn_local(server_supervisor, server, (), options) + .unwrap() + .map(), + ); + set_ref.1.notify_all(); + Ok(()) + }) + .unwrap(); - let handle = thread::spawn(move || runtime.start().unwrap()); - let mut server_ref = set_ref - .wait_timeout_while(server_ref.lock().unwrap(), TIMEOUT, |r| r.is_none()) + runtime.start().unwrap() + }); + let mut server_ref = server_ref + .1 + .wait_timeout_while(server_ref.0.lock().unwrap(), TIMEOUT, |r| r.is_none()) .unwrap() .0; let server_ref = server_ref.take().unwrap(); @@ -614,32 +627,15 @@ impl TestServer { } } -#[derive(Copy, Clone, Debug)] -struct ServerSupervisor; - -impl Supervisor for ServerSupervisor -where - NA: NewActor, - NA::Actor: Actor>, -{ - fn decide(&mut self, err: http::server::Error) -> SupervisorStrategy<()> { - use http::server::Error::*; - match err { - Accept(err) => panic!("error accepting new connection: {err}"), - NewActor(_) => unreachable!(), - } - } - - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - panic!("error restarting the TCP server: {err}"); - } - - fn second_restart_error(&mut self, err: io::Error) { - panic!("error restarting the actor a second time: {err}"); +fn server_supervisor(err: http::server::Error) -> SupervisorStrategy<()> { + use http::server::Error::*; + match err { + Accept(err) => panic!("error accepting new connection: {err}"), + NewActor(_) => unreachable!(), } } -fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(heph::net::TcpStream, SocketAddr)> { +fn conn_supervisor(err: io::Error) -> SupervisorStrategy { 
panic!("error handling connection: {err}") } @@ -650,7 +646,6 @@ fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(heph::net::TcpStream, async fn http_actor( _: actor::Context, mut connection: http::Connection, - _: SocketAddr, ) -> io::Result<()> { connection.set_nodelay(true)?; @@ -658,7 +653,7 @@ async fn http_actor( loop { let mut got_version = None; let mut got_method = None; - let (code, body, should_close) = match connection.next_request().await? { + let (code, body, should_close) = match connection.next_request().await { Ok(Some(mut request)) => { got_version = Some(request.version()); got_method = Some(request.method()); @@ -667,8 +662,7 @@ async fn http_actor( (Method::Get | Method::Head, "/") => (StatusCode::OK, "OK".into(), false), (Method::Post, "/echo-body") => { let body_len = request.body().len(); - let mut buf = Vec::with_capacity(128); - request.body_mut().read_all(&mut buf, 1024).await?; + let buf = request.body_mut().recv(Vec::with_capacity(1024)).await?; assert!(request.body().is_empty()); if let http::body::BodyLength::Known(length) = body_len { assert_eq!(length, buf.len()); @@ -700,8 +694,9 @@ async fn http_actor( headers.append(Header::new(HeaderName::CONNECTION, b"close")); } - let body = OneshotBody::new(body.as_bytes()); - connection.respond(code, &headers, body).await?; + connection + .respond(code, &headers, OneshotBody::new(body)) + .await?; if should_close { return Ok(()); } diff --git a/http/tests/functional/transform.rs b/http/tests/functional/transform.rs index 87126f6c7..cd8a52f7b 100644 --- a/http/tests/functional/transform.rs +++ b/http/tests/functional/transform.rs @@ -1,23 +1,23 @@ //! Tests for the transform module. -use heph::test; use heph_http::body::OneshotBody; use heph_http::handler::{Handler, Middleware}; use heph_http::transform::{Body, Cloned, Path, TransformMiddleware}; use heph_http::{Header, HeaderName, Headers, Method, Request, Response, StatusCode, Version}; +use heph_rt::test; const REQ_BODY: &'static str = "test_body"; const OK_BODY: &'static str = "good"; const BAD_BODY: &'static str = "bad"; const HOST: &'static str = "localhost"; -type TestBody = OneshotBody<'static>; +type TestBody = OneshotBody<&'static str>; struct Error(&'static str); impl From for Response { fn from(err: Error) -> Response { - Response::bad_request().with_body(err.0.into()) + Response::bad_request().with_body(TestBody::new(err.0)) } } @@ -25,7 +25,7 @@ struct Text(&'static str); impl From for Response { fn from(txt: Text) -> Response { - Response::ok().with_body(txt.0.into()) + Response::ok().with_body(TestBody::new(txt.0)) } } @@ -63,8 +63,8 @@ fn transform_middleware() { .unwrap(), HOST, ); - assert_eq!((cloned_body.0).0, REQ_BODY); - assert_eq!(body.0, REQ_BODY); + assert_eq!((cloned_body.0).0.into_inner(), REQ_BODY); + assert_eq!(body.0.into_inner(), REQ_BODY); if path.0 == "/ok" { Ok(Text(OK_BODY)) @@ -89,10 +89,10 @@ fn transform_middleware() { headers.append(Header::new(HeaderName::HOST, HOST.as_bytes())); headers }, - TestBody::from(REQ_BODY), + TestBody::new(REQ_BODY), ); let response: Response = test::block_on(middleware.handle(request)); assert_eq!(response.status(), expected_status); - assert_eq!(response.body(), expected_body); + assert_eq!(response.body().into_inner(), expected_body); } } diff --git a/remote/Cargo.toml b/remote/Cargo.toml index 64a169ae8..024193a3e 100644 --- a/remote/Cargo.toml +++ b/remote/Cargo.toml @@ -11,7 +11,7 @@ json = ["serde_json"] [dependencies] heph = { version = "0.5.0", path = "../", default-features = 
false } heph-rt = { version = "0.5.0", path = "../rt", default-features = false } -log = { version = "0.4.14", default-features = false } +log = { version = "0.4.17", default-features = false } serde = { version = "1.0.130", default-features = false } getrandom = { version = "0.2.3", default-features = false } diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 79735d3ed..461ba4acb 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -21,7 +21,7 @@ test = ["heph/test"] a10 = { version = "0.1.0", default-features = false, git = "https://github.com/Thomasdezeeuw/a10" } heph = { version = "0.5.0", path = "../", default-features = false } heph-inbox = { version = "0.2.3", path = "../inbox", default-features = false } -log = { version = "0.4.16", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } +log = { version = "0.4.17", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } crossbeam-channel = { version = "0.5.0", default-features = false, features = ["std"] } libc = { version = "0.2.96", default-features = false } mio = { version = "0.8.0", default-features = false, features = ["os-poll", "net"] } From a169ce0b3636ed15a3fac0d6ebd718c7ae2f0058 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 25 May 2023 16:50:46 +0200 Subject: [PATCH 142/177] Impl From for {Request,Response}Error So it's easier to use with timeouts when performing I/O. --- http/src/client.rs | 7 +++++++ http/src/server.rs | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/http/src/client.rs b/http/src/client.rs index 477c7c14e..52b29546d 100644 --- a/http/src/client.rs +++ b/http/src/client.rs @@ -6,6 +6,7 @@ use std::{fmt, io}; use heph_rt::io::{BufMut, BufMutSlice}; use heph_rt::net::TcpStream; +use heph_rt::timer::DeadlinePassed; use heph_rt::Access; use crate::body::{BodyLength, EmptyBody}; @@ -791,6 +792,12 @@ impl From for io::Error { } } +impl From for ResponseError { + fn from(_: DeadlinePassed) -> ResponseError { + ResponseError::Io(io::ErrorKind::TimedOut.into()) + } +} + impl PartialEq for ResponseError { fn eq(&self, other: &ResponseError) -> bool { use ResponseError::*; diff --git a/http/src/server.rs b/http/src/server.rs index 3f9da6bf8..66bdffbe8 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -152,6 +152,7 @@ use heph::{actor, NewActor, Supervisor}; use heph_rt::io::{BufMut, BufMutSlice}; use heph_rt::net::{tcp, TcpStream}; use heph_rt::spawn::ActorOptions; +use heph_rt::timer::DeadlinePassed; use httpdate::HttpDate; use crate::body::{BodyLength, EmptyBody}; @@ -1194,6 +1195,12 @@ impl From for io::Error { } } +impl From for RequestError { + fn from(_: DeadlinePassed) -> RequestError { + RequestError::Io(io::ErrorKind::TimedOut.into()) + } +} + impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { From 4b4853dd5d0c4c1eb3ce7dbb7b896fb76f89cffa Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Thu, 25 May 2023 16:51:41 +0200 Subject: [PATCH 143/177] Fix Heph-HTTP examples --- http/examples/my_ip.rs | 76 ++++++++++++++-------------------- http/examples/route.rs | 92 ++++++++++++++++++------------------------ http/src/server.rs | 28 ++++++------- 3 files changed, 84 insertions(+), 112 deletions(-) diff --git a/http/examples/my_ip.rs b/http/examples/my_ip.rs index d3a1b5460..b7a15127c 100644 --- a/http/examples/my_ip.rs +++ b/http/examples/my_ip.rs @@ -2,31 +2,30 @@ use std::borrow::Cow; use std::io; -use std::net::SocketAddr; use std::time::Duration; -use heph::actor::{self, Actor, 
NewActor}; -use heph::net::TcpStream; -use heph::rt::{self, Runtime, ThreadLocal}; -use heph::supervisor::{Supervisor, SupervisorStrategy}; -use heph::timer::Deadline; +use heph::actor; +use heph::supervisor::SupervisorStrategy; use heph_http::body::OneshotBody; -use heph_http::{self as http, Header, HeaderName, Headers, HttpServer, Method, StatusCode}; +use heph_http::{self as http, server, Header, HeaderName, Headers, Method, StatusCode}; +use heph_rt::net::TcpStream; use heph_rt::spawn::options::{ActorOptions, Priority}; +use heph_rt::timer::Deadline; +use heph_rt::{Runtime, ThreadLocal}; use log::{debug, error, info, warn}; -fn main() -> Result<(), rt::Error> { +fn main() -> Result<(), heph_rt::Error> { std_logger::Config::logfmt().init(); - let actor = http_actor as fn(_, _, _) -> _; + let actor = http_actor as fn(_, _) -> _; let address = "127.0.0.1:7890".parse().unwrap(); - let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) - .map_err(rt::Error::setup)?; + let server = server::setup(address, conn_supervisor, actor, ActorOptions::default()) + .map_err(heph_rt::Error::setup)?; let mut runtime = Runtime::setup().use_all_cores().build()?; runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { let options = ActorOptions::default().with_priority(Priority::LOW); - let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; + let server_ref = runtime_ref.spawn_local(server_supervisor, server, (), options); runtime_ref.receive_signals(server_ref.try_map()); Ok(()) @@ -35,37 +34,20 @@ fn main() -> Result<(), rt::Error> { runtime.start() } -/// Our supervisor for the TCP server. -#[derive(Copy, Clone, Debug)] -struct ServerSupervisor; - -impl Supervisor for ServerSupervisor -where - NA: NewActor, - NA::Actor: Actor>, -{ - fn decide(&mut self, err: http::server::Error) -> SupervisorStrategy<()> { - use http::server::Error::*; - match err { - Accept(err) => { - error!("error accepting new connection: {err}"); - SupervisorStrategy::Restart(()) - } - NewActor(_) => unreachable!(), +fn server_supervisor(err: server::Error) -> SupervisorStrategy<()> { + match err { + // When we hit an error accepting a connection we'll drop the old + // server and create a new one. + server::Error::Accept(err) => { + error!("error accepting new connection: {err}"); + SupervisorStrategy::Restart(()) } - } - - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - error!("error restarting the TCP server: {err}"); - SupervisorStrategy::Stop - } - - fn second_restart_error(&mut self, err: io::Error) { - error!("error restarting the actor a second time: {err}"); + // Async function never return an error creating a new actor. 
+ server::Error::NewActor(_) => unreachable!(), } } -fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { +fn conn_supervisor(err: io::Error) -> SupervisorStrategy { error!("error handling connection: {err}"); SupervisorStrategy::Stop } @@ -75,18 +57,22 @@ const ALIVE_TIMEOUT: Duration = Duration::from_secs(120); const WRITE_TIMEOUT: Duration = Duration::from_secs(10); async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, mut connection: http::Connection, - address: SocketAddr, ) -> io::Result<()> { + let address = connection.peer_addr()?; info!("accepted connection: source={address}"); connection.set_nodelay(true)?; let mut read_timeout = READ_TIMEOUT; let mut headers = Headers::EMPTY; loop { - let fut = Deadline::after(&mut ctx, read_timeout, connection.next_request()); - let (code, body, should_close) = match fut.await? { + let fut = Deadline::after( + ctx.runtime_ref().clone(), + read_timeout, + connection.next_request(), + ); + let (code, body, should_close) = match fut.await { Ok(Some(request)) => { info!("received request: {request:?}: source={address}"); if request.path() != "/" { @@ -121,9 +107,9 @@ async fn http_actor( } debug!("sending response: code={code}, body='{body}', source={address}"); - let body = OneshotBody::new(body.as_bytes()); + let body = OneshotBody::new(body); let write_response = connection.respond(code, &headers, body); - Deadline::after(&mut ctx, WRITE_TIMEOUT, write_response).await?; + Deadline::after(ctx.runtime_ref().clone(), WRITE_TIMEOUT, write_response).await?; if should_close { warn!("closing connection: source={address}"); diff --git a/http/examples/route.rs b/http/examples/route.rs index 51d1f65ca..77e6faaa3 100644 --- a/http/examples/route.rs +++ b/http/examples/route.rs @@ -1,31 +1,30 @@ #![feature(never_type)] use std::io; -use std::net::SocketAddr; use std::time::Duration; -use heph::actor::{self, Actor, NewActor}; -use heph::net::TcpStream; -use heph::rt::{self, Runtime, ThreadLocal}; -use heph::supervisor::{Supervisor, SupervisorStrategy}; -use heph::timer::Deadline; +use heph::actor::{self}; +use heph::supervisor::SupervisorStrategy; use heph_http::body::OneshotBody; -use heph_http::{self as http, route, HttpServer, Request, Response}; +use heph_http::{self as http, route, server, Request, Response}; +use heph_rt::net::TcpStream; use heph_rt::spawn::options::{ActorOptions, Priority}; +use heph_rt::timer::Deadline; +use heph_rt::{Runtime, ThreadLocal}; use log::{error, info, warn}; -fn main() -> Result<(), rt::Error> { +fn main() -> Result<(), heph_rt::Error> { std_logger::Config::logfmt().init(); - let actor = http_actor as fn(_, _, _) -> _; + let actor = http_actor as fn(_, _) -> _; let address = "127.0.0.1:7890".parse().unwrap(); - let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) - .map_err(rt::Error::setup)?; + let server = server::setup(address, conn_supervisor, actor, ActorOptions::default()) + .map_err(heph_rt::Error::setup)?; let mut runtime = Runtime::setup().use_all_cores().build()?; runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { let options = ActorOptions::default().with_priority(Priority::LOW); - let server_ref = runtime_ref.try_spawn_local(ServerSupervisor, server, (), options)?; + let server_ref = runtime_ref.spawn_local(server_supervisor, server, (), options); runtime_ref.receive_signals(server_ref.try_map()); Ok(()) @@ -34,37 +33,20 @@ fn main() -> Result<(), rt::Error> { runtime.start() } -/// Our supervisor for the TCP 
server. -#[derive(Copy, Clone, Debug)] -struct ServerSupervisor; - -impl Supervisor for ServerSupervisor -where - NA: NewActor, - NA::Actor: Actor>, -{ - fn decide(&mut self, err: http::server::Error) -> SupervisorStrategy<()> { - use http::server::Error::*; - match err { - Accept(err) => { - error!("error accepting new connection: {err}"); - SupervisorStrategy::Restart(()) - } - NewActor(_) => unreachable!(), +fn server_supervisor(err: server::Error) -> SupervisorStrategy<()> { + match err { + // When we hit an error accepting a connection we'll drop the old + // server and create a new one. + server::Error::Accept(err) => { + error!("error accepting new connection: {err}"); + SupervisorStrategy::Restart(()) } - } - - fn decide_on_restart_error(&mut self, err: io::Error) -> SupervisorStrategy<()> { - error!("error restarting the TCP server: {err}"); - SupervisorStrategy::Stop - } - - fn second_restart_error(&mut self, err: io::Error) { - error!("error restarting the actor a second time: {err}"); + // Async function never return an error creating a new actor. + server::Error::NewActor(_) => unreachable!(), } } -fn conn_supervisor(err: io::Error) -> SupervisorStrategy<(TcpStream, SocketAddr)> { +fn conn_supervisor(err: io::Error) -> SupervisorStrategy { error!("error handling connection: {err}"); SupervisorStrategy::Stop } @@ -74,18 +56,22 @@ const ALIVE_TIMEOUT: Duration = Duration::from_secs(120); const WRITE_TIMEOUT: Duration = Duration::from_secs(10); async fn http_actor( - mut ctx: actor::Context, + ctx: actor::Context, mut connection: http::Connection, - address: SocketAddr, ) -> io::Result<()> { + let address = connection.peer_addr()?; info!("accepted connection: source={address}"); connection.set_nodelay(true)?; let mut read_timeout = READ_TIMEOUT; loop { - let fut = Deadline::after(&mut ctx, read_timeout, connection.next_request()); + let fut = Deadline::after( + ctx.runtime_ref().clone(), + read_timeout, + connection.next_request(), + ); - let response = match fut.await? { + let response = match fut.await { Ok(Some(request)) => { info!("received request: {request:?}: source={address}"); route!(match request { @@ -99,12 +85,12 @@ async fn http_actor( Ok(None) => return Ok(()), Err(err) => { warn!("error reading request: {err}: source={address}"); - err.response().with_body("Bad request".into()) + err.response().with_body(OneshotBody::new("Bad request")) } }; let write_response = connection.respond_with(response); - Deadline::after(&mut ctx, WRITE_TIMEOUT, write_response).await?; + Deadline::after(ctx.runtime_ref().clone(), WRITE_TIMEOUT, write_response).await?; // Now that we've read a single request we can wait a little for the // next one so that we can reuse the resources for the next request. 
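// A minimal sketch of why `fut.await` above yields a single `Result`: the
// `From<DeadlinePassed> for RequestError` impl added earlier in this series
// presumably lets `Deadline` fold a missed deadline into the error type of the
// wrapped future, so a timeout surfaces as an ordinary request error:
//
//     match fut.await {
//         Ok(Some(request)) => { /* handle the request */ }
//         Ok(None) => { /* the client closed the connection */ }
//         // A missed read deadline arrives here as `RequestError::Io` with
//         // `io::ErrorKind::TimedOut`.
//         Err(err) => { /* reply with `err.response()` */ }
//     }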
@@ -112,18 +98,18 @@ async fn http_actor( } } -async fn index(_req: Request) -> Response> { - Response::ok().with_body("Index".into()) +async fn index(_req: Request) -> Response> { + Response::ok().with_body(OneshotBody::new("Index")) } -async fn other_page(_req: Request) -> Response> { - Response::ok().with_body("Other page!".into()) +async fn other_page(_req: Request) -> Response> { + Response::ok().with_body(OneshotBody::new("Other page!")) } -async fn post(_req: Request) -> Response> { - Response::ok().with_body("POST".into()) +async fn post(_req: Request) -> Response> { + Response::ok().with_body(OneshotBody::new("POST")) } -async fn not_found(_req: Request) -> Response> { - Response::not_found().with_body("Page not found".into()) +async fn not_found(_req: Request) -> Response> { + Response::not_found().with_body(OneshotBody::new("Page not found")) } diff --git a/http/src/server.rs b/http/src/server.rs index 66bdffbe8..2696aa8b6 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -33,28 +33,28 @@ //! use std::time::Duration; //! //! use heph::actor::{self, Actor, NewActor}; -//! use heph::net::TcpStream; -//! use heph::rt::{self, Runtime, ThreadLocal}; //! use heph::supervisor::{Supervisor, SupervisorStrategy}; -//! use heph::timer::Deadline; //! use heph_http::body::OneshotBody; -//! use heph_http::{self as http, Header, HeaderName, Headers, HttpServer, Method, StatusCode}; +//! use heph_http::{self as http, server, Header, HeaderName, Headers, Method, StatusCode}; +//! use heph_rt::net::TcpStream; //! use heph_rt::spawn::options::{ActorOptions, Priority}; +//! use heph_rt::timer::Deadline; +//! use heph_rt::{Runtime, ThreadLocal}; //! use log::error; //! -//! fn main() -> Result<(), rt::Error> { +//! fn main() -> Result<(), heph_rt::Error> { //! // Setup the HTTP server. -//! let actor = http_actor as fn(_, _, _) -> _; +//! let actor = http_actor as fn(_, _) -> _; //! let address = "127.0.0.1:7890".parse().unwrap(); -//! let server = HttpServer::setup(address, conn_supervisor, actor, ActorOptions::default()) -//! .map_err(rt::Error::setup)?; +//! let server = server::setup(address, conn_supervisor, actor, ActorOptions::default()) +//! .map_err(heph_rt::Error::setup)?; //! //! // Build the runtime. //! let mut runtime = Runtime::setup().use_all_cores().build()?; //! // On each worker thread start our HTTP server. //! runtime.run_on_workers(move |mut runtime_ref| -> io::Result<()> { //! let options = ActorOptions::default().with_priority(Priority::LOW); -//! let server_ref = runtime_ref.try_spawn_local(server_supervisor, server, (), options)?; +//! let server_ref = runtime_ref.spawn_local(server_supervisor, server, (), options); //! //! # server_ref.try_send(heph::messages::Terminate).unwrap(); //! @@ -66,16 +66,16 @@ //! } //! //! /// Our supervisor for the HTTP server. -//! fn server_supervisor(err: http::server::Error) -> SupervisorStrategy<()> { +//! fn server_supervisor(err: server::Error) -> SupervisorStrategy<()> { //! match err { //! // When we hit an error accepting a connection we'll drop the old //! // server and create a new one. -//! tcp::server::Error::Accept(err) => { +//! server::Error::Accept(err) => { //! error!("error accepting new connection: {err}"); //! SupervisorStrategy::Restart(()) //! } //! // Async function never return an error creating a new actor. -//! tcp::server::Error::NewActor(_) => unreachable!(), +//! server::Error::NewActor(_) => unreachable!(), //! } //! } //! @@ -95,7 +95,7 @@ //! let mut headers = Headers::EMPTY; //! loop { //! 
// Read the next request. -//! let (code, body, should_close) = match connection.next_request().await? { +//! let (code, body, should_close) = match connection.next_request().await { //! Ok(Some(request)) => { //! // Only support GET/HEAD to "/", with an empty body. //! if request.path() != "/" { @@ -130,7 +130,7 @@ //! } //! //! // Send the body as a single payload. -//! let body = OneshotBody::new(body.as_bytes()); +//! let body = OneshotBody::new(body); //! // Respond to the request. //! connection.respond(code, &headers, body).await?; //! From 635e9d2329d82abdd9d8a0676b4865e3fc6728da Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 00:03:46 +0200 Subject: [PATCH 144/177] Add test::block_on_local_actor Spawns a thread-local [actor] and waits for the result. --- rt/src/test.rs | 115 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 0fc9fc130..4b084c5b5 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -16,6 +16,8 @@ //! * [`join`], [`join_many`]: wait for the actor(s) to finish running. //! * [`join_all`]: wait all actors in a group to finish running. //! * Blocking on [`Future`]s: +//! * [`block_on_local_actor`]: spawns a thread-local [actor] and waits for +//! the result. //! * [`block_on`]: spawns a `Future` and waits for the result. //! * Initialising actors: //! * [`init_local_actor`]: initialise a thread-local actor. @@ -47,14 +49,16 @@ //! features = ["test"] //! ``` +use std::any::Any; use std::async_iter::AsyncIterator; use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, OnceLock}; use std::task::{self, Poll}; use std::time::{Duration, Instant}; -use std::{io, slice, thread}; +use std::{fmt, io, slice, thread}; use heph::actor::{self, Actor, NewActor, SyncActor, SyncWaker}; use heph::actor_ref::{ActorGroup, ActorRef}; @@ -199,6 +203,115 @@ where .expect("failed to receive result from future") } +/// Spawn a thread-local actor on the *test* runtime and wait for it to +/// complete. +/// +/// See the [module documentation] for more information about the *test* +/// runtime. And see the [`Spawn`] trait for more information about spawning +/// actors. +/// +/// [module documentation]: crate::test +/// [`Spawn`]: crate::spawn::Spawn +/// +/// # Notes +/// +/// No superisor is used instead all errors and panics are returned by this +/// function. +/// +/// This requires the `NewActor` (`NA`) to be [`Send`] as they are send to +/// another thread which runs the *test* runtime (and thus the actor). The actor +/// (`NA::Actor`) itself doesn't have to be `Send`. 
+pub fn block_on_local_actor( + mut new_actor: NA, + arg: NA::Argument, +) -> Result<(), BlockOnError> +where + NA: NewActor + Send + 'static, + NA::Actor: 'static, + ::Error: Send, + NA::Argument: Send, + NA::Error: Send, +{ + let (sender, mut receiver) = new_oneshot(); + let waker = SyncWaker::new(); + _ = receiver.register_waker(&task::Waker::from(waker.clone())); + run_on_test_runtime(move |mut runtime_ref| { + let (_, receiver) = heph_inbox::new(heph_inbox::MIN_CAP); + let ctx = actor::Context::new(receiver, ThreadLocal::new(runtime_ref.clone())); + let actor = match new_actor.new(ctx, arg) { + Ok(actor) => actor, + Err(err) => { + _ = sender.try_send(Err(BlockOnError::Creating(err))); + return Ok(()); + } + }; + + let future = ErrorCatcher { + sender: Some(sender), + actor, + }; + + runtime_ref.spawn_local_future(future, FutureOptions::default()); + Ok(()) + }); + waker + .block_on(receiver.recv_once()) + .expect("failed to receive result from test runtime") +} + +/// Error return by spawn an actor and waiting for the result. +pub enum BlockOnError { + /// Error creating the actor. + Creating(NA::Error), + /// Error running the actor. + Running(::Error), + /// Panic while running the actor. + Panic(Box), +} + +impl fmt::Debug for BlockOnError +where + NA: NewActor, + NA::Error: fmt::Debug, + ::Error: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("BlockOnError::")?; + match self { + BlockOnError::Creating(err) => f.debug_tuple("Creating").field(&err).finish(), + BlockOnError::Running(err) => f.debug_tuple("Running").field(&err).finish(), + BlockOnError::Panic(err) => f.debug_tuple("Panic").field(&err).finish(), + } + } +} + +/// [`Future`]/[`Actor`] wrapper to catch errors and panics. +#[derive(Debug)] +struct ErrorCatcher { + sender: Option>>>, + actor: NA::Actor, +} + +impl Future for ErrorCatcher { + type Output = (); + + fn poll(self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll { + // SAFETY: not moving the actor. + let this = unsafe { Pin::get_unchecked_mut(self) }; + // SAFETY: undoing the previous operation, still ensuring that the actor + // is not moved. + let mut actor = unsafe { Pin::new_unchecked(&mut this.actor) }; + let res = match catch_unwind(AssertUnwindSafe(|| actor.as_mut().try_poll(ctx))) { + Ok(Poll::Ready(Ok(()))) => Ok(()), + Ok(Poll::Ready(Err(err))) => Err(BlockOnError::Running(err)), + Ok(Poll::Pending) => return Poll::Pending, + Err(panic) => Err(BlockOnError::Panic(panic)), + }; + _ = this.sender.take().unwrap().try_send(res); + Poll::Ready(()) + } +} + /// Attempt to spawn a thread-local actor on the *test* runtime. /// /// See the [module documentation] for more information about the *test* From 19e3b2e864ea4bfdb188324776cd117134b7333f Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 00:10:21 +0200 Subject: [PATCH 145/177] Add test::block_on_actor Same as test::block_on_local_actor, but for thread-safe actors. --- rt/src/test.rs | 57 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 4b084c5b5..2b97f0ac2 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -18,6 +18,8 @@ //! * Blocking on [`Future`]s: //! * [`block_on_local_actor`]: spawns a thread-local [actor] and waits for //! the result. +//! * [`block_on_actor`]: spawns a thread-safe [actor] and waits for the +//! result. //! * [`block_on`]: spawns a `Future` and waits for the result. //! * Initialising actors: //! 
* [`init_local_actor`]: initialise a thread-local actor. @@ -259,6 +261,59 @@ where .expect("failed to receive result from test runtime") } +/// Spawn a thread-safe actor on the *test* runtime and wait for it to +/// complete. +/// +/// See the [module documentation] for more information about the *test* +/// runtime. And see the [`Spawn`] trait for more information about spawning +/// actors. +/// +/// [module documentation]: crate::test +/// [`Spawn`]: crate::spawn::Spawn +/// +/// # Notes +/// +/// No superisor is used instead all errors and panics are returned by this +/// function. +/// +/// This requires the `NewActor` (`NA`) to be [`Send`] as they are send to +/// another thread which runs the *test* runtime (and thus the actor). The actor +/// (`NA::Actor`) itself doesn't have to be `Send`. +pub fn block_on_actor(mut new_actor: NA, arg: NA::Argument) -> Result<(), BlockOnError> +where + NA: NewActor + Send + 'static, + NA::Actor: Send + std::marker::Sync + 'static, + ::Error: Send, + NA::Argument: Send, + NA::Error: Send, +{ + let (sender, mut receiver) = new_oneshot(); + let waker = SyncWaker::new(); + _ = receiver.register_waker(&task::Waker::from(waker.clone())); + run_on_test_runtime(move |mut runtime_ref| { + let (_, receiver) = heph_inbox::new(heph_inbox::MIN_CAP); + let ctx = actor::Context::new(receiver, ThreadSafe::new(runtime_ref.clone_shared())); + let actor = match new_actor.new(ctx, arg) { + Ok(actor) => actor, + Err(err) => { + _ = sender.try_send(Err(BlockOnError::Creating(err))); + return Ok(()); + } + }; + + let future = ErrorCatcher { + sender: Some(sender), + actor, + }; + + runtime_ref.spawn_future(future, FutureOptions::default()); + Ok(()) + }); + waker + .block_on(receiver.recv_once()) + .expect("failed to receive result from test runtime") +} + /// Error return by spawn an actor and waiting for the result. pub enum BlockOnError { /// Error creating the actor. @@ -367,7 +422,7 @@ pub fn try_spawn( ) -> Result, NA::Error> where S: Supervisor + Send + std::marker::Sync + 'static, - NA: NewActor + std::marker::Sync + Send + 'static, + NA: NewActor + Send + std::marker::Sync + 'static, NA::Actor: Send + std::marker::Sync + 'static, NA::Message: Send, NA::Argument: Send, From e60804ea13dfe14c5527a63fa876cd687be698d8 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 20:55:11 +0200 Subject: [PATCH 146/177] Add fs module Somewhat mimics the std::fs and a10::fs modules. --- rt/src/fs.rs | 721 ++++++++++++++++++++++++++++++++++++++ rt/src/io/buf.rs | 3 +- rt/src/lib.rs | 1 + rt/tests/functional.rs | 1 + rt/tests/functional/fs.rs | 212 +++++++++++ rt/tests/util/mod.rs | 11 +- 6 files changed, 946 insertions(+), 3 deletions(-) create mode 100644 rt/src/fs.rs create mode 100644 rt/tests/functional/fs.rs diff --git a/rt/src/fs.rs b/rt/src/fs.rs new file mode 100644 index 000000000..691d02f18 --- /dev/null +++ b/rt/src/fs.rs @@ -0,0 +1,721 @@ +//! Filesystem manipulation operations. +//! +//! To open a [`File`] use [`File::open`] or [`OpenOptions`]. + +use std::path::PathBuf; +use std::time::SystemTime; +use std::{fmt, io}; + +use a10::{AsyncFd, Extract}; + +use crate::access::Access; +use crate::io::{ + Buf, BufMut, BufMutSlice, BufSlice, BufWrapper, Read, ReadN, ReadNVectored, ReadVectored, + Write, WriteAll, WriteAllVectored, WriteVectored, +}; + +/// Access to an open file on the filesystem. +/// +/// A`File` can be read and/or written depending on what options it was opened +/// with. 
+pub struct File { + fd: AsyncFd, +} + +impl File { + /// Open a file in read-only mode. + pub async fn open(rt: &RT, path: PathBuf) -> io::Result + where + RT: Access, + { + OpenOptions::new().read().open(rt, path).await + } + + /// Opens a file in write-only mode. + /// + /// This will create a new file if it does not exist, and will truncate it + /// if it does. + pub async fn create(rt: &RT, path: PathBuf) -> io::Result + where + RT: Access, + { + OpenOptions::new() + .write() + .create() + .truncate() + .open(rt, path) + .await + } + + /// Returns the default `OpenOptions`. + pub const fn options() -> OpenOptions { + OpenOptions::new() + } + + /// Read bytes from the file, writing them into `buf`. + pub async fn read(&self, buf: B) -> io::Result { + Read(self.fd.read(BufWrapper(buf))).await + } + + /// Read bytes from the file at `offset`, writing them into `buf`. + /// + /// The current file cursor is not affected by this function. This means + /// that a call `read_at(buf, 1024)` with a buffer of 1kb will **not** + /// continue reading at 2kb in the next call to `read`. + pub async fn read_at(&self, buf: B, offset: u64) -> io::Result { + Read(self.fd.read_at(BufWrapper(buf), offset)).await + } + + /// Read at least `n` bytes from the file, writing them into `buf`. + /// + /// Returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes could be + /// read. + pub async fn read_n(&self, buf: B, n: usize) -> io::Result { + debug_assert!( + buf.spare_capacity() >= n, + "called `File::read_n` with a buffer smaller than `n`", + ); + ReadN(self.fd.read_n(BufWrapper(buf), n)).await + } + + /// Read at least `n` bytes from the file at `offset`, writing them into + /// `buf`. + /// + /// Returns [`io::ErrorKind::UnexpectedEof`] if less than `n` bytes could be + /// read. + /// + /// The current file cursor is not affected by this function. + pub async fn read_n_at(&self, buf: B, offset: u64, n: usize) -> io::Result { + debug_assert!( + buf.spare_capacity() >= n, + "called `File::read_n_at` with a buffer smaller than `n`", + ); + ReadN(self.fd.read_n_at(BufWrapper(buf), offset, n)).await + } + + /// Read bytes from the file, writing them into `bufs`. + pub async fn read_vectored, const N: usize>(&self, bufs: B) -> io::Result { + ReadVectored(self.fd.read_vectored(BufWrapper(bufs))).await + } + + /// Read bytes from the file, writing them into `bufs`. + /// + /// The current file cursor is not affected by this function. + pub async fn read_vectored_at, const N: usize>( + &self, + bufs: B, + offset: u64, + ) -> io::Result { + ReadVectored(self.fd.read_vectored_at(BufWrapper(bufs), offset)).await + } + + /// Read at least `n` bytes from the file, writing them into `bufs`. + pub async fn read_n_vectored, const N: usize>( + &self, + bufs: B, + n: usize, + ) -> io::Result { + debug_assert!( + bufs.total_spare_capacity() >= n, + "called `File::read_n_vectored` with buffers smaller than `n`" + ); + ReadNVectored(self.fd.read_n_vectored(BufWrapper(bufs), n)).await + } + + /// Read at least `n` bytes from the file at `offset`, writing them into `bufs`. + /// + /// The current file cursor is not affected by this function. + pub async fn read_n_vectored_at, const N: usize>( + &self, + bufs: B, + offset: u64, + n: usize, + ) -> io::Result { + debug_assert!( + bufs.total_spare_capacity() >= n, + "called `File::read_n_vectored_at` with buffers smaller than `n`" + ); + ReadNVectored(self.fd.read_n_vectored_at(BufWrapper(bufs), offset, n)).await + } + + /// Write the bytes in `buf` to the file. 
+    ///
+    /// Returns the number of bytes written. This may be fewer than the length
+    /// of `buf`. To ensure that all bytes are written use
+    /// [`File::write_all`].
+    pub async fn write<B: Buf>(&self, buf: B) -> io::Result<(B, usize)> {
+        Write(self.fd.write(BufWrapper(buf)).extract()).await
+    }
+
+    /// Write the bytes in `buf` to the file at `offset`.
+    ///
+    /// Returns the number of bytes written. This may be fewer than the length
+    /// of `buf`. To ensure that all bytes are written use
+    /// [`File::write_all`].
+    pub async fn write_at<B: Buf>(&self, buf: B, offset: u64) -> io::Result<(B, usize)> {
+        Write(self.fd.write_at(BufWrapper(buf), offset).extract()).await
+    }
+
+    /// Write all the bytes in `buf` to the file.
+    ///
+    /// If this fails to write all bytes (this happens if a write returns
+    /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`].
+    pub async fn write_all<B: Buf>(&self, buf: B) -> io::Result<B> {
+        WriteAll(self.fd.write_all(BufWrapper(buf)).extract()).await
+    }
+
+    /// Write all the bytes in `buf` to the file at `offset`.
+    ///
+    /// If this fails to write all bytes (this happens if a write returns
+    /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`].
+    pub async fn write_all_at<B: Buf>(&self, buf: B, offset: u64) -> io::Result<B> {
+        WriteAll(self.fd.write_all_at(BufWrapper(buf), offset).extract()).await
+    }
+
+    /// Write the bytes in `bufs` to the file.
+    ///
+    /// Returns the number of bytes written. This may be fewer than the length of
+    /// `bufs`. To ensure that all bytes are written use
+    /// [`File::write_vectored_all`].
+    pub async fn write_vectored<B: BufSlice<N>, const N: usize>(
+        &self,
+        bufs: B,
+    ) -> io::Result<(B, usize)> {
+        WriteVectored(self.fd.write_vectored(BufWrapper(bufs)).extract()).await
+    }
+
+    /// Write the bytes in `bufs` to the file at `offset`.
+    ///
+    /// Returns the number of bytes written. This may be fewer than the length of
+    /// `bufs`. To ensure that all bytes are written use
+    /// [`File::write_vectored_all`].
+    pub async fn write_vectored_at<B: BufSlice<N>, const N: usize>(
+        &self,
+        bufs: B,
+        offset: u64,
+    ) -> io::Result<(B, usize)> {
+        WriteVectored(
+            self.fd
+                .write_vectored_at(BufWrapper(bufs), offset)
+                .extract(),
+        )
+        .await
+    }
+
+    /// Write all the bytes in `bufs` to the file.
+    ///
+    /// If this fails to write all bytes (this happens if a write returns
+    /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`].
+    pub async fn write_vectored_all<B: BufSlice<N>, const N: usize>(
+        &self,
+        bufs: B,
+    ) -> io::Result<B> {
+        WriteAllVectored(self.fd.write_all_vectored(BufWrapper(bufs)).extract()).await
+    }
+
+    /// Write all the bytes in `bufs` to the file at `offset`.
+    ///
+    /// If this fails to write all bytes (this happens if a write returns
+    /// `Ok(0)`) this will return [`io::ErrorKind::WriteZero`].
+    pub async fn write_vectored_all_at<B: BufSlice<N>, const N: usize>(
+        &self,
+        bufs: B,
+        offset: u64,
+    ) -> io::Result<B> {
+        WriteAllVectored(
+            self.fd
+                .write_all_vectored_at(BufWrapper(bufs), offset)
+                .extract(),
+        )
+        .await
+    }
+
+    /// Sync all OS-internal metadata to disk.
+    ///
+    /// # Notes
+    ///
+    /// Any uncompleted writes may not be synced to disk.
+    pub async fn sync_all(&self) -> io::Result<()> {
+        self.fd.sync_all().await
+    }
+
+    /// This function is similar to [`sync_all`], except that it may not
+    /// synchronize file metadata to the filesystem.
+    ///
+    /// This is intended for use cases that must synchronize content, but don’t
+    /// need the metadata on disk. The goal of this method is to reduce disk
+    /// operations.
+ /// + /// [`sync_all`]: AsyncFd::sync_all + /// + /// # Notes + /// + /// Any uncompleted writes may not be synced to disk. + pub async fn sync_data(&self) -> io::Result<()> { + self.fd.sync_data().await + } + + /// Retrieve metadata about the file. + pub async fn metadata(&self) -> io::Result { + self.fd.metadata().await.map(|m| Metadata { inner: *m }) + } + + /// Predeclare an access pattern for file data. + /// + /// Announce an intention to access file data in a specific pattern in the + /// future, thus allowing the kernel to perform appropriate optimizations. + /// + /// The advice applies to a (not necessarily existent) region starting at + /// offset and extending for len bytes (or until the end of the file if len + /// is 0). The advice is not binding; it merely constitutes an expectation + /// on behalf of the application. + pub async fn advise(&self, offset: u64, length: u32, advice: Advice) -> io::Result<()> { + self.fd.advise(offset, length, advice.as_libc()).await + } + + /// Manipulate file space. + /// + /// Manipulate the allocated disk space for the file referred for the byte + /// range starting at `offset` and continuing for `length` bytes. + pub async fn allocate(&self, offset: u64, length: u32, mode: AllocateMode) -> io::Result<()> { + self.fd.allocate(offset, length, mode.as_libc()).await + } +} + +impl fmt::Debug for File { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.fd.fmt(f) + } +} + +/// Options used to configure how a [`File`] is opened. +#[derive(Clone)] +#[must_use = "no file is opened until `fs::OpenOptions::open` or `open_temp_file` is called"] +pub struct OpenOptions { + inner: a10::fs::OpenOptions, +} + +impl OpenOptions { + /// Empty `OpenOptions`, has reading enabled by default. + pub const fn new() -> OpenOptions { + OpenOptions { + inner: a10::fs::OpenOptions::new(), + } + } + + /// Enable read access. + /// + /// Note that read access is already enabled by default, so this is only + /// useful if you called [`OpenOptions::write_only`] and want to enable read + /// access as well. + pub const fn read(self) -> Self { + OpenOptions { + inner: self.inner.read(), + } + } + + /// Enable write access. + pub const fn write(self) -> Self { + OpenOptions { + inner: self.inner.write(), + } + } + + /// Only enable write access, disabling read access. + pub const fn write_only(self) -> Self { + OpenOptions { + inner: self.inner.write_only(), + } + } + + /// Set writing to append only mode. + /// + /// # Notes + /// + /// This requires [writing access] to be enabled. + /// + /// [writing access]: OpenOptions::write + pub const fn append(self) -> Self { + OpenOptions { + inner: self.inner.append(), + } + } + + /// Truncate the file if it exists. + pub const fn truncate(self) -> Self { + OpenOptions { + inner: self.inner.truncate(), + } + } + + /// If the file doesn't exist create it. + pub const fn create(self) -> Self { + OpenOptions { + inner: self.inner.create(), + } + } + + /// Force a file to be created, failing if a file already exists. + /// + /// This options implies [`OpenOptions::create`]. + pub const fn create_new(self) -> Self { + OpenOptions { + inner: self.inner.create_new(), + } + } + + /// Write operations on the file will complete according to the requirements + /// of synchronized I/O *data* integrity completion. 
+ /// + /// By the time `write(2)` (and similar) return, the output data has been + /// transferred to the underlying hardware, along with any file metadata + /// that would be required to retrieve that data (i.e., as though each + /// `write(2)` was followed by a call to `fdatasync(2)`). + pub const fn data_sync(self) -> Self { + OpenOptions { + inner: self.inner.data_sync(), + } + } + + /// Write operations on the file will complete according to the requirements + /// of synchronized I/O *file* integrity completion (by contrast with the + /// synchronized I/O data integrity completion provided by + /// [`OpenOptions::data_sync`].) + /// + /// By the time `write(2)` (or similar) returns, the output data and + /// associated file metadata have been transferred to the underlying + /// hardware (i.e., as though each `write(2)` was followed by a call to + /// `fsync(2)`). + pub const fn sync(self) -> Self { + OpenOptions { + inner: self.inner.sync(), + } + } + + /// Try to minimize cache effects of the I/O to and from this file. + /// + /// File I/O is done directly to/from user-space buffers. This uses the + /// `O_DIRECT` flag which on its own makes an effort to transfer data + /// synchronously, but does not give the guarantees of the `O_SYNC` flag + /// ([`OpenOptions::sync`]) that data and necessary metadata are + /// transferred. To guarantee synchronous I/O, `O_SYNC` must be used in + /// addition to `O_DIRECT`. + pub const fn direct(self) -> Self { + OpenOptions { + inner: self.inner.direct(), + } + } + + /// Create an unnamed temporary regular file. The `dir` argument specifies a + /// directory; an unnamed inode will be created in that directory's + /// filesystem. Anything written to the resulting file will be lost when the + /// last file descriptor is closed, unless the file is given a name. + /// + /// [`OpenOptions::write`] must be set. The `linkat(2)` system call can be + /// used to make the temporary file permanent. + pub async fn open_temp_file(self, rt: &RT, dir: PathBuf) -> io::Result + where + RT: Access, + { + self.inner + .open_temp_file(rt.submission_queue(), dir) + .await + .map(|fd| File { fd }) + } + + /// Open `path`. + pub async fn open(self, rt: &RT, path: PathBuf) -> io::Result + where + RT: Access, + { + self.inner + .open(rt.submission_queue(), path) + .await + .map(|fd| File { fd }) + } +} + +impl fmt::Debug for OpenOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// Metadata information about a file. +/// +/// See [`File::metadata`]. +pub struct Metadata { + inner: a10::fs::Metadata, +} + +impl Metadata { + /// Returns the file type for this metadata. + pub const fn file_type(&self) -> FileType { + FileType(self.inner.file_type()) + } + + /// Returns `true` if this represents a directory. + pub const fn is_dir(&self) -> bool { + self.inner.is_dir() + } + + /// Returns `true` if this represents a file. + pub const fn is_file(&self) -> bool { + self.inner.is_file() + } + + /// Returns `true` if this represents a symbolic link. + pub const fn is_symlink(&self) -> bool { + self.inner.is_symlink() + } + + /// Returns the size of the file, in bytes, this metadata is for. + pub const fn len(&self) -> u64 { + self.inner.len() + } + + /// The "preferred" block size for efficient filesystem I/O. + pub const fn block_size(&self) -> u32 { + self.inner.block_size() + } + + /// Returns the permissions of the file this metadata is for. 
+ pub const fn permissions(&self) -> Permissions { + Permissions(self.inner.permissions()) + } + + /// Returns the time this file was last modified. + pub fn modified(&self) -> SystemTime { + self.inner.modified() + } + + /// Returns the time this file was last accessed. + /// + /// # Notes + /// + /// It's possible to disable keeping track of this access time, which makes + /// this function return an invalid value. + pub fn accessed(&self) -> SystemTime { + self.inner.accessed() + } + + /// Returns the time this file was created. + pub fn created(&self) -> SystemTime { + self.inner.created() + } +} + +impl fmt::Debug for Metadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// A structure representing a type of file with accessors for each file type. +/// +/// See [`Metadata`]. +#[derive(Copy, Clone)] +pub struct FileType(a10::fs::FileType); + +impl FileType { + /// Returns `true` if this represents a directory. + pub const fn is_dir(&self) -> bool { + self.0.is_dir() + } + + /// Returns `true` if this represents a file. + pub const fn is_file(&self) -> bool { + self.0.is_file() + } + + /// Returns `true` if this represents a symbolic link. + pub const fn is_symlink(&self) -> bool { + self.0.is_symlink() + } + + /// Returns `true` if this represents a socket. + pub const fn is_socket(&self) -> bool { + self.0.is_socket() + } + + /// Returns `true` if this represents a block device. + pub const fn is_block_device(&self) -> bool { + self.0.is_block_device() + } + + /// Returns `true` if this represents a character device. + pub const fn is_character_device(&self) -> bool { + self.0.is_character_device() + } + + /// Returns `true` if this represents a named fifo pipe. + pub const fn is_named_pipe(&self) -> bool { + self.0.is_named_pipe() + } +} + +impl fmt::Debug for FileType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +/// Access permissions. +/// +/// See [`Metadata`]. +#[derive(Copy, Clone)] +pub struct Permissions(a10::fs::Permissions); + +impl Permissions { + /// Return `true` if the owner has read permission. + pub const fn owner_can_read(&self) -> bool { + self.0.owner_can_read() + } + + /// Return `true` if the owner has write permission. + pub const fn owner_can_write(&self) -> bool { + self.0.owner_can_write() + } + + /// Return `true` if the owner has execute permission. + pub const fn owner_can_execute(&self) -> bool { + self.0.owner_can_execute() + } + + /// Return `true` if the group the file belongs to has read permission. + pub const fn group_can_read(&self) -> bool { + self.0.group_can_read() + } + + /// Return `true` if the group the file belongs to has write permission. + pub const fn group_can_write(&self) -> bool { + self.0.group_can_write() + } + + /// Return `true` if the group the file belongs to has execute permission. + pub const fn group_can_execute(&self) -> bool { + self.0.group_can_execute() + } + + /// Return `true` if others have read permission. + pub const fn others_can_read(&self) -> bool { + self.0.others_can_read() + } + + /// Return `true` if others have write permission. + pub const fn others_can_write(&self) -> bool { + self.0.others_can_write() + } + + /// Return `true` if others have execute permission. + pub const fn others_can_execute(&self) -> bool { + self.0.others_can_execute() + } +} + +impl fmt::Debug for Permissions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +/// Advice passed to [`File::advise`]. 
+#[non_exhaustive]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Advice {
+    /// Indicates that the application has no advice to give about its access
+    /// pattern for the specified data. If no advice is given for an open file,
+    /// this is the default assumption.
+    Normal,
+    /// The specified data will be accessed in random order.
+    Random,
+    /// The application expects to access the specified data sequentially (with
+    /// lower offsets read before higher ones).
+    Sequential,
+    /// The specified data will be accessed in the near future.
+    WillNeed,
+    /// The specified data will not be accessed in the near future.
+    DontNeed,
+    /// The specified data will be accessed only once.
+    Noreuse,
+}
+
+impl Advice {
+    const fn as_libc(self) -> libc::c_int {
+        match self {
+            Advice::Normal => libc::POSIX_FADV_NORMAL,
+            Advice::Random => libc::POSIX_FADV_RANDOM,
+            Advice::Sequential => libc::POSIX_FADV_SEQUENTIAL,
+            Advice::WillNeed => libc::POSIX_FADV_WILLNEED,
+            Advice::DontNeed => libc::POSIX_FADV_DONTNEED,
+            Advice::Noreuse => libc::POSIX_FADV_NOREUSE,
+        }
+    }
+}
+
+/// Allocation mode passed to [`File::allocate`].
+///
+/// # Notes
+///
+/// The availability of these operations differs per Linux version **and** file
+/// system used, see the [`fallocate(2)`] man page.
+///
+/// [`fallocate(2)`]: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
+#[non_exhaustive]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum AllocateMode {
+    /// Initialise range to zero.
+    InitRange,
+    /// Initialise range to zero, but don't change the file size.
+    InitRangeKeepSize,
+    /// Remove the range from the file.
+    RemoveRange,
+    /// Zero the range in the file.
+    ZeroRange,
+    /// Insert a range into the file.
+    InsertRange,
+}
+
+impl AllocateMode {
+    const fn as_libc(self) -> libc::c_int {
+        match self {
+            AllocateMode::InitRange => 0,
+            AllocateMode::InitRangeKeepSize => libc::FALLOC_FL_KEEP_SIZE,
+            AllocateMode::RemoveRange => libc::FALLOC_FL_COLLAPSE_RANGE,
+            AllocateMode::ZeroRange => libc::FALLOC_FL_ZERO_RANGE,
+            AllocateMode::InsertRange => libc::FALLOC_FL_INSERT_RANGE,
+        }
+    }
+}
+
+/// Creates a new, empty directory.
+pub async fn create_dir<RT>(rt: &RT, path: PathBuf) -> io::Result<()>
+where
+    RT: Access,
+{
+    a10::fs::create_dir(rt.submission_queue(), path).await
+}
+
+/// Rename a file or directory to a new name.
+pub async fn rename<RT>(rt: &RT, from: PathBuf, to: PathBuf) -> io::Result<()>
+where
+    RT: Access,
+{
+    a10::fs::rename(rt.submission_queue(), from, to).await
+}
+
+/// Remove a file.
+pub async fn remove_file<RT>(rt: &RT, path: PathBuf) -> io::Result<()>
+where
+    RT: Access,
+{
+    a10::fs::remove_file(rt.submission_queue(), path).await
+}
+
+/// Remove a directory.
+pub async fn remove_dir<RT>(rt: &RT, path: PathBuf) -> io::Result<()>
+where
+    RT: Access,
+{
+    a10::fs::remove_dir(rt.submission_queue(), path).await
+}
diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs
index 3b3a20439..addf978a3 100644
--- a/rt/src/io/buf.rs
+++ b/rt/src/io/buf.rs
@@ -687,7 +687,8 @@ unsafe impl, const N: usize> private::BufSlice for BufWrapper<
 /// Wrapper to limit the number of bytes `B` can use.
 ///
-/// See [`Buf::limit`] and [`BufMut::limit`].
+/// Created using [`Buf::limit`], [`BufMut::limit`], [`BufSlice::limit`] or
+/// [`BufMutSlice::limit`].
 #[derive(Debug)]
 pub struct Limited<B> {
     buf: B,
diff --git a/rt/src/lib.rs b/rt/src/lib.rs
index 60207c707..2c13ebbc3 100644
--- a/rt/src/lib.rs
+++ b/rt/src/lib.rs
@@ -218,6 +218,7 @@ pub mod access;
 mod channel;
 mod coordinator;
 mod error;
+pub mod fs;
 pub mod io;
 mod local;
 pub mod log;
diff --git a/rt/tests/functional.rs b/rt/tests/functional.rs
index 8af72eb4d..57c92dc6d 100644
--- a/rt/tests/functional.rs
+++ b/rt/tests/functional.rs
@@ -19,6 +19,7 @@ mod functional {
     mod actor_group;
     mod actor_ref;
     mod from_message;
+    mod fs;
     mod future;
     mod io;
     mod pipe;
diff --git a/rt/tests/functional/fs.rs b/rt/tests/functional/fs.rs
new file mode 100644
index 000000000..f3eb096e2
--- /dev/null
+++ b/rt/tests/functional/fs.rs
@@ -0,0 +1,212 @@
+//! Tests for the filesystem operations.
+
+use std::io;
+use std::path::PathBuf;
+
+use heph::actor;
+use heph_rt::access::ThreadLocal;
+use heph_rt::fs::{self, Advice, AllocateMode, File};
+use heph_rt::test::block_on_local_actor;
+
+use crate::util::{temp_dir_root, temp_file};
+
+const DATA1: &[u8] = b"Hello, World";
+const DATA2: &[u8] = b"Hello, Mars!";
+
+#[test]
+fn file_read_write() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("file_read_write");
+        let file = File::create(ctx.runtime_ref(), path).await.unwrap();
+        let buf = file.read(Vec::with_capacity(128)).await.unwrap();
+        assert!(buf.is_empty());
+
+        file.write_all(DATA1).await.unwrap();
+        let mut buf = file.read_at(buf, 0).await.unwrap();
+        assert_eq!(buf, DATA1);
+
+        file.write_all_at(&DATA2[7..], 7).await.unwrap();
+        buf.clear();
+        let buf = file.read_at(buf, 0).await.unwrap();
+        assert_eq!(buf, DATA2);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn file_sync_all() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("file_sync_all");
+        let file = File::create(ctx.runtime_ref(), path).await.unwrap();
+
+        file.write_all(DATA1).await.unwrap();
+        file.sync_all().await.unwrap();
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn file_sync_data() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("file_sync_data");
+        let file = File::create(ctx.runtime_ref(), path).await.unwrap();
+
+        file.write_all(DATA1).await.unwrap();
+        file.sync_data().await.unwrap();
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn file_metadata() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = PathBuf::from("src/lib.rs");
+        let file = File::open(ctx.runtime_ref(), path).await.unwrap();
+
+        let metadata = file.metadata().await.unwrap();
+        assert!(!metadata.is_dir());
+        assert!(metadata.is_file());
+        assert!(!metadata.is_symlink());
+        assert!(metadata.len() >= 20_000);
+        assert!(metadata.block_size() >= 512);
+
+        let file_type = metadata.file_type();
+        assert!(!file_type.is_dir());
+        assert!(file_type.is_file());
+        assert!(!file_type.is_symlink());
+        assert!(!file_type.is_socket());
+        assert!(!file_type.is_block_device());
+        assert!(!file_type.is_character_device());
+        assert!(!file_type.is_named_pipe());
+
+        let permissions = metadata.permissions();
+        assert!(permissions.owner_can_read());
+        assert!(permissions.owner_can_write());
+        assert!(!permissions.owner_can_execute());
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn file_advise() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = PathBuf::from("src/lib.rs");
+        let file = File::open(ctx.runtime_ref(), path).await.unwrap();
+        file.advise(0, 0, Advice::Sequential).await.unwrap(); // Entire file.
+
+        let len = file.metadata().await.unwrap().len() as usize;
+        let buf = file.read_n(Vec::with_capacity(len), len).await.unwrap();
+        assert_eq!(buf.len(), len);
+
+        file.advise(0, 0, Advice::DontNeed).await.unwrap(); // Entire file.
+        drop(file);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn file_allocate() {
+    const SIZE: usize = 1024;
+
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("file_allocate");
+        let file = File::create(ctx.runtime_ref(), path).await.unwrap();
+
+        file.allocate(0, SIZE as u32, AllocateMode::InitRangeKeepSize)
+            .await
+            .unwrap();
+        assert_eq!(file.metadata().await.unwrap().len(), 0);
+
+        file.allocate(0, SIZE as u32, AllocateMode::InitRange)
+            .await
+            .unwrap();
+        assert_eq!(file.metadata().await.unwrap().len(), SIZE as u64);
+
+        let buf = file.read_n(Vec::with_capacity(SIZE), SIZE).await.unwrap();
+        assert_eq!(buf.len(), SIZE);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn create_dir() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let mut dir = temp_dir_root();
+        dir.push("create_dir");
+        fs::create_dir(ctx.runtime_ref(), dir.clone())
+            .await
+            .unwrap();
+
+        dir.push("test.txt");
+        let file = File::create(ctx.runtime_ref(), dir).await.unwrap();
+        file.write(DATA1).await.unwrap();
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn rename() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let to = temp_file("rename.1");
+        let from = temp_file("rename.2");
+        let file = File::create(ctx.runtime_ref(), from.clone()).await.unwrap();
+        file.write(DATA1).await.unwrap();
+        drop(file);
+
+        fs::rename(ctx.runtime_ref(), from, to.clone())
+            .await
+            .unwrap();
+
+        let file = File::open(ctx.runtime_ref(), to).await.unwrap();
+        let buf = file
+            .read(Vec::with_capacity(DATA1.len() + 1))
+            .await
+            .unwrap();
+        assert_eq!(buf, DATA1);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn remove_file() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("remove_file");
+        let file = File::create(ctx.runtime_ref(), path.clone()).await.unwrap();
+        file.write(DATA1).await.unwrap();
+        drop(file);
+
+        fs::remove_file(ctx.runtime_ref(), path.clone())
+            .await
+            .unwrap();
+
+        let err = File::open(ctx.runtime_ref(), path).await.unwrap_err();
+        assert_eq!(err.kind(), io::ErrorKind::NotFound);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
+
+#[test]
+fn remove_dir() {
+    async fn actor(ctx: actor::Context<!, ThreadLocal>) {
+        let path = temp_file("remove_dir");
+        std::fs::create_dir(&path).unwrap();
+
+        fs::remove_dir(ctx.runtime_ref(), path.clone())
+            .await
+            .unwrap();
+
+        let err = std::fs::read_dir(path).unwrap_err();
+        assert_eq!(err.kind(), io::ErrorKind::NotFound);
+    }
+
+    block_on_local_actor(actor as fn(_) -> _, ()).unwrap();
+}
diff --git a/rt/tests/util/mod.rs b/rt/tests/util/mod.rs
index 03300786a..d4a15b2df 100644
--- a/rt/tests/util/mod.rs
+++ b/rt/tests/util/mod.rs
@@ -57,10 +57,18 @@ pub fn refused_address() -> SocketAddr {
 
 /// Returns a path to a non-existing temporary file.
 pub fn temp_file(name: &str) -> PathBuf {
+    let mut dir = temp_dir_root();
+    dir.push(name);
+    dir
+}
+
+/// Returns the path to the root of our temporary directory, cleaned before
+/// each test run.
+pub fn temp_dir_root() -> PathBuf {
     static CLEANUP: Once = Once::new();
 
     let mut dir = temp_dir();
-    dir.push("heph.test/");
+    dir.push("heph_rt.test/");
 
     CLEANUP.call_once(|| {
         let _ = remove_dir_all(&dir);
@@ -69,7 +77,6 @@ pub fn temp_file(name: &str) -> PathBuf {
         }
     });
 
-    dir.push(name);
     dir
 }
 
From b283d4d574438efcd1f8094ae168d56590eebb3a Mon Sep 17 00:00:00 2001
From: Thomas de Zeeuw
Date: Fri, 26 May 2023 20:55:29 +0200
Subject: [PATCH 147/177] Show panic message in fmt::Debug for BlockOnError

Helps with debugging; it's still missing the location though, which is a
shame.
---
 rt/src/lib.rs         | 13 +++++++++++++
 rt/src/process/mod.rs | 14 +-------------
 rt/src/test.rs        |  9 ++++++---
 3 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/rt/src/lib.rs b/rt/src/lib.rs
index 2c13ebbc3..a6f098458 100644
--- a/rt/src/lib.rs
+++ b/rt/src/lib.rs
@@ -201,6 +201,7 @@ macro_rules! syscall {
     }};
 }
 
+use std::any::Any;
 use std::convert::TryInto;
 use std::future::Future;
 use std::rc::Rc;
@@ -724,3 +725,15 @@ fn cpu_usage(clock_id: libc::clockid_t) -> Duration {
         )
     }
 }
+
+/// Attempts to extract a message from a panic, defaulting to `<unknown>`.
+/// NOTE: be sure to dereference the `Box`!
+fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str {
+    match panic.downcast_ref::<&'static str>() {
+        Some(s) => s,
+        None => match panic.downcast_ref::<String>() {
+            Some(s) => s,
+            None => "<unknown>",
+        },
+    }
+}
diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs
index 3bea266f7..8096c7a03 100644
--- a/rt/src/process/mod.rs
+++ b/rt/src/process/mod.rs
@@ -1,6 +1,5 @@
 //! Module containing the `Process` trait, related types and implementations.
 
-use std::any::Any;
 use std::cmp::Ordering;
 use std::future::Future;
 use std::mem::size_of_val;
@@ -15,6 +14,7 @@ use heph::supervisor::Supervisor;
 use log::{as_debug, error, trace};
 use mio::Token;
 
+use crate::panic_message;
 use crate::spawn::options::Priority;
 
 #[cfg(test)]
@@ -97,18 +97,6 @@ impl<Fut> Future for FutureProcess<Fut> {
     }
 }
 
-/// Attempts to extract a message from a panic, defaulting to `<unknown>`.
-/// NOTE: be sure to dereference the `Box`!
-fn panic_message<'a>(panic: &'a (dyn Any + Send + 'static)) -> &'a str {
-    match panic.downcast_ref::<&'static str>() {
-        Some(s) => s,
-        None => match panic.downcast_ref::<String>() {
-            Some(s) => s,
-            None => "<unknown>",
-        },
-    }
-}
-
 impl<Fut> Process for FutureProcess<Fut>
 where
     Fut: Future,
diff --git a/rt/src/test.rs b/rt/src/test.rs
index 2b97f0ac2..7a7c4f6b3 100644
--- a/rt/src/test.rs
+++ b/rt/src/test.rs
@@ -74,8 +74,8 @@ use crate::thread_waker::ThreadWaker;
 use crate::wakers::shared::Wakers;
 use crate::worker::{Control, Worker};
 use crate::{
-    self as rt, shared, RuntimeRef, Sync, ThreadLocal, ThreadSafe, SYNC_WORKER_ID_END,
-    SYNC_WORKER_ID_START,
+    self as rt, panic_message, shared, RuntimeRef, Sync, ThreadLocal, ThreadSafe,
+    SYNC_WORKER_ID_END, SYNC_WORKER_ID_START,
 };
 
 #[doc(no_inline)]
@@ -335,7 +335,10 @@
         match self {
             BlockOnError::Creating(err) => f.debug_tuple("Creating").field(&err).finish(),
             BlockOnError::Running(err) => f.debug_tuple("Running").field(&err).finish(),
-            BlockOnError::Panic(err) => f.debug_tuple("Panic").field(&err).finish(),
+            BlockOnError::Panic(err) => f
+                .debug_tuple("Panic")
+                .field(&panic_message(&**err))
+                .finish(),
         }
     }
 }

From c3ac0ab32e6d27e8f45b88df16acbf6ac99b3a8e Mon Sep 17 00:00:00 2001
From: Thomas de Zeeuw
Date: Fri, 26 May 2023 20:57:26 +0200
Subject: [PATCH 148/177] Ignore Clippy lint

Doesn't make sense for filesystem metadata to have an is_empty method.
--- rt/src/fs.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rt/src/fs.rs b/rt/src/fs.rs index 691d02f18..8d2cebedb 100644 --- a/rt/src/fs.rs +++ b/rt/src/fs.rs @@ -470,6 +470,7 @@ impl Metadata { } /// Returns the size of the file, in bytes, this metadata is for. + #[allow(clippy::len_without_is_empty)] // Doesn't make sense. pub const fn len(&self) -> u64 { self.inner.len() } From 9e0def87bc4601d4f07b2e840abf65ebacc7e4d9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:07:19 +0200 Subject: [PATCH 149/177] Enable HTTP in Makefile The crate compiles again! --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 193e5ac06..156ffad0f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ include Makefile.include # Crates in this repo. -CRATES := ./ inbox rt remote # http, stuck on old nightly. +CRATES := ./ inbox rt remote http # Target that run the target in all $CRATES. TARGETS := test_all test_sanitizers_all test_sanitizer_all check_all clippy_all From 0199533db626e34def9a2763c214a9187fcf6e23 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:10:43 +0200 Subject: [PATCH 150/177] Fix a couple of warnings We shouldn't use drop on Copy types, so use `_ =` instead. --- src/actor/context.rs | 12 +++--------- src/lib.rs | 2 +- src/quick_start.rs | 6 +++--- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/actor/context.rs b/src/actor/context.rs index ad1debc94..da8ca9ec2 100644 --- a/src/actor/context.rs +++ b/src/actor/context.rs @@ -52,9 +52,7 @@ impl Context { /// println!("Hello world"); /// } /// } - /// - /// # // Use the `greeter_actor` function to silence dead code warning. - /// # drop(greeter_actor); + /// # _ = greeter_actor; // Silence dead code warnings. /// ``` pub fn try_receive_next(&mut self) -> Result { self.inbox.try_recv().map_err(RecvError::from) @@ -77,9 +75,7 @@ impl Context { /// println!("Got a message: {msg}"); /// } /// } - /// - /// # // Use the `print_actor` function to silence dead code warning. - /// # drop(print_actor); + /// # _ = print_actor; // Silence dead code warnings. /// ``` /// /// Same as the example above, but this actor will only wait for a limited @@ -107,9 +103,7 @@ impl Context { /// Err(_) => println!("Timed out receiving message"), /// } /// } - /// - /// # // Use the `print_actor` function to silence dead code warning. - /// # drop(print_actor); + /// # _ = print_actor; // Silence dead code warnings. /// ``` pub fn receive_next<'ctx>(&'ctx mut self) -> ReceiveMessage<'ctx, M> { ReceiveMessage { diff --git a/src/lib.rs b/src/lib.rs index 91e9e1323..303e8b6c6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,7 +20,7 @@ //! println!("got a message: {msg}"); //! } //! } -//! # drop(actor); // Silence dead code warnings. +//! # _ = actor; // Silence dead code warnings. //! ``` //! //! Heph uses an event-driven, non-blocking I/O, share nothing design. But what diff --git a/src/quick_start.rs b/src/quick_start.rs index 7b0dc53a5..397bbd323 100644 --- a/src/quick_start.rs +++ b/src/quick_start.rs @@ -37,7 +37,7 @@ //! println!("got a message: {msg}"); //! } //! } -//! # drop(actor); // Silence dead code warnings. +//! # _ = actor; // Silence dead code warnings. //! ``` //! //! The example above also shows how an actor can receive and process messages. @@ -75,7 +75,7 @@ //! } //! Ok(()) //! } -//! # drop(filter); // Silence dead code warnings. +//! # _ = filter; // Silence dead code warnings. //! ``` //! //! 
See the [actor reference] module for more information about what actor @@ -173,7 +173,7 @@ //! } //! Ok(()) //! } -//! # drop(spawn_actor); // Silence dead code warnings. +//! # _ = spawn_actor; // Silence dead code warnings. //! ``` //! //! See the [`Spawn`] trait for more information about spawning actors and see From 0993086da1777f83fc2c6e188ecbba0606c76599 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:11:59 +0200 Subject: [PATCH 151/177] Remove unused any attribute configuration --- remote/src/net_relay/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/remote/src/net_relay/mod.rs b/remote/src/net_relay/mod.rs index f3025c9a2..d6effe49e 100644 --- a/remote/src/net_relay/mod.rs +++ b/remote/src/net_relay/mod.rs @@ -154,7 +154,7 @@ pub enum Tcp {} pub enum Udp {} /// Use JSON serialisation. -#[cfg(any(feature = "json"))] +#[cfg(feature = "json")] #[allow(missing_debug_implementations)] #[allow(clippy::empty_enum)] pub enum Json {} @@ -241,7 +241,7 @@ impl Config { impl Config { /// Use [`Json`] serialisation. - #[cfg(any(feature = "json"))] + #[cfg(feature = "json")] pub fn json(self) -> Config { Config { router: self.router, @@ -337,7 +337,7 @@ mod private { use serde::de::DeserializeOwned; use serde::Serialize; - #[cfg(any(feature = "json"))] + #[cfg(feature = "json")] use super::Json; /// Trait that defined (de)serialisation. @@ -376,7 +376,7 @@ mod private { fn byte_offset(&self) -> usize; } - #[cfg(any(feature = "json"))] + #[cfg(feature = "json")] impl Serde for Json { type Iter<'a, T> = serde_json::StreamDeserializer<'a, serde_json::de::SliceRead<'a>, T> where T: DeserializeOwned; @@ -404,7 +404,7 @@ mod private { } } - #[cfg(any(feature = "json"))] + #[cfg(feature = "json")] impl<'de, R, T> DeIter for serde_json::StreamDeserializer<'de, R, T> where T: DeserializeOwned, From 7dad410bf0dfa275546426a3eb41be3bde45533a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:19:53 +0200 Subject: [PATCH 152/177] Fix Clippy lints in HTTP crate --- http/src/client.rs | 45 ++++++++++++++++++---------------------- http/src/head/header.rs | 6 +++--- http/src/head/version.rs | 1 + http/src/server.rs | 14 ++++++------- 4 files changed, 30 insertions(+), 36 deletions(-) diff --git a/http/src/client.rs b/http/src/client.rs index 52b29546d..7627072a1 100644 --- a/http/src/client.rs +++ b/http/src/client.rs @@ -274,7 +274,7 @@ impl Client { // > its decimal value defines the expected // > message body length in octets. None => { - body_length = Some(ResponseBodyLength::Known(length)) + body_length = Some(ResponseBodyLength::Known(length)); } } } else { @@ -311,7 +311,7 @@ impl Client { // > connection until it is closed by // > the server. if encodings.peek().is_some() { - body_length = Some(ResponseBodyLength::ReadToEnd) + body_length = Some(ResponseBodyLength::ReadToEnd); } else { body_length = Some(ResponseBodyLength::Chunked); } @@ -361,9 +361,6 @@ impl Client { Err(_) => return Err(ResponseError::InvalidChunkSize), } } - Some(ResponseBodyLength::ReadToEnd) => BodyKind::Unknown { - read_complete: false, - }, // RFC 7230 section 3.3.3 point 1: // > Any response to a HEAD request and any response // > with a 1xx (Informational), 204 (No Content), or @@ -384,7 +381,7 @@ impl Client { // > length is determined by the number of octets // > received prior to the server closing the // > connection. 
- None => BodyKind::Unknown { + None | Some(ResponseBodyLength::ReadToEnd) => BodyKind::Unknown { read_complete: false, }, }; @@ -583,13 +580,12 @@ impl<'c> Body<'c> { left_in_chunk, read_complete, } => { - if *left_in_chunk != 0 { - *left_in_chunk - } else { + if *left_in_chunk == 0 { self.client.read_chunk(left_in_chunk, read_complete).await?; // Read from the client's buffer again. continue; } + *left_in_chunk } // We don't have an actual limit, but all the remaining bytes // make up the response body, so we can safely read them all. @@ -637,13 +633,12 @@ impl<'c> Body<'c> { left_in_chunk, read_complete, } => { - if *left_in_chunk != 0 { - *left_in_chunk - } else { + if *left_in_chunk == 0 { self.client.read_chunk(left_in_chunk, read_complete).await?; // Read from the client's buffer again. continue; } + *left_in_chunk } // We don't have an actual limit, but all the remaining bytes // make up the response body, so we can safely read them all. @@ -802,19 +797,19 @@ impl PartialEq for ResponseError { fn eq(&self, other: &ResponseError) -> bool { use ResponseError::*; match (self, other) { - (IncompleteResponse, IncompleteResponse) => true, - (HeadTooLarge, HeadTooLarge) => true, - (InvalidContentLength, InvalidContentLength) => true, - (DifferentContentLengths, DifferentContentLengths) => true, - (InvalidHeaderName, InvalidHeaderName) => true, - (InvalidHeaderValue, InvalidHeaderValue) => true, - (TooManyHeaders, TooManyHeaders) => true, - (UnsupportedTransferEncoding, UnsupportedTransferEncoding) => true, - (ContentLengthAndTransferEncoding, ContentLengthAndTransferEncoding) => true, - (InvalidNewLine, InvalidNewLine) => true, - (InvalidVersion, InvalidVersion) => true, - (InvalidStatus, InvalidStatus) => true, - (InvalidChunkSize, InvalidChunkSize) => true, + (IncompleteResponse, IncompleteResponse) + | (HeadTooLarge, HeadTooLarge) + | (InvalidContentLength, InvalidContentLength) + | (DifferentContentLengths, DifferentContentLengths) + | (InvalidHeaderName, InvalidHeaderName) + | (InvalidHeaderValue, InvalidHeaderValue) + | (TooManyHeaders, TooManyHeaders) + | (UnsupportedTransferEncoding, UnsupportedTransferEncoding) + | (ContentLengthAndTransferEncoding, ContentLengthAndTransferEncoding) + | (InvalidNewLine, InvalidNewLine) + | (InvalidVersion, InvalidVersion) + | (InvalidStatus, InvalidStatus) + | (InvalidChunkSize, InvalidChunkSize) => true, (Io(err1), Io(err2)) => { if let (Some(errno1), Some(errno2)) = (err1.raw_os_error(), err2.raw_os_error()) { errno1 == errno2 diff --git a/http/src/head/header.rs b/http/src/head/header.rs index ec9b536df..479390269 100644 --- a/http/src/head/header.rs +++ b/http/src/head/header.rs @@ -95,13 +95,13 @@ impl Headers { /// If you don't want duplicate headers you can use (the more expansive) /// [`Headers::insert`] method. pub fn append(&mut self, header: Header<'static, '_>) { - self._append(header.name, header.value) + self._append(header.name, header.value); } /// Insert `header`, removing all existing headers with the same name. pub fn insert(&mut self, header: Header<'static, '_>) { self.remove_all(&header.name); - self._append(header.name, header.value) + self._append(header.name, header.value); } fn _append(&mut self, name: HeaderName<'static>, value: &[u8]) { @@ -1087,7 +1087,7 @@ macro_rules! 
int_impl { let mut value: $ty = 0; for b in src.iter().copied() { - if (b'0'..=b'9').contains(&b) { + if b.is_ascii_digit() { match value.checked_mul(10) { Some(v) => value = v, None => return Err(ParseIntError), diff --git a/http/src/head/version.rs b/http/src/head/version.rs index 6558c5b41..053e94856 100644 --- a/http/src/head/version.rs +++ b/http/src/head/version.rs @@ -44,6 +44,7 @@ impl Version { /// /// This function can be used to return the highest version given a major /// version. + #[must_use] pub const fn highest_minor(self) -> Version { match self { Version::Http10 | Version::Http11 => Version::Http11, diff --git a/http/src/server.rs b/http/src/server.rs index 2696aa8b6..f01749a92 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -666,10 +666,10 @@ impl Connection { match body.length() { _ if !request_method.expects_body() || !status.includes_body() => { send_body = false; - extend_content_length_header(&mut http_head, &mut itoa_buf, 0) + extend_content_length_header(&mut http_head, &mut itoa_buf, 0); } BodyLength::Known(length) => { - extend_content_length_header(&mut http_head, &mut itoa_buf, length) + extend_content_length_header(&mut http_head, &mut itoa_buf, length); } BodyLength::Chunked => { http_head.extend_from_slice(b"Transfer-Encoding: chunked\r\n"); @@ -884,13 +884,12 @@ impl<'a> Body<'a> { left_in_chunk, read_complete, } => { - if *left_in_chunk != 0 { - *left_in_chunk - } else { + if *left_in_chunk == 0 { self.conn.read_chunk(left_in_chunk, read_complete).await?; // Read from the client's buffer again. continue; } + *left_in_chunk } }; @@ -935,13 +934,12 @@ impl<'a> Body<'a> { left_in_chunk, read_complete, } => { - if *left_in_chunk != 0 { - *left_in_chunk - } else { + if *left_in_chunk == 0 { self.conn.read_chunk(left_in_chunk, read_complete).await?; // Read from the client's buffer again. continue; } + *left_in_chunk } }; From 3ac2e5af72573acd5bcc4b883c3b509e7046fcde Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:22:28 +0200 Subject: [PATCH 153/177] Don't allow single-match-else Clippy lint Only had a single occurence so might as well fix it. --- Makefile.include | 5 ++--- rt/src/systemd.rs | 11 +++++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/Makefile.include b/Makefile.include index c8df4bc84..b32076a97 100644 --- a/Makefile.include +++ b/Makefile.include @@ -43,8 +43,8 @@ $(TARGETS): # `equatable-if-let`: bad lint. # `future-not-send`: we don't want to require all generic parameters to be `Send`. # `manual-let-else`: not really a fan of this. -# `match-bool`, `single-match-else`: often less lines of code and I find that -# use `match` generally strictly better then `if`s. +# `match-bool`: often less lines of code and I find that use `match` generally +# strictly better then `if`s. # `missing-const-for-fn`: See https://github.com/rust-lang/rust-clippy/issues/4979. # `module-name-repetitions`: we re-export various names. # `needless-lifetimes`: lifetime serves as documentation. @@ -85,7 +85,6 @@ clippy: --allow clippy::option-if-let-else \ --allow clippy::redundant-pub-crate \ --allow clippy::significant-drop-tightening \ - --allow clippy::single-match-else \ --allow clippy::use-self \ doc: diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 62d3853b1..22bf452e6 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -284,12 +284,11 @@ where H: FnMut() -> Result<(), E>, E: ToString, { - let notify = match Notify::new(ctx.runtime_ref()).await? 
{ - Some(notify) => notify, - None => { - debug!("not started via systemd, not starting `systemd::watchdog`"); - return Ok(()); - } + let notify = if let Some(notify) = Notify::new(ctx.runtime_ref()).await? { + notify + } else { + debug!("not started via systemd, not starting `systemd::watchdog`"); + return Ok(()); }; notify.change_state(State::Ready, None).await?; From e66f7bb5427d5916a6265ab02538448e1bd323ed Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:25:58 +0200 Subject: [PATCH 154/177] Fix Clippy manual-let-else warnings --- Makefile.include | 2 -- http/src/server.rs | 5 ++--- rt/src/systemd.rs | 4 +--- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/Makefile.include b/Makefile.include index b32076a97..ea328cc33 100644 --- a/Makefile.include +++ b/Makefile.include @@ -42,7 +42,6 @@ $(TARGETS): # `doc-markdown`: too many false positives. # `equatable-if-let`: bad lint. # `future-not-send`: we don't want to require all generic parameters to be `Send`. -# `manual-let-else`: not really a fan of this. # `match-bool`: often less lines of code and I find that use `match` generally # strictly better then `if`s. # `missing-const-for-fn`: See https://github.com/rust-lang/rust-clippy/issues/4979. @@ -74,7 +73,6 @@ clippy: --allow clippy::enum-glob-use \ --allow clippy::equatable-if-let \ --allow clippy::future-not-send \ - --allow clippy::manual-let-else \ --allow clippy::match-bool \ --allow clippy::missing-const-for-fn \ --allow clippy::missing-errors-doc \ diff --git a/http/src/server.rs b/http/src/server.rs index f01749a92..fba79357a 100644 --- a/http/src/server.rs +++ b/http/src/server.rs @@ -316,9 +316,8 @@ impl Connection { // SAFETY: all these unwraps are safe because `parse` above // ensures there all `Some`. - let method = match request.method.unwrap().parse() { - Ok(method) => method, - Err(_) => return Err(RequestError::UnknownMethod), + let Ok(method) = request.method.unwrap().parse() else { + return Err(RequestError::UnknownMethod); }; self.last_method = Some(method); let path = request.path.unwrap().to_string(); diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 22bf452e6..053d54bea 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -284,9 +284,7 @@ where H: FnMut() -> Result<(), E>, E: ToString, { - let notify = if let Some(notify) = Notify::new(ctx.runtime_ref()).await? { - notify - } else { + let Some(notify) = Notify::new(ctx.runtime_ref()).await? else { debug!("not started via systemd, not starting `systemd::watchdog`"); return Ok(()); }; From c661be624a0a225f44a156e66d4a574b94841ac7 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 26 May 2023 21:36:10 +0200 Subject: [PATCH 155/177] Mark various functions as const Basically where possible. --- inbox/src/lib.rs | 2 +- remote/src/net_relay/uuid.rs | 2 +- rt/src/local/waker.rs | 2 +- rt/src/net/tcp/listener.rs | 2 +- rt/src/net/uds/listener.rs | 2 +- rt/src/scheduler/shared/inactive.rs | 6 +++--- rt/src/scheduler/shared/mod.rs | 2 +- rt/src/shared/mod.rs | 2 +- rt/src/systemd.rs | 2 +- rt/src/wakers/mod.rs | 2 +- rt/src/wakers/shared.rs | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/inbox/src/lib.rs b/inbox/src/lib.rs index d30a1fc6a..cccddde0e 100644 --- a/inbox/src/lib.rs +++ b/inbox/src/lib.rs @@ -1297,7 +1297,7 @@ pub struct Id(usize); impl Id { #[doc(hidden)] // Not part of the stable API. 
- pub fn as_usize(self) -> usize { + pub const fn as_usize(self) -> usize { self.0 } } diff --git a/remote/src/net_relay/uuid.rs b/remote/src/net_relay/uuid.rs index a814b613a..56655f84a 100644 --- a/remote/src/net_relay/uuid.rs +++ b/remote/src/net_relay/uuid.rs @@ -179,7 +179,7 @@ fn from_hex_hyphenated(input: &[u8]) -> Result { Ok(Uuid(bytes)) } -fn from_hex_byte(b: u8) -> Result { +const fn from_hex_byte(b: u8) -> Result { match b { b'A'..=b'F' => Ok(b - b'A' + 10), b'a'..=b'f' => Ok(b - b'a' + 10), diff --git a/rt/src/local/waker.rs b/rt/src/local/waker.rs index dafceaed4..ee55e5454 100644 --- a/rt/src/local/waker.rs +++ b/rt/src/local/waker.rs @@ -188,7 +188,7 @@ impl WakerData { static WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); -fn assert_copy() {} +const fn assert_copy() {} unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { assert_copy::(); diff --git a/rt/src/net/tcp/listener.rs b/rt/src/net/tcp/listener.rs index dd47973a3..d1d91ce9d 100644 --- a/rt/src/net/tcp/listener.rs +++ b/rt/src/net/tcp/listener.rs @@ -190,7 +190,7 @@ impl TcpListener { /// The CPU affinity is **not** set on the returned TCP stream. To set that /// use [`TcpStream::set_auto_cpu_affinity`]. #[allow(clippy::doc_markdown)] // For "io_uring". - pub fn incoming(&self) -> Incoming<'_> { + pub const fn incoming(&self) -> Incoming<'_> { Incoming(self.fd.multishot_accept()) } diff --git a/rt/src/net/uds/listener.rs b/rt/src/net/uds/listener.rs index c63bb96bf..dac2569c9 100644 --- a/rt/src/net/uds/listener.rs +++ b/rt/src/net/uds/listener.rs @@ -165,7 +165,7 @@ impl UnixListener { /// The CPU affinity is **not** set on the returned Unix stream. To set that /// use [`UnixStream::set_auto_cpu_affinity`]. #[allow(clippy::doc_markdown)] // For "io_uring". - pub fn incoming(&self) -> Incoming<'_> { + pub const fn incoming(&self) -> Incoming<'_> { Incoming(self.fd.multishot_accept()) } diff --git a/rt/src/scheduler/shared/inactive.rs b/rt/src/scheduler/shared/inactive.rs index c1776080d..63fdbec4e 100644 --- a/rt/src/scheduler/shared/inactive.rs +++ b/rt/src/scheduler/shared/inactive.rs @@ -19,7 +19,7 @@ const SKIP_BITS: usize = 2; const SKIP_MASK: usize = (1 << SKIP_BITS) - 1; /// Returns `false` if `pid`'s `SKIP_BITS` aren't valid. -fn ok_pid(pid: ProcessId) -> bool { +const fn ok_pid(pid: ProcessId) -> bool { pid.0 & SKIP_MASK == 0 } @@ -593,7 +593,7 @@ fn tag_branch(branch: Pin>) -> TaggedPointer { } /// Create a mark ready-to-run `Pointer`. -fn ready_to_run(pid: ProcessId) -> TaggedPointer { +const fn ready_to_run(pid: ProcessId) -> TaggedPointer { (pid.0 | READY_TO_RUN) as *mut () } @@ -658,7 +658,7 @@ fn as_ptr(ptr: TaggedPointer) -> *mut () { } /// Returns the working pid for `ptr` at `depth`. -fn wpid_for(pid: ProcessId, depth: usize) -> usize { +const fn wpid_for(pid: ProcessId, depth: usize) -> usize { pid.0 >> ((depth * LEVEL_SHIFT) + SKIP_BITS) } diff --git a/rt/src/scheduler/shared/mod.rs b/rt/src/scheduler/shared/mod.rs index 939bb6f5f..b5650ec66 100644 --- a/rt/src/scheduler/shared/mod.rs +++ b/rt/src/scheduler/shared/mod.rs @@ -88,7 +88,7 @@ pub(crate) struct Scheduler { impl Scheduler { /// Create a new `Scheduler`. 
- pub(crate) fn new() -> Scheduler { + pub(crate) const fn new() -> Scheduler { Scheduler { ready: RunQueue::empty(), inactive: Inactive::empty(), diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index 2a1e4012a..fd648f0db 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -173,7 +173,7 @@ impl RuntimeInternals { } /// Returns the io_uring submission queue. - pub(crate) fn submission_queue(&self) -> &a10::SubmissionQueue { + pub(crate) const fn submission_queue(&self) -> &a10::SubmissionQueue { &self.sq } diff --git a/rt/src/systemd.rs b/rt/src/systemd.rs index 053d54bea..cd1015be2 100644 --- a/rt/src/systemd.rs +++ b/rt/src/systemd.rs @@ -125,7 +125,7 @@ impl Notify { } /// Returns the watchdog timeout, if any. - pub fn watchdog_timeout(&self) -> Option { + pub const fn watchdog_timeout(&self) -> Option { self.watch_dog } diff --git a/rt/src/wakers/mod.rs b/rt/src/wakers/mod.rs index 3422892a0..4b793b7cf 100644 --- a/rt/src/wakers/mod.rs +++ b/rt/src/wakers/mod.rs @@ -111,6 +111,6 @@ unsafe fn data_as_raw_ptr(data: *const ()) -> (*const AtomicBitMap, usize) { } /// Returns the minimum bitmap size such that `id` can be set. -fn min_bitmap_size(id: usize) -> usize { +const fn min_bitmap_size(id: usize) -> usize { (id + usize::BITS as usize) / usize::BITS as usize } diff --git a/rt/src/wakers/shared.rs b/rt/src/wakers/shared.rs index 8580e7a49..cb8a53b6b 100644 --- a/rt/src/wakers/shared.rs +++ b/rt/src/wakers/shared.rs @@ -139,7 +139,7 @@ impl WakerData { static WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(clone_wake_data, wake, wake_by_ref, drop_wake_data); -fn assert_copy() {} +const fn assert_copy() {} unsafe fn clone_wake_data(data: *const ()) -> task::RawWaker { assert_copy::(); From 86722ee3f31d4e927c9f779dfe0e7724c89b840d Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 29 May 2023 15:36:42 +0200 Subject: [PATCH 156/177] Remove double Arc from SyncWaker --- rt/src/test.rs | 6 +-- src/actor/sync.rs | 95 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 72 insertions(+), 29 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 7a7c4f6b3..b28965a4e 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -170,7 +170,7 @@ where { let (sender, mut receiver) = new_oneshot(); let waker = SyncWaker::new(); - _ = receiver.register_waker(&task::Waker::from(waker.clone())); + _ = receiver.register_waker(&waker.clone().into_waker()); run_on_test_runtime(move |runtime_ref| { drop(sender.try_send(f(runtime_ref))); Ok(()) @@ -236,7 +236,7 @@ where { let (sender, mut receiver) = new_oneshot(); let waker = SyncWaker::new(); - _ = receiver.register_waker(&task::Waker::from(waker.clone())); + _ = receiver.register_waker(&waker.clone().into_waker()); run_on_test_runtime(move |mut runtime_ref| { let (_, receiver) = heph_inbox::new(heph_inbox::MIN_CAP); let ctx = actor::Context::new(receiver, ThreadLocal::new(runtime_ref.clone())); @@ -289,7 +289,7 @@ where { let (sender, mut receiver) = new_oneshot(); let waker = SyncWaker::new(); - _ = receiver.register_waker(&task::Waker::from(waker.clone())); + _ = receiver.register_waker(&waker.clone().into_waker()); run_on_test_runtime(move |mut runtime_ref| { let (_, receiver) = heph_inbox::new(heph_inbox::MIN_CAP); let ctx = actor::Context::new(receiver, ThreadSafe::new(runtime_ref.clone_shared())); diff --git a/src/actor/sync.rs b/src/actor/sync.rs index ca45fe9d6..f5b1a9f0a 100644 --- a/src/actor/sync.rs +++ b/src/actor/sync.rs @@ -3,8 +3,7 @@ use std::future::Future; use std::io; use 
std::pin::pin; -use std::sync::Arc; -use std::task::{self, Poll}; +use std::task::{self, Poll, RawWaker, RawWakerVTable}; use std::thread::{self, Thread}; use std::time::{Duration, Instant}; @@ -168,7 +167,7 @@ impl_sync_actor!( #[derive(Debug)] pub struct SyncContext { inbox: Receiver, - future_waker: Option>, + future_waker: Option, /// Runtime access. rt: RT, } @@ -264,7 +263,7 @@ impl SyncContext { } /// Returns the [`SyncWaker`] used as [`task::Waker`] in futures. - fn future_waker(&mut self) -> Arc { + fn future_waker(&mut self) -> SyncWaker { if let Some(waker) = self.future_waker.as_ref() { waker.clone() } else { @@ -278,40 +277,37 @@ impl SyncContext { /// [`task::Waker`] implementation for blocking on [`Future`]s. // TODO: a `Thread` is already wrapped in an `Arc`, which mean we're double // `Arc`ing for the `Waker` implementation, try to remove that. -#[derive(Debug)] +#[derive(Clone, Debug)] #[doc(hidden)] // Not part of the stable API. pub struct SyncWaker { handle: Thread, } -impl task::Wake for SyncWaker { - fn wake(self: Arc) { - self.handle.unpark(); - } - - fn wake_by_ref(self: &Arc) { - self.handle.unpark(); - } -} - impl SyncWaker { + const VTABLE: RawWakerVTable = RawWakerVTable::new( + sync_waker_clone, + sync_waker_wake, + sync_waker_wake_by_ref, + sync_waker_drop, + ); + /// Create a new `SyncWaker`. #[doc(hidden)] // Not part of the stable API. - pub fn new() -> Arc { - Arc::new(SyncWaker { + pub fn new() -> SyncWaker { + SyncWaker { handle: thread::current(), - }) + } } /// Poll the `future` until completion, blocking when it can't make /// progress. #[doc(hidden)] // Not part of the stable API. - pub fn block_on(self: Arc, future: Fut) -> Fut::Output + pub fn block_on(self: SyncWaker, future: Fut) -> Fut::Output where Fut: Future, { let mut future = pin!(future); - let task_waker = task::Waker::from(self); + let task_waker = self.into_waker(); let mut task_ctx = task::Context::from_waker(&task_waker); loop { match Future::poll(future.as_mut(), &mut task_ctx) { @@ -325,16 +321,12 @@ impl SyncWaker { /// Poll the `future` until completion, blocking when it can't make /// progress, waiting up to `timeout` time. #[doc(hidden)] // Not part of the stable API. - pub fn block_for( - self: Arc, - future: Fut, - timeout: Duration, - ) -> Option + pub fn block_for(self: SyncWaker, future: Fut, timeout: Duration) -> Option where Fut: Future, { let mut future = pin!(future); - let task_waker = task::Waker::from(self); + let task_waker = self.into_waker(); let mut task_ctx = task::Context::from_waker(&task_waker); let start = Instant::now(); @@ -353,6 +345,57 @@ impl SyncWaker { } } } + + /// Returns the `SyncWaker` as task `Waker`. + #[doc(hidden)] // Not part of the stable API. + pub fn into_waker(self) -> task::Waker { + let data = self.into_data(); + let raw_waker = RawWaker::new(data, &SyncWaker::VTABLE); + unsafe { task::Waker::from_raw(raw_waker) } + } + + /// Returns itself as `task::RawWaker` data. + fn into_data(self) -> *const () { + // SAFETY: this is not safe. This only works because `Thread` uses + // `Pin>`, which is a pointer underneath. + unsafe { std::mem::transmute(self) } + } + + /// Inverse of [`SyncWaker::into_data`]. + /// + /// # Safety + /// + /// `data` MUST be created by [`SyncWaker::into_data`]. + unsafe fn from_data(data: *const ()) -> SyncWaker { + // SAFETY: inverse of `into_data`, see that for more info. 
+ unsafe { std::mem::transmute(data) } + } + + /// Same as [`SyncWaker::from_data`], but returns a reference instead of an + /// owned `SyncWaker`. + unsafe fn from_data_ref(data: &*const ()) -> &SyncWaker { + // SAFETY: inverse of `into_data`, see that for more info, also see + // `from_data`. + unsafe { std::mem::transmute(data) } + } +} + +unsafe fn sync_waker_clone(data: *const ()) -> RawWaker { + let waker = SyncWaker::from_data_ref(&data); + let data = waker.clone().into_data(); + RawWaker::new(data, &SyncWaker::VTABLE) +} + +unsafe fn sync_waker_wake(data: *const ()) { + SyncWaker::from_data(data).handle.unpark(); +} + +unsafe fn sync_waker_wake_by_ref(data: *const ()) { + SyncWaker::from_data_ref(&data).handle.unpark(); +} + +unsafe fn sync_waker_drop(data: *const ()) { + drop(SyncWaker::from_data(data)); } /// Spawn a synchronous actor. From 1d0c82ed822d5fad0773f62a7862f2861c162e16 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 29 May 2023 18:03:13 +0200 Subject: [PATCH 157/177] Rename test::block_on to block_on_future --- rt/src/test.rs | 4 ++-- rt/tests/functional/test.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index b28965a4e..dfd9cf4c2 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -20,7 +20,7 @@ //! the result. //! * [`block_on_actor`]: spawns a thread-safe [actor] and waits for the //! result. -//! * [`block_on`]: spawns a `Future` and waits for the result. +//! * [`block_on_future`]: spawns a `Future` and waits for the result. //! * Initialising actors: //! * [`init_local_actor`]: initialise a thread-local actor. //! * [`init_actor`]: initialise a thread-safe actor. @@ -183,7 +183,7 @@ where /// Spawn `future` on the *test* runtime and wait for the result. /// /// This is useful to test async functions and futures in synchronous tests. 
-pub fn block_on(future: Fut) -> Fut::Output +pub fn block_on_future(future: Fut) -> Fut::Output where Fut: Future + Send + 'static, Fut::Output: Send, diff --git a/rt/tests/functional/test.rs b/rt/tests/functional/test.rs index cf8a0eea1..01425dcfa 100644 --- a/rt/tests/functional/test.rs +++ b/rt/tests/functional/test.rs @@ -20,8 +20,8 @@ use heph_rt::timer::Timer; use heph_rt::{self as rt, ThreadLocal}; #[test] -fn block_on() { - let result = test::block_on(async move { "All good" }); +fn block_on_future() { + let result = test::block_on_future(async move { "All good" }); assert_eq!(result, "All good"); } From 51bda13f71dcbd49cf73eef89ac77615a6d07d5c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 29 May 2023 18:06:48 +0200 Subject: [PATCH 158/177] Update license years --- LICENSE | 2 +- http/LICENSE | 2 +- inbox/LICENSE | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 11861dcda..b701811c4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2017-2022 Thomas de Zeeuw +Copyright (C) 2017-2023 Thomas de Zeeuw Permission is hereby granted, free of charge, to any person obtaining a copy of diff --git a/http/LICENSE b/http/LICENSE index 1cc94c7c8..e171d4ed6 100644 --- a/http/LICENSE +++ b/http/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2021 Thomas de Zeeuw +Copyright (C) 2021-2023 Thomas de Zeeuw Permission is hereby granted, free of charge, to any person obtaining a copy of diff --git a/inbox/LICENSE b/inbox/LICENSE index de4243495..c77bbd337 100644 --- a/inbox/LICENSE +++ b/inbox/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2020-2022 Thomas de Zeeuw +Copyright (C) 2020-2023 Thomas de Zeeuw Permission is hereby granted, free of charge, to any person obtaining a copy of From cd2da6541b69b1b42bdf3ba14fccde6185eb442c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 30 May 2023 13:56:20 +0200 Subject: [PATCH 159/177] Update head module based on RFC 9110 The new RFC that defines HTTP. --- http/src/head/header.rs | 4 +- http/src/head/method.rs | 30 +++---- http/src/head/mod.rs | 4 +- http/src/head/status_code.rs | 149 +++++++++++++++++------------------ http/src/head/version.rs | 14 +--- 5 files changed, 91 insertions(+), 110 deletions(-) diff --git a/http/src/head/header.rs b/http/src/head/header.rs index 479390269..71eac4056 100644 --- a/http/src/head/header.rs +++ b/http/src/head/header.rs @@ -1,4 +1,4 @@ -//! Module with HTTP header related types. +//! Header related types. //! //! This module has three main types: //! * [`Headers`] is a list of mulitple headers, @@ -366,7 +366,7 @@ impl<'a> FusedIterator for Names<'a> {} /// HTTP header. /// -/// RFC 7230 section 3.2. +/// RFC 9110 section 6.3. #[derive(Clone, PartialEq, Eq)] pub struct Header<'n, 'v> { name: HeaderName<'n>, diff --git a/http/src/head/method.rs b/http/src/head/method.rs index dda73e274..bc2bd0a4a 100644 --- a/http/src/head/method.rs +++ b/http/src/head/method.rs @@ -1,4 +1,4 @@ -//! Module with HTTP method related types. +//! Method related types. use std::fmt; use std::str::FromStr; @@ -7,41 +7,41 @@ use crate::cmp_lower_case; /// HTTP method. /// -/// RFC 7231 section 4. +/// RFC 9110 section 9.3 #[non_exhaustive] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Method { /// GET method. /// - /// RFC 7231 section 4.3.1. + /// RFC 9110 section 9.3.1. Get, /// HEAD method. /// - /// RFC 7231 section 4.3.2. + /// RFC 9110 section 9.3.2. Head, /// POST method. /// - /// RFC 7231 section 4.3.3. + /// RFC 9110 section 9.3.3. Post, /// PUT method. 
/// - /// RFC 7231 section 4.3.4. + /// RFC 9110 section 9.3.4. Put, /// DELETE method. /// - /// RFC 7231 section 4.3.5. + /// RFC 9110 section 9.3.5. Delete, /// CONNECT method. /// - /// RFC 7231 section 4.3.6. + /// RFC 9110 section 9.3.6. Connect, /// OPTIONS method. /// - /// RFC 7231 section 4.3.7. + /// RFC 9110 section 9.3.7. Options, /// TRACE method. /// - /// RFC 7231 section 4.3.8. + /// RFC 9110 section 9.3.8. Trace, /// PATCH method. /// @@ -52,7 +52,7 @@ pub enum Method { impl Method { /// Returns `true` if the method is safe. /// - /// RFC 7321 section 4.2.1. + /// RFC 9110 section 9.2.1. #[rustfmt::skip] pub const fn is_safe(self) -> bool { matches!(self, Method::Get | Method::Head | Method::Options | Method::Trace) @@ -60,7 +60,7 @@ impl Method { /// Returns `true` if the method is idempotent. /// - /// RFC 7321 section 4.2.2. + /// RFC 9110 section 9.2.2. pub const fn is_idempotent(self) -> bool { matches!(self, Method::Put | Method::Delete) || self.is_safe() } @@ -69,12 +69,8 @@ impl Method { /// /// This is only the case for the HEAD method. /// - /// RFC 7230 section 3.3 and RFC 7321 section 4.3.2. + /// RFC 9110 section 6.4.1. pub const fn expects_body(self) -> bool { - // RFC 7231 section 4.3.2: - // > The HEAD method is identical to GET except that the server MUST NOT - // > send a message body in the response (i.e., the response terminates - // > at the end of the header section). !matches!(self, Method::Head) } diff --git a/http/src/head/mod.rs b/http/src/head/mod.rs index 1b05cfaaa..b72009ce2 100644 --- a/http/src/head/mod.rs +++ b/http/src/head/mod.rs @@ -1,4 +1,4 @@ -//! Module with the type part of a HTTP message head. +//! Types for the HTTP message head. use std::fmt; @@ -64,7 +64,7 @@ impl RequestHead { /// Requests from the HTTP server will return the highest version it /// understands, e.g. if a client used HTTP/1.2 (which doesn't exists) the /// version would be set to HTTP/1.1 (the highest version this crate - /// understands) per RFC 7230 section 2.6. + /// understands) per RFC 9110 section 6.2. pub const fn version(&self) -> Version { self.version } diff --git a/http/src/head/status_code.rs b/http/src/head/status_code.rs index 1d5b68327..b7935e375 100644 --- a/http/src/head/status_code.rs +++ b/http/src/head/status_code.rs @@ -5,7 +5,7 @@ use std::fmt; /// A complete list can be found at the HTTP Status Code Registry: /// . /// -/// RFC 7231 section 6. +/// RFC 9110 section 15. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct StatusCode(pub u16); @@ -13,261 +13,261 @@ impl StatusCode { // 1xx range. /// 100 Continue. /// - /// RFC 7231 section 6.2.1. + /// RFC 9110 section 15.2.1. pub const CONTINUE: StatusCode = StatusCode(100); /// 101 Switching Protocols. /// - /// RFC 7231 section 6.2.2. + /// RFC 9110 section 15.2.2. pub const SWITCHING_PROTOCOLS: StatusCode = StatusCode(101); - /// 103 Processing. + /// 102 Processing. /// - /// RFC 2518. - pub const PROCESSING: StatusCode = StatusCode(103); - /// 104 Early Hints. + /// RFC 2518 section 10.1. + pub const PROCESSING: StatusCode = StatusCode(102); + /// 103 Early Hints. /// - /// RFC 8297. - pub const EARLY_HINTS: StatusCode = StatusCode(104); + /// RFC 8297 section 2. + pub const EARLY_HINTS: StatusCode = StatusCode(103); // 2xx range. /// 200 OK. /// - /// RFC 7231 section 6.3.1. + /// RFC 9110 section 15.3.1. pub const OK: StatusCode = StatusCode(200); /// 201 Created. /// - /// RFC 7231 section 6.3.2. + /// RFC 9110 section 15.3.2. 
pub const CREATED: StatusCode = StatusCode(201); /// 202 Accepted. /// - /// RFC 7231 section 6.3.3. + /// RFC 9110 section 15.3.3. pub const ACCEPTED: StatusCode = StatusCode(202); /// 203 Non-Authoritative Information. /// - /// RFC 7231 section 6.3.4. + /// RFC 9110 section 15.3.4. pub const NON_AUTHORITATIVE_INFORMATION: StatusCode = StatusCode(203); /// 204 No Content. /// - /// RFC 7231 section 6.3.5. + /// RFC 9110 section 15.3.5. pub const NO_CONTENT: StatusCode = StatusCode(204); /// 205 Reset Content. /// - /// RFC 7231 section 6.3.6. + /// RFC 9110 section 15.3.6. pub const RESET_CONTENT: StatusCode = StatusCode(205); /// 206 Partial Content. /// - /// RFC 7233 section 4.1. + /// RFC 9110 section 15.3.7. pub const PARTIAL_CONTENT: StatusCode = StatusCode(206); /// 207 Multi-Status. /// - /// RFC 4918. + /// RFC 4918 section 11.1. pub const MULTI_STATUS: StatusCode = StatusCode(207); /// 208 Already Reported. /// - /// RFC 5842. + /// RFC 5842 section 7.1. pub const ALREADY_REPORTED: StatusCode = StatusCode(208); /// 226 IM Used. /// - /// RFC 3229. + /// RFC 3229 section 10.4.1. pub const IM_USED: StatusCode = StatusCode(226); // 3xx range. /// 300 Multiple Choices. /// - /// RFC 7231 section 6.4.1. + /// RFC 9110 section 15.4.1. pub const MULTIPLE_CHOICES: StatusCode = StatusCode(300); /// 301 Moved Permanently. /// - /// RFC 7231 section 6.4.2. + /// RFC 9110 section 15.4.2. pub const MOVED_PERMANENTLY: StatusCode = StatusCode(301); /// 302 Found. /// - /// RFC 7231 section 6.4.3. + /// RFC 9110 section 15.4.3. pub const FOUND: StatusCode = StatusCode(302); /// 303 See Other. /// - /// RFC 7231 section 6.4.4. + /// RFC 9110 section 15.4.4. pub const SEE_OTHER: StatusCode = StatusCode(303); /// 304 Not Modified. /// - /// RFC 7232 section 4.1. + /// RFC 9110 section 15.4.5. pub const NOT_MODIFIED: StatusCode = StatusCode(304); - // NOTE: 306 is unused, per RFC 7231 section 6.4.6. /// 305 Use Proxy. /// - /// RFC 7231 section 6.4.5. + /// RFC 9110 section 15.4.6. pub const USE_PROXY: StatusCode = StatusCode(305); + // NOTE: 306 is unused, per RFC 9110 section 15.4.7. /// 307 Temporary Redirect. /// - /// RFC 7231 section 6.4.7. + /// RFC 9110 section 15.4.8. pub const TEMPORARY_REDIRECT: StatusCode = StatusCode(307); /// 308 Permanent Redirect. /// - /// RFC 7538. + /// RFC 9110 section 15.4.9. pub const PERMANENT_REDIRECT: StatusCode = StatusCode(308); // 4xx range. /// 400 Bad Request. /// - /// RFC 7231 section 6.5.1. + /// RFC 9110 section 15.5.1. pub const BAD_REQUEST: StatusCode = StatusCode(400); /// 401 Unauthorized. /// - /// RFC 7235 section 3.1. + /// RFC 9110 section 15.5.2. pub const UNAUTHORIZED: StatusCode = StatusCode(401); /// 402 Payment Required. /// - /// RFC 7231 section 6.5.2. + /// RFC 9110 section 15.5.3. pub const PAYMENT_REQUIRED: StatusCode = StatusCode(402); /// 403 Forbidden. /// - /// RFC 7231 section 6.5.3. + /// RFC 9110 section 15.5.4. pub const FORBIDDEN: StatusCode = StatusCode(403); /// 404 Not Found. /// - /// RFC 7231 section 6.5.4. + /// RFC 9110 section 15.5.5. pub const NOT_FOUND: StatusCode = StatusCode(404); /// 405 Method Not Allowed. /// - /// RFC 7231 section 6.5.5. + /// RFC 9110 section 15.5.6. pub const METHOD_NOT_ALLOWED: StatusCode = StatusCode(405); /// 406 Not Acceptable. /// - /// RFC 7231 section 6.5.6. + /// RFC 9110 section 15.5.7. pub const NOT_ACCEPTABLE: StatusCode = StatusCode(406); /// 407 Proxy Authentication Required. /// - /// RFC 7235 section 3.2. + /// RFC 9110 section 15.5.8. 
pub const PROXY_AUTHENTICATION_REQUIRED: StatusCode = StatusCode(407);
     /// 408 Request Timeout.
     ///
-    /// RFC 7231 section 6.5.7.
+    /// RFC 9110 section 15.5.9.
     pub const REQUEST_TIMEOUT: StatusCode = StatusCode(408);
     /// 409 Conflict.
     ///
-    /// RFC 7231 section 6.5.8.
+    /// RFC 9110 section 15.5.10.
     pub const CONFLICT: StatusCode = StatusCode(409);
     /// 410 Gone.
     ///
-    /// RFC 7231 section 6.5.9.
+    /// RFC 9110 section 15.5.11.
     pub const GONE: StatusCode = StatusCode(410);
     /// 411 Length Required.
     ///
-    /// RFC 7231 section 6.5.10.
+    /// RFC 9110 section 15.5.12.
     pub const LENGTH_REQUIRED: StatusCode = StatusCode(411);
     /// 412 Precondition Failed.
     ///
-    /// RFC 7232 section 4.2 and RFC 8144 section 3.2.
+    /// RFC 9110 section 15.5.13.
     pub const PRECONDITION_FAILED: StatusCode = StatusCode(412);
     /// 413 Payload Too Large.
     ///
-    /// RFC 7231 section 6.5.11.
+    /// RFC 9110 section 15.5.14.
     pub const PAYLOAD_TOO_LARGE: StatusCode = StatusCode(413);
     /// 414 URI Too Long.
     ///
-    /// RFC 7231 section 6.5.12.
+    /// RFC 9110 section 15.5.15.
     pub const URI_TOO_LONG: StatusCode = StatusCode(414);
     /// 415 Unsupported Media Type.
     ///
-    /// RFC 7231 section 6.5.13 and RFC 7694 section 3.
+    /// RFC 9110 section 15.5.16.
     pub const UNSUPPORTED_MEDIA_TYPE: StatusCode = StatusCode(415);
     /// 416 Range Not Satisfiable.
     ///
-    /// RFC 7233 section 4.4.
+    /// RFC 9110 section 15.5.17.
     pub const RANGE_NOT_SATISFIABLE: StatusCode = StatusCode(416);
     /// 417 Expectation Failed.
     ///
-    /// RFC 7231 section 6.5.14.
+    /// RFC 9110 section 15.5.18.
     pub const EXPECTATION_FAILED: StatusCode = StatusCode(417);
-    // NOTE: 418-420 are unassigned.
+    // NOTE: 418 is unused, 419-420 are unassigned.
     /// 421 Misdirected Request.
     ///
-    /// RFC 7540 section 9.1.2.
+    /// RFC 9110 section 15.5.20.
     pub const MISDIRECTED_REQUEST: StatusCode = StatusCode(421);
     /// 422 Unprocessable Entity.
     ///
-    /// RFC 4918.
+    /// RFC 9110 section 15.5.21.
     pub const UNPROCESSABLE_ENTITY: StatusCode = StatusCode(422);
     /// 423 Locked.
     ///
-    /// RFC 4918.
+    /// RFC 4918 section 11.3.
     pub const LOCKED: StatusCode = StatusCode(423);
     /// 424 Failed Dependency.
     ///
-    /// RFC 4918.
+    /// RFC 4918 section 11.4.
     pub const FAILED_DEPENDENCY: StatusCode = StatusCode(424);
     /// 425 Too Early.
     ///
-    /// RFC 8470.
+    /// RFC 8470 section 5.2.
     pub const TOO_EARLY: StatusCode = StatusCode(425);
     /// 426 Upgrade Required.
     ///
-    /// RFC 7231 section 6.5.15.
+    /// RFC 9110 section 15.5.22.
     pub const UPGRADE_REQUIRED: StatusCode = StatusCode(426);
     // NOTE: 427 is unassigned.
     /// 428 Precondition Required.
     ///
-    /// RFC 6585.
+    /// RFC 6585 section 3.
     pub const PRECONDITION_REQUIRED: StatusCode = StatusCode(428);
     /// 429 Too Many Requests.
     ///
-    /// RFC 6585.
+    /// RFC 6585 section 4.
     pub const TOO_MANY_REQUESTS: StatusCode = StatusCode(429);
     // NOTE: 320 is unassigned.
     /// 431 Request Header Fields Too Large.
     ///
-    /// RFC 6585.
+    /// RFC 6585 section 5.
     pub const REQUEST_HEADER_FIELDS_TOO_LARGE: StatusCode = StatusCode(431);
     // NOTE: 432-450 are unassigned.
     /// 451 Unavailable For Legal Reasons.
     ///
-    /// RFC 7725.
+    /// RFC 7725 section 3.
     pub const UNAVAILABLE_FOR_LEGAL_REASONS: StatusCode = StatusCode(451);

     // 5xx range.
     /// 500 Internal Server Error.
     ///
-    /// RFC 7231 section 6.6.1.
+    /// RFC 9110 section 15.6.1.
     pub const INTERNAL_SERVER_ERROR: StatusCode = StatusCode(500);
     /// 501 Not Implemented.
     ///
-    /// RFC 7231 section 6.6.2.
+    /// RFC 9110 section 15.6.2.
     pub const NOT_IMPLEMENTED: StatusCode = StatusCode(501);
     /// 502 Bad Gateway. 
/// - /// RFC 7231 section 6.6.3. + /// RFC 9110 section 15.6.3. pub const BAD_GATEWAY: StatusCode = StatusCode(502); /// 503 Service Unavailable. /// - /// RFC 7231 section 6.6.4. + /// RFC 9110 section 15.6.4. pub const SERVICE_UNAVAILABLE: StatusCode = StatusCode(503); /// 504 Gateway Timeout. /// - /// RFC 7231 section 6.6.5. + /// RFC 9110 section 15.6.5. pub const GATEWAY_TIMEOUT: StatusCode = StatusCode(504); /// 505 HTTP Version Not Supported. /// - /// RFC 7231 section 6.6.6. + /// RFC 9110 section 15.6.6. pub const HTTP_VERSION_NOT_SUPPORTED: StatusCode = StatusCode(505); /// 506 Variant Also Negotiates. /// - /// RFC 2295. + /// RFC 2295 section 8.1. pub const VARIANT_ALSO_NEGOTIATES: StatusCode = StatusCode(506); /// 507 Insufficient Storage. /// - /// RFC 4918. + /// RFC 4918 section 11.5. pub const INSUFFICIENT_STORAGE: StatusCode = StatusCode(507); - /// 408 Loop Detected. + /// 508 Loop Detected. /// - /// RFC 5842. + /// RFC 5842 section 7.2. pub const LOOP_DETECTED: StatusCode = StatusCode(508); // NOTE: 509 is unassigned. /// 510 Not Extended. /// - /// RFC 2774. + /// RFC 2774 section 7. pub const NOT_EXTENDED: StatusCode = StatusCode(510); /// 511 Network Authentication Required. /// - /// RFC 6585. + /// RFC 6585 section 6. pub const NETWORK_AUTHENTICATION_REQUIRED: StatusCode = StatusCode(511); /// Returns `true` if the status code is in 1xx range. @@ -299,14 +299,7 @@ impl StatusCode { /// /// This includes the entire 1xx (Informational) range, 204 (No Content), /// and 304 (Not Modified). - /// - /// Also see RFC 7230 section 3.3 and RFC 7231 section 6 (the individual - /// status codes). pub const fn includes_body(self) -> bool { - // RFC 7230 section 3.3: - // > All 1xx (Informational), 204 (No Content), and 304 (Not Modified) - // > responses do not include a message body. All other responses do - // > include a message body, although the body might be of zero length. !matches!(self.0, 100..=199 | 204 | 304) } @@ -315,8 +308,8 @@ impl StatusCode { match self.0 { 100 => Some("Continue"), 101 => Some("Switching Protocols"), - 103 => Some("Processing"), - 104 => Some("Early Hints"), + 102 => Some("Processing"), + 103 => Some("Early Hints"), 200 => Some("OK"), 201 => Some("Created"), diff --git a/http/src/head/version.rs b/http/src/head/version.rs index 053e94856..b588fe4b0 100644 --- a/http/src/head/version.rs +++ b/http/src/head/version.rs @@ -1,11 +1,11 @@ -//! Module with HTTP version related types. +//! Version related types. use std::fmt; use std::str::FromStr; /// HTTP version. /// -/// RFC 7231 section 2.6. +/// RFC 9110 section 2.5. #[non_exhaustive] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Version { @@ -15,7 +15,7 @@ pub enum Version { Http10, /// HTTP/1.1. /// - /// RFC 7230. + /// RFC 9112. Http11, } @@ -36,14 +36,6 @@ impl Version { } /// Returns the highest minor version with the same major version as `self`. - /// - /// According to RFC 7230 section 2.6: - /// > A server SHOULD send a response version equal to the highest version - /// > to which the server is conformant that has a major version less than or - /// > equal to the one received in the request. - /// - /// This function can be used to return the highest version given a major - /// version. 
#[must_use] pub const fn highest_minor(self) -> Version { match self { From 096a49195b628fb48dbd12b024a1273a718087df Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 30 May 2023 13:56:51 +0200 Subject: [PATCH 160/177] Fix HTTP tests block_on -> block_on_future --- http/tests/functional/route.rs | 8 ++++---- http/tests/functional/transform.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/http/tests/functional/route.rs b/http/tests/functional/route.rs index e62372c92..29bbc2eaa 100644 --- a/http/tests/functional/route.rs +++ b/http/tests/functional/route.rs @@ -2,7 +2,7 @@ use heph_http::body::{EmptyBody, OneshotBody}; use heph_http::{route, Headers, Method, Request, Response, Version}; -use heph_rt::test::block_on; +use heph_rt::test::block_on_future; async fn route(request: Request) -> Response> { route!(match request { @@ -93,7 +93,7 @@ mod handlers { #[test] fn multiple_methods_same_route() { - block_on(async move { + block_on_future(async move { let tests = [Request::get("/".to_owned()), Request::head("/".to_owned())]; for test_request in tests { let response = route(test_request).await; @@ -104,7 +104,7 @@ fn multiple_methods_same_route() { #[test] fn correct_routing_based_on_method() { - block_on(async move { + block_on_future(async move { let methods = [ Method::Options, Method::Get, @@ -132,7 +132,7 @@ fn correct_routing_based_on_method() { #[test] fn not_found_fallback() { - block_on(async move { + block_on_future(async move { let tests = [ // Unknown path. Request::get("/unknown".to_owned()), diff --git a/http/tests/functional/transform.rs b/http/tests/functional/transform.rs index cd8a52f7b..266f8700d 100644 --- a/http/tests/functional/transform.rs +++ b/http/tests/functional/transform.rs @@ -91,7 +91,7 @@ fn transform_middleware() { }, TestBody::new(REQ_BODY), ); - let response: Response = test::block_on(middleware.handle(request)); + let response: Response = test::block_on_future(middleware.handle(request)); assert_eq!(response.status(), expected_status); assert_eq!(response.body().into_inner(), expected_body); } From ae680e45adddf81121aa8402a9f9809fdb4e521b Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 30 May 2023 13:57:04 +0200 Subject: [PATCH 161/177] Derive Debug for {Request,Response}Head --- http/src/head/mod.rs | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/http/src/head/mod.rs b/http/src/head/mod.rs index b72009ce2..061ed15ea 100644 --- a/http/src/head/mod.rs +++ b/http/src/head/mod.rs @@ -1,7 +1,5 @@ //! Types for the HTTP message head. -use std::fmt; - pub mod header; pub mod method; mod status_code; @@ -19,6 +17,7 @@ use crate::{Request, Response}; use header::FromHeaderValue; /// Head of a [`Request`]. +#[derive(Debug)] pub struct RequestHead { method: Method, pub(crate) path: String, @@ -130,18 +129,8 @@ impl RequestHead { } } -impl fmt::Debug for RequestHead { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RequestHead") - .field("method", &self.method) - .field("path", &self.path) - .field("version", &self.version) - .field("headers", &self.headers) - .finish() - } -} - /// Head of a [`Response`]. 
+#[derive(Debug)] pub struct ResponseHead { version: Version, status: StatusCode, @@ -233,13 +222,3 @@ impl ResponseHead { Response::from_head(self, body) } } - -impl fmt::Debug for ResponseHead { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ResponseHead") - .field("version", &self.version) - .field("status", &self.status) - .field("headers", &self.headers) - .finish() - } -} From b2fb82dbfcdc8ecffe0271a12461a2f059171a6e Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 30 May 2023 13:57:49 +0200 Subject: [PATCH 162/177] Derive Clone for {Request,Response}Head --- http/src/head/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/http/src/head/mod.rs b/http/src/head/mod.rs index 061ed15ea..65173e3b2 100644 --- a/http/src/head/mod.rs +++ b/http/src/head/mod.rs @@ -17,7 +17,7 @@ use crate::{Request, Response}; use header::FromHeaderValue; /// Head of a [`Request`]. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct RequestHead { method: Method, pub(crate) path: String, @@ -130,7 +130,7 @@ impl RequestHead { } /// Head of a [`Response`]. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct ResponseHead { version: Version, status: StatusCode, From 2879fe5d317671057ada836a79ae8eadfd59de80 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Tue, 30 May 2023 15:39:52 +0200 Subject: [PATCH 163/177] Update known headers --- http/src/head/header.rs | 392 +++++++++++++------------------- http/src/parse_headers.bash | 41 +++- http/tests/functional/header.rs | 96 ++------ 3 files changed, 206 insertions(+), 323 deletions(-) diff --git a/http/src/head/header.rs b/http/src/head/header.rs index 71eac4056..da8194472 100644 --- a/http/src/head/header.rs +++ b/http/src/head/header.rs @@ -478,119 +478,87 @@ macro_rules! known_headers { } impl<'n> HeaderName<'n> { - // NOTE: these are automatically generated by the `parse_headers.bash` + // NOTE: these are automatically generated by the `src/parse_headers.bash` // script. // NOTE: we adding here also add to the // `functional::header::from_str_known_headers` test. 
known_headers!( 2: [ - #[doc = "IM.\n\nRFC 4229."] + #[doc = "IM.\n\nRFC 3229."] (IM, "im"), #[doc = "If.\n\nRFC 4918."] (IF, "if"), - #[doc = "TE.\n\nRFC 7230 section 4.3."] + #[doc = "TE.\n\nRFC 9110 section 10.1.4."] (TE, "te"), ], 3: [ - #[doc = "Age.\n\nRFC 7234 section 5.1."] + #[doc = "Age.\n\nRFC 9111 section 5.1."] (AGE, "age"), #[doc = "DAV.\n\nRFC 4918."] (DAV, "dav"), - #[doc = "Ext.\n\nRFC 4229."] - (EXT, "ext"), - #[doc = "Man.\n\nRFC 4229."] - (MAN, "man"), - #[doc = "Opt.\n\nRFC 4229."] - (OPT, "opt"), - #[doc = "P3P.\n\nRFC 4229."] - (P3P, "p3p"), - #[doc = "PEP.\n\nRFC 4229."] - (PEP, "pep"), - #[doc = "TCN.\n\nRFC 4229."] + #[doc = "TCN.\n\nRFC 2295."] (TCN, "tcn"), #[doc = "TTL.\n\nRFC 8030 section 5.2."] (TTL, "ttl"), - #[doc = "URI.\n\nRFC 4229."] - (URI, "uri"), - #[doc = "Via.\n\nRFC 7230 section 5.7.1."] + #[doc = "Via.\n\nRFC 9110 section 7.6.3."] (VIA, "via"), ], 4: [ - #[doc = "A-IM.\n\nRFC 4229."] + #[doc = "A-IM.\n\nRFC 3229."] (A_IM, "a-im"), #[doc = "ALPN.\n\nRFC 7639 section 2."] (ALPN, "alpn"), #[doc = "DASL.\n\nRFC 5323."] (DASL, "dasl"), - #[doc = "Date.\n\nRFC 7231 section 7.1.1.2."] + #[doc = "DPoP.\n\nRFC -ietf-oauth-dpop-16."] + (DPOP, "dpop"), + #[doc = "Date.\n\nRFC 9110 section 6.6.1."] (DATE, "date"), - #[doc = "ETag.\n\nRFC 7232 section 2.3."] + #[doc = "ETag.\n\nRFC 9110 section 8.8.3."] (ETAG, "etag"), - #[doc = "From.\n\nRFC 7231 section 5.5.1."] + #[doc = "From.\n\nRFC 9110 section 10.1.2."] (FROM, "from"), - #[doc = "Host.\n\nRFC 7230 section 5.4."] + #[doc = "Host.\n\nRFC 9110 section 7.2."] (HOST, "host"), #[doc = "Link.\n\nRFC 8288."] (LINK, "link"), - #[doc = "Safe.\n\nRFC 4229."] - (SAFE, "safe"), #[doc = "SLUG.\n\nRFC 5023."] (SLUG, "slug"), - #[doc = "Vary.\n\nRFC 7231 section 7.1.4."] + #[doc = "Vary.\n\nRFC 9110 section 12.5.5."] (VARY, "vary"), - #[doc = "Cost.\n\nRFC 4229."] - (COST, "cost"), ], 5: [ - #[doc = "Allow.\n\nRFC 7231 section 7.4.1."] + #[doc = "Allow.\n\nRFC 9110 section 10.2.1."] (ALLOW, "allow"), - #[doc = "C-Ext.\n\nRFC 4229."] - (C_EXT, "c-ext"), - #[doc = "C-Man.\n\nRFC 4229."] - (C_MAN, "c-man"), - #[doc = "C-Opt.\n\nRFC 4229."] - (C_OPT, "c-opt"), - #[doc = "C-PEP.\n\nRFC 4229."] - (C_PEP, "c-pep"), - #[doc = "Close.\n\nRFC 7230 section 8.1."] + #[doc = "Close.\n\nRFC 9112 section 9.6."] (CLOSE, "close"), #[doc = "Depth.\n\nRFC 4918."] (DEPTH, "depth"), - #[doc = "Label.\n\nRFC 4229."] + #[doc = "Label.\n\nRFC 3253."] (LABEL, "label"), - #[doc = "Meter.\n\nRFC 4229."] + #[doc = "Meter.\n\nRFC 2227."] (METER, "meter"), - #[doc = "Range.\n\nRFC 7233 section 3.1."] + #[doc = "Range.\n\nRFC 9110 section 14.2."] (RANGE, "range"), #[doc = "Topic.\n\nRFC 8030 section 5.4."] (TOPIC, "topic"), - #[doc = "SubOK.\n\nRFC 4229."] - (SUBOK, "subok"), - #[doc = "Subst.\n\nRFC 4229."] - (SUBST, "subst"), - #[doc = "Title.\n\nRFC 4229."] - (TITLE, "title"), ], 6: [ - #[doc = "Accept.\n\nRFC 7231 section 5.3.2."] + #[doc = "Accept.\n\nRFC 9110 section 12.5.1."] (ACCEPT, "accept"), #[doc = "Cookie.\n\nRFC 6265."] (COOKIE, "cookie"), - #[doc = "Digest.\n\nRFC 4229."] + #[doc = "Digest.\n\nRFC 3230."] (DIGEST, "digest"), - #[doc = "Expect.\n\nRFC 7231 section 5.1.1."] + #[doc = "Expect.\n\nRFC 9110 section 10.1.1."] (EXPECT, "expect"), - #[doc = "Origin.\n\nRFC 6454."] - (ORIGIN, "origin"), #[doc = "OSCORE.\n\nRFC 8613 section 11.1."] (OSCORE, "oscore"), - #[doc = "Pragma.\n\nRFC 7234 section 5.4."] - (PRAGMA, "pragma"), + #[doc = "Origin.\n\nRFC 6454."] + (ORIGIN, "origin"), #[doc = "Prefer.\n\nRFC 7240."] (PREFER, "prefer"), - 
#[doc = "Public.\n\nRFC 4229."] - (PUBLIC, "public"), - #[doc = "Server.\n\nRFC 7231 section 7.4.2."] + #[doc = "Server.\n\nRFC 9110 section 10.2.4."] (SERVER, "server"), #[doc = "Sunset.\n\nRFC 8594."] (SUNSET, "sunset"), @@ -598,379 +566,325 @@ impl<'n> HeaderName<'n> { 7: [ #[doc = "Alt-Svc.\n\nRFC 7838."] (ALT_SVC, "alt-svc"), - #[doc = "Cookie2.\n\nRFC 2965, RFC 6265."] - (COOKIE2, "cookie2"), - #[doc = "Expires.\n\nRFC 7234 section 5.3."] + #[doc = "Expires.\n\nRFC 9111 section 5.3."] (EXPIRES, "expires"), #[doc = "Hobareg.\n\nRFC 7486 section 6.1.1."] (HOBAREG, "hobareg"), - #[doc = "Referer.\n\nRFC 7231 section 5.5.2."] + #[doc = "Ping-To.\n\nHTML."] + (PING_TO, "ping-to"), + #[doc = "Referer.\n\nRFC 9110 section 10.1.3."] (REFERER, "referer"), + #[doc = "Refresh.\n\nHTML."] + (REFRESH, "refresh"), #[doc = "Timeout.\n\nRFC 4918."] (TIMEOUT, "timeout"), - #[doc = "Trailer.\n\nRFC 7230 section 4.4."] + #[doc = "Trailer.\n\nRFC 9110 section 6.6.2."] (TRAILER, "trailer"), + #[doc = "Upgrade.\n\nRFC 9110 section 7.8."] + (UPGRADE, "upgrade"), #[doc = "Urgency.\n\nRFC 8030 section 5.3."] (URGENCY, "urgency"), - #[doc = "Upgrade.\n\nRFC 7230 section 6.7."] - (UPGRADE, "upgrade"), - #[doc = "Warning.\n\nRFC 7234 section 5.5."] - (WARNING, "warning"), - #[doc = "Version.\n\nRFC 4229."] - (VERSION, "version"), ], 8: [ #[doc = "Alt-Used.\n\nRFC 7838."] (ALT_USED, "alt-used"), #[doc = "CDN-Loop.\n\nRFC 8586."] (CDN_LOOP, "cdn-loop"), - #[doc = "If-Match.\n\nRFC 7232 section 3.1."] + #[doc = "If-Match.\n\nRFC 9110 section 13.1.1."] (IF_MATCH, "if-match"), - #[doc = "If-Range.\n\nRFC 7233 section 3.2."] + #[doc = "If-Range.\n\nRFC 9110 section 13.1.5."] (IF_RANGE, "if-range"), - #[doc = "Location.\n\nRFC 7231 section 7.1.2."] + #[doc = "Location.\n\nRFC 9110 section 10.2.2."] (LOCATION, "location"), - #[doc = "Pep-Info.\n\nRFC 4229."] - (PEP_INFO, "pep-info"), - #[doc = "Position.\n\nRFC 4229."] + #[doc = "Position.\n\nRFC 3648."] (POSITION, "position"), - #[doc = "Protocol.\n\nRFC 4229."] - (PROTOCOL, "protocol"), - #[doc = "Optional.\n\nRFC 4229."] - (OPTIONAL, "optional"), - #[doc = "UA-Color.\n\nRFC 4229."] - (UA_COLOR, "ua-color"), - #[doc = "UA-Media.\n\nRFC 4229."] - (UA_MEDIA, "ua-media"), + #[doc = "Priority.\n\nRFC 9218."] + (PRIORITY, "priority"), ], 9: [ #[doc = "Accept-CH.\n\nRFC 8942 section 3.1."] (ACCEPT_CH, "accept-ch"), - #[doc = "Expect-CT.\n\nRFC -ietf-httpbis-expect-ct-08."] + #[doc = "Expect-CT.\n\nRFC 9163."] (EXPECT_CT, "expect-ct"), #[doc = "Forwarded.\n\nRFC 7239."] (FORWARDED, "forwarded"), - #[doc = "Negotiate.\n\nRFC 4229."] + #[doc = "Negotiate.\n\nRFC 2295."] (NEGOTIATE, "negotiate"), #[doc = "Overwrite.\n\nRFC 4918."] (OVERWRITE, "overwrite"), - #[doc = "Isolation.\n\nOData Version 4.01 Part 1: Protocol, OASIS, Chet_Ensign."] - (ISOLATION, "isolation"), - #[doc = "UA-Pixels.\n\nRFC 4229."] - (UA_PIXELS, "ua-pixels"), + #[doc = "Ping-From.\n\nHTML."] + (PING_FROM, "ping-from"), ], 10: [ - #[doc = "Alternates.\n\nRFC 4229."] + #[doc = "Alternates.\n\nRFC 2295."] (ALTERNATES, "alternates"), - #[doc = "C-PEP-Info.\n\nRFC 4229."] - (C_PEP_INFO, "c-pep-info"), - #[doc = "Connection.\n\nRFC 7230 section 6.1."] + #[doc = "Connection.\n\nRFC 9110 section 7.6.1."] (CONNECTION, "connection"), - #[doc = "Content-ID.\n\nRFC 4229."] + #[doc = "Content-ID.\n\nThe HTTP Distribution and Replication Protocol."] (CONTENT_ID, "content-id"), - #[doc = "Delta-Base.\n\nRFC 4229."] + #[doc = "DPoP-Nonce.\n\nRFC -ietf-oauth-dpop-16."] + (DPOP_NONCE, "dpop-nonce"), + #[doc = 
"Delta-Base.\n\nRFC 3229."] (DELTA_BASE, "delta-base"), #[doc = "Early-Data.\n\nRFC 8470."] (EARLY_DATA, "early-data"), - #[doc = "GetProfile.\n\nRFC 4229."] - (GETPROFILE, "getprofile"), - #[doc = "Keep-Alive.\n\nRFC 4229."] + #[doc = "Keep-Alive.\n\nRFC 2068."] (KEEP_ALIVE, "keep-alive"), #[doc = "Lock-Token.\n\nRFC 4918."] (LOCK_TOKEN, "lock-token"), - #[doc = "PICS-Label.\n\nRFC 4229."] - (PICS_LABEL, "pics-label"), #[doc = "Set-Cookie.\n\nRFC 6265."] (SET_COOKIE, "set-cookie"), - #[doc = "SetProfile.\n\nRFC 4229."] - (SETPROFILE, "setprofile"), - #[doc = "SoapAction.\n\nRFC 4229."] + #[doc = "SoapAction.\n\nSimple Object Access Protocol (SOAP) 1.1."] (SOAPACTION, "soapaction"), - #[doc = "Status-URI.\n\nRFC 4229."] + #[doc = "Status-URI.\n\nRFC 2518."] (STATUS_URI, "status-uri"), - #[doc = "User-Agent.\n\nRFC 7231 section 5.5.3."] - (USER_AGENT, "user-agent"), - #[doc = "Compliance.\n\nRFC 4229."] - (COMPLIANCE, "compliance"), - #[doc = "Message-ID.\n\nRFC 4229."] - (MESSAGE_ID, "message-id"), - #[doc = "Tracestate.\n\n."] + #[doc = "Tracestate.\n\nTrace Context."] (TRACESTATE, "tracestate"), + #[doc = "User-Agent.\n\nRFC 9110 section 10.1.5."] + (USER_AGENT, "user-agent"), ], 11: [ - #[doc = "Accept-Post.\n\n."] + #[doc = "Accept-Post.\n\nLinked Data Platform 1.0."] (ACCEPT_POST, "accept-post"), - #[doc = "Content-MD5.\n\nRFC 4229."] - (CONTENT_MD5, "content-md5"), + #[doc = "Client-Cert.\n\nRFC -ietf-httpbis-client-cert-field-06 section 2."] + (CLIENT_CERT, "client-cert"), #[doc = "Destination.\n\nRFC 4918."] (DESTINATION, "destination"), - #[doc = "Retry-After.\n\nRFC 7231 section 7.1.3."] + #[doc = "Retry-After.\n\nRFC 9110 section 10.2.3."] (RETRY_AFTER, "retry-after"), - #[doc = "Set-Cookie2.\n\nRFC 2965, RFC 6265."] - (SET_COOKIE2, "set-cookie2"), - #[doc = "Want-Digest.\n\nRFC 4229."] - (WANT_DIGEST, "want-digest"), - #[doc = "Traceparent.\n\n."] + #[doc = "Sec-Purpose.\n\nFetch."] + (SEC_PURPOSE, "sec-purpose"), + #[doc = "Traceparent.\n\nTrace Context."] (TRACEPARENT, "traceparent"), + #[doc = "Want-Digest.\n\nRFC 3230."] + (WANT_DIGEST, "want-digest"), ], 12: [ #[doc = "Accept-Patch.\n\nRFC 5789."] (ACCEPT_PATCH, "accept-patch"), - #[doc = "Content-Base.\n\nRFC 2068, RFC 2616."] - (CONTENT_BASE, "content-base"), - #[doc = "Content-Type.\n\nRFC 7231 section 3.1.1.5."] + #[doc = "Cache-Status.\n\nRFC 9211."] + (CACHE_STATUS, "cache-status"), + #[doc = "Content-Type.\n\nRFC 9110 section 8.3."] (CONTENT_TYPE, "content-type"), - #[doc = "Derived-From.\n\nRFC 4229."] - (DERIVED_FROM, "derived-from"), - #[doc = "Max-Forwards.\n\nRFC 7231 section 5.1.2."] - (MAX_FORWARDS, "max-forwards"), - #[doc = "MIME-Version.\n\nRFC 7231, Appendix A.1."] + #[doc = "MIME-Version.\n\nRFC 9112 Appendix B.1."] (MIME_VERSION, "mime-version"), + #[doc = "Max-Forwards.\n\nRFC 9110 section 7.6.2."] + (MAX_FORWARDS, "max-forwards"), + #[doc = "Proxy-Status.\n\nRFC 9209."] + (PROXY_STATUS, "proxy-status"), #[doc = "Redirect-Ref.\n\nRFC 4437."] (REDIRECT_REF, "redirect-ref"), #[doc = "Replay-Nonce.\n\nRFC 8555 section 6.5.1."] (REPLAY_NONCE, "replay-nonce"), - #[doc = "Schedule-Tag.\n\nRFC 6638."] + #[doc = "Schedule-Tag.\n\nRFC 6338."] (SCHEDULE_TAG, "schedule-tag"), - #[doc = "Variant-Vary.\n\nRFC 4229."] + #[doc = "Variant-Vary.\n\nRFC 2295."] (VARIANT_VARY, "variant-vary"), - #[doc = "Method-Check.\n\nW3C Web Application Formats Working Group."] - (METHOD_CHECK, "method-check"), - #[doc = "Referer-Root.\n\nW3C Web Application Formats Working Group."] - (REFERER_ROOT, "referer-root"), #[doc = 
"X-Request-ID."] (X_REQUEST_ID, "x-request-id"), ], 13: [ - #[doc = "Accept-Ranges.\n\nRFC 7233 section 2.3."] + #[doc = "Accept-Ranges.\n\nRFC 9110 section 14.3."] (ACCEPT_RANGES, "accept-ranges"), - #[doc = "Authorization.\n\nRFC 7235 section 4.2."] + #[doc = "Authorization.\n\nRFC 9110 section 11.6.2."] (AUTHORIZATION, "authorization"), - #[doc = "Cache-Control.\n\nRFC 7234 section 5.2."] + #[doc = "Cache-Control.\n\nRFC 9111 section 5.2."] (CACHE_CONTROL, "cache-control"), - #[doc = "Content-Range.\n\nRFC 7233 section 4.2."] + #[doc = "Content-Range.\n\nRFC 9110 section 14.4."] (CONTENT_RANGE, "content-range"), - #[doc = "Default-Style.\n\nRFC 4229."] - (DEFAULT_STYLE, "default-style"), - #[doc = "If-None-Match.\n\nRFC 7232 section 3.2."] + #[doc = "If-None-Match.\n\nRFC 9110 section 13.1.2."] (IF_NONE_MATCH, "if-none-match"), - #[doc = "Last-Modified.\n\nRFC 7232 section 2.2."] + #[doc = "Last-Event-ID.\n\nHTML."] + (LAST_EVENT_ID, "last-event-id"), + #[doc = "Last-Modified.\n\nRFC 9110 section 8.8.2."] (LAST_MODIFIED, "last-modified"), #[doc = "OData-Version.\n\nOData Version 4.01 Part 1: Protocol, OASIS, Chet_Ensign."] (ODATA_VERSION, "odata-version"), - #[doc = "Ordering-Type.\n\nRFC 4229."] + #[doc = "Ordering-Type.\n\nRFC 3648."] (ORDERING_TYPE, "ordering-type"), - #[doc = "ProfileObject.\n\nRFC 4229."] - (PROFILEOBJECT, "profileobject"), - #[doc = "Protocol-Info.\n\nRFC 4229."] - (PROTOCOL_INFO, "protocol-info"), - #[doc = "UA-Resolution.\n\nRFC 4229."] - (UA_RESOLUTION, "ua-resolution"), + #[doc = "Server-Timing.\n\nServer Timing."] + (SERVER_TIMING, "server-timing"), ], 14: [ - #[doc = "Accept-Charset.\n\nRFC 7231 section 5.3.3."] - (ACCEPT_CHARSET, "accept-charset"), #[doc = "Cal-Managed-ID.\n\nRFC 8607 section 5.1."] (CAL_MANAGED_ID, "cal-managed-id"), #[doc = "Cert-Not-After.\n\nRFC 8739 section 3.3."] (CERT_NOT_AFTER, "cert-not-after"), - #[doc = "Content-Length.\n\nRFC 7230 section 3.3.2."] + #[doc = "Content-Length.\n\nRFC 9110 section 8.6."] (CONTENT_LENGTH, "content-length"), - #[doc = "HTTP2-Settings.\n\nRFC 7540 section 3.2.1."] - (HTTP2_SETTINGS, "http2-settings"), #[doc = "OData-EntityId.\n\nOData Version 4.01 Part 1: Protocol, OASIS, Chet_Ensign."] (ODATA_ENTITYID, "odata-entityid"), - #[doc = "Protocol-Query.\n\nRFC 4229."] - (PROTOCOL_QUERY, "protocol-query"), - #[doc = "Proxy-Features.\n\nRFC 4229."] - (PROXY_FEATURES, "proxy-features"), #[doc = "Schedule-Reply.\n\nRFC 6638."] (SCHEDULE_REPLY, "schedule-reply"), - #[doc = "Access-Control.\n\nW3C Web Application Formats Working Group."] - (ACCESS_CONTROL, "access-control"), - #[doc = "Non-Compliance.\n\nRFC 4229."] - (NON_COMPLIANCE, "non-compliance"), ], 15: [ #[doc = "Accept-Datetime.\n\nRFC 7089."] (ACCEPT_DATETIME, "accept-datetime"), - #[doc = "Accept-Encoding.\n\nRFC 7231 section 5.3.4, RFC 7694 section 3."] + #[doc = "Accept-Encoding.\n\nRFC 9110 section 12.5.3."] (ACCEPT_ENCODING, "accept-encoding"), - #[doc = "Accept-Features.\n\nRFC 4229."] + #[doc = "Accept-Features.\n\nRFC 2295."] (ACCEPT_FEATURES, "accept-features"), - #[doc = "Accept-Language.\n\nRFC 7231 section 5.3.5."] + #[doc = "Accept-Language.\n\nRFC 9110 section 12.5.4."] (ACCEPT_LANGUAGE, "accept-language"), #[doc = "Cert-Not-Before.\n\nRFC 8739 section 3.3."] (CERT_NOT_BEFORE, "cert-not-before"), - #[doc = "Content-Version.\n\nRFC 4229."] - (CONTENT_VERSION, "content-version"), - #[doc = "Differential-ID.\n\nRFC 4229."] + #[doc = "Clear-Site-Data.\n\nClear Site Data."] + (CLEAR_SITE_DATA, "clear-site-data"), + #[doc = 
"Differential-ID.\n\nThe HTTP Distribution and Replication Protocol."] (DIFFERENTIAL_ID, "differential-id"), #[doc = "OData-Isolation.\n\nOData Version 4.01 Part 1: Protocol, OASIS, Chet_Ensign."] (ODATA_ISOLATION, "odata-isolation"), #[doc = "Public-Key-Pins.\n\nRFC 7469."] (PUBLIC_KEY_PINS, "public-key-pins"), - #[doc = "Security-Scheme.\n\nRFC 4229."] - (SECURITY_SCHEME, "security-scheme"), - #[doc = "X-Frame-Options.\n\nRFC 7034."] + #[doc = "X-Frame-Options.\n\nHTML."] (X_FRAME_OPTIONS, "x-frame-options"), - #[doc = "EDIINT-Features.\n\nRFC 6017."] - (EDIINT_FEATURES, "ediint-features"), - #[doc = "Resolution-Hint.\n\nRFC 4229."] - (RESOLUTION_HINT, "resolution-hint"), - #[doc = "UA-Windowpixels.\n\nRFC 4229."] - (UA_WINDOWPIXELS, "ua-windowpixels"), - #[doc = "X-Device-Accept.\n\nW3C Mobile Web Best Practices Working Group."] - (X_DEVICE_ACCEPT, "x-device-accept"), ], 16: [ - #[doc = "Accept-Additions.\n\nRFC 4229."] + #[doc = "Accept-Additions.\n\nRFC 2324."] (ACCEPT_ADDITIONS, "accept-additions"), #[doc = "CalDAV-Timezones.\n\nRFC 7809 section 7.1."] (CALDAV_TIMEZONES, "caldav-timezones"), - #[doc = "Content-Encoding.\n\nRFC 7231 section 3.1.2.2."] + #[doc = "Capsule-Protocol.\n\nRFC 9297."] + (CAPSULE_PROTOCOL, "capsule-protocol"), + #[doc = "Content-Encoding.\n\nRFC 9110 section 8.4."] (CONTENT_ENCODING, "content-encoding"), - #[doc = "Content-Language.\n\nRFC 7231 section 3.1.3.2."] + #[doc = "Content-Language.\n\nRFC 9110 section 8.5."] (CONTENT_LANGUAGE, "content-language"), - #[doc = "Content-Location.\n\nRFC 7231 section 3.1.4.2."] + #[doc = "Content-Location.\n\nRFC 9110 section 8.7."] (CONTENT_LOCATION, "content-location"), #[doc = "Memento-Datetime.\n\nRFC 7089."] (MEMENTO_DATETIME, "memento-datetime"), #[doc = "OData-MaxVersion.\n\nOData Version 4.01 Part 1: Protocol, OASIS, Chet_Ensign."] (ODATA_MAXVERSION, "odata-maxversion"), - #[doc = "Protocol-Request.\n\nRFC 4229."] - (PROTOCOL_REQUEST, "protocol-request"), - #[doc = "WWW-Authenticate.\n\nRFC 7235 section 4.1."] + #[doc = "WWW-Authenticate.\n\nRFC 9110 section 11.6.1."] (WWW_AUTHENTICATE, "www-authenticate"), ], 17: [ - #[doc = "If-Modified-Since.\n\nRFC 7232 section 3.3."] + #[doc = "CDN-Cache-Control.\n\nRFC 9213."] + (CDN_CACHE_CONTROL, "cdn-cache-control"), + #[doc = "Client-Cert-Chain.\n\nRFC -ietf-httpbis-client-cert-field-06 section 2."] + (CLIENT_CERT_CHAIN, "client-cert-chain"), + #[doc = "If-Modified-Since.\n\nRFC 9110 section 13.1.3."] (IF_MODIFIED_SINCE, "if-modified-since"), - #[doc = "Proxy-Instruction.\n\nRFC 4229."] - (PROXY_INSTRUCTION, "proxy-instruction"), + #[doc = "OSLC-Core-Version.\n\nOASIS Project Specification 01, OASIS, Chet_Ensign."] + (OSLC_CORE_VERSION, "oslc-core-version"), #[doc = "Sec-Token-Binding.\n\nRFC 8473."] (SEC_TOKEN_BINDING, "sec-token-binding"), #[doc = "Sec-WebSocket-Key.\n\nRFC 6455."] (SEC_WEBSOCKET_KEY, "sec-websocket-key"), - #[doc = "Surrogate-Control.\n\nRFC 4229."] + #[doc = "Surrogate-Control.\n\nEdge Architecture Specification."] (SURROGATE_CONTROL, "surrogate-control"), - #[doc = "Transfer-Encoding.\n\nRFC 7230 section 3.3.1."] + #[doc = "Transfer-Encoding.\n\nRFC 9112 section 6.1."] (TRANSFER_ENCODING, "transfer-encoding"), - #[doc = "OSLC-Core-Version.\n\nOASIS Project Specification 01, OASIS, Chet_Ensign."] - (OSLC_CORE_VERSION, "oslc-core-version"), - #[doc = "Resolver-Location.\n\nRFC 4229."] - (RESOLVER_LOCATION, "resolver-location"), ], 18: [ - #[doc = "Content-Style-Type.\n\nRFC 4229."] - (CONTENT_STYLE_TYPE, "content-style-type"), #[doc = 
"Preference-Applied.\n\nRFC 7240."] (PREFERENCE_APPLIED, "preference-applied"), - #[doc = "Proxy-Authenticate.\n\nRFC 7235 section 4.3."] + #[doc = "Proxy-Authenticate.\n\nRFC 9110 section 11.7.1."] (PROXY_AUTHENTICATE, "proxy-authenticate"), ], 19: [ - #[doc = "Authentication-Info.\n\nRFC 7615 section 3."] + #[doc = "Authentication-Info.\n\nRFC 9110 section 11.6.3."] (AUTHENTICATION_INFO, "authentication-info"), #[doc = "Content-Disposition.\n\nRFC 6266."] (CONTENT_DISPOSITION, "content-disposition"), - #[doc = "Content-Script-Type.\n\nRFC 4229."] - (CONTENT_SCRIPT_TYPE, "content-script-type"), - #[doc = "If-Unmodified-Since.\n\nRFC 7232 section 3.4."] + #[doc = "If-Unmodified-Since.\n\nRFC 9110 section 13.1.4."] (IF_UNMODIFIED_SINCE, "if-unmodified-since"), - #[doc = "Proxy-Authorization.\n\nRFC 7235 section 4.4."] + #[doc = "Proxy-Authorization.\n\nRFC 9110 section 11.7.2."] (PROXY_AUTHORIZATION, "proxy-authorization"), - #[doc = "AMP-Cache-Transform.\n\n."] - (AMP_CACHE_TRANSFORM, "amp-cache-transform"), - #[doc = "Timing-Allow-Origin.\n\n."] - (TIMING_ALLOW_ORIGIN, "timing-allow-origin"), - #[doc = "X-Device-User-Agent.\n\nW3C Mobile Web Best Practices Working Group."] - (X_DEVICE_USER_AGENT, "x-device-user-agent"), ], 20: [ + #[doc = "Origin-Agent-Cluster.\n\nHTML."] + (ORIGIN_AGENT_CLUSTER, "origin-agent-cluster"), #[doc = "Sec-WebSocket-Accept.\n\nRFC 6455."] (SEC_WEBSOCKET_ACCEPT, "sec-websocket-accept"), - #[doc = "Surrogate-Capability.\n\nRFC 4229."] + #[doc = "Surrogate-Capability.\n\nEdge Architecture Specification."] (SURROGATE_CAPABILITY, "surrogate-capability"), - #[doc = "Method-Check-Expires.\n\nW3C Web Application Formats Working Group."] - (METHOD_CHECK_EXPIRES, "method-check-expires"), - #[doc = "Repeatability-Result.\n\nRepeatable Requests Version 1.0, OASIS, Chet_Ensign."] - (REPEATABILITY_RESULT, "repeatability-result"), ], 21: [ #[doc = "Apply-To-Redirect-Ref.\n\nRFC 4437."] (APPLY_TO_REDIRECT_REF, "apply-to-redirect-ref"), - #[doc = "If-Schedule-Tag-Match.\n\nRFC 6638."] + #[doc = "If-Schedule-Tag-Match.\n\n RFC 6338: Scheduling Extensions to CalDAV."] (IF_SCHEDULE_TAG_MATCH, "if-schedule-tag-match"), #[doc = "Sec-WebSocket-Version.\n\nRFC 6455."] (SEC_WEBSOCKET_VERSION, "sec-websocket-version"), ], 22: [ + #[doc = "Access-Control-Max-Age.\n\nFetch."] + (ACCESS_CONTROL_MAX_AGE, "access-control-max-age"), #[doc = "Authentication-Control.\n\nRFC 8053 section 4."] (AUTHENTICATION_CONTROL, "authentication-control"), #[doc = "Sec-WebSocket-Protocol.\n\nRFC 6455."] (SEC_WEBSOCKET_PROTOCOL, "sec-websocket-protocol"), - #[doc = "X-Content-Type-Options.\n\n."] + #[doc = "X-Content-Type-Options.\n\nFetch."] (X_CONTENT_TYPE_OPTIONS, "x-content-type-options"), - #[doc = "Access-Control-Max-Age.\n\nW3C Web Application Formats Working Group."] - (ACCESS_CONTROL_MAX_AGE, "access-control-max-age"), ], 23: [ - #[doc = "Repeatability-Client-ID.\n\nRepeatable Requests Version 1.0, OASIS, Chet_Ensign."] - (REPEATABILITY_CLIENT_ID, "repeatability-client-id"), - #[doc = "X-Device-Accept-Charset.\n\nW3C Mobile Web Best Practices Working Group."] - (X_DEVICE_ACCEPT_CHARSET, "x-device-accept-charset"), + #[doc = "Content-Security-Policy.\n\nContent Security Policy Level 3."] + (CONTENT_SECURITY_POLICY, "content-security-policy"), ], 24: [ #[doc = "Sec-WebSocket-Extensions.\n\nRFC 6455."] (SEC_WEBSOCKET_EXTENSIONS, "sec-websocket-extensions"), - #[doc = "Repeatability-First-Sent.\n\nRepeatable Requests Version 1.0, OASIS, Chet_Ensign."] - (REPEATABILITY_FIRST_SENT, 
"repeatability-first-sent"), - #[doc = "Repeatability-Request-ID.\n\nRepeatable Requests Version 1.0, OASIS, Chet_Ensign."] - (REPEATABILITY_REQUEST_ID, "repeatability-request-id"), - #[doc = "X-Device-Accept-Encoding.\n\nW3C Mobile Web Best Practices Working Group."] - (X_DEVICE_ACCEPT_ENCODING, "x-device-accept-encoding"), - #[doc = "X-Device-Accept-Language.\n\nW3C Mobile Web Best Practices Working Group."] - (X_DEVICE_ACCEPT_LANGUAGE, "x-device-accept-language"), ], 25: [ #[doc = "Optional-WWW-Authenticate.\n\nRFC 8053 section 3."] (OPTIONAL_WWW_AUTHENTICATE, "optional-www-authenticate"), - #[doc = "Proxy-Authentication-Info.\n\nRFC 7615 section 4."] + #[doc = "Proxy-Authentication-Info.\n\nRFC 9110 section 11.7.3."] (PROXY_AUTHENTICATION_INFO, "proxy-authentication-info"), #[doc = "Strict-Transport-Security.\n\nRFC 6797."] (STRICT_TRANSPORT_SECURITY, "strict-transport-security"), - #[doc = "Content-Transfer-Encoding.\n\nRFC 4229."] - (CONTENT_TRANSFER_ENCODING, "content-transfer-encoding"), + ], + 26: [ + #[doc = "Cross-Origin-Opener-Policy.\n\nHTML."] + (CROSS_ORIGIN_OPENER_POLICY, "cross-origin-opener-policy"), ], 27: [ + #[doc = "Access-Control-Allow-Origin.\n\nFetch."] + (ACCESS_CONTROL_ALLOW_ORIGIN, "access-control-allow-origin"), #[doc = "Public-Key-Pins-Report-Only.\n\nRFC 7469."] (PUBLIC_KEY_PINS_REPORT_ONLY, "public-key-pins-report-only"), - #[doc = "Access-Control-Allow-Origin.\n\nW3C Web Application Formats Working Group."] - (ACCESS_CONTROL_ALLOW_ORIGIN, "access-control-allow-origin"), ], 28: [ - #[doc = "Access-Control-Allow-Headers.\n\nW3C Web Application Formats Working Group."] + #[doc = "Access-Control-Allow-Headers.\n\nFetch."] (ACCESS_CONTROL_ALLOW_HEADERS, "access-control-allow-headers"), - #[doc = "Access-Control-Allow-Methods.\n\nW3C Web Application Formats Working Group."] + #[doc = "Access-Control-Allow-Methods.\n\nFetch."] (ACCESS_CONTROL_ALLOW_METHODS, "access-control-allow-methods"), + #[doc = "Cross-Origin-Embedder-Policy.\n\nHTML."] + (CROSS_ORIGIN_EMBEDDER_POLICY, "cross-origin-embedder-policy"), + #[doc = "Cross-Origin-Resource-Policy.\n\nFetch."] + (CROSS_ORIGIN_RESOURCE_POLICY, "cross-origin-resource-policy"), ], 29: [ - #[doc = "Access-Control-Request-Method.\n\nW3C Web Application Formats Working Group."] + #[doc = "Access-Control-Expose-Headers.\n\nFetch."] + (ACCESS_CONTROL_EXPOSE_HEADERS, "access-control-expose-headers"), + #[doc = "Access-Control-Request-Method.\n\nFetch."] (ACCESS_CONTROL_REQUEST_METHOD, "access-control-request-method"), ], 30: [ - #[doc = "Access-Control-Request-Headers.\n\nW3C Web Application Formats Working Group."] + #[doc = "Access-Control-Request-Headers.\n\nFetch."] (ACCESS_CONTROL_REQUEST_HEADERS, "access-control-request-headers"), ], 32: [ - #[doc = "Access-Control-Allow-Credentials.\n\nW3C Web Application Formats Working Group."] + #[doc = "Access-Control-Allow-Credentials.\n\nFetch."] (ACCESS_CONTROL_ALLOW_CREDENTIALS, "access-control-allow-credentials"), ], 33: [ #[doc = "Include-Referred-Token-Binding-ID.\n\nRFC 8473."] (INCLUDE_REFERRED_TOKEN_BINDING_ID, "include-referred-token-binding-id"), ], + 35: [ + #[doc = "Content-Security-Policy-Report-Only.\n\nContent Security Policy Level 3."] + (CONTENT_SECURITY_POLICY_REPORT_ONLY, "content-security-policy-report-only"), + ], + 38: [ + #[doc = "Cross-Origin-Opener-Policy-Report-Only.\n\nHTML."] + (CROSS_ORIGIN_OPENER_POLICY_REPORT_ONLY, "cross-origin-opener-policy-report-only"), + ], + 40: [ + #[doc = "Cross-Origin-Embedder-Policy-Report-Only.\n\nHTML."] + 
(CROSS_ORIGIN_EMBEDDER_POLICY_REPORT_ONLY, "cross-origin-embedder-policy-report-only"), + ], ); /// Create a new HTTP `HeaderName`. diff --git a/http/src/parse_headers.bash b/http/src/parse_headers.bash index a4ce63b5c..7f1bdb04c 100755 --- a/http/src/parse_headers.bash +++ b/http/src/parse_headers.bash @@ -1,9 +1,9 @@ #!/usr/bin/env bash -# Get the two csv files (permanent and provisional) from: -# https://www.iana.org/assignments/message-headers/message-headers.xhtml -# Remove the header from both file and run: -# $ cat perm-headers.csv prov-headers.csv | ./parse.bash +# Get the csv file with registered headers from +# . +# Remove the header from the file and run: +# $ cat field-names.csv | ./src/parse_headers.bash set -eu @@ -27,8 +27,10 @@ clean_reference_partial() { if [[ "${reference:0:3}" == "RFC" ]]; then # Add a space after 'RFC'. reference="RFC ${reference:3}" - # Remove comma and lower case section. - reference="${reference/, S/ s}" + # Remove double space + # Lowercase "Section" + # Remove ": $section_name" part. + reference=$(echo "$reference" | sed -e 's/Section/section/g' -e 's/:.*//' -e 's/ / /g') fi echo -n "$reference" @@ -57,10 +59,27 @@ clean_reference() { # Collect all known header name by length in `header_names`. declare -a header_names -while IFS=$',' read -r name template protocol status reference; do - # We're only interested in HTTP headers. - if [[ "http" != "$protocol" ]]; then - continue +while IFS=$',' read -r -a values; do + # The reference column may contain commas, so we have to use an array to + # extract the columns we're interested in. Specifically in the case of + # the reference column we need to join multiple columns. + + name="${values[0]}" + + # Only include "permanent" headers, ignoring deprecated and obsoleted + # headers. + status="${values[2]}" + if [[ "permanent" != "$status" ]]; then + continue; + fi + + # Stitch together the reference. + reference='' + if [[ "${#values[@]}" == 5 ]]; then + reference="${values[3]}" + else + unset values[-1] # Remove the comment. + reference=$(echo "${values[@]:3}" | xargs) fi reference="$(clean_reference "$reference")" @@ -71,6 +90,8 @@ while IFS=$',' read -r name template protocol status reference; do value_length="${#const_value}" # Value length. docs="#[doc = \"$name.\\\\n\\\\n$reference.\"]" + # NOTE: can't assign arrays/list to array values, so we have to use a + # string and parse that below (which is a little error prone). 
header_names[$value_length]+="$docs|$const_name|$const_value " done diff --git a/http/tests/functional/header.rs b/http/tests/functional/header.rs index b8ee2a505..e7609339b 100644 --- a/http/tests/functional/header.rs +++ b/http/tests/functional/header.rs @@ -371,11 +371,9 @@ fn from_str_known_headers() { let known_headers = &[ "A-IM", "ALPN", - "AMP-Cache-Transform", "Accept", "Accept-Additions", "Accept-CH", - "Accept-Charset", "Accept-Datetime", "Accept-Encoding", "Accept-Features", @@ -383,7 +381,6 @@ fn from_str_known_headers() { "Accept-Patch", "Accept-Post", "Accept-Ranges", - "Access-Control", "Access-Control-Allow-Credentials", "Access-Control-Allow-Headers", "Access-Control-Allow-Methods", @@ -400,58 +397,50 @@ fn from_str_known_headers() { "Authentication-Control", "Authentication-Info", "Authorization", - "C-Ext", - "C-Man", - "C-Opt", - "C-PEP", - "C-PEP-Info", + "CDN-Cache-Control", "CDN-Loop", "Cache-Control", "Cal-Managed-ID", "CalDAV-Timezones", + "Capsule-Protocol", "Cert-Not-After", "Cert-Not-Before", + "Clear-Site-Data", + "Client-Cert", + "Client-Cert-Chain", "Close", - "Compliance", "Connection", - "Content-Base", "Content-Disposition", - "Content-Encoding", "Content-ID", "Content-Language", "Content-Length", "Content-Location", - "Content-MD5", "Content-Range", - "Content-Script-Type", - "Content-Style-Type", - "Content-Transfer-Encoding", + "Content-Security-Policy-Report-Only", "Content-Type", - "Content-Version", "Cookie", - "Cookie2", - "Cost", + "Cross-Origin-Embedder-Policy", + "Cross-Origin-Embedder-Policy-Report-Only", + "Cross-Origin-Opener-Policy", + "Cross-Origin-Opener-Policy-Report-Only", + "Cross-Origin-Resource-Policy", "DASL", "DAV", + "DPoP", + "DPoP-Nonce", "Date", - "Default-Style", "Delta-Base", "Depth", - "Derived-From", "Destination", "Differential-ID", "Digest", - "EDIINT-Features", "ETag", "Early-Data", "Expect", "Expect-CT", "Expires", - "Ext", "Forwarded", "From", - "GetProfile", - "HTTP2-Settings", "Hobareg", "Host", "IM", @@ -463,88 +452,62 @@ fn from_str_known_headers() { "If-Schedule-Tag-Match", "If-Unmodified-Since", "Include-Referred-Token-Binding-ID", - "Isolation", "Keep-Alive", "Label", + "Last-Event-ID", "Last-Modified", "Link", "Location", "Lock-Token", "MIME-Version", - "Man", "Max-Forwards", "Memento-Datetime", - "Message-ID", "Meter", - "Method-Check", - "Method-Check-Expires", "Negotiate", - "Non-Compliance", "OData-EntityId", "OData-Isolation", "OData-MaxVersion", "OData-Version", "OSCORE", "OSLC-Core-Version", - "Opt", - "Optional", "Optional-WWW-Authenticate", "Ordering-Type", "Origin", + "Origin-Agent-Cluster", "Overwrite", - "P3P", - "PEP", - "PICS-Label", - "Pep-Info", + "Ping-From", + "Ping-To", "Position", - "Pragma", "Prefer", "Preference-Applied", - "ProfileObject", - "Protocol", - "Protocol-Info", - "Protocol-Query", - "Protocol-Request", + "Priority", "Proxy-Authenticate", "Proxy-Authentication-Info", "Proxy-Authorization", - "Proxy-Features", - "Proxy-Instruction", - "Public", + "Proxy-Status", "Public-Key-Pins", "Public-Key-Pins-Report-Only", "Range", "Redirect-Ref", "Referer", - "Referer-Root", - "Repeatability-Client-ID", - "Repeatability-First-Sent", - "Repeatability-Request-ID", - "Repeatability-Result", + "Refresh", "Replay-Nonce", - "Resolution-Hint", - "Resolver-Location", "Retry-After", "SLUG", - "Safe", "Schedule-Reply", "Schedule-Tag", + "Sec-Purpose", "Sec-Token-Binding", "Sec-WebSocket-Accept", "Sec-WebSocket-Extensions", "Sec-WebSocket-Key", "Sec-WebSocket-Protocol", "Sec-WebSocket-Version", - 
"Security-Scheme", "Server", + "Server-Timing", "Set-Cookie", - "Set-Cookie2", - "SetProfile", - "SoapAction", "Status-URI", "Strict-Transport-Security", - "SubOK", - "Subst", "Sunset", "Surrogate-Capability", "Surrogate-Control", @@ -552,35 +515,20 @@ fn from_str_known_headers() { "TE", "TTL", "Timeout", - "Timing-Allow-Origin", - "Title", "Topic", "Traceparent", "Tracestate", "Trailer", "Transfer-Encoding", - "UA-Color", - "UA-Media", - "UA-Pixels", - "UA-Resolution", - "UA-Windowpixels", - "URI", "Upgrade", "Urgency", "User-Agent", "Variant-Vary", "Vary", - "Version", "Via", "WWW-Authenticate", "Want-Digest", - "Warning", "X-Content-Type-Options", - "X-Device-Accept", - "X-Device-Accept-Charset", - "X-Device-Accept-Encoding", - "X-Device-Accept-Language", - "X-Device-User-Agent", "X-Frame-Options", "X-Request-ID", ]; From de64f6ef0b312042a2da418f78926196b67837b5 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Wed, 31 May 2023 17:02:30 +0200 Subject: [PATCH 164/177] Small doc fixes Nothing major. --- http/src/client.rs | 5 ----- http/src/head/header.rs | 6 +++--- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/http/src/client.rs b/http/src/client.rs index 7627072a1..b9187b597 100644 --- a/http/src/client.rs +++ b/http/src/client.rs @@ -43,11 +43,6 @@ impl Client { } /// Send a GET request. - /// - /// # Notes - /// - /// Any [`ResponseError`] are turned into [`io::Error`]. If you want to - /// handle the `ResponseError`s separately use [`Client::request`]. pub async fn get<'c, 'p>( &'c mut self, path: &'p str, diff --git a/http/src/head/header.rs b/http/src/head/header.rs index da8194472..93912904c 100644 --- a/http/src/head/header.rs +++ b/http/src/head/header.rs @@ -19,8 +19,8 @@ use crate::{cmp_lower_case, is_lower_case}; /// List of headers. /// -/// A complete list can be found at the "Message Headers" registry: -/// . +/// A complete list can be found at the HTTP Field Name Registry: +/// . #[derive(Clone)] pub struct Headers { /// All values appended in a single allocation. @@ -964,7 +964,7 @@ impl<'a> fmt::Display for HeaderName<'a> { /// Analogous trait to [`FromStr`]. /// -/// The main use case for this trait in [`Header::parse`]. Because of this the +/// The main use case for this trait is [`Header::parse`]. Because of this the /// implementations should expect the `value`s passed to be ASCII/UTF-8, but /// this not true in all cases. /// From 1b3bc098f7db746db8ddfbf1005f3b0d98e10114 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 9 Jun 2023 17:26:48 +0200 Subject: [PATCH 165/177] Implement NewActor for ! Useful when the NewActor trait is used in a trait bound for types. --- src/actor/mod.rs | 16 ++++++++++++++++ src/lib.rs | 8 +++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/actor/mod.rs b/src/actor/mod.rs index 92274c2f8..6629ccfda 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -463,6 +463,22 @@ impl_new_actor!( (arg1: Arg1, arg2: Arg2, arg3: Arg3, arg4: Arg4, arg5: Arg5), ); +impl NewActor for ! { + type Message = !; + type Argument = !; + type Actor = impl Actor; + type Error = !; + type RuntimeAccess = !; + + fn new( + &mut self, + _: Context, + _: Self::Argument, + ) -> Result { + Ok(std::future::ready(Ok(()))) + } +} + /// Asynchronous actor. /// /// Effectively an `Actor` is a [`Future`] which returns a result. All `Future`s diff --git a/src/lib.rs b/src/lib.rs index 303e8b6c6..6a2c9083f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,7 +61,13 @@ //! This crate has one optional: `test`. 
The `test` feature will enable the //! `test` module which adds testing facilities. -#![feature(const_option, doc_auto_cfg, doc_cfg_hide, never_type)] +#![feature( + const_option, + doc_auto_cfg, + doc_cfg_hide, + impl_trait_in_assoc_type, + never_type +)] #![warn( anonymous_parameters, bare_trait_objects, From 0f3b80a812c685b5a04ee67c1e0d6484af7a8da5 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Fri, 9 Jun 2023 17:27:21 +0200 Subject: [PATCH 166/177] Return BlockOnError for test::block_on_future This way the caller can handle the panics themselves. --- rt/src/test.rs | 16 ++++++++++++---- rt/tests/functional/test.rs | 10 ++++++++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index dfd9cf4c2..56a96df6e 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -53,9 +53,9 @@ use std::any::Any; use std::async_iter::AsyncIterator; -use std::future::Future; +use std::future::{poll_fn, Future}; use std::panic::{catch_unwind, AssertUnwindSafe}; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, OnceLock}; use std::task::{self, Poll}; @@ -183,7 +183,7 @@ where /// Spawn `future` on the *test* runtime and wait for the result. /// /// This is useful to test async functions and futures in synchronous tests. -pub fn block_on_future(future: Fut) -> Fut::Output +pub fn block_on_future(future: Fut) -> Result> where Fut: Future + Send + 'static, Fut::Output: Send, @@ -192,7 +192,15 @@ where let waker = SyncWaker::new(); spawn_local_future( async move { - let result = future.await; + let mut future = pin!(future); + let result = poll_fn(move |ctx| { + match catch_unwind(AssertUnwindSafe(|| future.as_mut().poll(ctx))) { + Ok(Poll::Ready(output)) => Poll::Ready(Ok(output)), + Ok(Poll::Pending) => Poll::Pending, + Err(panic) => Poll::Ready(Err(BlockOnError::Panic(panic))), + } + }) + .await; assert!( sender.try_send(result).is_ok(), "failed to return future result" diff --git a/rt/tests/functional/test.rs b/rt/tests/functional/test.rs index 01425dcfa..c65660156 100644 --- a/rt/tests/functional/test.rs +++ b/rt/tests/functional/test.rs @@ -14,17 +14,23 @@ use heph::supervisor::NoSupervisor; use heph_rt::spawn::{ActorOptions, FutureOptions}; use heph_rt::test::{ self, join, join_all, join_many, size_of_actor, size_of_actor_val, spawn_future, try_spawn, - try_spawn_local, JoinResult, + try_spawn_local, BlockOnError, JoinResult, }; use heph_rt::timer::Timer; use heph_rt::{self as rt, ThreadLocal}; #[test] fn block_on_future() { - let result = test::block_on_future(async move { "All good" }); + let result = test::block_on_future(async move { "All good" }).unwrap(); assert_eq!(result, "All good"); } +#[test] +fn block_on_future_panic() { + let result = test::block_on_future(async move { panic!("Not good") }).unwrap_err(); + assert!(matches!(result, BlockOnError::Panic(_))); +} + #[test] fn test_size_of_actor() { async fn actor1(_: actor::Context) { From 79376fcb0c14eb92fc1bae6933a67b21565fb869 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 10 Jun 2023 14:28:16 +0200 Subject: [PATCH 167/177] Improve test module docs --- rt/src/test.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index 56a96df6e..e92158f57 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -6,6 +6,12 @@ //! (properly). //! //! Available utilities: +//! * Blocking: +//! * [`block_on_local_actor`]: spawns a thread-local [actor] and waits for +//! the result. 
+//! * [`block_on_actor`]: spawns a thread-safe [actor] and waits for the +//! result. +//! * [`block_on_future`]: spawns a `Future` and waits for the result. //! * Spawning: //! * [`try_spawn_local`]: attempt to spawn a thread-local [actor]. //! * [`try_spawn`]: attempt to spawn a thread-safe [actor]. @@ -15,12 +21,6 @@ //! * Waiting on spawned actors: //! * [`join`], [`join_many`]: wait for the actor(s) to finish running. //! * [`join_all`]: wait all actors in a group to finish running. -//! * Blocking on [`Future`]s: -//! * [`block_on_local_actor`]: spawns a thread-local [actor] and waits for -//! the result. -//! * [`block_on_actor`]: spawns a thread-safe [actor] and waits for the -//! result. -//! * [`block_on_future`]: spawns a `Future` and waits for the result. //! * Initialising actors: //! * [`init_local_actor`]: initialise a thread-local actor. //! * [`init_actor`]: initialise a thread-safe actor. @@ -615,7 +615,7 @@ where Future::poll(future, &mut ctx) } -/// Poll a [`AsyncIterator`]. +/// Poll an [`AsyncIterator`]. /// /// # Notes /// From 4d8e504c073d0b3dbd6b858cdf2b159ced200758 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 10 Jun 2023 14:40:04 +0200 Subject: [PATCH 168/177] Don't use init_actor in some tests Trying to move away from it. --- rt/tests/functional/from_message.rs | 44 ++++++++++------------------- rt/tests/functional/timer.rs | 39 ++++++------------------- 2 files changed, 24 insertions(+), 59 deletions(-) diff --git a/rt/tests/functional/from_message.rs b/rt/tests/functional/from_message.rs index 62974c4b4..366d30f74 100644 --- a/rt/tests/functional/from_message.rs +++ b/rt/tests/functional/from_message.rs @@ -1,11 +1,12 @@ //! Tests for the `from_message!` macro. -use std::pin::Pin; -use std::task::Poll; +use std::time::Duration; use heph::actor_ref::{ActorRef, RpcMessage}; +use heph::supervisor::NoSupervisor; use heph::{actor, from_message}; -use heph_rt::test::{init_local_actor, poll_actor}; +use heph_rt::spawn::ActorOptions; +use heph_rt::test::{join, try_spawn_local}; use heph_rt::ThreadLocal; #[derive(Debug)] @@ -22,34 +23,19 @@ from_message!(Message::Rpc2(String, usize) -> (usize, usize)); #[test] fn from_message() { let pong_actor = pong_actor as fn(_) -> _; - let (pong_actor, actor_ref) = init_local_actor(pong_actor, ()).unwrap(); - let mut pong_actor = Box::pin(pong_actor); + let pong_ref = try_spawn_local(NoSupervisor, pong_actor, (), ActorOptions::default()).unwrap(); let ping_actor = ping_actor as fn(_, _) -> _; - let (ping_actor, actor_ref) = init_local_actor(ping_actor, actor_ref).unwrap(); - drop(actor_ref); - let mut ping_actor = Box::pin(ping_actor); - - // Waiting for the first message. - assert_eq!(poll_actor(Pin::as_mut(&mut pong_actor)), Poll::Pending); - // Wait for first RPC call. - assert_eq!(poll_actor(Pin::as_mut(&mut ping_actor)), Poll::Pending); - - // Receives first message and first RPC, waits for second RPC. - assert_eq!(poll_actor(Pin::as_mut(&mut pong_actor)), Poll::Pending); - // Receives first RPC response, waits on second RPC. - assert_eq!(poll_actor(Pin::as_mut(&mut ping_actor)), Poll::Pending); - - // Receives second RPC and is done. - assert_eq!( - poll_actor(Pin::as_mut(&mut pong_actor)), - Poll::Ready(Ok(())) - ); - // Receives second RPC response and is done. 
- assert_eq!( - poll_actor(Pin::as_mut(&mut ping_actor)), - Poll::Ready(Ok(())) - ); + let ping_ref = try_spawn_local( + NoSupervisor, + ping_actor, + pong_ref.clone(), + ActorOptions::default(), + ) + .unwrap(); + + join(&ping_ref, Duration::from_secs(1)).unwrap(); + join(&pong_ref, Duration::from_secs(1)).unwrap(); } async fn ping_actor(_: actor::Context, actor_ref: ActorRef) { diff --git a/rt/tests/functional/timer.rs b/rt/tests/functional/timer.rs index 537892178..7e89d1ca5 100644 --- a/rt/tests/functional/timer.rs +++ b/rt/tests/functional/timer.rs @@ -2,13 +2,12 @@ use std::future::Future; use std::io; use std::pin::Pin; use std::task::{self, Poll}; -use std::thread; use std::time::{Duration, Instant}; use heph::actor; use heph::supervisor::NoSupervisor; use heph_rt::spawn::ActorOptions; -use heph_rt::test::{init_local_actor, poll_actor, poll_future, poll_next}; +use heph_rt::test::{block_on_local_actor, poll_future, poll_next}; use heph_rt::timer::{Deadline, DeadlinePassed, Interval, Timer}; use heph_rt::util::next; use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal, ThreadSafe}; @@ -46,15 +45,10 @@ fn timer() { let _ = (&mut timer).await; assert!(timer.deadline() >= start + TIMEOUT); assert!(timer.has_passed()); + assert!(start.elapsed() >= TIMEOUT); } - let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Pending); - - thread::sleep(TIMEOUT); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + block_on_local_actor(actor as fn(_) -> _, ()).unwrap(); } #[derive(Clone, Debug, Eq, PartialEq)] @@ -81,15 +75,10 @@ fn timer_wrap() { assert_eq!(res, Err(DeadlinePassed)); assert!(deadline.deadline() >= start + TIMEOUT); assert!(deadline.has_passed()); + assert!(start.elapsed() >= TIMEOUT); } - let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Pending); - - thread::sleep(TIMEOUT); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + block_on_local_actor(actor as fn(_) -> _, ()).unwrap(); } #[test] @@ -110,15 +99,10 @@ fn deadline() { assert_eq!(*deadline.get_ref(), future); assert_eq!(*deadline.get_mut(), future); assert_eq!(deadline.into_inner(), future); + assert!(start.elapsed() >= TIMEOUT); } - let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Pending); - - thread::sleep(TIMEOUT); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + block_on_local_actor(actor as fn(_) -> _, ()).unwrap(); } #[test] @@ -128,15 +112,10 @@ fn interval() { let mut interval = Interval::every(ctx.runtime_ref().clone(), TIMEOUT); assert!(interval.next_deadline() >= start + TIMEOUT); let _ = next(&mut interval).await; + assert!(start.elapsed() >= TIMEOUT); } - let actor = actor as fn(_) -> _; - let (actor, _) = init_local_actor(actor, ()).unwrap(); - let mut actor = Box::pin(actor); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Pending); - - thread::sleep(TIMEOUT); - assert_eq!(poll_actor(Pin::as_mut(&mut actor)), Poll::Ready(Ok(()))); + block_on_local_actor(actor as fn(_) -> _, ()).unwrap(); } #[test] From bea425f2792f9d401d3b5ff2021dfd1814a72e61 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 11 Jun 2023 16:30:14 +0200 Subject: [PATCH 
169/177] Don't use BlockOnError for test::block_on_future Since only the panic variant is used of the BlockOnError enum we just return that as Result::Err. --- rt/src/test.rs | 10 ++++++---- rt/tests/functional/test.rs | 5 ++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/rt/src/test.rs b/rt/src/test.rs index e92158f57..3c0c38806 100644 --- a/rt/src/test.rs +++ b/rt/src/test.rs @@ -66,7 +66,7 @@ use heph::actor::{self, Actor, NewActor, SyncActor, SyncWaker}; use heph::actor_ref::{ActorGroup, ActorRef}; use heph::supervisor::{Supervisor, SyncSupervisor}; use heph_inbox as inbox; -use heph_inbox::oneshot::new_oneshot; +use heph_inbox::oneshot::{self, new_oneshot}; use crate::spawn::{ActorOptions, FutureOptions, SyncActorOptions}; use crate::sync_worker::SyncWorker; @@ -183,7 +183,9 @@ where /// Spawn `future` on the *test* runtime and wait for the result. /// /// This is useful to test async functions and futures in synchronous tests. -pub fn block_on_future(future: Fut) -> Result> +/// +/// If the future panics it will be caught and returned as error. +pub fn block_on_future(future: Fut) -> Result> where Fut: Future + Send + 'static, Fut::Output: Send, @@ -197,7 +199,7 @@ where match catch_unwind(AssertUnwindSafe(|| future.as_mut().poll(ctx))) { Ok(Poll::Ready(output)) => Poll::Ready(Ok(output)), Ok(Poll::Pending) => Poll::Pending, - Err(panic) => Poll::Ready(Err(BlockOnError::Panic(panic))), + Err(panic) => Poll::Ready(Err(panic)), } }) .await; @@ -354,7 +356,7 @@ where /// [`Future`]/[`Actor`] wrapper to catch errors and panics. #[derive(Debug)] struct ErrorCatcher { - sender: Option>>>, + sender: Option>>>, actor: NA::Actor, } diff --git a/rt/tests/functional/test.rs b/rt/tests/functional/test.rs index c65660156..6f259cd28 100644 --- a/rt/tests/functional/test.rs +++ b/rt/tests/functional/test.rs @@ -14,7 +14,7 @@ use heph::supervisor::NoSupervisor; use heph_rt::spawn::{ActorOptions, FutureOptions}; use heph_rt::test::{ self, join, join_all, join_many, size_of_actor, size_of_actor_val, spawn_future, try_spawn, - try_spawn_local, BlockOnError, JoinResult, + try_spawn_local, JoinResult, }; use heph_rt::timer::Timer; use heph_rt::{self as rt, ThreadLocal}; @@ -27,8 +27,7 @@ fn block_on_future() { #[test] fn block_on_future_panic() { - let result = test::block_on_future(async move { panic!("Not good") }).unwrap_err(); - assert!(matches!(result, BlockOnError::Panic(_))); + assert!(test::block_on_future(async move { panic!("Not good") }).is_err()); } #[test] From 6d5aab261d5a31b1592f66885283a70786496e31 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 11 Jun 2023 16:30:48 +0200 Subject: [PATCH 170/177] Remove NewActor impl for ! --- src/actor/mod.rs | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/actor/mod.rs b/src/actor/mod.rs index 6629ccfda..92274c2f8 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -463,22 +463,6 @@ impl_new_actor!( (arg1: Arg1, arg2: Arg2, arg3: Arg3, arg4: Arg4, arg5: Arg5), ); -impl NewActor for ! { - type Message = !; - type Argument = !; - type Actor = impl Actor; - type Error = !; - type RuntimeAccess = !; - - fn new( - &mut self, - _: Context, - _: Self::Argument, - ) -> Result { - Ok(std::future::ready(Ok(()))) - } -} - /// Asynchronous actor. /// /// Effectively an `Actor` is a [`Future`] which returns a result. 
All `Future`s From 4b0a2ae6f61fdf10639a500fec371d0282e9868a Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sun, 18 Jun 2023 16:14:55 +0200 Subject: [PATCH 171/177] Handle block_on_future errors in HTTP tests --- http/tests/functional/route.rs | 9 ++++++--- http/tests/functional/transform.rs | 3 ++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/http/tests/functional/route.rs b/http/tests/functional/route.rs index 29bbc2eaa..814205dcc 100644 --- a/http/tests/functional/route.rs +++ b/http/tests/functional/route.rs @@ -99,7 +99,8 @@ fn multiple_methods_same_route() { let response = route(test_request).await; assert_eq!(response.body().into_inner(), "index") } - }); + }) + .unwrap(); } #[test] @@ -127,7 +128,8 @@ fn correct_routing_based_on_method() { let response = route(request).await; assert_eq!(response.body().into_inner(), method.as_str()) } - }); + }) + .unwrap(); } #[test] @@ -143,7 +145,8 @@ fn not_found_fallback() { let response = route(test_request).await; assert_eq!(response.body().into_inner(), "not found") } - }); + }) + .unwrap(); } // TODO: test compile failure with the following errors: diff --git a/http/tests/functional/transform.rs b/http/tests/functional/transform.rs index 266f8700d..e87d9ef13 100644 --- a/http/tests/functional/transform.rs +++ b/http/tests/functional/transform.rs @@ -91,7 +91,8 @@ fn transform_middleware() { }, TestBody::new(REQ_BODY), ); - let response: Response = test::block_on_future(middleware.handle(request)); + let response: Response = + test::block_on_future(middleware.handle(request)).unwrap(); assert_eq!(response.status(), expected_status); assert_eq!(response.body().into_inner(), expected_body); } From b1e8d215ef70c107a4c3d15ec4df06d2085974e2 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 24 Jun 2023 14:12:34 +0200 Subject: [PATCH 172/177] Update rustc version --- rt/src/coordinator.rs | 2 +- rt/src/lib.rs | 2 +- rt/tests/functional.rs | 8 +------- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index e27195d48..d378f0e27 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -366,7 +366,7 @@ fn register_sync_workers(registry: &Registry, sync_workers: &mut [SyncWorker]) - /// stopped. fn check_sync_worker_alive(sync_workers: &mut Vec) -> Result<(), rt::Error> { sync_workers - .drain_filter(|sync_worker| !sync_worker.is_alive()) + .extract_if(|sync_worker| !sync_worker.is_alive()) .try_for_each(|sync_worker| { debug!(sync_worker_id = sync_worker.id(); "sync actor worker thread stopped"); sync_worker.join().map_err(rt::Error::sync_actor_panic) diff --git a/rt/src/lib.rs b/rt/src/lib.rs index a6f098458..655d9502b 100644 --- a/rt/src/lib.rs +++ b/rt/src/lib.rs @@ -148,7 +148,7 @@ const_weak_new, doc_auto_cfg, doc_cfg_hide, - drain_filter, + extract_if, impl_trait_in_assoc_type, io_slice_advance, is_sorted, diff --git a/rt/tests/functional.rs b/rt/tests/functional.rs index 57c92dc6d..f75411364 100644 --- a/rt/tests/functional.rs +++ b/rt/tests/functional.rs @@ -1,12 +1,6 @@ //! Functional tests. -#![feature( - async_iterator, - drain_filter, - maybe_uninit_slice, - never_type, - write_all_vectored -)] +#![feature(async_iterator, never_type, write_all_vectored)] #[path = "util/mod.rs"] // rustfmt can't find the file. 
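// The rename above (drain_filter -> extract_if) tracks the nightly feature this
// patch switches to in rt/src/lib.rs. A minimal sketch of the single-closure
// form used throughout this series; the function and values below are
// illustrative only, not part of the patch (requires #![feature(extract_if)]
// on nightly):
fn drain_odd(ids: &mut Vec<u32>) -> Vec<u32> {
    // Removes and returns the ids matching the predicate while keeping the
    // rest in place -- the same pattern check_sync_worker_alive() uses to
    // drain sync workers that are no longer alive.
    ids.extract_if(|id| *id % 2 != 0).collect()
}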
#[macro_use] From c2f28e7cede7344c241f65fd207dd462b0c9c6f1 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Sat, 1 Jul 2023 14:38:13 +0200 Subject: [PATCH 173/177] Update rustc and clippy version And fixes/ignores all the Clippy warnings. --- http/src/head/header.rs | 4 +++- http/src/lib.rs | 2 +- http/src/request.rs | 1 + http/src/response.rs | 1 + http/src/str.rs | 2 +- inbox/src/lib.rs | 7 ++++--- rt/src/coordinator.rs | 3 ++- rt/src/io/buf.rs | 2 +- rt/src/process/mod.rs | 1 + src/actor/future.rs | 2 ++ src/actor/sync.rs | 2 +- 11 files changed, 18 insertions(+), 9 deletions(-) diff --git a/http/src/head/header.rs b/http/src/head/header.rs index 93912904c..6deef7de5 100644 --- a/http/src/head/header.rs +++ b/http/src/head/header.rs @@ -173,7 +173,9 @@ impl Headers { /// Remove all headers with `name`. pub fn remove_all(&mut self, name: &HeaderName<'_>) { - drop(self.parts.drain_filter(move |part| part.name == *name)); + self.parts + .extract_if(move |part| part.name == *name) + .for_each(drop); } /// Returns an iterator that iterates over all headers. diff --git a/http/src/lib.rs b/http/src/lib.rs index 18e287914..f54e708c1 100644 --- a/http/src/lib.rs +++ b/http/src/lib.rs @@ -3,7 +3,7 @@ #![feature( async_iterator, const_mut_refs, - drain_filter, + extract_if, impl_trait_in_assoc_type, maybe_uninit_uninit_array, maybe_uninit_write_slice diff --git a/http/src/request.rs b/http/src/request.rs index 43517f153..9a6bc44a6 100644 --- a/http/src/request.rs +++ b/http/src/request.rs @@ -115,6 +115,7 @@ impl DerefMut for Request { } } +#[allow(clippy::missing_fields_in_debug)] impl fmt::Debug for Request { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Request").field("head", &self.head).finish() diff --git a/http/src/response.rs b/http/src/response.rs index 081c598ad..8c7b66ccd 100644 --- a/http/src/response.rs +++ b/http/src/response.rs @@ -206,6 +206,7 @@ impl DerefMut for Response { } } +#[allow(clippy::missing_fields_in_debug)] impl fmt::Debug for Response { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Response") diff --git a/http/src/str.rs b/http/src/str.rs index b14590b69..c247b2298 100644 --- a/http/src/str.rs +++ b/http/src/str.rs @@ -124,7 +124,7 @@ impl<'a> Drop for Str<'a> { fn drop(&mut self) { if self.is_heap_allocated() { let len = self.len(); - unsafe { drop(Vec::::from_raw_parts(self.ptr as *mut u8, len, len)) } + unsafe { drop(Vec::::from_raw_parts(self.ptr.cast_mut(), len, len)) } } } } diff --git a/inbox/src/lib.rs b/inbox/src/lib.rs index cccddde0e..b4e16a2c2 100644 --- a/inbox/src/lib.rs +++ b/inbox/src/lib.rs @@ -325,7 +325,7 @@ impl Sender { /// Returns the id of this sender. pub fn id(&self) -> Id { - Id(self.channel.as_ptr() as *const () as usize) + Id(self.channel.as_ptr().cast_const().cast::<()>() as usize) } fn channel(&self) -> &Channel { @@ -749,7 +749,7 @@ impl Receiver { /// Returns the id of this receiver. pub fn id(&self) -> Id { - Id(self.channel.as_ptr() as *const () as usize) + Id(self.channel.as_ptr().cast_const().cast::<()>() as usize) } fn channel(&self) -> &Channel { @@ -1100,6 +1100,7 @@ impl Deref for Channel { } } +#[allow(clippy::missing_fields_in_debug)] impl fmt::Debug for Channel { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let status = self.status.load(Ordering::Relaxed); @@ -1228,7 +1229,7 @@ impl Manager { /// Returns the id of the channel. 
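// The cast rewrites in this patch (for example in Sender::id and Receiver::id
// above) replace chained `as` casts with the explicit raw-pointer helpers, so
// each constness and pointee change stays visible to Clippy. A minimal,
// self-contained sketch -- the function and parameter names are illustrative,
// not part of the patch:
fn channel_id(ptr: *mut u64) -> usize {
    // Before: ptr as *const () as usize
    // After: make the constness and pointee changes explicit, then cast the
    // address to usize as the patch does.
    ptr.cast_const().cast::<()>() as usize
}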
pub fn id(&self) -> Id { - Id(self.channel.as_ptr() as *const () as usize) + Id(self.channel.as_ptr().cast_const().cast::<()>() as usize) } fn channel(&self) -> &Channel { diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index d378f0e27..49d6c6e03 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -316,14 +316,15 @@ impl Coordinator { } } +#[allow(clippy::missing_fields_in_debug)] impl fmt::Debug for Coordinator { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Coordinator") .field("ring", &self.ring) .field("poll", &self.poll) .field("signals", &self.signals) - .field("internals", &self.internals) .field("futures_ready", &self.futures_ready) + .field("internals", &self.internals) .field("start", &self.start) .field("app_name", &self.app_name) .field("host_os", &self.host_os) diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index addf978a3..9c82707d1 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -448,7 +448,7 @@ unsafe impl private::BufSlice for [B; N] { for (buf, iovec) in self.iter().zip(iovecs.iter_mut()) { let (ptr, len) = buf.parts(); _ = iovec.write(libc::iovec { - iov_base: ptr as _, + iov_base: ptr.cast::().cast_mut(), iov_len: len, }); } diff --git a/rt/src/process/mod.rs b/rt/src/process/mod.rs index 8096c7a03..9a6bdf747 100644 --- a/rt/src/process/mod.rs +++ b/rt/src/process/mod.rs @@ -209,6 +209,7 @@ impl PartialOrd for ProcessData

{
     }
 }
 
+#[allow(clippy::missing_fields_in_debug)]
 impl fmt::Debug for ProcessData

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Process") diff --git a/src/actor/future.rs b/src/actor/future.rs index a82ecc5b1..d3c4b0440 100644 --- a/src/actor/future.rs +++ b/src/actor/future.rs @@ -188,6 +188,7 @@ where } } +#[allow(clippy::missing_fields_in_debug)] impl fmt::Debug for ActorFuture where S: Supervisor + fmt::Debug, @@ -198,6 +199,7 @@ where f.debug_struct("ActorFuture") .field("supervisor", &self.supervisor) .field("actor", &NA::name()) + .field("inbox", &self.inbox) .field("rt", &self.rt) .finish() } diff --git a/src/actor/sync.rs b/src/actor/sync.rs index f5b1a9f0a..d6fec2b7b 100644 --- a/src/actor/sync.rs +++ b/src/actor/sync.rs @@ -376,7 +376,7 @@ impl SyncWaker { unsafe fn from_data_ref(data: &*const ()) -> &SyncWaker { // SAFETY: inverse of `into_data`, see that for more info, also see // `from_data`. - unsafe { std::mem::transmute(data) } + &*((data as *const *const ()).cast::()) } } From c954e2c9b3ca144bf9467d41c5dca658193f831c Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 10 Jul 2023 16:25:36 +0200 Subject: [PATCH 174/177] Use A10 v0.1.0 Finally it's published! --- rt/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rt/Cargo.toml b/rt/Cargo.toml index 461ba4acb..83955b58b 100644 --- a/rt/Cargo.toml +++ b/rt/Cargo.toml @@ -18,7 +18,7 @@ edition = "2021" test = ["heph/test"] [dependencies] -a10 = { version = "0.1.0", default-features = false, git = "https://github.com/Thomasdezeeuw/a10" } +a10 = { version = "0.1.0", default-features = false, features = ["nightly"] } heph = { version = "0.5.0", path = "../", default-features = false } heph-inbox = { version = "0.2.3", path = "../inbox", default-features = false } log = { version = "0.4.17", default-features = false, features = ["kv_unstable", "kv_unstable_std"] } From 92ee77d39070480043df5004320bb73bbe5e0ca9 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 10 Jul 2023 16:28:38 +0200 Subject: [PATCH 175/177] Ignore arc_with_non_send_sync in ActorRef --- src/actor_ref/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/actor_ref/mod.rs b/src/actor_ref/mod.rs index 7e61b13a0..8123701b2 100644 --- a/src/actor_ref/mod.rs +++ b/src/actor_ref/mod.rs @@ -332,6 +332,7 @@ impl ActorRef { /// /// Prefer to clone an existing mapped `ActorRef` over creating a new one as /// that can reuse the allocation mentioned above. + #[allow(clippy::arc_with_non_send_sync)] pub fn try_map(self) -> ActorRef where M: TryFrom + 'static, @@ -387,6 +388,7 @@ impl ActorRef { /// /// Prefer to clone an existing mapped `ActorRef` over creating a new one as /// that can reuse the allocation mentioned above. + #[allow(clippy::arc_with_non_send_sync)] pub fn try_map_fn(self, map: F) -> ActorRef where F: Fn(Msg) -> Result + 'static, From ea5bf6ebb65b5995d7a67587e0ac48096f294bd0 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 10 Jul 2023 16:44:49 +0200 Subject: [PATCH 176/177] Fix Clippy warnings --- rt/src/coordinator.rs | 4 ++-- rt/src/io/buf.rs | 2 +- rt/src/net/tcp/server.rs | 1 + rt/src/shared/mod.rs | 2 +- rt/src/trace.rs | 2 +- rt/src/wakers/bitmap.rs | 2 +- rt/src/worker.rs | 4 ++-- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/rt/src/coordinator.rs b/rt/src/coordinator.rs index 49d6c6e03..965fc4399 100644 --- a/rt/src/coordinator.rs +++ b/rt/src/coordinator.rs @@ -142,7 +142,7 @@ impl Coordinator { let timing = trace::start(&trace_log); // Poll for events. 
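// The loop changes in this patch lean on IntoIterator being implemented for
// references: `for x in &collection` yields the same items as
// `collection.iter()`, and `for x in &mut *collection` the same as
// `collection.iter_mut()`, which is the form Clippy asks for here. A small
// illustration with toy types (not part of the patch):
fn tally(events: &[u32], workers: &mut [u32]) -> u32 {
    let mut total = 0;
    for event in events {
        // equivalent to events.iter()
        total += *event;
    }
    for worker in &mut *workers {
        // equivalent to workers.iter_mut()
        *worker += 1;
    }
    total
}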
- for event in events.iter() { + for event in &events { trace!(event = as_debug!(event); "got OS event"); match event.token() { @@ -395,7 +395,7 @@ fn relay_signals( } debug!(signal = as_debug!(signal); "relaying process signal to worker threads"); - for worker in workers.iter_mut() { + for worker in &mut *workers { if let Err(err) = worker.send_signal(signal) { // NOTE: if the worker is unable to receive a message // it's likely already shutdown or is shutting down. diff --git a/rt/src/io/buf.rs b/rt/src/io/buf.rs index 9c82707d1..0ef5cb12d 100644 --- a/rt/src/io/buf.rs +++ b/rt/src/io/buf.rs @@ -261,7 +261,7 @@ unsafe impl private::BufMutSlice for [B; N] { unsafe fn update_length(&mut self, n: usize) { let mut left = n; - for buf in self.iter_mut() { + for buf in self { let (_, len) = buf.parts_mut(); if len < left { // Fully initialised the buffer. diff --git a/rt/src/net/tcp/server.rs b/rt/src/net/tcp/server.rs index 8965ca444..c378ff6e2 100644 --- a/rt/src/net/tcp/server.rs +++ b/rt/src/net/tcp/server.rs @@ -251,6 +251,7 @@ use crate::Signal; /// /// [server setup]: Setup /// [module documentation]: crate::net::tcp::server +#[allow(clippy::arc_with_non_send_sync)] pub fn setup( mut address: SocketAddr, supervisor: S, diff --git a/rt/src/shared/mod.rs b/rt/src/shared/mod.rs index fd648f0db..38a52737f 100644 --- a/rt/src/shared/mod.rs +++ b/rt/src/shared/mod.rs @@ -305,7 +305,7 @@ impl RuntimeInternals { /// Wake all worker threads, ignoring errors. pub(crate) fn wake_all_workers(&self) { trace!("waking all worker thread(s)"); - for worker in self.worker_wakers.iter() { + for worker in &*self.worker_wakers { drop(worker.wake()); } } diff --git a/rt/src/trace.rs b/rt/src/trace.rs index 8fb4d9d8d..cd2a2571f 100644 --- a/rt/src/trace.rs +++ b/rt/src/trace.rs @@ -705,7 +705,7 @@ mod private { #[allow(clippy::cast_possible_truncation)] let length = self.len() as u16; buf.extend_from_slice(&length.to_be_bytes()); - for attribute in self.iter() { + for attribute in self { attribute.write_attribute(buf); } } diff --git a/rt/src/wakers/bitmap.rs b/rt/src/wakers/bitmap.rs index c1982d68e..a0b16b235 100644 --- a/rt/src/wakers/bitmap.rs +++ b/rt/src/wakers/bitmap.rs @@ -78,7 +78,7 @@ const fn is_set(value: usize, n: usize) -> bool { impl fmt::Debug for AtomicBitMap { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { const WIDTH: usize = usize::BITS as usize; - for data in self.data.iter() { + for data in &self.data { let value = data.load(Ordering::Relaxed); write!(f, "{value:0WIDTH$b}")?; } diff --git a/rt/src/worker.rs b/rt/src/worker.rs index 04e53cdd3..314dbc858 100644 --- a/rt/src/worker.rs +++ b/rt/src/worker.rs @@ -476,7 +476,7 @@ impl Worker { let mut check_ring = false; let mut check_shared_ring = false; let mut amount = 0; - for event in self.events.iter() { + for event in &self.events { trace!(worker_id = self.internals.id.get(); "got OS event: {event:?}"); match event.token() { WAKER => { /* Need to wake up to handle user space events. */ } @@ -553,7 +553,7 @@ impl Worker { let mut amount = 0; if self.internals.shared.try_poll(&mut self.events)? 
{ - for event in self.events.iter() { + for event in &self.events { trace!(worker_id = self.internals.id.get(); "got shared OS event: {event:?}"); let pid = ProcessId::from(event.token()); trace!( From bd39579879356894c6143ce772647168eecaf820 Mon Sep 17 00:00:00 2001 From: Thomas de Zeeuw Date: Mon, 10 Jul 2023 17:12:16 +0200 Subject: [PATCH 177/177] Use function as TCP server supervisor in example --- src/actor/mod.rs | 36 ++++++++---------------------------- 1 file changed, 8 insertions(+), 28 deletions(-) diff --git a/src/actor/mod.rs b/src/actor/mod.rs index 92274c2f8..04a17c04e 100644 --- a/src/actor/mod.rs +++ b/src/actor/mod.rs @@ -243,11 +243,11 @@ pub trait NewActor { /// use std::io; /// use heph::actor::{self, NewActor}; /// # use heph::messages::Terminate; + /// # use heph::supervisor::SupervisorStrategy; /// use heph_rt::net::{tcp, TcpStream}; /// # use heph_rt::net::tcp::server; /// use heph_rt::spawn::ActorOptions; /// use heph_rt::{self as rt, Runtime, RuntimeRef, ThreadLocal}; - /// # use heph::supervisor::{Supervisor, SupervisorStrategy}; /// # use log::error; /// /// fn main() -> Result<(), rt::Error> { @@ -273,37 +273,17 @@ pub trait NewActor { /// let address = "127.0.0.1:7890".parse().unwrap(); /// let server = tcp::server::setup(address, conn_supervisor, new_actor, ActorOptions::default())?; /// # let actor_ref = - /// runtime_ref.spawn_local(ServerSupervisor, server, (), ActorOptions::default()); + /// runtime_ref.spawn_local(server_supervisor, server, (), ActorOptions::default()); /// # actor_ref.try_send(Terminate).unwrap(); /// Ok(()) /// } /// - /// # #[derive(Copy, Clone, Debug)] - /// # struct ServerSupervisor; - /// # - /// # impl Supervisor> for ServerSupervisor - /// # where - /// # S: Supervisor + Clone + 'static, - /// # NA: NewActor + Clone + 'static, - /// # { - /// # fn decide(&mut self, err: server::Error) -> SupervisorStrategy<()> { - /// # use server::Error::*; - /// # match err { - /// # Accept(err) => { - /// # error!("error accepting new connection: {err}"); - /// # SupervisorStrategy::Restart(()) - /// # } - /// # NewActor(_) => unreachable!(), - /// # } - /// # } - /// # - /// # fn decide_on_restart_error(&mut self, err: !) -> SupervisorStrategy<()> { - /// # err - /// # } - /// # - /// # fn second_restart_error(&mut self, err: !) { - /// # err - /// # } + /// # fn server_supervisor(err: server::Error) -> SupervisorStrategy<()> { + /// # match err { + /// # server::Error::Accept(err) => error!("error accepting new connection: {err}"), + /// # server::Error::NewActor::(_) => {}, + /// # } + /// # SupervisorStrategy::Restart(()) /// # } /// # /// # fn conn_supervisor(err: io::Error) -> SupervisorStrategy {