Old xtask test pass

This commit is contained in:
Emil Fresk 2023-01-02 14:34:05 +01:00 committed by Henrik Tjäder
parent 7614b96fe4
commit 582c602912
64 changed files with 1418 additions and 316 deletions


@ -1,11 +1,13 @@
#![allow(clippy::inline_always)]
pub use crate::{
sll::{IntrusiveSortedLinkedList, Node as IntrusiveNode},
tq::{TaskNotReady, TimerQueue, WakerNotReady},
};
pub use bare_metal::CriticalSection;
use core::{
cell::Cell,
sync::atomic::{AtomicBool, Ordering},
};
pub use cortex_m::{
asm::nop,
asm::wfi,
@ -16,10 +18,134 @@ pub use cortex_m::{
pub use heapless::sorted_linked_list::SortedLinkedList;
pub use heapless::spsc::Queue;
pub use heapless::BinaryHeap;
pub use heapless::Vec;
pub use rtic_monotonic as monotonic;
pub mod idle_executor {
use core::{
future::Future,
pin::Pin,
task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};
fn no_op(_: *const ()) {}
fn no_op_clone(_: *const ()) -> RawWaker {
noop_raw_waker()
}
static IDLE_WAKER_TABLE: RawWakerVTable = RawWakerVTable::new(no_op_clone, no_op, no_op, no_op);
#[inline]
fn noop_raw_waker() -> RawWaker {
RawWaker::new(core::ptr::null(), &IDLE_WAKER_TABLE)
}
pub struct IdleExecutor<T>
where
T: Future,
{
idle: T,
}
impl<T> IdleExecutor<T>
where
T: Future,
{
#[inline(always)]
pub fn new(idle: T) -> Self {
Self { idle }
}
#[inline(always)]
pub fn run(&mut self) -> ! {
let w = unsafe { Waker::from_raw(noop_raw_waker()) };
let mut ctxt = Context::from_waker(&w);
loop {
match unsafe { Pin::new_unchecked(&mut self.idle) }.poll(&mut ctxt) {
Poll::Pending => {
// All ok!
}
Poll::Ready(_) => {
// The idle executor will never return
unreachable!()
}
}
}
}
}
}
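For orientation, a minimal usage sketch (not part of this commit) of the IdleExecutor above; the Forever future and idle_entry function are hypothetical stand-ins for the generated idle task:

    use core::{future::Future, pin::Pin, task::{Context, Poll}};

    /// Hypothetical always-pending future, standing in for the user's idle task.
    struct Forever;

    impl Future for Forever {
        type Output = ();
        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
            // Never ready: IdleExecutor::run keeps polling with the no-op waker.
            Poll::Pending
        }
    }

    fn idle_entry() -> ! {
        // Poll::Ready is unreachable for a never-completing idle future,
        // so run() loops forever.
        idle_executor::IdleExecutor::new(Forever).run()
    }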
pub mod executor {
use core::{
future::Future,
mem,
pin::Pin,
task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};
static WAKER_VTABLE: RawWakerVTable =
RawWakerVTable::new(waker_clone, waker_wake, waker_wake, waker_drop);
unsafe fn waker_clone(p: *const ()) -> RawWaker {
RawWaker::new(p, &WAKER_VTABLE)
}
unsafe fn waker_wake(p: *const ()) {
// The only thing we need from a waker is the function to call to pend the async
// dispatcher.
let f: fn() = mem::transmute(p);
f();
}
unsafe fn waker_drop(_: *const ()) {
// nop
}
//============
// AsyncTaskExecutor
pub struct AsyncTaskExecutor<F: Future + 'static> {
task: Option<F>,
}
impl<F: Future + 'static> AsyncTaskExecutor<F> {
pub const fn new() -> Self {
Self { task: None }
}
pub fn is_running(&self) -> bool {
self.task.is_some()
}
pub fn spawn(&mut self, future: F) {
self.task = Some(future);
}
pub fn poll(&mut self, wake: fn()) -> bool {
if let Some(future) = &mut self.task {
unsafe {
let waker = Waker::from_raw(RawWaker::new(wake as *const (), &WAKER_VTABLE));
let mut cx = Context::from_waker(&waker);
let future = Pin::new_unchecked(future);
match future.poll(&mut cx) {
Poll::Ready(_) => {
self.task = None;
true // Only true if we finished now
}
Poll::Pending => false,
}
}
} else {
false
}
}
}
}
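A hedged sketch (not from this commit) of how this executor is meant to be driven: the wake function passed to poll is the function pointer that waker_wake transmutes back and calls, so waking a task simply re-pends its dispatcher. The names pend_dispatcher and drive are hypothetical:

    use core::future::Future;

    /// Hypothetical stand-in for pending the async dispatcher interrupt
    /// (in generated code, something along the lines of NVIC::pend(<dispatcher>)).
    fn pend_dispatcher() {}

    /// Spawn a future if the executor is idle, then poll it once.
    /// poll() returns true only when the task finished on this call.
    fn drive<F: Future + 'static>(exec: &mut executor::AsyncTaskExecutor<F>, fut: F) -> bool {
        if !exec.is_running() {
            exec.spawn(fut);
        }
        exec.poll(pend_dispatcher)
    }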
pub type SCFQ<const N: usize> = Queue<u8, N>;
pub type SCRQ<T, const N: usize> = Queue<(T, u8), N>;
pub type ASYNCRQ<T, const N: usize> = Queue<T, N>;
/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
@ -117,7 +243,7 @@ impl Priority {
///
/// Will overwrite the current Priority
#[inline(always)]
pub const unsafe fn new(value: u8) -> Self {
Priority {
inner: Cell::new(value),
}
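The only change in this hunk is that Priority::new became a const fn, presumably so generated code can construct a Priority in const contexts. A hypothetical illustration:

    /// Hypothetical: a const constructor allows building a Priority inside
    /// other const fns or const initializers. The value 0 is arbitrary.
    const fn lowest_priority() -> Priority {
        unsafe { Priority::new(0) }
    }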


@ -1,14 +1,125 @@
//! Real-Time Interrupt-driven Concurrency (RTIC) framework for ARM Cortex-M microcontrollers.
//!
//! **IMPORTANT**: This crate is published as [`cortex-m-rtic`] on crates.io but the name of the
//! library is `rtic`.
//!
//! [`cortex-m-rtic`]: https://crates.io/crates/cortex-m-rtic
//!
//! The user level documentation can be found [here].
//!
//! [here]: https://rtic.rs
//!
//! Don't forget to check the documentation of the `#[app]` attribute (listed under the reexports
//! section), which is the main component of the framework.
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate is compiled and tested with the latest toolchain (rolling) as of the release date.
//! If you run into compilation errors, try the latest stable release of the rust toolchain.
//!
//! # Semantic Versioning
//!
//! Like the Rust project, this crate adheres to [SemVer]: breaking changes in the API and semantics
//! require a *semver bump* (since 1.0.0 a new major version release), with the exception of breaking changes
//! that fix soundness issues -- those are considered bug fixes and can be landed in a new patch
//! release.
//!
//! [SemVer]: https://semver.org/spec/v2.0.0.html
#![deny(missing_docs)]
#![deny(rust_2021_compatibility)]
#![deny(rust_2018_compatibility)]
#![deny(rust_2018_idioms)]
#![no_std]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg",
html_favicon_url = "https://raw.githubusercontent.com/rtic-rs/cortex-m-rtic/master/book/en/src/RTIC.svg"
)]
//deny_warnings_placeholder_for_ci
#![allow(clippy::inline_always)]
use cortex_m::{interrupt::InterruptNumber, peripheral::NVIC};
pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex};
pub use rtic_macros::app;
pub use rtic_monotonic::{self, Monotonic};
/// module `mutex::prelude` provides `Mutex` and multi-lock variants. Recommended over `mutex_prelude`
pub mod mutex {
pub use rtic_core::prelude;
pub use rtic_core::Mutex;
}
#[doc(hidden)]
pub mod export;
#[doc(hidden)]
pub mod sll;
#[doc(hidden)]
mod tq;
/// Sets the given `interrupt` as pending
///
/// This is a convenience function around
/// [`NVIC::pend`](../cortex_m/peripheral/struct.NVIC.html#method.pend)
pub fn pend<I>(interrupt: I)
where
I: InterruptNumber,
{
NVIC::pend(interrupt);
}
use core::cell::UnsafeCell;
/// Internal replacement for `static mut T`
///
/// Used to represent RTIC Resources
///
/// Soundness:
/// 1) Unsafe API for internal use only
/// 2) ``get_mut(&self) -> *mut T``
/// returns a raw mutable pointer to the inner T
/// casting to &mut T is under control of RTIC
/// RTIC ensures &mut T to be unique under Rust aliasing rules.
///
/// Implementation uses the underlying ``UnsafeCell<T>``
/// self.0.get() -> *mut T
///
/// 3) get(&self) -> *const T
/// returns a raw immutable (const) pointer to the inner T
/// casting to &T is under control of RTIC
/// RTIC ensures &T to be shared under Rust aliasing rules.
///
/// Implementation uses the underlying ``UnsafeCell<T>``
/// self.0.get() -> *mut T, demoted to *const T
///
#[repr(transparent)]
pub struct RacyCell<T>(UnsafeCell<T>);
impl<T> RacyCell<T> {
/// Create a ``RacyCell``
#[inline(always)]
pub const fn new(value: T) -> Self {
RacyCell(UnsafeCell::new(value))
}
/// Get `*mut T`
///
/// # Safety
///
/// See documentation notes for [`RacyCell`]
#[inline(always)]
pub unsafe fn get_mut(&self) -> *mut T {
self.0.get()
}
/// Get `*const T`
///
/// # Safety
///
/// See documentation notes for [`RacyCell`]
#[inline(always)]
pub unsafe fn get(&self) -> *const T {
self.0.get()
}
}
unsafe impl<T> Sync for RacyCell<T> {}
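A brief usage sketch (not from this commit) of the RacyCell pattern described above; the static name and the access pattern are hypothetical, and the safety argument holds only under the stated assumption:

    /// Hypothetical RTIC-style resource backed by a RacyCell.
    static SHARED_COUNTER: RacyCell<u32> = RacyCell::new(0);

    fn bump_counter() {
        // Sound only under the assumption that the framework guarantees no
        // other context accesses SHARED_COUNTER while this &mut exists.
        unsafe {
            let counter: &mut u32 = &mut *SHARED_COUNTER.get_mut();
            *counter += 1;
        }
    }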

src/sll.rs (new file, 421 lines)

@ -0,0 +1,421 @@
//! An intrusive sorted priority linked list, designed for use in `Future`s in RTIC.
use core::cmp::Ordering;
use core::fmt;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
/// Marker for Min sorted [`IntrusiveSortedLinkedList`].
pub struct Min;
/// Marker for Max sorted [`IntrusiveSortedLinkedList`].
pub struct Max;
/// The linked list kind: min-list or max-list
pub trait Kind: private::Sealed {
#[doc(hidden)]
fn ordering() -> Ordering;
}
impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
/// Sealed traits
mod private {
pub trait Sealed {}
}
impl private::Sealed for Max {}
impl private::Sealed for Min {}
/// A node in the [`IntrusiveSortedLinkedList`].
pub struct Node<T> {
pub val: T,
next: Option<NonNull<Node<T>>>,
}
impl<T> Node<T> {
pub fn new(val: T) -> Self {
Self { val, next: None }
}
}
/// The linked list.
pub struct IntrusiveSortedLinkedList<'a, T, K> {
head: Option<NonNull<Node<T>>>,
_kind: PhantomData<K>,
_lt: PhantomData<&'a ()>,
}
impl<'a, T, K> fmt::Debug for IntrusiveSortedLinkedList<'a, T, K>
where
T: Ord + core::fmt::Debug,
K: Kind,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut l = f.debug_list();
let mut current = self.head;
while let Some(head) = current {
let head = unsafe { head.as_ref() };
current = head.next;
l.entry(&head.val);
}
l.finish()
}
}
impl<'a, T, K> IntrusiveSortedLinkedList<'a, T, K>
where
T: Ord,
K: Kind,
{
pub const fn new() -> Self {
Self {
head: None,
_kind: PhantomData,
_lt: PhantomData,
}
}
// Push to the list.
pub fn push(&mut self, new: &'a mut Node<T>) {
unsafe {
if let Some(head) = self.head {
if head.as_ref().val.cmp(&new.val) != K::ordering() {
// This is newer than head, replace head
new.next = self.head;
self.head = Some(NonNull::new_unchecked(new));
} else {
// It's not head, search the list for the correct placement
let mut current = head;
while let Some(next) = current.as_ref().next {
if next.as_ref().val.cmp(&new.val) != K::ordering() {
break;
}
current = next;
}
new.next = current.as_ref().next;
current.as_mut().next = Some(NonNull::new_unchecked(new));
}
} else {
// List is empty, place at head
self.head = Some(NonNull::new_unchecked(new))
}
}
}
/// Get an iterator over the sorted list.
pub fn iter(&self) -> Iter<'_, T, K> {
Iter {
_list: self,
index: self.head,
}
}
/// Find an element in the list that can be changed and resorted.
pub fn find_mut<F>(&mut self, mut f: F) -> Option<FindMut<'_, 'a, T, K>>
where
F: FnMut(&T) -> bool,
{
let head = self.head?;
// Special-case, first element
if f(&unsafe { head.as_ref() }.val) {
return Some(FindMut {
is_head: true,
prev_index: None,
index: self.head,
list: self,
maybe_changed: false,
});
}
let mut current = head;
while let Some(next) = unsafe { current.as_ref() }.next {
if f(&unsafe { next.as_ref() }.val) {
return Some(FindMut {
is_head: false,
prev_index: Some(current),
index: Some(next),
list: self,
maybe_changed: false,
});
}
current = next;
}
None
}
/// Peek at the first element.
pub fn peek(&self) -> Option<&T> {
self.head.map(|head| unsafe { &head.as_ref().val })
}
/// Pops the first element in the list.
///
/// Complexity is worst-case `O(1)`.
pub fn pop(&mut self) -> Option<&'a Node<T>> {
if let Some(head) = self.head {
let v = unsafe { head.as_ref() };
self.head = v.next;
Some(v)
} else {
None
}
}
/// Checks if the linked list is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.head.is_none()
}
}
/// Iterator for the linked list.
pub struct Iter<'a, T, K>
where
T: Ord,
K: Kind,
{
_list: &'a IntrusiveSortedLinkedList<'a, T, K>,
index: Option<NonNull<Node<T>>>,
}
impl<'a, T, K> Iterator for Iter<'a, T, K>
where
T: Ord,
K: Kind,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
let index = self.index?;
let node = unsafe { index.as_ref() };
self.index = node.next;
Some(&node.val)
}
}
/// Comes from [`IntrusiveSortedLinkedList::find_mut`].
pub struct FindMut<'a, 'b, T, K>
where
T: Ord + 'b,
K: Kind,
{
list: &'a mut IntrusiveSortedLinkedList<'b, T, K>,
is_head: bool,
prev_index: Option<NonNull<Node<T>>>,
index: Option<NonNull<Node<T>>>,
maybe_changed: bool,
}
impl<'a, 'b, T, K> FindMut<'a, 'b, T, K>
where
T: Ord,
K: Kind,
{
unsafe fn pop_internal(&mut self) -> &'b mut Node<T> {
if self.is_head {
// If it is the head element, we can do a normal pop
let mut head = self.list.head.unwrap_unchecked();
let v = head.as_mut();
self.list.head = v.next;
v
} else {
// Somewhere in the list
let mut prev = self.prev_index.unwrap_unchecked();
let mut curr = self.index.unwrap_unchecked();
// Re-point the previous index
prev.as_mut().next = curr.as_ref().next;
curr.as_mut()
}
}
/// This will pop the element from the list.
///
/// Complexity is worst-case `O(1)`.
#[inline]
pub fn pop(mut self) -> &'b mut Node<T> {
unsafe { self.pop_internal() }
}
/// This will resort the element into the correct position in the list if needed. The resorting
/// will only happen if the element has been accessed mutably.
///
/// Same as calling `drop`.
///
/// Complexity is worst-case `O(N)`.
#[inline]
pub fn finish(self) {
drop(self)
}
}
impl<'b, T, K> Drop for FindMut<'_, 'b, T, K>
where
T: Ord + 'b,
K: Kind,
{
fn drop(&mut self) {
// Only resort the list if the element has changed
if self.maybe_changed {
unsafe {
let val = self.pop_internal();
self.list.push(val);
}
}
}
}
impl<T, K> Deref for FindMut<'_, '_, T, K>
where
T: Ord,
K: Kind,
{
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &self.index.unwrap_unchecked().as_ref().val }
}
}
impl<T, K> DerefMut for FindMut<'_, '_, T, K>
where
T: Ord,
K: Kind,
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.maybe_changed = true;
unsafe { &mut self.index.unwrap_unchecked().as_mut().val }
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn const_new() {
static mut _V1: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
}
#[test]
fn test_peek() {
let mut ll: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
let mut a = Node { val: 1, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &1);
let mut a = Node { val: 2, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &2);
let mut a = Node { val: 3, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &3);
let mut ll: IntrusiveSortedLinkedList<u32, Min> = IntrusiveSortedLinkedList::new();
let mut a = Node { val: 2, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &2);
let mut a = Node { val: 1, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &1);
let mut a = Node { val: 3, next: None };
ll.push(&mut a);
assert_eq!(ll.peek().unwrap(), &1);
}
#[test]
fn test_empty() {
let ll: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
assert!(ll.is_empty())
}
#[test]
fn test_updating() {
let mut ll: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
let mut a = Node { val: 1, next: None };
ll.push(&mut a);
let mut a = Node { val: 2, next: None };
ll.push(&mut a);
let mut a = Node { val: 3, next: None };
ll.push(&mut a);
let mut find = ll.find_mut(|v| *v == 2).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1002);
let mut find = ll.find_mut(|v| *v == 3).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1003);
// Remove largest element
ll.find_mut(|v| *v == 1003).unwrap().pop();
assert_eq!(ll.peek().unwrap(), &1002);
}
#[test]
fn test_updating_1() {
let mut ll: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
let mut a = Node { val: 1, next: None };
ll.push(&mut a);
let v = ll.pop().unwrap();
assert_eq!(v.val, 1);
}
#[test]
fn test_updating_2() {
let mut ll: IntrusiveSortedLinkedList<u32, Max> = IntrusiveSortedLinkedList::new();
let mut a = Node { val: 1, next: None };
ll.push(&mut a);
let mut find = ll.find_mut(|v| *v == 1).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1001);
}
}
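To make the "intrusive" aspect concrete, a small sketch (not from this commit): the caller owns the Node storage and the list only links to it, which is what later lets the timer queue link wakers stored inside each task's future without allocation. The function below is hypothetical:

    fn intrusive_usage() {
        let mut list: IntrusiveSortedLinkedList<'_, u32, Min> = IntrusiveSortedLinkedList::new();

        // Nodes are plain local variables; push() links them, it does not copy them.
        let mut a = Node::new(3);
        let mut b = Node::new(1);
        list.push(&mut a);
        list.push(&mut b);

        // Min-sorted: the smallest value sits at the head.
        assert_eq!(list.peek(), Some(&1));
    }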

src/tq.rs (277 changed lines)

@ -1,29 +1,28 @@
use crate::{
sll::{IntrusiveSortedLinkedList, Min as IsslMin, Node as IntrusiveNode},
Monotonic,
};
use core::cmp::Ordering;
use core::task::Waker;
use heapless::sorted_linked_list::{LinkedIndexU16, Min as SllMin, SortedLinkedList};
pub struct TimerQueue<'a, Mono, Task, const N_TASK: usize>
where
Mono: Monotonic,
Task: Copy,
{
pub task_queue: SortedLinkedList<TaskNotReady<Mono, Task>, LinkedIndexU16, SllMin, N_TASK>,
pub waker_queue: IntrusiveSortedLinkedList<'a, WakerNotReady<Mono>, IsslMin>,
}
impl<'a, Mono, Task, const N_TASK: usize> TimerQueue<'a, Mono, Task, N_TASK>
where
Mono: Monotonic + 'a,
Task: Copy,
{
fn check_if_enable<F1, F2>(
&self,
instant: Mono::Instant,
enable_interrupt: F1,
pend_handler: F2,
mono: Option<&mut Mono>,
@ -33,11 +32,17 @@ where
{
// Check if the top contains a non-empty element and if that element is
// greater than nr
let if_task_heap_max_greater_than_nr = self
.task_queue
.peek()
.map_or(true, |head| instant < head.instant);
let if_waker_heap_max_greater_than_nr = self
.waker_queue
.peek()
.map_or(true, |head| instant < head.instant);
if if_task_heap_max_greater_than_nr || if_waker_heap_max_greater_than_nr {
if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE && self.is_empty() {
if let Some(mono) = mono {
mono.enable_timer();
}
@ -46,19 +51,49 @@ where
pend_handler();
}
}
/// Enqueue a task without checking if it is full
#[inline]
pub unsafe fn enqueue_task_unchecked<F1, F2>(
&mut self,
nr: TaskNotReady<Mono, Task>,
enable_interrupt: F1,
pend_handler: F2,
mono: Option<&mut Mono>,
) where
F1: FnOnce(),
F2: FnOnce(),
{
self.check_if_enable(nr.instant, enable_interrupt, pend_handler, mono);
self.task_queue.push_unchecked(nr);
}
/// Enqueue a waker
#[inline]
pub fn enqueue_waker<F1, F2>(
&mut self,
nr: &'a mut IntrusiveNode<WakerNotReady<Mono>>,
enable_interrupt: F1,
pend_handler: F2,
mono: Option<&mut Mono>,
) where
F1: FnOnce(),
F2: FnOnce(),
{
self.check_if_enable(nr.val.instant, enable_interrupt, pend_handler, mono);
self.waker_queue.push(nr);
}
/// Check if the whole timer queue (both task and waker queues) is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.task_queue.is_empty() && self.waker_queue.is_empty()
}
/// Cancel the marker value for a task
pub fn cancel_task_marker(&mut self, marker: u32) -> Option<(Task, u8)> {
if let Some(val) = self.task_queue.find_mut(|nr| nr.marker == marker) {
let nr = val.pop();
Some((nr.task, nr.index))
@ -67,16 +102,23 @@ where
}
}
/// Cancel the marker value for a waker
pub fn cancel_waker_marker(&mut self, marker: u32) {
if let Some(val) = self.waker_queue.find_mut(|nr| nr.marker == marker) {
let _ = val.pop();
}
}
/// Update the instant at a marker value for a task to a new instant
#[allow(clippy::result_unit_err)]
pub fn update_task_marker<F: FnOnce()>(
&mut self,
marker: u32,
new_marker: u32,
instant: Mono::Instant,
pend_handler: F,
) -> Result<(), ()> {
if let Some(mut val) = self.task_queue.find_mut(|nr| nr.marker == marker) {
val.instant = instant;
val.marker = new_marker;
@ -89,6 +131,62 @@ where
}
}
fn dequeue_task_queue(
&mut self,
instant: Mono::Instant,
mono: &mut Mono,
) -> Option<(Task, u8)> {
if instant <= mono.now() {
// task became ready
let nr = unsafe { self.task_queue.pop_unchecked() };
Some((nr.task, nr.index))
} else {
// Set compare
mono.set_compare(instant);
// Double check that the instant we set is really in the future, else
// dequeue. If the monotonic is fast enough it can happen that from the
// read of now to the set of the compare, the time can overflow. This is to
// guard against this.
if instant <= mono.now() {
let nr = unsafe { self.task_queue.pop_unchecked() };
Some((nr.task, nr.index))
} else {
None
}
}
}
fn dequeue_waker_queue(&mut self, instant: Mono::Instant, mono: &mut Mono) -> bool {
let mut did_wake = false;
if instant <= mono.now() {
// Task became ready, wake the waker
if let Some(v) = self.waker_queue.pop() {
v.val.waker.wake_by_ref();
did_wake = true;
}
} else {
// Set compare
mono.set_compare(instant);
// Double check that the instant we set is really in the future, else
// dequeue. If the monotonic is fast enough it can happen that from the
// read of now to the set of the compare, the time can overflow. This is to
// guard against this.
if instant <= mono.now() {
if let Some(v) = self.waker_queue.pop() {
v.val.waker.wake_by_ref();
did_wake = true;
}
}
}
did_wake
}
/// Dequeue a task from the ``TimerQueue``
pub fn dequeue<F>(&mut self, disable_interrupt: F, mono: &mut Mono) -> Option<(Task, u8)>
where
@ -96,59 +194,72 @@ where
{
mono.clear_compare_flag();
loop {
let tq = self.task_queue.peek().map(|p| p.instant);
let wq = self.waker_queue.peek().map(|p| p.instant);
let dequeue_task;
let instant;
match (tq, wq) {
(Some(tq_instant), Some(wq_instant)) => {
if tq_instant <= wq_instant {
dequeue_task = true;
instant = tq_instant;
} else {
dequeue_task = false;
instant = wq_instant;
}
}
(Some(tq_instant), None) => {
dequeue_task = true;
instant = tq_instant;
}
(None, Some(wq_instant)) => {
dequeue_task = false;
instant = wq_instant;
}
(None, None) => {
// The queue is empty, disable the interrupt.
if Mono::DISABLE_INTERRUPT_ON_EMPTY_QUEUE {
disable_interrupt();
mono.disable_timer();
}
return None;
}
}
if dequeue_task {
return self.dequeue_task_queue(instant, mono);
} else if !self.dequeue_waker_queue(instant, mono) {
return None;
} else {
// Run the dequeue again
}
}
}
}
pub struct TaskNotReady<Mono, Task>
where
Task: Copy,
Mono: Monotonic,
{
pub task: Task,
pub index: u8,
pub instant: Mono::Instant,
pub marker: u32,
}
impl<Mono, Task> Eq for TaskNotReady<Mono, Task>
where
Task: Copy,
Mono: Monotonic,
{
}
impl<Mono, Task> Ord for TaskNotReady<Mono, Task>
where
Task: Copy,
Mono: Monotonic,
@ -158,7 +269,7 @@ where
}
}
impl<Mono, Task> PartialEq for TaskNotReady<Mono, Task>
where
Task: Copy,
Mono: Monotonic,
@ -168,7 +279,7 @@ where
}
}
impl<Mono, Task> PartialOrd for TaskNotReady<Mono, Task>
where
Task: Copy,
Mono: Monotonic,
@ -177,3 +288,41 @@ where
Some(self.cmp(other))
}
}
pub struct WakerNotReady<Mono>
where
Mono: Monotonic,
{
pub waker: Waker,
pub instant: Mono::Instant,
pub marker: u32,
}
impl<Mono> Eq for WakerNotReady<Mono> where Mono: Monotonic {}
impl<Mono> Ord for WakerNotReady<Mono>
where
Mono: Monotonic,
{
fn cmp(&self, other: &Self) -> Ordering {
self.instant.cmp(&other.instant)
}
}
impl<Mono> PartialEq for WakerNotReady<Mono>
where
Mono: Monotonic,
{
fn eq(&self, other: &Self) -> bool {
self.instant == other.instant
}
}
impl<Mono> PartialOrd for WakerNotReady<Mono>
where
Mono: Monotonic,
{
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
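Finally, a hedged sketch (not from this commit) of how the new two-queue dequeue is intended to be driven from the monotonic timer interrupt; the function name and the closure body are hypothetical, and it assumes the imports at the top of this file:

    fn on_monotonic_interrupt<'a, Mono, Task, const N_TASK: usize>(
        tq: &mut TimerQueue<'a, Mono, Task, N_TASK>,
        mono: &mut Mono,
    ) where
        Mono: Monotonic + 'a,
        Task: Copy,
    {
        // Drain everything that is ready. Expired waker entries are woken inside
        // dequeue() itself; expired task entries are handed back so the caller can
        // move them to the ready queue of the matching dispatcher.
        while let Some((task, index)) = tq.dequeue(|| { /* disable the timer interrupt */ }, mono) {
            let _ = (task, index); // forward to the ready queue / dispatcher
        }
    }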