Save, init generation fixed

This commit is contained in:
Emil Fresk 2020-12-03 21:04:06 +01:00
parent 3b4c10e790
commit ef50aeb2e8
13 changed files with 229 additions and 242 deletions

View file

@ -3,7 +3,7 @@ use core::{
sync::atomic::{AtomicBool, Ordering},
};
pub use crate::tq::{NotReady, TimerQueue};
//pub use crate::tq::{NotReady, TimerQueue};
pub use bare_metal::CriticalSection;
#[cfg(armv7m)]
pub use cortex_m::register::basepri;

View file

@ -32,82 +32,20 @@
#![deny(missing_docs)]
#![deny(rust_2018_compatibility)]
#![deny(rust_2018_idioms)]
#![deny(warnings)]
// #![deny(warnings)]
#![no_std]
use core::ops::Sub;
use cortex_m::{
interrupt::Nr,
peripheral::{CBP, CPUID, DCB, DWT, FPB, FPU, ITM, MPU, NVIC, SCB, TPIU},
};
use cortex_m::{interrupt::Nr, peripheral::NVIC};
pub use cortex_m_rtic_macros::app;
pub use rtic_core::{prelude as mutex_prelude, Exclusive, Mutex};
pub use rtic_core::{prelude as mutex_prelude, Exclusive, monotonic::Monotonic, Mutex};
#[cfg(armv7m)]
pub mod cyccnt;
#[doc(hidden)]
pub mod export;
#[doc(hidden)]
mod tq;
/// `cortex_m::Peripherals` minus `SYST`
#[allow(non_snake_case)]
/// `cortex_m::Peripherals` minus `SYST`
///
/// `SYST` is deliberately withheld from the user: the commented-out field
/// below shows it was removed, and the timer queue in `tq.rs` conjures the
/// `SYST` handle itself via `transmute`, so handing it out here would alias
/// that exclusive access.
#[allow(non_snake_case)]
pub struct Peripherals {
/// Cache and branch predictor maintenance operations (not present on Cortex-M0 variants)
pub CBP: CBP,
/// CPUID
pub CPUID: CPUID,
/// Debug Control Block
pub DCB: DCB,
/// Data Watchpoint and Trace unit
pub DWT: DWT,
/// Flash Patch and Breakpoint unit (not present on Cortex-M0 variants)
pub FPB: FPB,
/// Floating Point Unit (only present on `thumbv7em-none-eabihf`)
pub FPU: FPU,
/// Instrumentation Trace Macrocell (not present on Cortex-M0 variants)
pub ITM: ITM,
/// Memory Protection Unit
pub MPU: MPU,
/// Nested Vector Interrupt Controller
pub NVIC: NVIC,
/// System Control Block
pub SCB: SCB,
// SysTick: System Timer — intentionally NOT exposed (see struct docs)
// pub SYST: SYST,
/// Trace Port Interface Unit (not present on Cortex-M0 variants)
pub TPIU: TPIU,
}
impl From<cortex_m::Peripherals> for Peripherals {
fn from(p: cortex_m::Peripherals) -> Self {
Self {
CBP: p.CBP,
CPUID: p.CPUID,
DCB: p.DCB,
DWT: p.DWT,
FPB: p.FPB,
FPU: p.FPU,
ITM: p.ITM,
MPU: p.MPU,
NVIC: p.NVIC,
SCB: p.SCB,
TPIU: p.TPIU,
}
}
}
/// Sets the given `interrupt` as pending
///
/// This is a convenience function around

312
src/tq.rs
View file

@ -1,156 +1,156 @@
use core::{
cmp::{self, Ordering},
convert::TryInto,
mem,
ops::Sub,
};
use cortex_m::peripheral::{SCB, SYST};
use heapless::{binary_heap::Min, ArrayLength, BinaryHeap};
use crate::Monotonic;
/// The timer queue: a min-heap of not-yet-ready tasks, ordered by the
/// `Instant` (on monotonic timer `M`) at which each becomes runnable, so
/// the earliest deadline is always at the top.
///
/// `T` is the task identifier type and `N` the heap capacity (a
/// `heapless` type-level length).
pub struct TimerQueue<M, T, N>(pub BinaryHeap<NotReady<M, T>, N, Min>)
where
M: Monotonic,
<M::Instant as Sub>::Output: TryInto<u32>,
N: ArrayLength<NotReady<M, T>>,
T: Copy;
impl<M, T, N> TimerQueue<M, T, N>
where
M: Monotonic,
<M::Instant as Sub>::Output: TryInto<u32>,
N: ArrayLength<NotReady<M, T>>,
T: Copy,
{
/// Enqueue a task without checking whether the heap is full.
///
/// If the new entry preempts the current head (or the queue was empty),
/// SysTick is made pending so the timeout gets re-armed by `dequeue`.
///
/// # Safety
///
/// - The caller must guarantee the heap has spare capacity
///   (`push_unchecked`).
/// - A `SYST` handle is conjured out of thin air via `transmute` to
///   enable its interrupt; the caller must hold exclusive access to
///   SysTick (as the RTIC macro-generated code does).
#[inline]
pub unsafe fn enqueue_unchecked(&mut self, nr: NotReady<M, T>) {
let mut is_empty = true;
// Check if the top contains a non-empty element and if that element is
// greater than nr
let if_heap_max_greater_than_nr = self
.0
.peek()
.map(|head| {
is_empty = false;
nr.instant < head.instant
})
.unwrap_or(true);
if if_heap_max_greater_than_nr {
if is_empty {
// First entry: the SysTick interrupt was disabled when the
// queue drained (see `dequeue`); re-enable it.
mem::transmute::<_, SYST>(()).enable_interrupt();
}
// Set SysTick pending so the handler re-arms the timeout for the
// new, earlier deadline
SCB::set_pendst();
}
self.0.push_unchecked(nr);
}
/// Dequeue a task from the TimerQueue
///
/// Returns `Some((task, index))` when the head's deadline has passed;
/// otherwise arms SysTick for the remaining time (or disables its
/// interrupt when the queue is empty) and returns `None`.
#[inline]
pub fn dequeue(&mut self) -> Option<(T, u8)> {
unsafe {
if let Some(instant) = self.0.peek().map(|p| p.instant) {
let now = M::now();
if instant < now {
// task became ready
let nr = self.0.pop_unchecked();
Some((nr.task, nr.index))
} else {
// set a new timeout
// SYST_RVR is a 24-bit reload register
const MAX: u32 = 0x00ffffff;
// Convert the remaining duration from monotonic-timer ticks
// to SysTick ticks via M's ratio
let ratio = M::ratio();
let dur = match (instant - now).try_into().ok().and_then(|x| {
x.checked_mul(ratio.numerator)
.map(|x| x / ratio.denominator)
}) {
// Conversion or multiply overflowed: clamp to the max
// reload; SysTick will fire again and re-evaluate
None => MAX,
// ARM Architecture Reference Manual says:
// "Setting SYST_RVR to zero has the effect of
// disabling the SysTick counter independently
// of the counter enable bit."
Some(0) => 1,
Some(x) => cmp::min(MAX, x),
};
mem::transmute::<_, SYST>(()).set_reload(dur);
// Start counting down from the new reload
mem::transmute::<_, SYST>(()).clear_current();
None
}
} else {
// The queue is empty
mem::transmute::<_, SYST>(()).disable_interrupt();
None
}
}
}
}
/// A task scheduled to run at a later `Instant`, as stored in the
/// `TimerQueue` heap.
pub struct NotReady<M, T>
where
T: Copy,
M: Monotonic,
<M::Instant as Sub>::Output: TryInto<u32>,
{
// Slot index returned alongside the task on dequeue — presumably the
// message/input buffer slot in the macro-generated queues; confirm
// against the generated code.
pub index: u8,
// Deadline: the time on monotonic timer `M` at which the task is ready
pub instant: M::Instant,
// Task identifier
pub task: T,
}
// Marker impl: equality of `NotReady` is defined purely by `instant`
// (see the `PartialEq` impl below) and is assumed to be a full
// equivalence relation for `M::Instant`, as required by the heap's
// `Ord`-based ordering.
impl<M, T> Eq for NotReady<M, T>
where
T: Copy,
M: Monotonic,
<M::Instant as Sub>::Output: TryInto<u32>,
{
}
/// Total order on `NotReady` entries by deadline alone, so the min-heap in
/// `TimerQueue` always surfaces the earliest `instant` first; `task` and
/// `index` never influence the ordering.
impl<M, T> Ord for NotReady<M, T>
where
    T: Copy,
    M: Monotonic,
    <M::Instant as Sub>::Output: TryInto<u32>,
{
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.instant, &other.instant)
    }
}
/// Two entries compare equal exactly when they are scheduled for the same
/// `instant`; `task` and `index` are ignored, consistent with `Ord`.
impl<M, T> PartialEq for NotReady<M, T>
where
    T: Copy,
    M: Monotonic,
    <M::Instant as Sub>::Output: TryInto<u32>,
{
    fn eq(&self, other: &Self) -> bool {
        self.instant.eq(&other.instant)
    }
}
/// Partial order delegating to the total order in `Ord` (deadline order),
/// so the two impls can never disagree.
impl<M, T> PartialOrd for NotReady<M, T>
where
    T: Copy,
    M: Monotonic,
    <M::Instant as Sub>::Output: TryInto<u32>,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // `other` is already `&Self`; the original `&other` created a
        // needless `&&Self` that deref coercion undid (clippy
        // `needless_borrow`).
        Some(self.cmp(other))
    }
}
// use core::{
// cmp::{self, Ordering},
// convert::TryInto,
// mem,
// ops::Sub,
// };
//
// use cortex_m::peripheral::{SCB, SYST};
// use heapless::{binary_heap::Min, ArrayLength, BinaryHeap};
//
// use crate::Monotonic;
//
// pub struct TimerQueue<M, T, N>(pub BinaryHeap<NotReady<M, T>, N, Min>)
// where
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// N: ArrayLength<NotReady<M, T>>,
// T: Copy;
//
// impl<M, T, N> TimerQueue<M, T, N>
// where
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// N: ArrayLength<NotReady<M, T>>,
// T: Copy,
// {
// /// # Safety
// ///
// /// Writing to memory with a transmute in order to enable
// /// interrupts of the SysTick timer
// ///
// /// Enqueue a task without checking if it is full
// #[inline]
// pub unsafe fn enqueue_unchecked(&mut self, nr: NotReady<M, T>) {
// let mut is_empty = true;
// // Check if the top contains a non-empty element and if that element is
// // greater than nr
// let if_heap_max_greater_than_nr = self
// .0
// .peek()
// .map(|head| {
// is_empty = false;
// nr.instant < head.instant
// })
// .unwrap_or(true);
// if if_heap_max_greater_than_nr {
// if is_empty {
// mem::transmute::<_, SYST>(()).enable_interrupt();
// }
//
// // Set SysTick pending
// SCB::set_pendst();
// }
//
// self.0.push_unchecked(nr);
// }
//
// /// Dequeue a task from the TimerQueue
// #[inline]
// pub fn dequeue(&mut self) -> Option<(T, u8)> {
// unsafe {
// if let Some(instant) = self.0.peek().map(|p| p.instant) {
// let now = M::now();
//
// if instant < now {
// // task became ready
// let nr = self.0.pop_unchecked();
//
// Some((nr.task, nr.index))
// } else {
// // set a new timeout
// const MAX: u32 = 0x00ffffff;
//
// let ratio = M::ratio();
// let dur = match (instant - now).try_into().ok().and_then(|x| {
// x.checked_mul(ratio.numerator)
// .map(|x| x / ratio.denominator)
// }) {
// None => MAX,
//
// // ARM Architecture Reference Manual says:
// // "Setting SYST_RVR to zero has the effect of
// // disabling the SysTick counter independently
// // of the counter enable bit."
// Some(0) => 1,
//
// Some(x) => cmp::min(MAX, x),
// };
// mem::transmute::<_, SYST>(()).set_reload(dur);
//
// // Start counting down from the new reload
// mem::transmute::<_, SYST>(()).clear_current();
//
// None
// }
// } else {
// // The queue is empty
// mem::transmute::<_, SYST>(()).disable_interrupt();
//
// None
// }
// }
// }
// }
//
// pub struct NotReady<M, T>
// where
// T: Copy,
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// {
// pub index: u8,
// pub instant: M::Instant,
// pub task: T,
// }
//
// impl<M, T> Eq for NotReady<M, T>
// where
// T: Copy,
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// {
// }
//
// impl<M, T> Ord for NotReady<M, T>
// where
// T: Copy,
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// {
// fn cmp(&self, other: &Self) -> Ordering {
// self.instant.cmp(&other.instant)
// }
// }
//
// impl<M, T> PartialEq for NotReady<M, T>
// where
// T: Copy,
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// {
// fn eq(&self, other: &Self) -> bool {
// self.instant == other.instant
// }
// }
//
// impl<M, T> PartialOrd for NotReady<M, T>
// where
// T: Copy,
// M: Monotonic,
// <M::Instant as Sub>::Output: TryInto<u32>,
// {
// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
// Some(self.cmp(&other))
// }
// }