Merge pull request #205 from japaric/heterogeneous

rtfm-syntax refactor + heterogeneous multi-core support
This commit is contained in:
Jorge Aparicio 2019-09-15 17:09:40 +00:00 committed by GitHub
commit 4ff28e9d13
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
172 changed files with 5586 additions and 6435 deletions

View file

@ -3,22 +3,28 @@ language: rust
matrix: matrix:
include: include:
# NOTE used to build docs on successful merges to master # NOTE used to build docs on successful merges to master
# - env: TARGET=x86_64-unknown-linux-gnu
# - env: TARGET=thumbv6m-none-eabi
# if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
# - env: TARGET=thumbv7m-none-eabi
# if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
- env: TARGET=x86_64-unknown-linux-gnu - env: TARGET=x86_64-unknown-linux-gnu
rust: nightly
# if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master) # MSRV
- env: TARGET=thumbv7m-none-eabi
rust: 1.36.0
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
- env: TARGET=thumbv6m-none-eabi - env: TARGET=thumbv6m-none-eabi
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
- env: TARGET=thumbv7m-none-eabi
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
# compile-fail tests
- env: TARGET=x86_64-unknown-linux-gnu
rust: nightly rust: nightly
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master) if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
# heterogeneous multi-core support
- env: TARGET=thumbv6m-none-eabi
rust: nightly
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)
- env: TARGET=thumbv7m-none-eabi - env: TARGET=thumbv7m-none-eabi
rust: nightly rust: nightly
if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master) if: (branch = staging OR branch = trying) OR (type = pull_request AND branch = master)

View file

@ -12,61 +12,75 @@ license = "MIT OR Apache-2.0"
name = "cortex-m-rtfm" name = "cortex-m-rtfm"
readme = "README.md" readme = "README.md"
repository = "https://github.com/japaric/cortex-m-rtfm" repository = "https://github.com/japaric/cortex-m-rtfm"
version = "0.5.0-alpha.1" version = "0.5.0-beta.1"
[lib] [lib]
name = "rtfm" name = "rtfm"
[[example]] [[example]]
name = "baseline" name = "baseline"
required-features = ["timer-queue"] required-features = ["__v7"]
[[example]] [[example]]
name = "periodic" name = "periodic"
required-features = ["timer-queue"] required-features = ["__v7"]
[[example]] [[example]]
name = "pool" name = "pool"
# this example doesn't need this feature but only works on ARMv7-M required-features = ["__v7"]
# specifying the feature here avoids compiling this for ARMv6-M
required-features = ["timer-queue"]
[[example]] [[example]]
name = "schedule" name = "schedule"
required-features = ["timer-queue"] required-features = ["__v7"]
[[example]]
name = "t-cfg"
required-features = ["__v7"]
[[example]]
name = "t-schedule"
required-features = ["__v7"]
[[example]] [[example]]
name = "types" name = "types"
required-features = ["timer-queue"] required-features = ["__v7"]
[dependencies] [dependencies]
cortex-m = "0.5.8" cortex-m = "0.6.0"
cortex-m-rt = "0.6.7" cortex-m-rtfm-macros = { path = "macros" }
cortex-m-rtfm-macros = { path = "macros", version = "0.5.0-alpha.1" } rtfm-core = { git = "https://github.com/japaric/rtfm-core" }
heapless = "0.5.0-alpha.1" cortex-m-rt = "0.6.9"
heapless = "0.5.0"
[dependencies.microamp]
optional = true
version = "0.1.0-alpha.2"
[dev-dependencies] [dev-dependencies]
cortex-m-semihosting = "0.3.2"
lm3s6965 = "0.1.3" lm3s6965 = "0.1.3"
panic-halt = "0.2.0" panic-halt = "0.2.0"
cortex-m-semihosting = "0.3.3"
[dev-dependencies.panic-semihosting] [dev-dependencies.panic-semihosting]
features = ["exit"] features = ["exit"]
version = "0.5.1" version = "0.5.2"
[features]
timer-queue = ["cortex-m-rtfm-macros/timer-queue"]
[target.x86_64-unknown-linux-gnu.dev-dependencies] [target.x86_64-unknown-linux-gnu.dev-dependencies]
compiletest_rs = "0.3.21" compiletest_rs = "0.3.22"
tempdir = "0.3.7"
[package.metadata.docs.rs] [features]
features = ["timer-queue"] heterogeneous = ["cortex-m-rtfm-macros/heterogeneous", "microamp"]
homogeneous = ["cortex-m-rtfm-macros/homogeneous"]
# used for testing this crate; do not use in applications
__v7 =[]
[profile.release] [profile.release]
codegen-units = 1 codegen-units = 1
lto = true lto = true
[workspace] [workspace]
members = ["macros"] members = [
"heterogeneous",
"homogeneous",
"macros",
]

View file

@ -31,9 +31,7 @@ A concurrency framework for building real time systems.
- **Highly efficient memory usage**: All the tasks share a single call stack and - **Highly efficient memory usage**: All the tasks share a single call stack and
there's no hard dependency on a dynamic memory allocator. there's no hard dependency on a dynamic memory allocator.
- **All Cortex-M devices are supported**. The core features of RTFM are - **All Cortex-M devices are fully supported**.
supported on all Cortex-M devices. The timer queue is currently only supported
on ARMv7-M devices.
- This task model is amenable to known WCET (Worst Case Execution Time) analysis - This task model is amenable to known WCET (Worst Case Execution Time) analysis
and scheduling analysis techniques. (Though we haven't yet developed Rust and scheduling analysis techniques. (Though we haven't yet developed Rust

View file

@ -4,7 +4,7 @@
- [RTFM by example](./by-example.md) - [RTFM by example](./by-example.md)
- [The `app` attribute](./by-example/app.md) - [The `app` attribute](./by-example/app.md)
- [Resources](./by-example/resources.md) - [Resources](./by-example/resources.md)
- [Tasks](./by-example/tasks.md) - [Software tasks](./by-example/tasks.md)
- [Timer queue](./by-example/timer-queue.md) - [Timer queue](./by-example/timer-queue.md)
- [Types, Send and Sync](./by-example/types-send-sync.md) - [Types, Send and Sync](./by-example/types-send-sync.md)
- [Starting a new project](./by-example/new.md) - [Starting a new project](./by-example/new.md)
@ -18,3 +18,5 @@
- [Ceiling analysis](./internals/ceilings.md) - [Ceiling analysis](./internals/ceilings.md)
- [Software tasks](./internals/tasks.md) - [Software tasks](./internals/tasks.md)
- [Timer queue](./internals/timer-queue.md) - [Timer queue](./internals/timer-queue.md)
- [Homogeneous multi-core support](./homogeneous.md)
- [Heterogeneous multi-core support](./heterogeneous.md)

View file

@ -10,8 +10,8 @@ All RTFM applications use the [`app`] attribute (`#[app(..)]`). This attribute
must be applied to a `const` item that contains items. The `app` attribute has must be applied to a `const` item that contains items. The `app` attribute has
a mandatory `device` argument that takes a *path* as a value. This path must a mandatory `device` argument that takes a *path* as a value. This path must
point to a *peripheral access crate* (PAC) generated using [`svd2rust`] point to a *peripheral access crate* (PAC) generated using [`svd2rust`]
**v0.14.x**. The `app` attribute will expand into a suitable entry point so it's **v0.14.x** or newer. The `app` attribute will expand into a suitable entry
not required to use the [`cortex_m_rt::entry`] attribute. point so it's not required to use the [`cortex_m_rt::entry`] attribute.
[`app`]: ../../api/cortex_m_rtfm_macros/attr.app.html [`app`]: ../../api/cortex_m_rtfm_macros/attr.app.html
[`svd2rust`]: https://crates.io/crates/svd2rust [`svd2rust`]: https://crates.io/crates/svd2rust
@ -28,22 +28,23 @@ not required to use the [`cortex_m_rt::entry`] attribute.
Within the pseudo-module the `app` attribute expects to find an initialization Within the pseudo-module the `app` attribute expects to find an initialization
function marked with the `init` attribute. This function must have signature function marked with the `init` attribute. This function must have signature
`fn(init::Context) [-> init::LateResources]`. `fn(init::Context) [-> init::LateResources]` (the return type is not always
required).
This initialization function will be the first part of the application to run. This initialization function will be the first part of the application to run.
The `init` function will run *with interrupts disabled* and has exclusive access The `init` function will run *with interrupts disabled* and has exclusive access
to Cortex-M and device specific peripherals through the `core` and `device` to Cortex-M and, optionally, device specific peripherals through the `core` and
variables fields of `init::Context`. Not all Cortex-M peripherals are available `device` fields of `init::Context`.
in `core` because the RTFM runtime takes ownership of some of them -- for more
details see the [`rtfm::Peripherals`] struct.
`static mut` variables declared at the beginning of `init` will be transformed `static mut` variables declared at the beginning of `init` will be transformed
into `&'static mut` references that are safe to access. into `&'static mut` references that are safe to access.
[`rtfm::Peripherals`]: ../../api/rtfm/struct.Peripherals.html [`rtfm::Peripherals`]: ../../api/rtfm/struct.Peripherals.html
The example below shows the types of the `core` and `device` variables and The example below shows the types of the `core` and `device` fields and
showcases safe access to a `static mut` variable. showcases safe access to a `static mut` variable. The `device` field is only
available when the `peripherals` argument is set to `true` (it defaults to
`false`).
``` rust ``` rust
{{#include ../../../../examples/init.rs}} {{#include ../../../../examples/init.rs}}
@ -64,7 +65,7 @@ signature `fn(idle::Context) - > !`.
When present, the runtime will execute the `idle` task after `init`. Unlike When present, the runtime will execute the `idle` task after `init`. Unlike
`init`, `idle` will run *with interrupts enabled* and it's not allowed to return `init`, `idle` will run *with interrupts enabled* and it's not allowed to return
so it runs forever. so it must run forever.
When no `idle` function is declared, the runtime sets the [SLEEPONEXIT] bit and When no `idle` function is declared, the runtime sets the [SLEEPONEXIT] bit and
then sends the microcontroller to sleep after running `init`. then sends the microcontroller to sleep after running `init`.
@ -84,21 +85,67 @@ The example below shows that `idle` runs after `init`.
$ cargo run --example idle $ cargo run --example idle
{{#include ../../../../ci/expected/idle.run}}``` {{#include ../../../../ci/expected/idle.run}}```
## `interrupt` / `exception` ## Hardware tasks
Just like you would do with the `cortex-m-rt` crate you can use the `interrupt` To declare interrupt handlers the framework provides a `#[task]` attribute that
and `exception` attributes within the `app` pseudo-module to declare interrupt can be attached to functions. This attribute takes a `binds` argument whose
and exception handlers. In RTFM, we refer to interrupt and exception handlers as value is the name of the interrupt to which the handler will be bound to; the
*hardware* tasks. function adorned with this attribute becomes the interrupt handler. Within the
framework these type of tasks are referred to as *hardware* tasks, because they
start executing in reaction to a hardware event.
The example below demonstrates the use of the `#[task]` attribute to declare an
interrupt handler. Like in the case of `#[init]` and `#[idle]` local `static
mut` variables are safe to use within a hardware task.
``` rust ``` rust
{{#include ../../../../examples/interrupt.rs}} {{#include ../../../../examples/hardware.rs}}
``` ```
``` console ``` console
$ cargo run --example interrupt $ cargo run --example interrupt
{{#include ../../../../ci/expected/interrupt.run}}``` {{#include ../../../../ci/expected/hardware.run}}```
So far all the RTFM applications we have seen look no different than the So far all the RTFM applications we have seen look no different than the
applications one can write using only the `cortex-m-rt` crate. In the next applications one can write using only the `cortex-m-rt` crate. From this point
section we start introducing features unique to RTFM. we start introducing features unique to RTFM.
## Priorities
The static priority of each handler can be declared in the `task` attribute
using the `priority` argument. Tasks can have priorities in the range `1..=(1 <<
NVIC_PRIO_BITS)` where `NVIC_PRIO_BITS` is a constant defined in the `device`
crate. When the `priority` argument is omitted the priority is assumed to be
`1`. The `idle` task has a non-configurable static priority of `0`, the lowest
priority.
When several tasks are ready to be executed the one with *highest* static
priority will be executed first. Task prioritization can be observed in the
following scenario: an interrupt signal arrives during the execution of a low
priority task; the signal puts the higher priority task in the pending state.
The difference in priority results in the higher priority task preempting the
lower priority one: the execution of the lower priority task is suspended and
the higher priority task is executed to completion. Once the higher priority
task has terminated the lower priority task is resumed.
The following example showcases the priority based scheduling of tasks.
``` rust
{{#include ../../../../examples/preempt.rs}}
```
``` console
$ cargo run --example preempt
{{#include ../../../../ci/expected/preempt.run}}```
Note that the task `gpiob` does *not* preempt task `gpioc` because its priority
is the *same* as `gpioc`'s. However, once `gpioc` terminates the execution of
task `gpiob` is prioritized over `gpioa`'s due to its higher priority. `gpioa`
is resumed only after `gpiob` terminates.
One more note about priorities: choosing a priority higher than what the device
supports (that is `1 << NVIC_PRIO_BITS`) will result in a compile error. Due to
limitations in the language the error message is currently far from helpful: it
will say something along the lines of "evaluation of constant value failed" and
the span of the error will *not* point out to the problematic interrupt value --
we are sorry about this!

View file

@ -36,8 +36,7 @@ $ cargo add lm3s6965 --vers 0.1.3
$ rm memory.x build.rs $ rm memory.x build.rs
``` ```
3. Add the `cortex-m-rtfm` crate as a dependency and, if you need it, enable the 3. Add the `cortex-m-rtfm` crate as a dependency.
`timer-queue` feature.
``` console ``` console
$ cargo add cortex-m-rtfm --allow-prerelease $ cargo add cortex-m-rtfm --allow-prerelease

View file

@ -1,22 +1,27 @@
## Resources ## Resources
One of the limitations of the attributes provided by the `cortex-m-rt` crate is The framework provides an abstraction to share data between any of the contexts
that sharing data (or peripherals) between interrupts, or between an interrupt we saw in the previous section (task handlers, `init` and `idle`): resources.
and the `entry` function, requires a `cortex_m::interrupt::Mutex`, which
*always* requires disabling *all* interrupts to access the data. Disabling all
the interrupts is not always required for memory safety but the compiler doesn't
have enough information to optimize the access to the shared data.
The `app` attribute has a full view of the application thus it can optimize Resources are data visible only to functions declared within the `#[app]`
access to `static` variables. In RTFM we refer to the `static` variables pseudo-module. The framework gives the user complete control over which context
declared inside the `app` pseudo-module as *resources*. To access a resource the can access which resource.
context (`init`, `idle`, `interrupt` or `exception`) one must first declare the
resource in the `resources` argument of its attribute.
In the example below two interrupt handlers access the same resource. No `Mutex` All resources are declared as a single `struct` within the `#[app]`
is required in this case because the two handlers run at the same priority and pseudo-module. Each field in the structure corresponds to a different resource.
no preemption is possible. The `SHARED` resource can only be accessed by these Resources can optionally be given an initial value using the `#[init]`
two handlers. attribute. Resources that are not given an initial value are referred to as
*late* resources and are covered in more detail in a follow up section in this
page.
Each context (task handler, `init` or `idle`) must declare the resources it
intends to access in its corresponding metadata attribute using the `resources`
argument. This argument takes a list of resource names as its value. The listed
resources are made available to the context under the `resources` field of the
`Context` structure.
The example application shown below contains two interrupt handlers that share
access to a resource named `shared`.
``` rust ``` rust
{{#include ../../../../examples/resource.rs}} {{#include ../../../../examples/resource.rs}}
@ -26,40 +31,39 @@ two handlers.
$ cargo run --example resource $ cargo run --example resource
{{#include ../../../../ci/expected/resource.run}}``` {{#include ../../../../ci/expected/resource.run}}```
## Priorities Note that the `shared` resource cannot be accessed from `idle`. Attempting to do
so results in a compile error.
The priority of each handler can be declared in the `interrupt` and `exception` ## `lock`
attributes. It's not possible to set the priority in any other way because the
runtime takes ownership of the `NVIC` peripheral thus it's also not possible to
change the priority of a handler / task at runtime. Thanks to this restriction
the framework has knowledge about the *static* priorities of all interrupt and
exception handlers.
Interrupts and exceptions can have priorities in the range `1..=(1 << In the presence of preemption critical sections are required to mutate shared
NVIC_PRIO_BITS)` where `NVIC_PRIO_BITS` is a constant defined in the `device` data in a data race free manner. As the framework has complete knowledge over
crate. The `idle` task has a priority of `0`, the lowest priority. the priorities of tasks and which tasks can access which resources it enforces
that critical sections are used where required for memory safety.
Resources that are shared between handlers that run at different priorities Where a critical section is required the framework hands out a resource proxy
require critical sections for memory safety. The framework ensures that critical instead of a reference. This resource proxy is a structure that implements the
sections are used but *only where required*: for example, no critical section is [`Mutex`] trait. The only method on this trait, [`lock`], runs its closure
required by the highest priority handler that has access to the resource. argument in a critical section.
The critical section API provided by the RTFM framework (see [`Mutex`]) is
based on dynamic priorities rather than on disabling interrupts. The consequence
is that these critical sections will prevent *some* handlers, including all the
ones that contend for the resource, from *starting* but will let higher priority
handlers, that don't contend for the resource, run.
[`Mutex`]: ../../api/rtfm/trait.Mutex.html [`Mutex`]: ../../api/rtfm/trait.Mutex.html
[`lock`]: ../../api/rtfm/trait.Mutex.html#method.lock
The critical section created by the `lock` API is based on dynamic priorities:
it temporarily raises the dynamic priority of the context to a *ceiling*
priority that prevents other tasks from preempting the critical section. This
synchronization protocol is known as the [Immediate Ceiling Priority Protocol
(ICPP)][icpp].
[icpp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol
In the example below we have three interrupt handlers with priorities ranging In the example below we have three interrupt handlers with priorities ranging
from one to three. The two handlers with the lower priorities contend for the from one to three. The two handlers with the lower priorities contend for the
`SHARED` resource. The lowest priority handler needs to [`lock`] the `shared` resource. The lowest priority handler needs to `lock` the
`SHARED` resource to access its data, whereas the mid priority handler can `shared` resource to access its data, whereas the mid priority handler can
directly access its data. The highest priority handler is free to preempt directly access its data. The highest priority handler, which cannot access
the critical section created by the lowest priority handler. the `shared` resource, is free to preempt the critical section created by the
lowest priority handler.
[`lock`]: ../../api/rtfm/trait.Mutex.html#method.lock
``` rust ``` rust
{{#include ../../../../examples/lock.rs}} {{#include ../../../../examples/lock.rs}}
@ -69,27 +73,17 @@ the critical section created by the lowest priority handler.
$ cargo run --example lock $ cargo run --example lock
{{#include ../../../../ci/expected/lock.run}}``` {{#include ../../../../ci/expected/lock.run}}```
One more note about priorities: choosing a priority higher than what the device
supports (that is `1 << NVIC_PRIO_BITS`) will result in a compile error. Due to
limitations in the language the error message is currently far from helpful: it
will say something along the lines of "evaluation of constant value failed" and
the span of the error will *not* point out to the problematic interrupt value --
we are sorry about this!
## Late resources ## Late resources
Unlike normal `static` variables, which need to be assigned an initial value Late resources are resources that are not given an initial value at compile
when declared, resources can be initialized at runtime. We refer to these using the `#[init]` attribute but instead are initialized at runtime using the
runtime initialized resources as *late resources*. Late resources are useful for `init::LateResources` values returned by the `init` function.
*moving* (as in transferring ownership) peripherals initialized in `init` into
interrupt and exception handlers.
Late resources are declared like normal resources but that are given an initial Late resources are useful for *moving* (as in transferring the ownership of)
value of `()` (the unit value). `init` must return the initial values of all peripherals initialized in `init` into interrupt handlers.
late resources packed in a `struct` of type `init::LateResources`.
The example below uses late resources to establish a lockless, one-way channel The example below uses late resources to establish a lockless, one-way channel
between the `UART0` interrupt handler and the `idle` function. A single producer between the `UART0` interrupt handler and the `idle` task. A single producer
single consumer [`Queue`] is used as the channel. The queue is split into single consumer [`Queue`] is used as the channel. The queue is split into
consumer and producer end points in `init` and then each end point is stored consumer and producer end points in `init` and then each end point is stored
in a different resource; `UART0` owns the producer resource and `idle` owns in a different resource; `UART0` owns the producer resource and `idle` owns
@ -105,22 +99,32 @@ the consumer resource.
$ cargo run --example late $ cargo run --example late
{{#include ../../../../ci/expected/late.run}}``` {{#include ../../../../ci/expected/late.run}}```
## `static` resources ## Only shared access
`static` variables can also be used as resources. Tasks can only get `&` By default the framework assumes that all tasks require exclusive access
(shared) references to these resources but locks are never required to access (`&mut-`) to resources but it is possible to specify that a task only requires
their data. You can think of `static` resources as plain `static` variables that shared access (`&-`) to a resource using the `&resource_name` syntax in the
can be initialized at runtime and have better scoping rules: you can control `resources` list.
which tasks can access the variable, instead of the variable being visible to
all the functions in the scope it was declared in.
In the example below a key is loaded (or created) at runtime and then used from The advantage of specifying shared access (`&-`) to a resource is that no locks
two tasks that run at different priorities. are required to access the resource even if the resource is contended by several
tasks running at different priorities. The downside is that the task only gets a
shared reference (`&-`) to the resource, limiting the operations it can perform
on it, but where a shared reference is enough this approach reduces the number
of required locks.
Note that in this release of RTFM it is not possible to request both exclusive
access (`&mut-`) and shared access (`&-`) to the *same* resource from different
tasks. Attempting to do so will result in a compile error.
In the example below a key (e.g. a cryptographic key) is loaded (or created) at
runtime and then used from two tasks that run at different priorities without
any kind of lock.
``` rust ``` rust
{{#include ../../../../examples/static.rs}} {{#include ../../../../examples/only-shared-access.rs}}
``` ```
``` console ``` console
$ cargo run --example static $ cargo run --example only-shared-access
{{#include ../../../../ci/expected/static.run}}``` {{#include ../../../../ci/expected/only-shared-access.run}}```

View file

@ -1,22 +1,23 @@
# Software tasks # Software tasks
RTFM treats interrupt and exception handlers as *hardware* tasks. Hardware tasks In addition to hardware tasks, which are invoked by the hardware in response to
are invoked by the hardware in response to events, like pressing a button. RTFM hardware events, RTFM also supports *software* tasks which can be spawned by the
also supports *software* tasks which can be spawned by the software from any application from any execution context.
execution context.
Software tasks can also be assigned priorities and are dispatched from interrupt Software tasks can also be assigned priorities and, under the hood, are
handlers. RTFM requires that free interrupts are declared in an `extern` block dispatched from interrupt handlers. RTFM requires that free interrupts are
when using software tasks; these free interrupts will be used to dispatch the declared in an `extern` block when using software tasks; some of these free
software tasks. An advantage of software tasks over hardware tasks is that many interrupts will be used to dispatch the software tasks. An advantage of software
tasks can be mapped to a single interrupt handler. tasks over hardware tasks is that many tasks can be mapped to a single interrupt
handler.
Software tasks are declared by applying the `task` attribute to functions. To be Software tasks are also declared using the `task` attribute but the `binds`
able to spawn a software task the name of the task must appear in the `spawn` argument must be omitted. To be able to spawn a software task from a context
argument of the context attribute (`init`, `idle`, `interrupt`, etc.). the name of the task must appear in the `spawn` argument of the context
attribute (`init`, `idle`, `task`, etc.).
The example below showcases three software tasks that run at 2 different The example below showcases three software tasks that run at 2 different
priorities. The three tasks map to 2 interrupt handlers. priorities. The three software tasks are mapped to 2 interrupt handlers.
``` rust ``` rust
{{#include ../../../../examples/task.rs}} {{#include ../../../../examples/task.rs}}
@ -44,15 +45,17 @@ $ cargo run --example message
## Capacity ## Capacity
Task dispatchers do *not* use any dynamic memory allocation. The memory required RTFM does *not* perform any form of heap-based memory allocation. The memory
to store messages is statically reserved. The framework will reserve enough required to store messages is statically reserved. By default the framework
space for every context to be able to spawn each task at most once. This is a minimizes the memory footprint of the application so each task has a message
sensible default but the "inbox" capacity of each task can be controlled using "capacity" of 1: meaning that at most one message can be posted to the task
the `capacity` argument of the `task` attribute. before it gets a chance to run. This default can be overridden for each task
using the `capacity` argument. This argument takes a positive integer that
indicates how many messages the task message buffer can hold.
The example below sets the capacity of the software task `foo` to 4. If the The example below sets the capacity of the software task `foo` to 4. If the
capacity is not specified then the second `spawn.foo` call in `UART0` would capacity is not specified then the second `spawn.foo` call in `UART0` would
fail. fail (panic).
``` rust ``` rust
{{#include ../../../../examples/capacity.rs}} {{#include ../../../../examples/capacity.rs}}
@ -61,3 +64,54 @@ fail.
``` console ``` console
$ cargo run --example capacity $ cargo run --example capacity
{{#include ../../../../ci/expected/capacity.run}}``` {{#include ../../../../ci/expected/capacity.run}}```
## Error handling
The `spawn` API returns the `Err` variant when there's no space to send the
message. In most scenarios spawning errors are handled in one of two ways:
- Panicking, using `unwrap`, `expect`, etc. This approach is used to catch the
programmer error (i.e. bug) of selecting a capacity that was too small. When
this panic is encountered during testing choosing a bigger capacity and
recompiling the program may fix the issue but sometimes it's necessary to dig
deeper and perform a timing analysis of the application to check if the
platform can deal with peak payload or if the processor needs to be replaced
with a faster one.
- Ignoring the result. In soft real time and non real time applications it may
be OK to occasionally lose data or fail to respond to some events during event
bursts. In those scenarios silently letting a `spawn` call fail may be
acceptable.
It should be noted that retrying a `spawn` call is usually the wrong approach as
this operation will likely never succeed in practice. Because there are only
context switches towards *higher* priority tasks retrying the `spawn` call of a
lower priority task will never let the scheduler dispatch said task meaning that
its message buffer will never be emptied. This situation is depicted in the
following snippet:
``` rust
#[rtfm::app(..)]
const APP: () = {
#[init(spawn = [foo, bar])]
fn init(cx: init::Context) {
cx.spawn.foo().unwrap();
cx.spawn.bar().unwrap();
}
#[task(priority = 2, spawn = [bar])]
fn foo(cx: foo::Context) {
// ..
// the program will get stuck here
while cx.spawn.bar(payload).is_err() {
// retry the spawn call if it failed
}
}
#[task(priority = 1)]
fn bar(cx: bar::Context, payload: i32) {
// ..
}
};
```

View file

@ -1,37 +1,43 @@
# Timer queue # Timer queue
When the `timer-queue` feature is enabled the RTFM framework includes a *global In contrast with the `spawn` API, which immediately spawns a software task onto
timer queue* that applications can use to *schedule* software tasks to run at the scheduler, the `schedule` API can be used to schedule a task to run some
some time in the future. time in the future.
> **NOTE**: The timer-queue feature can't be enabled when the target is To use the `schedule` API a monotonic timer must be first defined using the
> `thumbv6m-none-eabi` because there's no timer queue support for ARMv6-M. This `monotonic` argument of the `#[app]` attribute. This argument takes a path to a
> may change in the future. type that implements the [`Monotonic`] trait. The associated type, `Instant`, of
this trait represents a timestamp in arbitrary units and it's used extensively
in the `schedule` API -- it is suggested to model this type after [the one in
the standard library][std-instant].
> **NOTE**: When the `timer-queue` feature is enabled you will *not* be able to Although not shown in the trait definition (due to limitations in the trait /
> use the `SysTick` exception as a hardware task because the runtime uses it to type system) the subtraction of two `Instant`s should return some `Duration`
> implement the global timer queue. type (see [`core::time::Duration`]) and this `Duration` type must implement the
`TryInto<u32>` trait. The implementation of this trait must convert the
`Duration` value, which uses some arbitrary unit of time, into the "system timer
(SYST) clock cycles" time unit. The result of the conversion must be a 32-bit
integer. If the result of the conversion doesn't fit in a 32-bit number then the
operation must return an error, any error type.
To be able to schedule a software task the name of the task must appear in the [`Monotonic`]: ../../api/rtfm/trait.Monotonic.html
`schedule` argument of the context attribute. When scheduling a task the [std-instant]: https://doc.rust-lang.org/std/time/struct.Instant.html
[`Instant`] at which the task should be executed must be passed as the first [`core::time::Duration`]: https://doc.rust-lang.org/core/time/struct.Duration.html
argument of the `schedule` invocation.
[`Instant`]: ../../api/rtfm/struct.Instant.html For ARMv7+ targets the `rtfm` crate provides a `Monotonic` implementation based
on the built-in CYCle CouNTer (CYCCNT). Note that this is a 32-bit timer clocked
at the frequency of the CPU and as such it is not suitable for tracking time
spans in the order of seconds.
The RTFM runtime includes a monotonic, non-decreasing, 32-bit timer which can be To be able to schedule a software task from a context the name of the task must
queried using the `Instant::now` constructor. A [`Duration`] can be added to first appear in the `schedule` argument of the context attribute. When
`Instant::now()` to obtain an `Instant` into the future. The monotonic timer is scheduling a task the (user-defined) `Instant` at which the task should be
disabled while `init` runs so `Instant::now()` always returns the value executed must be passed as the first argument of the `schedule` invocation.
`Instant(0 /* clock cycles */)`; the timer is enabled right before the
interrupts are re-enabled and `idle` is executed.
[`Duration`]: ../../api/rtfm/struct.Duration.html
The example below schedules two tasks from `init`: `foo` and `bar`. `foo` is The example below schedules two tasks from `init`: `foo` and `bar`. `foo` is
scheduled to run 8 million clock cycles in the future. Next, `bar` is scheduled scheduled to run 8 million clock cycles in the future. Next, `bar` is scheduled
to run 4 million clock cycles in the future. `bar` runs before `foo` since it to run 4 million clock cycles in the future. Thus `bar` runs before `foo` since
was scheduled to run first. it was scheduled to run first.
> **IMPORTANT**: The examples that use the `schedule` API or the `Instant` > **IMPORTANT**: The examples that use the `schedule` API or the `Instant`
> abstraction will **not** properly work on QEMU because the Cortex-M cycle > abstraction will **not** properly work on QEMU because the Cortex-M cycle
@ -41,12 +47,19 @@ was scheduled to run first.
{{#include ../../../../examples/schedule.rs}} {{#include ../../../../examples/schedule.rs}}
``` ```
Running the program on real hardware produces the following output in the console: Running the program on real hardware produces the following output in the
console:
``` text ``` text
{{#include ../../../../ci/expected/schedule.run}} {{#include ../../../../ci/expected/schedule.run}}
``` ```
When the `schedule` API is being used the runtime internally uses the `SysTick`
interrupt handler and the system timer peripheral (`SYST`) so neither can be
used by the application. This is accomplished by changing the type of
`init::Context.core` from `cortex_m::Peripherals` to `rtfm::Peripherals`. The
latter structure contains all the fields of the former minus the `SYST` one.
## Periodic tasks ## Periodic tasks
Software tasks have access to the `Instant` at which they were scheduled to run Software tasks have access to the `Instant` at which they were scheduled to run
@ -80,9 +93,10 @@ the task. Depending on the priority of the task and the load of the system the
What do you think will be the value of `scheduled` for software tasks that are What do you think will be the value of `scheduled` for software tasks that are
*spawned* instead of scheduled? The answer is that spawned tasks inherit the *spawned* instead of scheduled? The answer is that spawned tasks inherit the
*baseline* time of the context that spawned it. The baseline of hardware tasks *baseline* time of the context that spawned it. The baseline of hardware tasks
is `start`, the baseline of software tasks is `scheduled` and the baseline of is their `start` time, the baseline of software tasks is their `scheduled` time
`init` is `start = Instant(0)`. `idle` doesn't really have a baseline but tasks and the baseline of `init` is the system start time or time zero
spawned from it will use `Instant::now()` as their baseline time. (`Instant::zero()`). `idle` doesn't really have a baseline but tasks spawned
from it will use `Instant::now()` as their baseline time.
The example below showcases the different meanings of the *baseline*. The example below showcases the different meanings of the *baseline*.

View file

@ -2,10 +2,21 @@
## Generics ## Generics
Resources shared between two or more tasks implement the `Mutex` trait in *all* Resources may appear in contexts as resource proxies or as unique references
contexts, even on those where a critical section is not required to access the (`&mut-`) depending on the priority of the task. Because the same resource may
data. This lets you easily write generic code that operates on resources and can appear as *different* types in different contexts one cannot refactor a common
be called from different tasks. Here's one such example: operation that uses resources into a plain function; however, such refactor is
possible using *generics*.
All resource proxies implement the `rtfm::Mutex` trait. On the other hand,
unique references (`&mut-`) do *not* implement this trait (due to limitations in
the trait system) but one can wrap these references in the [`rtfm::Exclusive`]
newtype which does implement the `Mutex` trait. With the help of this newtype
one can write a generic function that operates on generic resources and call it
from different tasks to perform some operation on the same set of resources.
Here's one such example:
[`rtfm::Exclusive`]: ../../api/rtfm/struct.Exclusive.html
``` rust ``` rust
{{#include ../../../../examples/generics.rs}} {{#include ../../../../examples/generics.rs}}
@ -15,17 +26,15 @@ be called from different tasks. Here's one such example:
$ cargo run --example generics $ cargo run --example generics
{{#include ../../../../ci/expected/generics.run}}``` {{#include ../../../../ci/expected/generics.run}}```
This also lets you change the static priorities of tasks without having to Using generics also lets you change the static priorities of tasks during
rewrite code. If you consistently use `lock`s to access the data behind shared development without having to rewrite a bunch of code every time.
resources then your code will continue to compile when you change the priority
of tasks.
## Conditional compilation ## Conditional compilation
You can use conditional compilation (`#[cfg]`) on resources (`static [mut]` You can use conditional compilation (`#[cfg]`) on resources (the fields of
items) and tasks (`fn` items). The effect of using `#[cfg]` attributes is that `struct Resources`) and tasks (the `fn` items). The effect of using `#[cfg]`
the resource / task will *not* be available through the corresponding `Context` attributes is that the resource / task will *not* be available through the
`struct` if the condition doesn't hold. corresponding `Context` `struct` if the condition doesn't hold.
The example below logs a message whenever the `foo` task is spawned, but only if The example below logs a message whenever the `foo` task is spawned, but only if
the program has been compiled using the `dev` profile. the program has been compiled using the `dev` profile.
@ -34,6 +43,12 @@ the program has been compiled using the `dev` profile.
{{#include ../../../../examples/cfg.rs}} {{#include ../../../../examples/cfg.rs}}
``` ```
``` console
$ cargo run --example cfg --release
$ cargo run --example cfg
{{#include ../../../../ci/expected/cfg.run}}```
## Running tasks from RAM ## Running tasks from RAM
The main goal of moving the specification of RTFM applications to attributes in The main goal of moving the specification of RTFM applications to attributes in
@ -70,25 +85,13 @@ One can look at the output of `cargo-nm` to confirm that `bar` ended in RAM
``` console ``` console
$ cargo nm --example ramfunc --release | grep ' foo::' $ cargo nm --example ramfunc --release | grep ' foo::'
{{#include ../../../../ci/expected/ramfunc.grep.foo}}``` {{#include ../../../../ci/expected/ramfunc.grep.foo}}
```
``` console ``` console
$ cargo nm --example ramfunc --release | grep ' bar::' $ cargo nm --example ramfunc --release | grep ' bar::'
{{#include ../../../../ci/expected/ramfunc.grep.bar}}``` {{#include ../../../../ci/expected/ramfunc.grep.bar}}
## `binds`
You can give hardware tasks more task-like names using the `binds` argument: you
name the function as you wish and specify the name of the interrupt / exception
in the `binds` argument. Types like `Spawn` will be placed in a module named
after the function, not the interrupt / exception. Example below:
``` rust
{{#include ../../../../examples/binds.rs}}
``` ```
``` console
$ cargo run --example binds
{{#include ../../../../ci/expected/binds.run}}```
## Indirection for faster message passing ## Indirection for faster message passing
@ -100,10 +103,10 @@ instead of sending the buffer by value, one can send an owning pointer into the
buffer. buffer.
One can use a global allocator to achieve indirection (`alloc::Box`, One can use a global allocator to achieve indirection (`alloc::Box`,
`alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.34.0, `alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.37.0,
or one can use a statically allocated memory pool like [`heapless::Pool`]. or one can use a statically allocated memory pool like [`heapless::Pool`].
[`heapless::Pool`]: https://docs.rs/heapless/0.4.3/heapless/pool/index.html [`heapless::Pool`]: https://docs.rs/heapless/0.5.0/heapless/pool/index.html
Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes. Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
@ -111,7 +114,7 @@ Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
{{#include ../../../../examples/pool.rs}} {{#include ../../../../examples/pool.rs}}
``` ```
``` console ``` console
$ cargo run --example binds $ cargo run --example pool
{{#include ../../../../ci/expected/pool.run}}``` {{#include ../../../../ci/expected/pool.run}}```
## Inspecting the expanded code ## Inspecting the expanded code
@ -131,33 +134,18 @@ $ cargo build --example foo
$ rustfmt target/rtfm-expansion.rs $ rustfmt target/rtfm-expansion.rs
$ tail -n30 target/rtfm-expansion.rs $ tail target/rtfm-expansion.rs
``` ```
``` rust ``` rust
#[doc = r" Implementation details"] #[doc = r" Implementation details"]
const APP: () = { const APP: () = {
#[doc = r" Always include the device crate which contains the vector table"]
use lm3s6965 as _; use lm3s6965 as _;
#[no_mangle] #[no_mangle]
unsafe fn main() -> ! { unsafe extern "C" fn main() -> ! {
rtfm::export::interrupt::disable(); rtfm::export::interrupt::disable();
let mut core = rtfm::export::Peripherals::steal(); let mut core: rtfm::export::Peripherals = core::mem::transmute(());
let late = init(
init::Locals::new(),
init::Context::new(rtfm::Peripherals {
CBP: core.CBP,
CPUID: core.CPUID,
DCB: core.DCB,
DWT: core.DWT,
FPB: core.FPB,
FPU: core.FPU,
ITM: core.ITM,
MPU: core.MPU,
SCB: &mut core.SCB,
SYST: core.SYST,
TPIU: core.TPIU,
}),
);
core.SCB.scr.modify(|r| r | 1 << 1); core.SCB.scr.modify(|r| r | 1 << 1);
rtfm::export::interrupt::enable(); rtfm::export::interrupt::enable();
loop { loop {
@ -175,5 +163,5 @@ crate and print the output to the console.
``` console ``` console
$ # produces the same output as before $ # produces the same output as before
$ cargo expand --example smallest | tail -n30 $ cargo expand --example smallest | tail
``` ```

View file

@ -1,8 +1,8 @@
# Types, Send and Sync # Types, Send and Sync
The `app` attribute injects a context, a collection of variables, into every Every function within the `APP` pseudo-module has a `Context` structure as its
function. All these variables have predictable, non-anonymous types so you can first parameter. All the fields of these structures have predictable,
write plain functions that take them as arguments. non-anonymous types so you can write plain functions that take them as arguments.
The API reference specifies how these types are generated from the input. You The API reference specifies how these types are generated from the input. You
can also generate documentation for your binary crate (`cargo doc --bin <name>`); can also generate documentation for your binary crate (`cargo doc --bin <name>`);
@ -20,8 +20,8 @@ The example below shows the different types generated by the `app` attribute.
[`Send`] is a marker trait for "types that can be transferred across thread [`Send`] is a marker trait for "types that can be transferred across thread
boundaries", according to its definition in `core`. In the context of RTFM the boundaries", according to its definition in `core`. In the context of RTFM the
`Send` trait is only required where it's possible to transfer a value between `Send` trait is only required where it's possible to transfer a value between
tasks that run at *different* priorities. This occurs in a few places: in message tasks that run at *different* priorities. This occurs in a few places: in
passing, in shared `static mut` resources and in the initialization of late message passing, in shared resources and in the initialization of late
resources. resources.
[`Send`]: https://doc.rust-lang.org/core/marker/trait.Send.html [`Send`]: https://doc.rust-lang.org/core/marker/trait.Send.html
@ -30,7 +30,7 @@ The `app` attribute will enforce that `Send` is implemented where required so
you don't need to worry much about it. It's more important to know where you do you don't need to worry much about it. It's more important to know where you do
*not* need the `Send` trait: on types that are transferred between tasks that *not* need the `Send` trait: on types that are transferred between tasks that
run at the *same* priority. This occurs in two places: in message passing and in run at the *same* priority. This occurs in two places: in message passing and in
shared `static mut` resources. shared resources.
The example below shows where a type that doesn't implement `Send` can be used. The example below shows where a type that doesn't implement `Send` can be used.
@ -39,9 +39,11 @@ The example below shows where a type that doesn't implement `Send` can be used.
``` ```
It's important to note that late initialization of resources is effectively a It's important to note that late initialization of resources is effectively a
send operation where the initial value is sent from `idle`, which has the lowest send operation where the initial value is sent from the background context,
priority of `0`, to a task with will run with a priority greater than or equal which has the lowest priority of `0`, to a task, which will run at a priority
to `1`. Thus all late resources need to implement the `Send` trait. greater than or equal to `1`. Thus all late resources need to implement the
`Send` trait, except for those exclusively accessed by `idle`, which runs at a
priority of `0`.
Sharing a resource with `init` can be used to implement late initialization, see Sharing a resource with `init` can be used to implement late initialization, see
example below. For that reason, resources shared with `init` must also implement example below. For that reason, resources shared with `init` must also implement
@ -56,14 +58,14 @@ the `Send` trait.
Similarly, [`Sync`] is a marker trait for "types for which it is safe to share Similarly, [`Sync`] is a marker trait for "types for which it is safe to share
references between threads", according to its definition in `core`. In the references between threads", according to its definition in `core`. In the
context of RTFM the `Sync` trait is only required where it's possible for two, context of RTFM the `Sync` trait is only required where it's possible for two,
or more, tasks that run at different priority to hold a shared reference to a or more, tasks that run at different priorities and may get a shared reference
resource. This only occurs with shared `static` resources. (`&-`) to a resource. This only occurs with shared access (`&-`) resources.
[`Sync`]: https://doc.rust-lang.org/core/marker/trait.Sync.html [`Sync`]: https://doc.rust-lang.org/core/marker/trait.Sync.html
The `app` attribute will enforce that `Sync` is implemented where required but The `app` attribute will enforce that `Sync` is implemented where required but
it's important to know where the `Sync` bound is not required: in `static` it's important to know where the `Sync` bound is not required: shared access
resources shared between tasks that run at the *same* priority. (`&-`) resources contended by tasks that run at the *same* priority.
The example below shows where a type that doesn't implement `Sync` can be used. The example below shows where a type that doesn't implement `Sync` can be used.

View file

@ -0,0 +1,6 @@
# Heterogeneous multi-core support
This section covers the *experimental* heterogeneous multi-core support provided
by RTFM behind the `heterogeneous` Cargo feature.
**Content coming soon**

View file

@ -0,0 +1,6 @@
# Homogeneous multi-core support
This section covers the *experimental* homogeneous multi-core support provided
by RTFM behind the `homogeneous` Cargo feature.
**Content coming soon**

View file

@ -21,7 +21,7 @@ This makes it impossible for the user code to refer to these static variables.
Access to the resources is then given to each task using a `Resources` struct Access to the resources is then given to each task using a `Resources` struct
whose fields correspond to the resources the task has access to. There's one whose fields correspond to the resources the task has access to. There's one
such struct per task and the `Resources` struct is initialized with either a such struct per task and the `Resources` struct is initialized with either a
mutable reference (`&mut`) to the static variables or with a resource proxy (see unique reference (`&mut-`) to the static variables or with a resource proxy (see
section on [critical sections](critical-sections.html)). section on [critical sections](critical-sections.html)).
The code below is an example of the kind of source level transformation that The code below is an example of the kind of source level transformation that

View file

@ -16,61 +16,65 @@ that has a logical priority of `0` whereas `init` is completely omitted from the
analysis -- the reason for that is that `init` never uses (or needs) critical analysis -- the reason for that is that `init` never uses (or needs) critical
sections to access static variables. sections to access static variables.
In the previous section we showed that a shared resource may appear as a mutable In the previous section we showed that a shared resource may appear as a unique
reference or behind a proxy depending on the task that has access to it. Which reference (`&mut-`) or behind a proxy depending on the task that has access to
version is presented to the task depends on the task priority and the resource it. Which version is presented to the task depends on the task priority and the
ceiling. If the task priority is the same as the resource ceiling then the task resource ceiling. If the task priority is the same as the resource ceiling then
gets a mutable reference to the resource memory, otherwise the task gets a the task gets a unique reference (`&mut-`) to the resource memory, otherwise the
proxy -- this also applies to `idle`. `init` is special: it always gets a task gets a proxy -- this also applies to `idle`. `init` is special: it always
mutable reference to resources. gets a unique reference (`&mut-`) to resources.
An example to illustrate the ceiling analysis: An example to illustrate the ceiling analysis:
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
// accessed by `foo` (prio = 1) and `bar` (prio = 2) struct Resources {
// CEILING = 2 // accessed by `foo` (prio = 1) and `bar` (prio = 2)
static mut X: u64 = 0; // -> CEILING = 2
#[init(0)]
x: u64,
// accessed by `idle` (prio = 0) // accessed by `idle` (prio = 0)
// CEILING = 0 // -> CEILING = 0
static mut Y: u64 = 0; #[init(0)]
y: u64,
}
#[init(resources = [X])] #[init(resources = [x])]
fn init(c: init::Context) { fn init(c: init::Context) {
// mutable reference because this is `init` // unique reference because this is `init`
let x: &mut u64 = c.resources.X; let x: &mut u64 = c.resources.x;
// mutable reference because this is `init` // unique reference because this is `init`
let y: &mut u64 = c.resources.Y; let y: &mut u64 = c.resources.y;
// .. // ..
} }
// PRIORITY = 0 // PRIORITY = 0
#[idle(resources = [Y])] #[idle(resources = [y])]
fn idle(c: idle::Context) -> ! { fn idle(c: idle::Context) -> ! {
// mutable reference because priority (0) == resource ceiling (0) // unique reference because priority (0) == resource ceiling (0)
let y: &'static mut u64 = c.resources.Y; let y: &'static mut u64 = c.resources.y;
loop { loop {
// .. // ..
} }
} }
#[interrupt(binds = UART0, priority = 1, resources = [X])] #[interrupt(binds = UART0, priority = 1, resources = [x])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
// resource proxy because task priority (1) < resource ceiling (2) // resource proxy because task priority (1) < resource ceiling (2)
let x: resources::X = c.resources.X; let x: resources::x = c.resources.x;
// .. // ..
} }
#[interrupt(binds = UART1, priority = 2, resources = [X])] #[interrupt(binds = UART1, priority = 2, resources = [x])]
fn bar(c: foo::Context) { fn bar(c: foo::Context) {
// mutable reference because task priority (2) == resource ceiling (2) // unique reference because task priority (2) == resource ceiling (2)
let x: &mut u64 = c.resources.X; let x: &mut u64 = c.resources.x;
// .. // ..
} }

View file

@ -1,19 +1,19 @@
# Critical sections # Critical sections
When a resource (static variable) is shared between two, or more, tasks that run When a resource (static variable) is shared between two, or more, tasks that run
at different priorities some form of mutual exclusion is required to access the at different priorities some form of mutual exclusion is required to mutate the
memory in a data race free manner. In RTFM we use priority-based critical memory in a data race free manner. In RTFM we use priority-based critical
sections to guarantee mutual exclusion (see the [Immediate Priority Ceiling sections to guarantee mutual exclusion (see the [Immediate Ceiling Priority
Protocol][ipcp]). Protocol][icpp]).
[ipcp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol [icpp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol
The critical section consists of temporarily raising the *dynamic* priority of The critical section consists of temporarily raising the *dynamic* priority of
the task. While a task is within this critical section all the other tasks that the task. While a task is within this critical section all the other tasks that
may request the resource are *not allowed to start*. may request the resource are *not allowed to start*.
How high must the dynamic priority be to ensure mutual exclusion on a particular How high must the dynamic priority be to ensure mutual exclusion on a particular
resource? The [ceiling analysis](ceiling-analysis.html) is in charge of resource? The [ceiling analysis](ceilings.html) is in charge of
answering that question and will be discussed in the next section. This section answering that question and will be discussed in the next section. This section
will focus on the implementation of the critical section. will focus on the implementation of the critical section.
@ -25,7 +25,7 @@ a data race the *lower priority* task must use a critical section when it needs
to modify the shared memory. On the other hand, the higher priority task can to modify the shared memory. On the other hand, the higher priority task can
directly modify the shared memory because it can't be preempted by the lower directly modify the shared memory because it can't be preempted by the lower
priority task. To enforce the use of a critical section on the lower priority priority task. To enforce the use of a critical section on the lower priority
task we give it a *resource proxy*, whereas we give a mutable reference task we give it a *resource proxy*, whereas we give a unique reference
(`&mut-`) to the higher priority task. (`&mut-`) to the higher priority task.
The example below shows the different types handed out to each task: The example below shows the different types handed out to each task:
@ -33,12 +33,15 @@ The example below shows the different types handed out to each task:
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
static mut X: u64 = 0; struct Resources {
#[init(0)]
x: u64,
}
#[interrupt(binds = UART0, priority = 1, resources = [X])] #[interrupt(binds = UART0, priority = 1, resources = [x])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
// resource proxy // resource proxy
let mut x: resources::X = c.resources.X; let mut x: resources::x = c.resources.x;
x.lock(|x: &mut u64| { x.lock(|x: &mut u64| {
// critical section // critical section
@ -46,9 +49,9 @@ const APP: () = {
}); });
} }
#[interrupt(binds = UART1, priority = 2, resources = [X])] #[interrupt(binds = UART1, priority = 2, resources = [x])]
fn bar(c: foo::Context) { fn bar(c: foo::Context) {
let mut x: &mut u64 = c.resources.X; let mut x: &mut u64 = c.resources.x;
*x += 1; *x += 1;
} }
@ -69,14 +72,14 @@ fn bar(c: bar::Context) {
} }
pub mod resources { pub mod resources {
pub struct X { pub struct x {
// .. // ..
} }
} }
pub mod foo { pub mod foo {
pub struct Resources { pub struct Resources {
pub X: resources::X, pub x: resources::x,
} }
pub struct Context { pub struct Context {
@ -87,7 +90,7 @@ pub mod foo {
pub mod bar { pub mod bar {
pub struct Resources<'a> { pub struct Resources<'a> {
pub X: rtfm::Exclusive<'a, u64>, // newtype over `&'a mut u64` pub x: &'a mut u64,
} }
pub struct Context { pub struct Context {
@ -97,9 +100,9 @@ pub mod bar {
} }
const APP: () = { const APP: () = {
static mut X: u64 = 0; static mut x: u64 = 0;
impl rtfm::Mutex for resources::X { impl rtfm::Mutex for resources::x {
type T = u64; type T = u64;
fn lock<R>(&mut self, f: impl FnOnce(&mut u64) -> R) -> R { fn lock<R>(&mut self, f: impl FnOnce(&mut u64) -> R) -> R {
@ -111,7 +114,7 @@ const APP: () = {
unsafe fn UART0() { unsafe fn UART0() {
foo(foo::Context { foo(foo::Context {
resources: foo::Resources { resources: foo::Resources {
X: resources::X::new(/* .. */), x: resources::x::new(/* .. */),
}, },
// .. // ..
}) })
@ -121,7 +124,7 @@ const APP: () = {
unsafe fn UART1() { unsafe fn UART1() {
bar(bar::Context { bar(bar::Context {
resources: bar::Resources { resources: bar::Resources {
X: rtfm::Exclusive(&mut X), x: &mut x,
}, },
// .. // ..
}) })
@ -158,7 +161,7 @@ In this particular example we could implement the critical section as follows:
> **NOTE:** this is a simplified implementation > **NOTE:** this is a simplified implementation
``` rust ``` rust
impl rtfm::Mutex for resources::X { impl rtfm::Mutex for resources::x {
type T = u64; type T = u64;
fn lock<R, F>(&mut self, f: F) -> R fn lock<R, F>(&mut self, f: F) -> R
@ -170,7 +173,7 @@ impl rtfm::Mutex for resources::X {
asm!("msr BASEPRI, 192" : : : "memory" : "volatile"); asm!("msr BASEPRI, 192" : : : "memory" : "volatile");
// run user code within the critical section // run user code within the critical section
let r = f(&mut implementation_defined_name_for_X); let r = f(&mut x);
// end of critical section: restore dynamic priority to its static value (`1`) // end of critical section: restore dynamic priority to its static value (`1`)
asm!("msr BASEPRI, 0" : : : "memory" : "volatile"); asm!("msr BASEPRI, 0" : : : "memory" : "volatile");
@ -183,23 +186,23 @@ impl rtfm::Mutex for resources::X {
Here it's important to use the `"memory"` clobber in the `asm!` block. It Here it's important to use the `"memory"` clobber in the `asm!` block. It
prevents the compiler from reordering memory operations across it. This is prevents the compiler from reordering memory operations across it. This is
important because accessing the variable `X` outside the critical section would important because accessing the variable `x` outside the critical section would
result in a data race. result in a data race.
It's important to note that the signature of the `lock` method prevents nesting It's important to note that the signature of the `lock` method prevents nesting
calls to it. This is required for memory safety, as nested calls would produce calls to it. This is required for memory safety, as nested calls would produce
multiple mutable references (`&mut-`) to `X` breaking Rust aliasing rules. See multiple unique references (`&mut-`) to `x` breaking Rust aliasing rules. See
below: below:
``` rust ``` rust
#[interrupt(binds = UART0, priority = 1, resources = [X])] #[interrupt(binds = UART0, priority = 1, resources = [x])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
// resource proxy // resource proxy
let mut res: resources::X = c.resources.X; let mut res: resources::x = c.resources.x;
res.lock(|x: &mut u64| { res.lock(|x: &mut u64| {
res.lock(|alias: &mut u64| { res.lock(|alias: &mut u64| {
//~^ error: `res` has already been mutably borrowed //~^ error: `res` has already been uniquely borrowed (`&mut-`)
// .. // ..
}); });
}); });
@ -223,18 +226,22 @@ Consider this program:
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
static mut X: u64 = 0; struct Resources {
static mut Y: u64 = 0; #[init(0)]
x: u64,
#[init(0)]
y: u64,
}
#[init] #[init]
fn init() { fn init() {
rtfm::pend(Interrupt::UART0); rtfm::pend(Interrupt::UART0);
} }
#[interrupt(binds = UART0, priority = 1, resources = [X, Y])] #[interrupt(binds = UART0, priority = 1, resources = [x, y])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
let mut x = c.resources.X; let mut x = c.resources.x;
let mut y = c.resources.Y; let mut y = c.resources.y;
y.lock(|y| { y.lock(|y| {
*y += 1; *y += 1;
@ -259,12 +266,12 @@ const APP: () = {
}) })
} }
#[interrupt(binds = UART1, priority = 2, resources = [X])] #[interrupt(binds = UART1, priority = 2, resources = [x])]
fn bar(c: foo::Context) { fn bar(c: foo::Context) {
// .. // ..
} }
#[interrupt(binds = UART2, priority = 3, resources = [Y])] #[interrupt(binds = UART2, priority = 3, resources = [y])]
fn baz(c: foo::Context) { fn baz(c: foo::Context) {
// .. // ..
} }
@ -279,13 +286,13 @@ The code generated by the framework looks like this:
// omitted: user code // omitted: user code
pub mod resources { pub mod resources {
pub struct X<'a> { pub struct x<'a> {
priority: &'a Cell<u8>, priority: &'a Cell<u8>,
} }
impl<'a> X<'a> { impl<'a> x<'a> {
pub unsafe fn new(priority: &'a Cell<u8>) -> Self { pub unsafe fn new(priority: &'a Cell<u8>) -> Self {
X { priority } x { priority }
} }
pub unsafe fn priority(&self) -> &Cell<u8> { pub unsafe fn priority(&self) -> &Cell<u8> {
@ -293,7 +300,7 @@ pub mod resources {
} }
} }
// repeat for `Y` // repeat for `y`
} }
pub mod foo { pub mod foo {
@ -303,34 +310,35 @@ pub mod foo {
} }
pub struct Resources<'a> { pub struct Resources<'a> {
pub X: resources::X<'a>, pub x: resources::x<'a>,
pub Y: resources::Y<'a>, pub y: resources::y<'a>,
} }
} }
const APP: () = { const APP: () = {
use cortex_m::register::basepri;
#[no_mangle] #[no_mangle]
unsafe fn UART0() { unsafe fn UART1() {
// the static priority of this interrupt (as specified by the user) // the static priority of this interrupt (as specified by the user)
const PRIORITY: u8 = 1; const PRIORITY: u8 = 2;
// take a snashot of the BASEPRI // take a snashot of the BASEPRI
let initial: u8; let initial = basepri::read();
asm!("mrs $0, BASEPRI" : "=r"(initial) : : : "volatile");
let priority = Cell::new(PRIORITY); let priority = Cell::new(PRIORITY);
foo(foo::Context { bar(bar::Context {
resources: foo::Resources::new(&priority), resources: bar::Resources::new(&priority),
// .. // ..
}); });
// roll back the BASEPRI to the snapshot value we took before // roll back the BASEPRI to the snapshot value we took before
asm!("msr BASEPRI, $0" : : "r"(initial) : : "volatile"); basepri::write(initial); // same as the `asm!` block we saw before
} }
// similarly for `UART1` // similarly for `UART0` / `foo` and `UART2` / `baz`
impl<'a> rtfm::Mutex for resources::X<'a> { impl<'a> rtfm::Mutex for resources::x<'a> {
type T = u64; type T = u64;
fn lock<R>(&mut self, f: impl FnOnce(&mut u64) -> R) -> R { fn lock<R>(&mut self, f: impl FnOnce(&mut u64) -> R) -> R {
@ -342,26 +350,24 @@ const APP: () = {
if current < CEILING { if current < CEILING {
// raise dynamic priority // raise dynamic priority
self.priority().set(CEILING); self.priority().set(CEILING);
let hw = logical2hw(CEILING); basepri::write(logical2hw(CEILING));
asm!("msr BASEPRI, $0" : : "r"(hw) : "memory" : "volatile");
let r = f(&mut X); let r = f(&mut y);
// restore dynamic priority // restore dynamic priority
let hw = logical2hw(current); basepri::write(logical2hw(current));
asm!("msr BASEPRI, $0" : : "r"(hw) : "memory" : "volatile");
self.priority().set(current); self.priority().set(current);
r r
} else { } else {
// dynamic priority is high enough // dynamic priority is high enough
f(&mut X) f(&mut y)
} }
} }
} }
} }
// repeat for `Y` // repeat for resource `y`
}; };
``` ```
@ -373,38 +379,38 @@ fn foo(c: foo::Context) {
// NOTE: BASEPRI contains the value `0` (its reset value) at this point // NOTE: BASEPRI contains the value `0` (its reset value) at this point
// raise dynamic priority to `3` // raise dynamic priority to `3`
unsafe { asm!("msr BASEPRI, 160" : : : "memory" : "volatile") } unsafe { basepri::write(160) }
// the two operations on `Y` are merged into one // the two operations on `y` are merged into one
Y += 2; y += 2;
// BASEPRI is not modified to access `X` because the dynamic priority is high enough // BASEPRI is not modified to access `x` because the dynamic priority is high enough
X += 1; x += 1;
// lower (restore) the dynamic priority to `1` // lower (restore) the dynamic priority to `1`
unsafe { asm!("msr BASEPRI, 224" : : : "memory" : "volatile") } unsafe { basepri::write(224) }
// mid-point // mid-point
// raise dynamic priority to `2` // raise dynamic priority to `2`
unsafe { asm!("msr BASEPRI, 192" : : : "memory" : "volatile") } unsafe { basepri::write(192) }
X += 1; x += 1;
// raise dynamic priority to `3` // raise dynamic priority to `3`
unsafe { asm!("msr BASEPRI, 160" : : : "memory" : "volatile") } unsafe { basepri::write(160) }
Y += 1; y += 1;
// lower (restore) the dynamic priority to `2` // lower (restore) the dynamic priority to `2`
unsafe { asm!("msr BASEPRI, 192" : : : "memory" : "volatile") } unsafe { basepri::write(192) }
// NOTE: it would be sound to merge this operation on X with the previous one but // NOTE: it would be sound to merge this operation on `x` with the previous one but
// compiler fences are coarse grained and prevent such optimization // compiler fences are coarse grained and prevent such optimization
X += 1; x += 1;
// lower (restore) the dynamic priority to `1` // lower (restore) the dynamic priority to `1`
unsafe { asm!("msr BASEPRI, 224" : : : "memory" : "volatile") } unsafe { basepri::write(224) }
// NOTE: BASEPRI contains the value `224` at this point // NOTE: BASEPRI contains the value `224` at this point
// the UART0 handler will restore the value to `0` before returning // the UART0 handler will restore the value to `0` before returning
@ -425,7 +431,10 @@ handler through preemption. This is best observed in the following example:
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
static mut X: u64 = 0; struct Resources {
#[init(0)]
x: u64,
}
#[init] #[init]
fn init() { fn init() {
@ -444,11 +453,11 @@ const APP: () = {
// this function returns to `idle` // this function returns to `idle`
} }
#[task(binds = UART1, priority = 2, resources = [X])] #[task(binds = UART1, priority = 2, resources = [x])]
fn bar() { fn bar() {
// BASEPRI is `0` (dynamic priority = 2) // BASEPRI is `0` (dynamic priority = 2)
X.lock(|x| { x.lock(|x| {
// BASEPRI is raised to `160` (dynamic priority = 3) // BASEPRI is raised to `160` (dynamic priority = 3)
// .. // ..
@ -470,7 +479,7 @@ const APP: () = {
} }
} }
#[task(binds = UART2, priority = 3, resources = [X])] #[task(binds = UART2, priority = 3, resources = [x])]
fn baz() { fn baz() {
// .. // ..
} }
@ -493,8 +502,7 @@ const APP: () = {
const PRIORITY: u8 = 2; const PRIORITY: u8 = 2;
// take a snashot of the BASEPRI // take a snashot of the BASEPRI
let initial: u8; let initial = basepri::read();
asm!("mrs $0, BASEPRI" : "=r"(initial) : : : "volatile");
let priority = Cell::new(PRIORITY); let priority = Cell::new(PRIORITY);
bar(bar::Context { bar(bar::Context {
@ -503,7 +511,7 @@ const APP: () = {
}); });
// BUG: FORGOT to roll back the BASEPRI to the snapshot value we took before // BUG: FORGOT to roll back the BASEPRI to the snapshot value we took before
// asm!("msr BASEPRI, $0" : : "r"(initial) : : "volatile"); basepri::write(initial);
} }
}; };
``` ```

View file

@ -12,7 +12,7 @@ configuration is done before the `init` function runs.
This example gives you an idea of the code that the RTFM framework runs: This example gives you an idea of the code that the RTFM framework runs:
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
#[init] #[init]
fn init(c: init::Context) { fn init(c: init::Context) {
@ -39,8 +39,7 @@ The framework generates an entry point that looks like this:
unsafe fn main() -> ! { unsafe fn main() -> ! {
// transforms a logical priority into a hardware / NVIC priority // transforms a logical priority into a hardware / NVIC priority
fn logical2hw(priority: u8) -> u8 { fn logical2hw(priority: u8) -> u8 {
// this value comes from the device crate use lm3s6965::NVIC_PRIO_BITS;
const NVIC_PRIO_BITS: u8 = ..;
// the NVIC encodes priority in the higher bits of a bit // the NVIC encodes priority in the higher bits of a bit
// also a bigger numbers means lower priority // also a bigger numbers means lower priority

View file

@ -11,21 +11,22 @@ initialize late resources.
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
// late resource struct Resources {
static mut X: Thing = {}; x: Thing,
}
#[init] #[init]
fn init() -> init::LateResources { fn init() -> init::LateResources {
// .. // ..
init::LateResources { init::LateResources {
X: Thing::new(..), x: Thing::new(..),
} }
} }
#[task(binds = UART0, resources = [X])] #[task(binds = UART0, resources = [x])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
let x: &mut Thing = c.resources.X; let x: &mut Thing = c.resources.x;
x.frob(); x.frob();
@ -50,7 +51,7 @@ fn foo(c: foo::Context) {
// Public API // Public API
pub mod init { pub mod init {
pub struct LateResources { pub struct LateResources {
pub X: Thing, pub x: Thing,
} }
// .. // ..
@ -58,7 +59,7 @@ pub mod init {
pub mod foo { pub mod foo {
pub struct Resources<'a> { pub struct Resources<'a> {
pub X: &'a mut Thing, pub x: &'a mut Thing,
} }
pub struct Context<'a> { pub struct Context<'a> {
@ -70,7 +71,7 @@ pub mod foo {
/// Implementation details /// Implementation details
const APP: () = { const APP: () = {
// uninitialized static // uninitialized static
static mut X: MaybeUninit<Thing> = MaybeUninit::uninit(); static mut x: MaybeUninit<Thing> = MaybeUninit::uninit();
#[no_mangle] #[no_mangle]
unsafe fn main() -> ! { unsafe fn main() -> ! {
@ -81,7 +82,7 @@ const APP: () = {
let late = init(..); let late = init(..);
// initialization of late resources // initialization of late resources
X.write(late.X); x.as_mut_ptr().write(late.x);
cortex_m::interrupt::enable(); //~ compiler fence cortex_m::interrupt::enable(); //~ compiler fence
@ -94,8 +95,8 @@ const APP: () = {
unsafe fn UART0() { unsafe fn UART0() {
foo(foo::Context { foo(foo::Context {
resources: foo::Resources { resources: foo::Resources {
// `X` has been initialized at this point // `x` has been initialized at this point
X: &mut *X.as_mut_ptr(), x: &mut *x.as_mut_ptr(),
}, },
// .. // ..
}) })

View file

@ -13,24 +13,20 @@ are discouraged from directly invoking an interrupt handler.
``` rust ``` rust
#[rtfm::app(device = ..)] #[rtfm::app(device = ..)]
const APP: () = { const APP: () = {
static mut X: u64 = 0;
#[init] #[init]
fn init(c: init::Context) { .. } fn init(c: init::Context) { .. }
#[interrupt(binds = UART0, resources = [X])] #[interrupt(binds = UART0)]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
let x: &mut u64 = c.resources.X; static mut X: u64 = 0;
*x = 1; let x: &mut u64 = X;
// ..
//~ `bar` can preempt `foo` at this point //~ `bar` can preempt `foo` at this point
*x = 2; // ..
if *x == 2 {
// something
}
} }
#[interrupt(binds = UART1, priority = 2)] #[interrupt(binds = UART1, priority = 2)]
@ -40,15 +36,15 @@ const APP: () = {
} }
// this interrupt handler will invoke task handler `foo` resulting // this interrupt handler will invoke task handler `foo` resulting
// in mutable aliasing of the static variable `X` // in aliasing of the static variable `X`
unsafe { UART0() } unsafe { UART0() }
} }
}; };
``` ```
The RTFM framework must generate the interrupt handler code that calls the user The RTFM framework must generate the interrupt handler code that calls the user
defined task handlers. We are careful in making these handlers `unsafe` and / or defined task handlers. We are careful in making these handlers impossible to
impossible to call from user code. call from user code.
The above example expands into: The above example expands into:

View file

@ -19,7 +19,7 @@ task.
The ready queue is a SPSC (Single Producer Single Consumer) lock-free queue. The The ready queue is a SPSC (Single Producer Single Consumer) lock-free queue. The
task dispatcher owns the consumer endpoint of the queue; the producer endpoint task dispatcher owns the consumer endpoint of the queue; the producer endpoint
is treated as a resource shared by the tasks that can `spawn` other tasks. is treated as a resource contended by the tasks that can `spawn` other tasks.
## The task dispatcher ## The task dispatcher
@ -244,7 +244,7 @@ const APP: () = {
baz_INPUTS[index as usize].write(message); baz_INPUTS[index as usize].write(message);
lock(self.priority(), RQ1_CEILING, || { lock(self.priority(), RQ1_CEILING, || {
// put the task in the ready queu // put the task in the ready queue
RQ1.split().1.enqueue_unchecked(Ready { RQ1.split().1.enqueue_unchecked(Ready {
task: T1::baz, task: T1::baz,
index, index,

View file

@ -47,7 +47,7 @@ mod foo {
} }
const APP: () = { const APP: () = {
use rtfm::Instant; type Instant = <path::to::user::monotonic::timer as rtfm::Monotonic>::Instant;
// all tasks that can be `schedule`-d // all tasks that can be `schedule`-d
enum T { enum T {
@ -158,15 +158,14 @@ way it will run at the right priority.
handler; basically, `enqueue_unchecked` delegates the task of setting up a new handler; basically, `enqueue_unchecked` delegates the task of setting up a new
timeout interrupt to the `SysTick` handler. timeout interrupt to the `SysTick` handler.
## Resolution and range of `Instant` and `Duration` ## Resolution and range of `cyccnt::Instant` and `cyccnt::Duration`
In the current implementation the `DWT`'s (Data Watchpoint and Trace) cycle RTFM provides a `Monotonic` implementation based on the `DWT`'s (Data Watchpoint
counter is used as a monotonic timer. `Instant::now` returns a snapshot of this and Trace) cycle counter. `Instant::now` returns a snapshot of this timer; these
timer; these DWT snapshots (`Instant`s) are used to sort entries in the timer DWT snapshots (`Instant`s) are used to sort entries in the timer queue. The
queue. The cycle counter is a 32-bit counter clocked at the core clock cycle counter is a 32-bit counter clocked at the core clock frequency. This
frequency. This counter wraps around every `(1 << 32)` clock cycles; there's no counter wraps around every `(1 << 32)` clock cycles; there's no interrupt
interrupt associated to this counter so nothing worth noting happens when it associated to this counter so nothing worth noting happens when it wraps around.
wraps around.
To order `Instant`s in the queue we need to compare two 32-bit integers. To To order `Instant`s in the queue we need to compare two 32-bit integers. To
account for the wrap-around behavior we use the difference between two account for the wrap-around behavior we use the difference between two
@ -264,11 +263,11 @@ The ceiling analysis would go like this:
## Changes in the `spawn` implementation ## Changes in the `spawn` implementation
When the "timer-queue" feature is enabled the `spawn` implementation changes a When the `schedule` API is used the `spawn` implementation changes a bit to
bit to track the baseline of tasks. As you saw in the `schedule` implementation track the baseline of tasks. As you saw in the `schedule` implementation there's
there's an `INSTANTS` buffers used to store the time at which a task was an `INSTANTS` buffers used to store the time at which a task was scheduled to
scheduled to run; this `Instant` is read in the task dispatcher and passed to run; this `Instant` is read in the task dispatcher and passed to the user code
the user code as part of the task context. as part of the task context.
``` rust ``` rust
const APP: () = { const APP: () = {

View file

@ -14,6 +14,6 @@ There is a translation of this book in [Russian].
**HEADS UP** This is an **alpha** pre-release; there may be breaking changes in **HEADS UP** This is an **alpha** pre-release; there may be breaking changes in
the API and semantics before a proper release is made. the API and semantics before a proper release is made.
{{#include ../../../README.md:5:46}} {{#include ../../../README.md:5:44}}
{{#include ../../../README.md:52:}} {{#include ../../../README.md:50:}}

View file

@ -7,7 +7,10 @@ fn main() {
println!("cargo:rustc-cfg=armv6m") println!("cargo:rustc-cfg=armv6m")
} }
if target.starts_with("thumbv7m") | target.starts_with("thumbv7em") { if target.starts_with("thumbv7m")
| target.starts_with("thumbv7em")
| target.starts_with("thumbv8m")
{
println!("cargo:rustc-cfg=armv7m") println!("cargo:rustc-cfg=armv7m")
} }

2
ci/expected/cfg.run Normal file
View file

@ -0,0 +1,2 @@
foo has been called 1 time
foo has been called 2 times

View file

@ -1,6 +1,6 @@
UART1(STATE = 0) UART1(STATE = 0)
SHARED: 0 -> 1 shared: 0 -> 1
UART0(STATE = 0) UART0(STATE = 0)
SHARED: 1 -> 2 shared: 1 -> 2
UART1(STATE = 1) UART1(STATE = 1)
SHARED: 2 -> 4 shared: 2 -> 4

View file

@ -1,5 +1,5 @@
A A
B - SHARED = 1 B - shared = 1
C C
D - SHARED = 2 D - shared = 2
E E

View file

@ -0,0 +1,2 @@
UART1(key = 0xdeadbeef)
UART0(key = 0xdeadbeef)

5
ci/expected/preempt.run Normal file
View file

@ -0,0 +1,5 @@
GPIOA - start
GPIOC - start
GPIOC - end
GPIOB
GPIOA - end

View file

@ -1,3 +1 @@
20000100 B bar::FREE_QUEUE::lk14244m263eivix 20000000 t ramfunc::bar::h9d6714fe5a3b0c89
200000dc B bar::INPUTS::mi89534s44r1mnj1
20000000 T bar::ns9009yhw2dc2y25

View file

@ -1,3 +1 @@
20000100 B foo::FREE_QUEUE::ujkptet2nfdw5t20 00000162 t ramfunc::foo::h30e7789b08c08e19
200000dc B foo::INPUTS::thvubs85b91dg365
000002c6 T foo::sidaht420cg1mcm8

View file

@ -1,2 +1,2 @@
UART0: SHARED = 1 UART0: shared = 1
UART1: SHARED = 2 UART1: shared = 2

View file

@ -1,2 +0,0 @@
UART1(KEY = 0xdeadbeef)
UART0(KEY = 0xdeadbeef)

View file

@ -1,3 +1,5 @@
foo foo - start
foo - middle
baz baz
foo - end
bar bar

View file

@ -1,17 +1,20 @@
set -euxo pipefail set -euxo pipefail
main() { main() {
if [ $TARGET != x86_64-unknown-linux-gnu ]; then if [ $TARGET = x86_64-unknown-linux-gnu ]; then
rustup target add $TARGET ( cd .. && cargo install microamp-tools --version 0.1.0-alpha.2 -f )
rustup target add thumbv6m-none-eabi thumbv7m-none-eabi
fi fi
rustup target add $TARGET
mkdir qemu mkdir qemu
curl -L https://github.com/japaric/qemu-bin/raw/master/14.04/qemu-system-arm-2.12.0 > qemu/qemu-system-arm curl -L https://github.com/japaric/qemu-bin/raw/master/14.04/qemu-system-arm-2.12.0 > qemu/qemu-system-arm
chmod +x qemu/qemu-system-arm chmod +x qemu/qemu-system-arm
# install mdbook # install mdbook
curl -LSfs https://japaric.github.io/trust/install.sh | \ curl -LSfs https://japaric.github.io/trust/install.sh | \
sh -s -- --git rust-lang-nursery/mdbook --tag v0.2.1 sh -s -- --git rust-lang-nursery/mdbook --tag v0.3.1
pip install linkchecker --user pip install linkchecker --user
} }

View file

@ -37,61 +37,75 @@ main() {
mkdir -p ci/builds mkdir -p ci/builds
if [ $T = x86_64-unknown-linux-gnu ]; then if [ $T = x86_64-unknown-linux-gnu ]; then
# compile-fail and compile-pass tests if [ $TRAVIS_RUST_VERSION = nightly ]; then
# compile-fail tests
cargo test --test single --target $T
# TODO how to run a subset of these tests when timer-queue is disabled? # multi-core compile-pass tests
cargo test --features "timer-queue" --test compiletest --target $T pushd heterogeneous
local exs=(
smallest
x-init-2
x-init
x-schedule
x-spawn
)
for ex in ${exs[@]}; do
cargo microamp --example $ex --target thumbv7m-none-eabi,thumbv6m-none-eabi --check
done
popd
else
if [ $TRAVIS_RUST_VERSION != nightly ]; then
rm -f .cargo/config
cargo doc
( cd book/en && mdbook build )
( cd book/ru && mdbook build )
local td=$(mktemp -d)
cp -r target/doc $td/api
mkdir $td/book
cp -r book/en/book $td/book/en
cp -r book/ru/book $td/book/ru
cp LICENSE-* $td/book/en
cp LICENSE-* $td/book/ru
linkchecker $td/book/en/
linkchecker $td/book/ru/
linkchecker $td/api/rtfm/
linkchecker $td/api/cortex_m_rtfm_macros/
fi
fi
cargo check --target $T cargo check --target $T
if [ $TARGET != thumbv6m-none-eabi ]; then ( cd macros && cargo test --target $T )
cargo check --features "timer-queue" --target $T
fi
if [ $TRAVIS_RUST_VERSION != nightly ]; then
rm -f .cargo/config
if [ $TARGET != thumbv6m-none-eabi ]; then
cargo doc --features timer-queue
else
cargo doc
fi
( cd book/en && mdbook build )
( cd book/ru && mdbook build )
local td=$(mktemp -d)
cp -r target/doc $td/api
mkdir $td/book
cp -r book/en/book $td/book/en
cp -r book/ru/book $td/book/ru
cp LICENSE-* $td/book/en
cp LICENSE-* $td/book/ru
linkchecker $td/book/en/
linkchecker $td/book/ru/
linkchecker $td/api/rtfm/
linkchecker $td/api/cortex_m_rtfm_macros/
fi
return return
fi fi
cargo check --target $T --examples if [ $TARGET = thumbv6m-none-eabi ]; then
if [ $TARGET != thumbv6m-none-eabi ]; then cargo check --target $T --examples
cargo check --features "timer-queue" --target $T --examples else
cargo check --target $T --examples --features __v7
fi fi
cargo check -p homogeneous --target $T --examples
# run-pass tests # run-pass tests
case $T in case $T in
thumbv6m-none-eabi | thumbv7m-none-eabi) thumbv6m-none-eabi | thumbv7m-none-eabi)
local exs=( local exs=(
idle idle
init init
interrupt hardware
preempt
binds binds
resource resource
lock lock
late late
static only-shared-access
task task
message message
@ -103,79 +117,81 @@ main() {
shared-with-init shared-with-init
generics generics
cfg
pool pool
ramfunc ramfunc
) )
for ex in ${exs[@]}; do for ex in ${exs[@]}; do
if [ $ex = ramfunc ] && [ $T = thumbv6m-none-eabi ]; then
# LLD doesn't support this at the moment
continue
fi
if [ $ex = pool ]; then if [ $ex = pool ]; then
if [ $TARGET != thumbv6m-none-eabi ]; then if [ $TARGET = thumbv6m-none-eabi ]; then
local td=$(mktemp -d) continue
local features="timer-queue"
cargo run --example $ex --target $TARGET --features $features >\
$td/pool.run
grep 'foo(0x2' $td/pool.run
grep 'bar(0x2' $td/pool.run
arm-none-eabi-objcopy -O ihex target/$TARGET/debug/examples/$ex \
ci/builds/${ex}_${features/,/_}_debug_1.hex
cargo run --example $ex --target $TARGET --features $features --release >\
$td/pool.run
grep 'foo(0x2' $td/pool.run
grep 'bar(0x2' $td/pool.run
arm-none-eabi-objcopy -O ihex target/$TARGET/release/examples/$ex \
ci/builds/${ex}_${features/,/_}_release_1.hex
rm -rf $td
fi fi
local td=$(mktemp -d)
cargo run --example $ex --target $TARGET --features __v7 >\
$td/pool.run
grep 'foo(0x2' $td/pool.run
grep 'bar(0x2' $td/pool.run
arm-none-eabi-objcopy -O ihex target/$TARGET/debug/examples/$ex \
ci/builds/${ex}___v7_debug_1.hex
cargo run --example $ex --target $TARGET --features __v7 --release >\
$td/pool.run
grep 'foo(0x2' $td/pool.run
grep 'bar(0x2' $td/pool.run
arm-none-eabi-objcopy -O ihex target/$TARGET/release/examples/$ex \
ci/builds/${ex}___v7_release_1.hex
rm -rf $td
continue continue
fi fi
if [ $ex != types ]; then if [ $ex = types ]; then
arm_example "run" $ex "debug" "" "1" if [ $TARGET = thumbv6m-none-eabi ]; then
arm_example "run" $ex "release" "" "1" continue
fi
arm_example "run" $ex "debug" "__v7" "1"
arm_example "run" $ex "release" "__v7" "1"
continue
fi fi
if [ $TARGET != thumbv6m-none-eabi ]; then arm_example "run" $ex "debug" "" "1"
arm_example "run" $ex "debug" "timer-queue" "1" if [ $ex = types ]; then
arm_example "run" $ex "release" "timer-queue" "1" arm_example "run" $ex "release" "" "1"
else
arm_example "build" $ex "release" "" "1"
fi fi
done done
local built=() local built=()
cargo clean cargo clean
for ex in ${exs[@]}; do for ex in ${exs[@]}; do
if [ $ex = ramfunc ] && [ $T = thumbv6m-none-eabi ]; then if [ $ex = types ] || [ $ex = pool ]; then
# LLD doesn't support this at the moment if [ $TARGET = thumbv6m-none-eabi ]; then
continue continue
fi fi
if [ $ex != types ] && [ $ex != pool ]; then arm_example "build" $ex "debug" "__v7" "2"
cmp ci/builds/${ex}___v7_debug_1.hex \
ci/builds/${ex}___v7_debug_2.hex
arm_example "build" $ex "release" "__v7" "2"
cmp ci/builds/${ex}___v7_release_1.hex \
ci/builds/${ex}___v7_release_2.hex
else
arm_example "build" $ex "debug" "" "2" arm_example "build" $ex "debug" "" "2"
cmp ci/builds/${ex}_debug_1.hex \ cmp ci/builds/${ex}_debug_1.hex \
ci/builds/${ex}_debug_2.hex ci/builds/${ex}_debug_2.hex
arm_example "build" $ex "release" "" "2" arm_example "build" $ex "release" "" "2"
cmp ci/builds/${ex}_release_1.hex \ cmp ci/builds/${ex}_release_1.hex \
ci/builds/${ex}_release_2.hex ci/builds/${ex}_release_2.hex
built+=( $ex )
fi fi
if [ $TARGET != thumbv6m-none-eabi ]; then built+=( $ex )
arm_example "build" $ex "debug" "timer-queue" "2"
cmp ci/builds/${ex}_timer-queue_debug_1.hex \
ci/builds/${ex}_timer-queue_debug_2.hex
arm_example "build" $ex "release" "timer-queue" "2"
cmp ci/builds/${ex}_timer-queue_release_1.hex \
ci/builds/${ex}_timer-queue_release_2.hex
fi
done done
( cd target/$TARGET/release/examples/ && size ${built[@]} ) ( cd target/$TARGET/release/examples/ && size ${built[@]} )

View file

@ -5,27 +5,26 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
// NOTE: does NOT properly work on QEMU // NOTE: does NOT properly work on QEMU
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = { const APP: () = {
#[init(spawn = [foo])] #[init(spawn = [foo])]
fn init(c: init::Context) { fn init(cx: init::Context) {
hprintln!("init(baseline = {:?})", c.start).unwrap(); hprintln!("init(baseline = {:?})", cx.start).unwrap();
// `foo` inherits the baseline of `init`: `Instant(0)` // `foo` inherits the baseline of `init`: `Instant(0)`
c.spawn.foo().unwrap(); cx.spawn.foo().unwrap();
} }
#[task(schedule = [foo])] #[task(schedule = [foo])]
fn foo(c: foo::Context) { fn foo(cx: foo::Context) {
static mut ONCE: bool = true; static mut ONCE: bool = true;
hprintln!("foo(baseline = {:?})", c.scheduled).unwrap(); hprintln!("foo(baseline = {:?})", cx.scheduled).unwrap();
if *ONCE { if *ONCE {
*ONCE = false; *ONCE = false;
@ -36,12 +35,12 @@ const APP: () = {
} }
} }
#[interrupt(spawn = [foo])] #[task(binds = UART0, spawn = [foo])]
fn UART0(c: UART0::Context) { fn uart0(cx: uart0::Context) {
hprintln!("UART0(baseline = {:?})", c.start).unwrap(); hprintln!("UART0(baseline = {:?})", cx.start).unwrap();
// `foo` inherits the baseline of `UART0`: its `start` time // `foo` inherits the baseline of `UART0`: its `start` time
c.spawn.foo().unwrap(); cx.spawn.foo().unwrap();
} }
extern "C" { extern "C" {

View file

@ -5,10 +5,9 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
// `examples/interrupt.rs` rewritten to use `binds` // `examples/interrupt.rs` rewritten to use `binds`
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
@ -31,7 +30,7 @@ const APP: () = {
loop {} loop {}
} }
#[interrupt(binds = UART0)] #[task(binds = UART0)]
fn foo(_: foo::Context) { fn foo(_: foo::Context) {
static mut TIMES: u32 = 0; static mut TIMES: u32 = 0;

View file

@ -5,10 +5,9 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
@ -17,8 +16,8 @@ const APP: () = {
rtfm::pend(Interrupt::UART0); rtfm::pend(Interrupt::UART0);
} }
#[interrupt(spawn = [foo, bar])] #[task(binds = UART0, spawn = [foo, bar])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
c.spawn.foo(0).unwrap(); c.spawn.foo(0).unwrap();
c.spawn.foo(1).unwrap(); c.spawn.foo(1).unwrap();
c.spawn.foo(2).unwrap(); c.spawn.foo(2).unwrap();

View file

@ -5,38 +5,49 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting; use cortex_m_semihosting::debug;
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
use cortex_m_semihosting::hprintln; use cortex_m_semihosting::hprintln;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
#[cfg(debug_assertions)] // <- `true` when using the `dev` profile struct Resources {
static mut COUNT: u32 = 0; #[cfg(debug_assertions)] // <- `true` when using the `dev` profile
#[init(0)]
#[init] count: u32,
fn init(_: init::Context) {
// ..
} }
#[task(priority = 3, resources = [COUNT], spawn = [log])] #[init(spawn = [foo])]
fn foo(c: foo::Context) { fn init(cx: init::Context) {
cx.spawn.foo().unwrap();
cx.spawn.foo().unwrap();
}
#[idle]
fn idle(_: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS);
loop {}
}
#[task(capacity = 2, resources = [count], spawn = [log])]
fn foo(_cx: foo::Context) {
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
*c.resources.COUNT += 1; *_cx.resources.count += 1;
c.spawn.log(*c.resources.COUNT).ok(); _cx.spawn.log(*_cx.resources.count).unwrap();
} }
// this wouldn't compile in `release` mode // this wouldn't compile in `release` mode
// *resources.COUNT += 1; // *_cx.resources.count += 1;
// .. // ..
} }
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
#[task] #[task(capacity = 2)]
fn log(_: log::Context, n: u32) { fn log(_: log::Context, n: u32) {
hprintln!( hprintln!(
"foo has been called {} time{}", "foo has been called {} time{}",

View file

@ -5,15 +5,17 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use rtfm::Mutex; use panic_semihosting as _;
use rtfm::{Exclusive, Mutex};
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
static mut SHARED: u32 = 0; struct Resources {
#[init(0)]
shared: u32,
}
#[init] #[init]
fn init(_: init::Context) { fn init(_: init::Context) {
@ -21,42 +23,43 @@ const APP: () = {
rtfm::pend(Interrupt::UART1); rtfm::pend(Interrupt::UART1);
} }
#[interrupt(resources = [SHARED])] #[task(binds = UART0, resources = [shared])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
static mut STATE: u32 = 0; static mut STATE: u32 = 0;
hprintln!("UART0(STATE = {})", *STATE).unwrap(); hprintln!("UART0(STATE = {})", *STATE).unwrap();
advance(STATE, c.resources.SHARED); // second argument has type `resources::shared`
advance(STATE, c.resources.shared);
rtfm::pend(Interrupt::UART1); rtfm::pend(Interrupt::UART1);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
} }
#[interrupt(priority = 2, resources = [SHARED])] #[task(binds = UART1, priority = 2, resources = [shared])]
fn UART1(mut c: UART1::Context) { fn uart1(c: uart1::Context) {
static mut STATE: u32 = 0; static mut STATE: u32 = 0;
hprintln!("UART1(STATE = {})", *STATE).unwrap(); hprintln!("UART1(STATE = {})", *STATE).unwrap();
// just to show that `SHARED` can be accessed directly and .. // just to show that `shared` can be accessed directly
*c.resources.SHARED += 0; *c.resources.shared += 0;
// .. also through a (no-op) `lock`
c.resources.SHARED.lock(|shared| *shared += 0);
advance(STATE, c.resources.SHARED); // second argument has type `Exclusive<u32>`
advance(STATE, Exclusive(c.resources.shared));
} }
}; };
// the second parameter is generic: it can be any type that implements the `Mutex` trait
fn advance(state: &mut u32, mut shared: impl Mutex<T = u32>) { fn advance(state: &mut u32, mut shared: impl Mutex<T = u32>) {
*state += 1; *state += 1;
let (old, new) = shared.lock(|shared| { let (old, new) = shared.lock(|shared: &mut u32| {
let old = *shared; let old = *shared;
*shared += *state; *shared += *state;
(old, *shared) (old, *shared)
}); });
hprintln!("SHARED: {} -> {}", old, new).unwrap(); hprintln!("shared: {} -> {}", old, new).unwrap();
} }

View file

@ -1,14 +1,13 @@
//! examples/interrupt.rs //! examples/hardware.rs
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
@ -16,7 +15,7 @@ const APP: () = {
fn init(_: init::Context) { fn init(_: init::Context) {
// Pends the UART0 interrupt but its handler won't run until *after* // Pends the UART0 interrupt but its handler won't run until *after*
// `init` returns because interrupts are disabled // `init` returns because interrupts are disabled
rtfm::pend(Interrupt::UART0); rtfm::pend(Interrupt::UART0); // equivalent to NVIC::pend
hprintln!("init").unwrap(); hprintln!("init").unwrap();
} }
@ -34,8 +33,8 @@ const APP: () = {
loop {} loop {}
} }
#[interrupt] #[task(binds = UART0)]
fn UART0(_: UART0::Context) { fn uart0(_: uart0::Context) {
static mut TIMES: u32 = 0; static mut TIMES: u32 = 0;
// Safe access to local `static mut` variable // Safe access to local `static mut` variable

View file

@ -5,9 +5,8 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {

View file

@ -5,21 +5,20 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, peripherals = true)]
const APP: () = { const APP: () = {
#[init] #[init]
fn init(c: init::Context) { fn init(cx: init::Context) {
static mut X: u32 = 0; static mut X: u32 = 0;
// Cortex-M peripherals // Cortex-M peripherals
let _core: rtfm::Peripherals = c.core; let _core: cortex_m::Peripherals = cx.core;
// Device specific peripherals // Device specific peripherals
let _device: lm3s6965::Peripherals = c.device; let _device: lm3s6965::Peripherals = cx.device;
// Safe access to local `static mut` variable // Safe access to local `static mut` variable
let _x: &'static mut u32 = X; let _x: &'static mut u32 = X;

View file

@ -5,38 +5,37 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use heapless::{ use heapless::{
consts::*, consts::*,
i,
spsc::{Consumer, Producer, Queue}, spsc::{Consumer, Producer, Queue},
}; };
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
// Late resources // Late resources
static mut P: Producer<'static, u32, U4> = (); struct Resources {
static mut C: Consumer<'static, u32, U4> = (); p: Producer<'static, u32, U4>,
c: Consumer<'static, u32, U4>,
}
#[init] #[init]
fn init(_: init::Context) -> init::LateResources { fn init(_: init::Context) -> init::LateResources {
// NOTE: we use `Option` here to work around the lack of static mut Q: Queue<u32, U4> = Queue(i::Queue::new());
// a stable `const` constructor
static mut Q: Option<Queue<u32, U4>> = None;
*Q = Some(Queue::new()); let (p, c) = Q.split();
let (p, c) = Q.as_mut().unwrap().split();
// Initialization of late resources // Initialization of late resources
init::LateResources { P: p, C: c } init::LateResources { p, c }
} }
#[idle(resources = [C])] #[idle(resources = [c])]
fn idle(c: idle::Context) -> ! { fn idle(c: idle::Context) -> ! {
loop { loop {
if let Some(byte) = c.resources.C.dequeue() { if let Some(byte) = c.resources.c.dequeue() {
hprintln!("received message: {}", byte).unwrap(); hprintln!("received message: {}", byte).unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
@ -46,8 +45,8 @@ const APP: () = {
} }
} }
#[interrupt(resources = [P])] #[task(binds = UART0, resources = [p])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
c.resources.P.enqueue(42).unwrap(); c.resources.p.enqueue(42).unwrap();
} }
}; };

View file

@ -5,14 +5,16 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
static mut SHARED: u32 = 0; struct Resources {
#[init(0)]
shared: u32,
}
#[init] #[init]
fn init(_: init::Context) { fn init(_: init::Context) {
@ -20,21 +22,21 @@ const APP: () = {
} }
// when omitted priority is assumed to be `1` // when omitted priority is assumed to be `1`
#[interrupt(resources = [SHARED])] #[task(binds = GPIOA, resources = [shared])]
fn GPIOA(mut c: GPIOA::Context) { fn gpioa(mut c: gpioa::Context) {
hprintln!("A").unwrap(); hprintln!("A").unwrap();
// the lower priority task requires a critical section to access the data // the lower priority task requires a critical section to access the data
c.resources.SHARED.lock(|shared| { c.resources.shared.lock(|shared| {
// data can only be modified within this critical section (closure) // data can only be modified within this critical section (closure)
*shared += 1; *shared += 1;
// GPIOB will *not* run right now due to the critical section // GPIOB will *not* run right now due to the critical section
rtfm::pend(Interrupt::GPIOB); rtfm::pend(Interrupt::GPIOB);
hprintln!("B - SHARED = {}", *shared).unwrap(); hprintln!("B - shared = {}", *shared).unwrap();
// GPIOC does not contend for `SHARED` so it's allowed to run now // GPIOC does not contend for `shared` so it's allowed to run now
rtfm::pend(Interrupt::GPIOC); rtfm::pend(Interrupt::GPIOC);
}); });
@ -45,16 +47,16 @@ const APP: () = {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
} }
#[interrupt(priority = 2, resources = [SHARED])] #[task(binds = GPIOB, priority = 2, resources = [shared])]
fn GPIOB(mut c: GPIOB::Context) { fn gpiob(c: gpiob::Context) {
// the higher priority task does *not* need a critical section // the higher priority task does *not* need a critical section
*c.resources.SHARED += 1; *c.resources.shared += 1;
hprintln!("D - SHARED = {}", *c.resources.SHARED).unwrap(); hprintln!("D - shared = {}", *c.resources.shared).unwrap();
} }
#[interrupt(priority = 3)] #[task(binds = GPIOC, priority = 3)]
fn GPIOC(_: GPIOC::Context) { fn gpioc(_: gpioc::Context) {
hprintln!("C").unwrap(); hprintln!("C").unwrap();
} }
}; };

View file

@ -5,9 +5,8 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {

View file

@ -5,11 +5,10 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_halt;
use core::marker::PhantomData; use core::marker::PhantomData;
use cortex_m_semihosting::debug; use cortex_m_semihosting::debug;
use panic_halt as _;
use rtfm::app; use rtfm::app;
pub struct NotSend { pub struct NotSend {
@ -18,7 +17,10 @@ pub struct NotSend {
#[app(device = lm3s6965)] #[app(device = lm3s6965)]
const APP: () = { const APP: () = {
static mut SHARED: Option<NotSend> = None; struct Resources {
#[init(None)]
shared: Option<NotSend>,
}
#[init(spawn = [baz, quux])] #[init(spawn = [baz, quux])]
fn init(c: init::Context) { fn init(c: init::Context) {
@ -37,16 +39,16 @@ const APP: () = {
// scenario 1 // scenario 1
} }
#[task(priority = 2, resources = [SHARED])] #[task(priority = 2, resources = [shared])]
fn baz(mut c: baz::Context) { fn baz(c: baz::Context) {
// scenario 2: resource shared between tasks that run at the same priority // scenario 2: resource shared between tasks that run at the same priority
*c.resources.SHARED = Some(NotSend { _0: PhantomData }); *c.resources.shared = Some(NotSend { _0: PhantomData });
} }
#[task(priority = 2, resources = [SHARED])] #[task(priority = 2, resources = [shared])]
fn quux(mut c: quux::Context) { fn quux(c: quux::Context) {
// scenario 2 // scenario 2
let _not_send = c.resources.SHARED.take().unwrap(); let _not_send = c.resources.shared.take().unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
} }

View file

@ -5,11 +5,10 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_halt;
use core::marker::PhantomData; use core::marker::PhantomData;
use cortex_m_semihosting::debug; use cortex_m_semihosting::debug;
use panic_halt as _;
pub struct NotSync { pub struct NotSync {
_0: PhantomData<*const ()>, _0: PhantomData<*const ()>,
@ -17,21 +16,24 @@ pub struct NotSync {
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
static SHARED: NotSync = NotSync { _0: PhantomData }; struct Resources {
#[init(NotSync { _0: PhantomData })]
shared: NotSync,
}
#[init] #[init]
fn init(_: init::Context) { fn init(_: init::Context) {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
} }
#[task(resources = [SHARED])] #[task(resources = [&shared])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
let _: &NotSync = c.resources.SHARED; let _: &NotSync = c.resources.shared;
} }
#[task(resources = [SHARED])] #[task(resources = [&shared])]
fn bar(c: bar::Context) { fn bar(c: bar::Context) {
let _: &NotSync = c.resources.SHARED; let _: &NotSync = c.resources.shared;
} }
extern "C" { extern "C" {

View file

@ -0,0 +1,38 @@
//! examples/static.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)]
const APP: () = {
struct Resources {
key: u32,
}
#[init]
fn init(_: init::Context) -> init::LateResources {
rtfm::pend(Interrupt::UART0);
rtfm::pend(Interrupt::UART1);
init::LateResources { key: 0xdeadbeef }
}
#[task(binds = UART0, resources = [&key])]
fn uart0(cx: uart0::Context) {
let key: &u32 = cx.resources.key;
hprintln!("UART0(key = {:#x})", key).unwrap();
debug::exit(debug::EXIT_SUCCESS);
}
#[task(binds = UART1, priority = 2, resources = [&key])]
fn uart1(cx: uart1::Context) {
hprintln!("UART1(key = {:#x})", cx.resources.key).unwrap();
}
};

View file

@ -5,27 +5,26 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::hprintln; use cortex_m_semihosting::hprintln;
use rtfm::Instant; use panic_semihosting as _;
use rtfm::cyccnt::{Instant, U32Ext};
const PERIOD: u32 = 8_000_000; const PERIOD: u32 = 8_000_000;
// NOTE: does NOT work on QEMU! // NOTE: does NOT work on QEMU!
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = { const APP: () = {
#[init(schedule = [foo])] #[init(schedule = [foo])]
fn init(c: init::Context) { fn init(cx: init::Context) {
c.schedule.foo(Instant::now() + PERIOD.cycles()).unwrap(); cx.schedule.foo(Instant::now() + PERIOD.cycles()).unwrap();
} }
#[task(schedule = [foo])] #[task(schedule = [foo])]
fn foo(c: foo::Context) { fn foo(cx: foo::Context) {
let now = Instant::now(); let now = Instant::now();
hprintln!("foo(scheduled = {:?}, now = {:?})", c.scheduled, now).unwrap(); hprintln!("foo(scheduled = {:?}, now = {:?})", cx.scheduled, now).unwrap();
c.schedule.foo(c.scheduled + PERIOD.cycles()).unwrap(); cx.schedule.foo(cx.scheduled + PERIOD.cycles()).unwrap();
} }
extern "C" { extern "C" {

View file

@ -5,14 +5,13 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use heapless::{ use heapless::{
pool, pool,
pool::singleton::{Box, Pool}, pool::singleton::{Box, Pool},
}; };
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
use rtfm::app; use rtfm::app;
// Declare a pool of 128-byte memory blocks // Declare a pool of 128-byte memory blocks
@ -30,8 +29,8 @@ const APP: () = {
rtfm::pend(Interrupt::I2C0); rtfm::pend(Interrupt::I2C0);
} }
#[interrupt(priority = 2, spawn = [foo, bar])] #[task(binds = I2C0, priority = 2, spawn = [foo, bar])]
fn I2C0(c: I2C0::Context) { fn i2c0(c: i2c0::Context) {
// claim a memory block, leave it uninitialized and .. // claim a memory block, leave it uninitialized and ..
let x = P::alloc().unwrap().freeze(); let x = P::alloc().unwrap().freeze();

37
examples/preempt.rs Normal file
View file

@ -0,0 +1,37 @@
//! examples/preempt.rs
#![no_main]
#![no_std]
use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
use panic_semihosting as _;
use rtfm::app;
#[app(device = lm3s6965)]
const APP: () = {
#[init]
fn init(_: init::Context) {
rtfm::pend(Interrupt::GPIOA);
}
#[task(binds = GPIOA, priority = 1)]
fn gpioa(_: gpioa::Context) {
hprintln!("GPIOA - start").unwrap();
rtfm::pend(Interrupt::GPIOC);
hprintln!("GPIOA - end").unwrap();
debug::exit(debug::EXIT_SUCCESS);
}
#[task(binds = GPIOB, priority = 2)]
fn gpiob(_: gpiob::Context) {
hprintln!(" GPIOB").unwrap();
}
#[task(binds = GPIOC, priority = 2)]
fn gpioc(_: gpioc::Context) {
hprintln!(" GPIOC - start").unwrap();
rtfm::pend(Interrupt::GPIOB);
hprintln!(" GPIOC - end").unwrap();
}
};

View file

@ -5,9 +5,8 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {

View file

@ -5,15 +5,17 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
// A resource struct Resources {
static mut SHARED: u32 = 0; // A resource
#[init(0)]
shared: u32,
}
#[init] #[init]
fn init(_: init::Context) { fn init(_: init::Context) {
@ -21,29 +23,31 @@ const APP: () = {
rtfm::pend(Interrupt::UART1); rtfm::pend(Interrupt::UART1);
} }
// `shared` cannot be accessed from this context
#[idle] #[idle]
fn idle(_: idle::Context) -> ! { fn idle(_cx: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
// error: `SHARED` can't be accessed from this context // error: no `resources` field in `idle::Context`
// SHARED += 1; // _cx.resources.shared += 1;
loop {} loop {}
} }
// `SHARED` can be access from this context // `shared` can be accessed from this context
#[interrupt(resources = [SHARED])] #[task(binds = UART0, resources = [shared])]
fn UART0(mut c: UART0::Context) { fn uart0(cx: uart0::Context) {
*c.resources.SHARED += 1; let shared: &mut u32 = cx.resources.shared;
*shared += 1;
hprintln!("UART0: SHARED = {}", c.resources.SHARED).unwrap(); hprintln!("UART0: shared = {}", shared).unwrap();
} }
// `SHARED` can be access from this context // `shared` can be accessed from this context
#[interrupt(resources = [SHARED])] #[task(binds = UART1, resources = [shared])]
fn UART1(mut c: UART1::Context) { fn uart1(cx: uart1::Context) {
*c.resources.SHARED += 1; *cx.resources.shared += 1;
hprintln!("UART1: SHARED = {}", c.resources.SHARED).unwrap(); hprintln!("UART1: shared = {}", cx.resources.shared).unwrap();
} }
}; };

View file

@ -5,25 +5,24 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::hprintln; use cortex_m_semihosting::hprintln;
use rtfm::Instant; use panic_halt as _;
use rtfm::cyccnt::{Instant, U32Ext as _};
// NOTE: does NOT work on QEMU! // NOTE: does NOT work on QEMU!
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = { const APP: () = {
#[init(schedule = [foo, bar])] #[init(schedule = [foo, bar])]
fn init(c: init::Context) { fn init(cx: init::Context) {
let now = Instant::now(); let now = Instant::now();
hprintln!("init @ {:?}", now).unwrap(); hprintln!("init @ {:?}", now).unwrap();
// Schedule `foo` to run 8e6 cycles (clock cycles) in the future // Schedule `foo` to run 8e6 cycles (clock cycles) in the future
c.schedule.foo(now + 8_000_000.cycles()).unwrap(); cx.schedule.foo(now + 8_000_000.cycles()).unwrap();
// Schedule `bar` to run 4e6 cycles in the future // Schedule `bar` to run 4e6 cycles in the future
c.schedule.bar(now + 4_000_000.cycles()).unwrap(); cx.schedule.bar(now + 4_000_000.cycles()).unwrap();
} }
#[task] #[task]

View file

@ -5,30 +5,32 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_halt;
use cortex_m_semihosting::debug; use cortex_m_semihosting::debug;
use lm3s6965::Interrupt; use lm3s6965::Interrupt;
use panic_halt as _;
use rtfm::app; use rtfm::app;
pub struct MustBeSend; pub struct MustBeSend;
#[app(device = lm3s6965)] #[app(device = lm3s6965)]
const APP: () = { const APP: () = {
static mut SHARED: Option<MustBeSend> = None; struct Resources {
#[init(None)]
shared: Option<MustBeSend>,
}
#[init(resources = [SHARED])] #[init(resources = [shared])]
fn init(c: init::Context) { fn init(c: init::Context) {
// this `message` will be sent to task `UART0` // this `message` will be sent to task `UART0`
let message = MustBeSend; let message = MustBeSend;
*c.resources.SHARED = Some(message); *c.resources.shared = Some(message);
rtfm::pend(Interrupt::UART0); rtfm::pend(Interrupt::UART0);
} }
#[interrupt(resources = [SHARED])] #[task(binds = UART0, resources = [shared])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
if let Some(message) = c.resources.SHARED.take() { if let Some(message) = c.resources.shared.take() {
// `message` has been received // `message` has been received
drop(message); drop(message);

View file

@ -1,17 +1,10 @@
//! examples/smallest.rs //! examples/smallest.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
// panic-handler crate use panic_semihosting as _; // panic handler
extern crate panic_semihosting;
use rtfm::app; use rtfm::app;
#[app(device = lm3s6965)] #[app(device = lm3s6965)]
const APP: () = { const APP: () = {};
#[init]
fn init(_: init::Context) {}
};

View file

@ -1,36 +0,0 @@
//! examples/static.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[rtfm::app(device = lm3s6965)]
const APP: () = {
static KEY: u32 = ();
#[init]
fn init(_: init::Context) -> init::LateResources {
rtfm::pend(Interrupt::UART0);
rtfm::pend(Interrupt::UART1);
init::LateResources { KEY: 0xdeadbeef }
}
#[interrupt(resources = [KEY])]
fn UART0(c: UART0::Context) {
hprintln!("UART0(KEY = {:#x})", c.resources.KEY).unwrap();
debug::exit(debug::EXIT_SUCCESS);
}
#[interrupt(priority = 2, resources = [KEY])]
fn UART1(c: UART1::Context) {
hprintln!("UART1(KEY = {:#x})", c.resources.KEY).unwrap();
}
};

View file

@ -1,24 +1,25 @@
//! Check that `binds` works as advertised //! [compile-pass] Check that `binds` works as advertised
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate lm3s6965; use panic_halt as _;
extern crate panic_halt;
extern crate rtfm;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
#[init] #[init]
fn init(_: init::Context) {} fn init(_: init::Context) {}
#[exception(binds = SVCall)] // Cortex-M exception
#[task(binds = SVCall)]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
foo_trampoline(c) foo_trampoline(c)
} }
#[interrupt(binds = UART0)] // LM3S6965 interrupt
#[task(binds = UART0)]
fn bar(c: bar::Context) { fn bar(c: bar::Context) {
bar_trampoline(c) bar_trampoline(c)
} }

View file

@ -1,18 +1,17 @@
//! Compile-pass test that checks that `#[cfg]` attributes are respected //! [compile-pass] check that `#[cfg]` attributes are respected
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate lm3s6965; use panic_halt as _;
extern crate panic_halt;
extern crate rtfm;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = { const APP: () = {
#[cfg(never)] struct Resources {
static mut FOO: u32 = 0; #[cfg(never)]
#[init(0)]
foo: u32,
}
#[init] #[init]
fn init(_: init::Context) { fn init(_: init::Context) {
@ -28,13 +27,13 @@ const APP: () = {
loop {} loop {}
} }
#[task(resources = [FOO], schedule = [quux], spawn = [quux])] #[task(resources = [foo], schedule = [quux], spawn = [quux])]
fn foo(_: foo::Context) { fn foo(_: foo::Context) {
#[cfg(never)] #[cfg(never)]
static mut BAR: u32 = 0; static mut BAR: u32 = 0;
} }
#[task(priority = 3, resources = [FOO], schedule = [quux], spawn = [quux])] #[task(priority = 3, resources = [foo], schedule = [quux], spawn = [quux])]
fn bar(_: bar::Context) { fn bar(_: bar::Context) {
#[cfg(never)] #[cfg(never)]
static mut BAR: u32 = 0; static mut BAR: u32 = 0;

View file

@ -0,0 +1,36 @@
//! [compile-pass] late resources don't need to be `Send` if they are owned by `idle`
#![no_main]
#![no_std]
use core::marker::PhantomData;
use panic_halt as _;
pub struct NotSend {
_0: PhantomData<*const ()>,
}
#[rtfm::app(device = lm3s6965)]
const APP: () = {
struct Resources {
x: NotSend,
#[init(None)]
y: Option<NotSend>,
}
#[init(resources = [y])]
fn init(c: init::Context) -> init::LateResources {
// equivalent to late resource initialization
*c.resources.y = Some(NotSend { _0: PhantomData });
init::LateResources {
x: NotSend { _0: PhantomData },
}
}
#[idle(resources = [x, y])]
fn idle(_: idle::Context) -> ! {
loop {}
}
};

87
examples/t-resource.rs Normal file
View file

@ -0,0 +1,87 @@
//! [compile-pass] Check code generation of resources
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_halt as _;
#[rtfm::app(device = lm3s6965)]
const APP: () = {
struct Resources {
#[init(0)]
o1: u32, // init
#[init(0)]
o2: u32, // idle
#[init(0)]
o3: u32, // EXTI0
#[init(0)]
o4: u32, // idle
#[init(0)]
o5: u32, // EXTI1
#[init(0)]
o6: u32, // init
#[init(0)]
s1: u32, // idle & uart0
#[init(0)]
s2: u32, // uart0 & uart1
#[init(0)]
s3: u32, // idle & uart0
}
#[init(resources = [o1, o4, o5, o6, s3])]
fn init(c: init::Context) {
// owned by `init` == `&'static mut`
let _: &'static mut u32 = c.resources.o1;
// owned by `init` == `&'static` if read-only
let _: &'static u32 = c.resources.o6;
// `init` has exclusive access to all resources
let _: &mut u32 = c.resources.o4;
let _: &mut u32 = c.resources.o5;
let _: &mut u32 = c.resources.s3;
}
#[idle(resources = [o2, &o4, s1, &s3])]
fn idle(mut c: idle::Context) -> ! {
// owned by `idle` == `&'static mut`
let _: &'static mut u32 = c.resources.o2;
// owned by `idle` == `&'static` if read-only
let _: &'static u32 = c.resources.o4;
// shared with `idle` == `Mutex`
c.resources.s1.lock(|_| {});
// `&` if read-only
let _: &u32 = c.resources.s3;
loop {}
}
#[task(binds = UART0, resources = [o3, s1, s2, &s3])]
fn uart0(c: uart0::Context) {
// owned by interrupt == `&mut`
let _: &mut u32 = c.resources.o3;
// no `Mutex` proxy when access from highest priority task
let _: &mut u32 = c.resources.s1;
// no `Mutex` proxy when co-owned by cooperative (same priority) tasks
let _: &mut u32 = c.resources.s2;
// `&` if read-only
let _: &u32 = c.resources.s3;
}
#[task(binds = UART1, resources = [s2, &o5])]
fn uart1(c: uart1::Context) {
// owned by interrupt == `&` if read-only
let _: &u32 = c.resources.o5;
// no `Mutex` proxy when co-owned by cooperative (same priority) tasks
let _: &mut u32 = c.resources.s2;
}
};

View file

@ -1,15 +1,14 @@
//! [compile-pass] Check `schedule` code generation
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate lm3s6965; use panic_halt as _;
extern crate panic_halt; use rtfm::cyccnt::{Instant, U32Ext as _};
extern crate rtfm;
use rtfm::Instant; #[rtfm::app(device = lm3s6965, monotonic = rtfm::cyccnt::CYCCNT)]
#[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
#[init(schedule = [foo, bar, baz])] #[init(schedule = [foo, bar, baz])]
fn init(c: init::Context) { fn init(c: init::Context) {
@ -27,15 +26,15 @@ const APP: () = {
loop {} loop {}
} }
#[exception(schedule = [foo, bar, baz])] #[task(binds = SVCall, schedule = [foo, bar, baz])]
fn SVCall(c: SVCall::Context) { fn svcall(c: svcall::Context) {
let _: Result<(), ()> = c.schedule.foo(c.start + 70.cycles()); let _: Result<(), ()> = c.schedule.foo(c.start + 70.cycles());
let _: Result<(), u32> = c.schedule.bar(c.start + 80.cycles(), 0); let _: Result<(), u32> = c.schedule.bar(c.start + 80.cycles(), 0);
let _: Result<(), (u32, u32)> = c.schedule.baz(c.start + 90.cycles(), 0, 1); let _: Result<(), (u32, u32)> = c.schedule.baz(c.start + 90.cycles(), 0, 1);
} }
#[interrupt(schedule = [foo, bar, baz])] #[task(binds = UART0, schedule = [foo, bar, baz])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
let _: Result<(), ()> = c.schedule.foo(c.start + 100.cycles()); let _: Result<(), ()> = c.schedule.foo(c.start + 100.cycles());
let _: Result<(), u32> = c.schedule.bar(c.start + 110.cycles(), 0); let _: Result<(), u32> = c.schedule.bar(c.start + 110.cycles(), 0);
let _: Result<(), (u32, u32)> = c.schedule.baz(c.start + 120.cycles(), 0, 1); let _: Result<(), (u32, u32)> = c.schedule.baz(c.start + 120.cycles(), 0, 1);

View file

@ -1,12 +1,11 @@
//! Check code generation of `spawn` //! [compile-pass] Check code generation of `spawn`
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate lm3s6965; use panic_halt as _;
extern crate panic_halt;
extern crate rtfm;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
@ -26,15 +25,15 @@ const APP: () = {
loop {} loop {}
} }
#[exception(spawn = [foo, bar, baz])] #[task(binds = SVCall, spawn = [foo, bar, baz])]
fn SVCall(c: SVCall::Context) { fn svcall(c: svcall::Context) {
let _: Result<(), ()> = c.spawn.foo(); let _: Result<(), ()> = c.spawn.foo();
let _: Result<(), u32> = c.spawn.bar(0); let _: Result<(), u32> = c.spawn.bar(0);
let _: Result<(), (u32, u32)> = c.spawn.baz(0, 1); let _: Result<(), (u32, u32)> = c.spawn.baz(0, 1);
} }
#[interrupt(spawn = [foo, bar, baz])] #[task(binds = UART0, spawn = [foo, bar, baz])]
fn UART0(c: UART0::Context) { fn uart0(c: uart0::Context) {
let _: Result<(), ()> = c.spawn.foo(); let _: Result<(), ()> = c.spawn.foo();
let _: Result<(), u32> = c.spawn.bar(0); let _: Result<(), u32> = c.spawn.bar(0);
let _: Result<(), (u32, u32)> = c.spawn.baz(0, 1); let _: Result<(), (u32, u32)> = c.spawn.baz(0, 1);

View file

@ -5,9 +5,8 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use panic_semihosting as _;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965)]
const APP: () = { const APP: () = {
@ -18,16 +17,20 @@ const APP: () = {
#[task(spawn = [bar, baz])] #[task(spawn = [bar, baz])]
fn foo(c: foo::Context) { fn foo(c: foo::Context) {
hprintln!("foo").unwrap(); hprintln!("foo - start").unwrap();
// spawns `bar` onto the task scheduler // spawns `bar` onto the task scheduler
// `foo` and `bar` have the same priority so `bar` will not run until // `foo` and `bar` have the same priority so `bar` will not run until
// after `foo` terminates // after `foo` terminates
c.spawn.bar().unwrap(); c.spawn.bar().unwrap();
hprintln!("foo - middle").unwrap();
// spawns `baz` onto the task scheduler // spawns `baz` onto the task scheduler
// `baz` has higher priority than `foo` so it immediately preempts `foo` // `baz` has higher priority than `foo` so it immediately preempts `foo`
c.spawn.baz().unwrap(); c.spawn.baz().unwrap();
hprintln!("foo - end").unwrap();
} }
#[task] #[task]

View file

@ -5,48 +5,51 @@
#![no_main] #![no_main]
#![no_std] #![no_std]
extern crate panic_semihosting;
use cortex_m_semihosting::debug; use cortex_m_semihosting::debug;
use rtfm::{Exclusive, Instant}; use panic_semihosting as _;
use rtfm::cyccnt;
#[rtfm::app(device = lm3s6965)] #[rtfm::app(device = lm3s6965, peripherals = true, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = { const APP: () = {
static mut SHARED: u32 = 0; struct Resources {
#[init(0)]
shared: u32,
}
#[init(schedule = [foo], spawn = [foo])] #[init(schedule = [foo], spawn = [foo])]
fn init(c: init::Context) { fn init(cx: init::Context) {
let _: Instant = c.start; let _: cyccnt::Instant = cx.start;
let _: rtfm::Peripherals = c.core; let _: rtfm::Peripherals = cx.core;
let _: lm3s6965::Peripherals = c.device; let _: lm3s6965::Peripherals = cx.device;
let _: init::Schedule = c.schedule; let _: init::Schedule = cx.schedule;
let _: init::Spawn = c.spawn; let _: init::Spawn = cx.spawn;
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS);
} }
#[exception(schedule = [foo], spawn = [foo])] #[idle(schedule = [foo], spawn = [foo])]
fn SVCall(c: SVCall::Context) { fn idle(cx: idle::Context) -> ! {
let _: Instant = c.start; let _: idle::Schedule = cx.schedule;
let _: SVCall::Schedule = c.schedule; let _: idle::Spawn = cx.spawn;
let _: SVCall::Spawn = c.spawn;
loop {}
} }
#[interrupt(resources = [SHARED], schedule = [foo], spawn = [foo])] #[task(binds = UART0, resources = [shared], schedule = [foo], spawn = [foo])]
fn UART0(c: UART0::Context) { fn uart0(cx: uart0::Context) {
let _: Instant = c.start; let _: cyccnt::Instant = cx.start;
let _: resources::SHARED = c.resources.SHARED; let _: resources::shared = cx.resources.shared;
let _: UART0::Schedule = c.schedule; let _: uart0::Schedule = cx.schedule;
let _: UART0::Spawn = c.spawn; let _: uart0::Spawn = cx.spawn;
} }
#[task(priority = 2, resources = [SHARED], schedule = [foo], spawn = [foo])] #[task(priority = 2, resources = [shared], schedule = [foo], spawn = [foo])]
fn foo(c: foo::Context) { fn foo(cx: foo::Context) {
let _: Instant = c.scheduled; let _: cyccnt::Instant = cx.scheduled;
let _: Exclusive<u32> = c.resources.SHARED; let _: &mut u32 = cx.resources.shared;
let _: foo::Resources = c.resources; let _: foo::Resources = cx.resources;
let _: foo::Schedule = c.schedule; let _: foo::Schedule = cx.schedule;
let _: foo::Spawn = c.spawn; let _: foo::Spawn = cx.spawn;
} }
extern "C" { extern "C" {

18
heterogeneous/Cargo.toml Normal file
View file

@ -0,0 +1,18 @@
[package]
authors = ["Jorge Aparicio <jorge@japaric.io>"]
edition = "2018"
name = "heterogeneous"
# this crate is only used for testing
publish = false
version = "0.0.0-alpha.0"
[dependencies]
bare-metal = "0.2.4"
[dependencies.cortex-m-rtfm]
path = ".."
features = ["heterogeneous"]
[dev-dependencies]
panic-halt = "0.2.0"
microamp = "0.1.0-alpha.1"

1
heterogeneous/README.md Normal file
View file

@ -0,0 +1 @@
This directory contains *heterogeneous* multi-core compile pass tests.

View file

@ -0,0 +1,7 @@
#![no_main]
#![no_std]
use panic_halt as _;
#[rtfm::app(cores = 2, device = heterogeneous)]
const APP: () = {};

View file

@ -0,0 +1,39 @@
//! [compile-pass] Cross initialization of late resources

#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = heterogeneous)]
const APP: () = {
    struct Resources {
        // owned by core #1 but initialized by core #0
        x: u32,

        // owned by core #0 but initialized by core #1
        y: u32,
    }

    // core #0's `init` provides the value of `x` (declared via `late = [x]`)
    #[init(core = 0, late = [x])]
    fn a(_: a::Context) -> a::LateResources {
        a::LateResources { x: 0 }
    }

    // core #0's `idle` consumes `y`, which is initialized by the *other* core
    #[idle(core = 0, resources = [y])]
    fn b(_: b::Context) -> ! {
        loop {}
    }

    // core #1's `init` provides every late resource not claimed by core #0
    #[init(core = 1)]
    fn c(_: c::Context) -> c::LateResources {
        c::LateResources { y: 0 }
    }

    // core #1's `idle` consumes `x`, initialized by core #0
    #[idle(core = 1, resources = [x])]
    fn d(_: d::Context) -> ! {
        loop {}
    }
};

View file

@ -0,0 +1,26 @@
//! [compile-pass] Split initialization of late resources

#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = heterogeneous)]
const APP: () = {
    struct Resources {
        x: u32,
        y: u32,
    }

    // core #0 initializes only `x` (listed in `late = [x]`) ...
    #[init(core = 0, late = [x])]
    fn a(_: a::Context) -> a::LateResources {
        a::LateResources { x: 0 }
    }

    // ... core #1 initializes the remaining late resources (`y`)
    #[init(core = 1)]
    fn b(_: b::Context) -> b::LateResources {
        b::LateResources { y: 0 }
    }
};

View file

@ -0,0 +1,36 @@
// [compile-pass] cross-core `schedule`: tasks on different cores schedule each
// other using the `monotonic` timer declared in the `#[app]` attribute
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = heterogeneous, monotonic = heterogeneous::MT)]
const APP: () = {
    #[init(core = 0, spawn = [ping])]
    fn init(c: init::Context) {
        c.spawn.ping().ok();
    }

    // `pong` (core #0) schedules `ping` (core #1) and vice versa
    #[task(core = 0, schedule = [ping])]
    fn pong(c: pong::Context) {
        c.schedule.ping(c.scheduled + 1_000_000).ok();
    }

    #[task(core = 1, schedule = [pong])]
    fn ping(c: ping::Context) {
        c.schedule.pong(c.scheduled + 1_000_000).ok();
    }

    // interrupts donated to the runtime for task dispatch; each core donates
    // two of its own, so the same interrupt name may appear once per core
    extern "C" {
        #[core = 0]
        fn I0();

        #[core = 0]
        fn I1();

        #[core = 1]
        fn I0();

        #[core = 1]
        fn I1();
    }
};

View file

@ -0,0 +1,20 @@
// [compile-pass] cross-core `spawn`: core #0 spawns a task that runs on core #1
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = heterogeneous)]
const APP: () = {
    #[init(core = 0, spawn = [foo])]
    fn init(c: init::Context) {
        c.spawn.foo().ok();
    }

    #[task(core = 1)]
    fn foo(_: foo::Context) {}

    // interrupt donated by core #1 to dispatch its software tasks
    extern "C" {
        #[core = 1]
        fn I0();
    }
};

99
heterogeneous/src/lib.rs Normal file
View file

@ -0,0 +1,99 @@
//! Fake multi-core PAC
#![no_std]
use core::{
cmp::Ordering,
ops::{Add, Sub},
};
use bare_metal::Nr;
use rtfm::{Fraction, Monotonic, MultiCore};
// both cores have the exact same interrupts
pub use Interrupt_0 as Interrupt_1;
// Fake priority bits: pretend the device implements 3 NVIC priority bits
pub const NVIC_PRIO_BITS: u8 = 3;

/// Stub of the cross-core "pend interrupt" primitive: a real multi-core PAC
/// would set `_interrupt` pending on core `_core`; this fake PAC (used only
/// for compile tests) does nothing
pub fn xpend(_core: u8, _interrupt: impl Nr) {}
/// Fake monotonic timer
///
/// Backed by volatile reads/writes of the counter at `0xE000_1004`
/// (NOTE(review): presumably the Cortex-M DWT->CYCCNT register — confirm;
/// this crate is only ever compiled, never run)
pub struct MT;

impl Monotonic for MT {
    type Instant = Instant;

    // one timer tick per system clock cycle
    fn ratio() -> Fraction {
        Fraction {
            numerator: 1,
            denominator: 1,
        }
    }

    // zero the counter; `unsafe` because the caller must guarantee exclusive
    // access to the counter register
    unsafe fn reset() {
        (0xE0001004 as *mut u32).write_volatile(0)
    }

    // volatile read of the current counter value
    fn now() -> Instant {
        unsafe { Instant((0xE0001004 as *const u32).read_volatile() as i32) }
    }

    // the instant that corresponds to a freshly reset counter
    fn zero() -> Instant {
        Instant(0)
    }
}

// mark the timer as usable from more than one core
impl MultiCore for MT {}
/// A point in time of the fake monotonic timer, stored as a signed tick count.
///
/// Ordering uses wrapping arithmetic so comparisons keep working after the
/// counter overflows, as long as two instants are less than `2^31` ticks apart.
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct Instant(i32);

impl Add<u32> for Instant {
    type Output = Instant;

    /// Advances the instant by `duration` ticks, wrapping on overflow.
    fn add(self, duration: u32) -> Self {
        let ticks = duration as i32;
        Instant(self.0.wrapping_add(ticks))
    }
}

impl Sub for Instant {
    type Output = u32;

    /// Returns the distance in ticks between two instants.
    ///
    /// Panics (checked subtraction) if `earlier` does not precede `self`.
    fn sub(self, earlier: Self) -> u32 {
        let diff = self.0.checked_sub(earlier.0).unwrap();
        diff as u32
    }
}

impl Ord for Instant {
    /// Wrapping comparison: the sign of the wrapped difference decides the order.
    fn cmp(&self, other: &Self) -> Ordering {
        let diff = self.0.wrapping_sub(other.0);
        if diff < 0 {
            Ordering::Less
        } else if diff == 0 {
            Ordering::Equal
        } else {
            Ordering::Greater
        }
    }
}

impl PartialOrd for Instant {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
// Fake interrupts
//
// Eight device interrupts (`I0`..`I7`); the explicit discriminant doubles as
// the interrupt number returned by `Nr::nr`
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum Interrupt_0 {
    I0 = 0,
    I1 = 1,
    I2 = 2,
    I3 = 3,
    I4 = 4,
    I5 = 5,
    I6 = 6,
    I7 = 7,
}

// `Nr` is an unsafe trait: `nr` must return the correct interrupt number.
// `repr(u8)` plus the explicit discriminants above make the cast correct.
unsafe impl Nr for Interrupt_0 {
    fn nr(&self) -> u8 {
        *self as u8
    }
}

17
homogeneous/Cargo.toml Normal file
View file

@ -0,0 +1,17 @@
[package]
authors = ["Jorge Aparicio <jorge@japaric.io>"]
edition = "2018"
name = "homogeneous"
# this crate is only used for testing
publish = false
version = "0.0.0-alpha.0"
[dependencies]
bare-metal = "0.2.4"
[dependencies.cortex-m-rtfm]
path = ".."
features = ["homogeneous"]
[dev-dependencies]
panic-halt = "0.2.0"

1
homogeneous/README.md Normal file
View file

@ -0,0 +1 @@
This directory contains *homogeneous* multi-core compile pass tests.

View file

@ -0,0 +1,7 @@
// [compile-pass] the smallest possible two-core application: two cores, no
// tasks, no resources
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = homogeneous)]
const APP: () = {};

View file

@ -0,0 +1,39 @@
//! [compile-pass] Cross initialization of late resources

#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = homogeneous)]
const APP: () = {
    struct Resources {
        // owned by core #1 but initialized by core #0
        x: u32,

        // owned by core #0 but initialized by core #1
        y: u32,
    }

    // core #0's `init` provides the value of `x` (declared via `late = [x]`)
    #[init(core = 0, late = [x])]
    fn a(_: a::Context) -> a::LateResources {
        a::LateResources { x: 0 }
    }

    // core #0's `idle` consumes `y`, which is initialized by the *other* core
    #[idle(core = 0, resources = [y])]
    fn b(_: b::Context) -> ! {
        loop {}
    }

    // core #1's `init` provides every late resource not claimed by core #0
    #[init(core = 1)]
    fn c(_: c::Context) -> c::LateResources {
        c::LateResources { y: 0 }
    }

    // core #1's `idle` consumes `x`, initialized by core #0
    #[idle(core = 1, resources = [x])]
    fn d(_: d::Context) -> ! {
        loop {}
    }
};

View file

@ -0,0 +1,26 @@
//! [compile-pass] Split initialization of late resources

#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = homogeneous)]
const APP: () = {
    struct Resources {
        x: u32,
        y: u32,
    }

    // core #0 initializes only `x` (listed in `late = [x]`) ...
    #[init(core = 0, late = [x])]
    fn a(_: a::Context) -> a::LateResources {
        a::LateResources { x: 0 }
    }

    // ... core #1 initializes the remaining late resources (`y`)
    #[init(core = 1)]
    fn b(_: b::Context) -> b::LateResources {
        b::LateResources { y: 0 }
    }
};

View file

@ -0,0 +1,36 @@
// [compile-pass] cross-core `schedule`: tasks on different cores schedule each
// other using the `monotonic` timer declared in the `#[app]` attribute
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = homogeneous, monotonic = homogeneous::MT)]
const APP: () = {
    #[init(core = 0, spawn = [ping])]
    fn init(c: init::Context) {
        c.spawn.ping().ok();
    }

    // `pong` (core #0) schedules `ping` (core #1) and vice versa
    #[task(core = 0, schedule = [ping])]
    fn pong(c: pong::Context) {
        c.schedule.ping(c.scheduled + 1_000_000).ok();
    }

    #[task(core = 1, schedule = [pong])]
    fn ping(c: ping::Context) {
        c.schedule.pong(c.scheduled + 1_000_000).ok();
    }

    // interrupts donated to the runtime for task dispatch; each core donates
    // two of its own, so the same interrupt name may appear once per core
    extern "C" {
        #[core = 0]
        fn I0();

        #[core = 0]
        fn I1();

        #[core = 1]
        fn I0();

        #[core = 1]
        fn I1();
    }
};

View file

@ -0,0 +1,20 @@
// [compile-pass] cross-core `spawn`: core #0 spawns a task that runs on core #1
#![no_main]
#![no_std]

use panic_halt as _;

#[rtfm::app(cores = 2, device = homogeneous)]
const APP: () = {
    #[init(core = 0, spawn = [foo])]
    fn init(c: init::Context) {
        c.spawn.foo().ok();
    }

    #[task(core = 1)]
    fn foo(_: foo::Context) {}

    // interrupt donated by core #1 to dispatch its software tasks
    extern "C" {
        #[core = 1]
        fn I0();
    }
};

99
homogeneous/src/lib.rs Normal file
View file

@ -0,0 +1,99 @@
//! Fake multi-core PAC
#![no_std]
use core::{
cmp::Ordering,
ops::{Add, Sub},
};
use bare_metal::Nr;
use rtfm::{Fraction, Monotonic, MultiCore};
// both cores have the exact same interrupts
pub use Interrupt_0 as Interrupt_1;
// Fake priority bits: pretend the device implements 3 NVIC priority bits
pub const NVIC_PRIO_BITS: u8 = 3;

/// Stub of the cross-core "pend interrupt" primitive: a real multi-core PAC
/// would set `_interrupt` pending on core `_core`; this fake PAC (used only
/// for compile tests) does nothing
pub fn xpend(_core: u8, _interrupt: impl Nr) {}
/// Fake monotonic timer
///
/// Backed by volatile reads/writes of the counter at `0xE000_1004`
/// (NOTE(review): presumably the Cortex-M DWT->CYCCNT register — confirm;
/// this crate is only ever compiled, never run)
pub struct MT;

impl Monotonic for MT {
    type Instant = Instant;

    // one timer tick per system clock cycle
    fn ratio() -> Fraction {
        Fraction {
            numerator: 1,
            denominator: 1,
        }
    }

    // zero the counter; `unsafe` because the caller must guarantee exclusive
    // access to the counter register
    unsafe fn reset() {
        (0xE0001004 as *mut u32).write_volatile(0)
    }

    // volatile read of the current counter value
    fn now() -> Instant {
        unsafe { Instant((0xE0001004 as *const u32).read_volatile() as i32) }
    }

    // the instant that corresponds to a freshly reset counter
    fn zero() -> Instant {
        Instant(0)
    }
}

// mark the timer as usable from more than one core
impl MultiCore for MT {}
/// A point in time of the fake monotonic timer, stored as a signed tick count.
///
/// Ordering uses wrapping arithmetic so comparisons keep working after the
/// counter overflows, as long as two instants are less than `2^31` ticks apart.
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct Instant(i32);

impl Add<u32> for Instant {
    type Output = Instant;

    /// Advances the instant by `duration` ticks, wrapping on overflow.
    fn add(self, duration: u32) -> Self {
        let ticks = duration as i32;
        Instant(self.0.wrapping_add(ticks))
    }
}

impl Sub for Instant {
    type Output = u32;

    /// Returns the distance in ticks between two instants.
    ///
    /// Panics (checked subtraction) if `earlier` does not precede `self`.
    fn sub(self, earlier: Self) -> u32 {
        let diff = self.0.checked_sub(earlier.0).unwrap();
        diff as u32
    }
}

impl Ord for Instant {
    /// Wrapping comparison: the sign of the wrapped difference decides the order.
    fn cmp(&self, other: &Self) -> Ordering {
        let diff = self.0.wrapping_sub(other.0);
        if diff < 0 {
            Ordering::Less
        } else if diff == 0 {
            Ordering::Equal
        } else {
            Ordering::Greater
        }
    }
}

impl PartialOrd for Instant {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
// Fake interrupts
//
// Eight device interrupts (`I0`..`I7`); the explicit discriminant doubles as
// the interrupt number returned by `Nr::nr`
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum Interrupt_0 {
    I0 = 0,
    I1 = 1,
    I2 = 2,
    I3 = 3,
    I4 = 4,
    I5 = 5,
    I6 = 6,
    I7 = 7,
}

// `Nr` is an unsafe trait: `nr` must return the correct interrupt number.
// `repr(u8)` plus the explicit discriminants above make the cast correct.
unsafe impl Nr for Interrupt_0 {
    fn nr(&self) -> u8 {
        *self as u8
    }
}

View file

@ -15,12 +15,13 @@ version = "0.5.0-alpha.1"
proc-macro = true proc-macro = true
[dependencies] [dependencies]
quote = "0.6.10" proc-macro2 = "1"
proc-macro2 = "0.4.24" quote = "1"
syn = "1"
[dependencies.syn] [dependencies.rtfm-syntax]
features = ["extra-traits", "full"] git = "https://github.com/japaric/rtfm-syntax"
version = "0.15.23"
[features] [features]
timer-queue = [] heterogeneous = []
homogeneous = []

View file

@ -1,265 +1,59 @@
use std::{ use core::ops;
cmp, use std::collections::{BTreeMap, BTreeSet};
collections::{BTreeMap, HashMap, HashSet},
use rtfm_syntax::{
analyze::{self, Priority},
ast::App,
Core, P,
}; };
use syn::Ident;
use syn::{Attribute, Ident, Type}; /// Extend the upstream `Analysis` struct with our field
use crate::syntax::{App, Idents};
pub type Ownerships = HashMap<Ident, Ownership>;
pub struct Analysis { pub struct Analysis {
/// Capacities of free queues parent: P<analyze::Analysis>,
pub capacities: Capacities, pub interrupts: BTreeMap<Core, BTreeMap<Priority, Ident>>,
pub dispatchers: Dispatchers,
// Ceilings of free queues
pub free_queues: HashMap<Ident, u8>,
pub resources_assert_send: HashSet<Box<Type>>,
pub tasks_assert_send: HashSet<Ident>,
/// Types of RO resources that need to be Sync
pub assert_sync: HashSet<Box<Type>>,
// Resource ownership
pub ownerships: Ownerships,
// Ceilings of ready queues
pub ready_queues: HashMap<u8, u8>,
pub timer_queue: TimerQueue,
} }
#[derive(Clone, Copy, PartialEq)] impl ops::Deref for Analysis {
pub enum Ownership { type Target = analyze::Analysis;
// NOTE priorities and ceilings are "logical" (0 = lowest priority, 255 = highest priority)
Owned { priority: u8 },
CoOwned { priority: u8 },
Shared { ceiling: u8 },
}
impl Ownership { fn deref(&self) -> &Self::Target {
pub fn needs_lock(&self, priority: u8) -> bool { &self.parent
match *self {
Ownership::Owned { .. } | Ownership::CoOwned { .. } => false,
Ownership::Shared { ceiling } => {
debug_assert!(ceiling >= priority);
priority < ceiling
}
}
}
pub fn is_owned(&self) -> bool {
match *self {
Ownership::Owned { .. } => true,
_ => false,
}
} }
} }
pub struct Dispatcher { // Assign an `extern` interrupt to each priority level
/// Attributes to apply to the dispatcher pub fn app(analysis: P<analyze::Analysis>, app: &App) -> P<Analysis> {
pub attrs: Vec<Attribute>, let mut interrupts = BTreeMap::new();
pub interrupt: Ident, for core in 0..app.args.cores {
/// Tasks dispatched at this priority level let priorities = app
pub tasks: Vec<Ident>, .software_tasks
// Queue capacity .values()
pub capacity: u8, .filter_map(|task| {
} if task.args.core == core {
Some(task.args.priority)
/// Priority -> Dispatcher } else {
pub type Dispatchers = BTreeMap<u8, Dispatcher>; None
pub type Capacities = HashMap<Ident, u8>;
pub fn app(app: &App) -> Analysis {
// Ceiling analysis of R/W resource and Sync analysis of RO resources
// (Resource shared by tasks that run at different priorities need to be `Sync`)
let mut ownerships = Ownerships::new();
let mut resources_assert_send = HashSet::new();
let mut tasks_assert_send = HashSet::new();
let mut assert_sync = HashSet::new();
for (priority, res) in app.resource_accesses() {
if let Some(ownership) = ownerships.get_mut(res) {
match *ownership {
Ownership::Owned { priority: ceiling }
| Ownership::CoOwned { priority: ceiling }
| Ownership::Shared { ceiling }
if priority != ceiling =>
{
*ownership = Ownership::Shared {
ceiling: cmp::max(ceiling, priority),
};
let res = &app.resources[res];
if res.mutability.is_none() {
assert_sync.insert(res.ty.clone());
}
} }
Ownership::Owned { priority: ceiling } if ceiling == priority => { })
*ownership = Ownership::CoOwned { priority }; .chain(analysis.timer_queues.get(&core).map(|tq| tq.priority))
} .collect::<BTreeSet<_>>();
_ => {}
}
continue; if !priorities.is_empty() {
} interrupts.insert(
core,
ownerships.insert(res.clone(), Ownership::Owned { priority }); priorities
} .iter()
.cloned()
// Compute sizes of free queues .rev()
// We assume at most one message per `spawn` / `schedule` .zip(app.extern_interrupts[&core].keys().cloned())
let mut capacities: Capacities = app.tasks.keys().map(|task| (task.clone(), 0)).collect(); .collect(),
for (_, task) in app.spawn_calls().chain(app.schedule_calls()) { );
*capacities.get_mut(task).expect("BUG: capacities.get_mut") += 1;
}
// Override computed capacities if user specified a capacity in `#[task]`
for (name, task) in &app.tasks {
if let Some(cap) = task.args.capacity {
*capacities.get_mut(name).expect("BUG: capacities.get_mut") = cap;
} }
} }
// Compute the size of the timer queue P::new(Analysis {
// Compute the priority of the timer queue, which matches the priority of the highest parent: analysis,
// `schedule`-able task interrupts,
let mut tq_capacity = 0; })
let mut tq_priority = 1;
let mut tq_tasks = Idents::new();
for (_, task) in app.schedule_calls() {
tq_capacity += capacities[task];
tq_priority = cmp::max(tq_priority, app.tasks[task].args.priority);
tq_tasks.insert(task.clone());
}
// Compute dispatchers capacities
// Determine which tasks are dispatched by which dispatcher
// Compute the timer queue priority which matches the priority of the highest priority
// dispatcher
let mut dispatchers = Dispatchers::new();
let mut free_interrupts = app.free_interrupts.iter();
let mut tasks = app.tasks.iter().collect::<Vec<_>>();
tasks.sort_by(|l, r| l.1.args.priority.cmp(&r.1.args.priority));
for (name, task) in tasks {
let dispatcher = dispatchers.entry(task.args.priority).or_insert_with(|| {
let (name, fi) = free_interrupts
.next()
.expect("BUG: not enough free_interrupts");
Dispatcher {
attrs: fi.attrs.clone(),
capacity: 0,
interrupt: name.clone(),
tasks: vec![],
}
});
dispatcher.capacity += capacities[name];
dispatcher.tasks.push(name.clone());
}
// All messages sent from `init` need to be `Send`
for task in app.init.args.spawn.iter().chain(&app.init.args.schedule) {
tasks_assert_send.insert(task.clone());
}
// All late resources need to be `Send`, unless they are owned by `idle`
for (name, res) in &app.resources {
let owned_by_idle = Ownership::Owned { priority: 0 };
if res.expr.is_none()
&& ownerships
.get(name)
.map(|ship| *ship != owned_by_idle)
.unwrap_or(false)
{
resources_assert_send.insert(res.ty.clone());
}
}
// All resources shared with init need to be `Send`, unless they are owned by `idle`
// This is equivalent to late initialization (e.g. `static mut LATE: Option<T> = None`)
for name in &app.init.args.resources {
let owned_by_idle = Ownership::Owned { priority: 0 };
if ownerships
.get(name)
.map(|ship| *ship != owned_by_idle)
.unwrap_or(false)
{
resources_assert_send.insert(app.resources[name].ty.clone());
}
}
// Ceiling analysis of free queues (consumer end point) -- first pass
// Ceiling analysis of ready queues (producer end point) -- first pass
// Also compute more Send-ness requirements
let mut free_queues = HashMap::new();
let mut ready_queues = HashMap::new();
for (priority, task) in app.spawn_calls() {
if let Some(priority) = priority {
// Users of `spawn` contend for the spawnee FREE_QUEUE
let c = free_queues.entry(task.clone()).or_default();
*c = cmp::max(*c, priority);
// Users of `spawn` contend for the spawnee's dispatcher READY_QUEUE
let c = ready_queues
.entry(app.tasks[task].args.priority)
.or_default();
*c = cmp::max(*c, priority);
// Send is required when sending messages from a task whose priority doesn't match the
// priority of the receiving task
if app.tasks[task].args.priority != priority {
tasks_assert_send.insert(task.clone());
}
} else {
// spawns from `init` are excluded from the ceiling analysis
}
}
// Ceiling analysis of ready queues (producer end point) -- second pass
// Ceiling analysis of free queues (consumer end point) -- second pass
// Ceiling analysis of the timer queue
let mut tq_ceiling = tq_priority;
for (priority, task) in app.schedule_calls() {
// the system timer handler contends for the spawnee's dispatcher READY_QUEUE
let c = ready_queues
.entry(app.tasks[task].args.priority)
.or_default();
*c = cmp::max(*c, tq_priority);
if let Some(priority) = priority {
// Users of `schedule` contend for the spawnee task FREE_QUEUE
let c = free_queues.entry(task.clone()).or_default();
*c = cmp::max(*c, priority);
// Users of `schedule` contend for the timer queue
tq_ceiling = cmp::max(tq_ceiling, priority);
} else {
// spawns from `init` are excluded from the ceiling analysis
}
}
Analysis {
capacities,
dispatchers,
free_queues,
tasks_assert_send,
resources_assert_send,
assert_sync,
ownerships,
ready_queues,
timer_queue: TimerQueue {
capacity: tq_capacity,
ceiling: tq_ceiling,
priority: tq_priority,
tasks: tq_tasks,
},
}
}
pub struct TimerQueue {
pub capacity: u8,
pub ceiling: u8,
pub priority: u8,
pub tasks: Idents,
} }

View file

@ -1,122 +1,225 @@
use std::{collections::HashSet, iter}; use std::collections::HashSet;
use proc_macro2::Span; use proc_macro2::Span;
use syn::parse; use rtfm_syntax::{
analyze::Analysis,
ast::{App, CustomArg},
};
use syn::{parse, Path};
use crate::syntax::App; pub struct Extra<'a> {
pub device: &'a Path,
pub monotonic: Option<&'a Path>,
pub peripherals: Option<u8>,
}
pub fn app(app: &App) -> parse::Result<()> { impl<'a> Extra<'a> {
// Check that all referenced resources have been declared pub fn monotonic(&self) -> &'a Path {
for res in app self.monotonic.expect("UNREACHABLE")
.idle }
.as_ref() }
.map(|idle| -> Box<dyn Iterator<Item = _>> { Box::new(idle.args.resources.iter()) })
.unwrap_or_else(|| Box::new(iter::empty())) pub fn app<'a>(app: &'a App, analysis: &Analysis) -> parse::Result<Extra<'a>> {
.chain(&app.init.args.resources) if cfg!(feature = "homogeneous") {
.chain(app.exceptions.values().flat_map(|e| &e.args.resources)) // this RTFM mode uses the same namespace for all cores so we need to check that the
.chain(app.interrupts.values().flat_map(|i| &i.args.resources)) // identifiers used for each core `#[init]` and `#[idle]` functions don't collide
.chain(app.tasks.values().flat_map(|t| &t.args.resources)) let mut seen = HashSet::new();
for name in app
.inits
.values()
.map(|init| &init.name)
.chain(app.idles.values().map(|idle| &idle.name))
{
if seen.contains(name) {
return Err(parse::Error::new(
name.span(),
"this identifier is already being used by another core",
));
} else {
seen.insert(name);
}
}
}
// check that all exceptions are valid; only exceptions with configurable priorities are
// accepted
for (name, task) in &app.hardware_tasks {
let name_s = task.args.binds.to_string();
match &*name_s {
"SysTick" => {
if analysis.timer_queues.get(&task.args.core).is_some() {
return Err(parse::Error::new(
name.span(),
"this exception can't be used because it's being used by the runtime",
));
} else {
// OK
}
}
"NonMaskableInt" | "HardFault" => {
return Err(parse::Error::new(
name.span(),
"only exceptions with configurable priority can be used as hardware tasks",
));
}
_ => {}
}
}
// check that external (device-specific) interrupts are not named after known (Cortex-M)
// exceptions
for name in app
.extern_interrupts
.iter()
.flat_map(|(_, interrupts)| interrupts.keys())
{ {
if !app.resources.contains_key(res) { let name_s = name.to_string();
return Err(parse::Error::new(
res.span(), match &*name_s {
"this resource has NOT been declared", "NonMaskableInt" | "HardFault" | "MemoryManagement" | "BusFault" | "UsageFault"
)); | "SecureFault" | "SVCall" | "DebugMonitor" | "PendSV" | "SysTick" => {
return Err(parse::Error::new(
name.span(),
"Cortex-M exceptions can't be used as `extern` interrupts",
));
}
_ => {}
} }
} }
// Check that late resources have not been assigned to `init` // check that there are enough external interrupts to dispatch the software tasks and the timer
for res in &app.init.args.resources { // queue handler
if app.resources.get(res).unwrap().expr.is_none() { for core in 0..app.args.cores {
return Err(parse::Error::new( let mut first = None;
res.span(), let priorities = app
"late resources can NOT be assigned to `init`", .software_tasks
)); .iter()
.filter_map(|(name, task)| {
if task.args.core == core {
first = Some(name);
Some(task.args.priority)
} else {
None
}
})
.chain(analysis.timer_queues.get(&core).map(|tq| tq.priority))
.collect::<HashSet<_>>();
let need = priorities.len();
let given = app
.extern_interrupts
.get(&core)
.map(|ei| ei.len())
.unwrap_or(0);
if need > given {
let s = if app.args.cores == 1 {
format!(
"not enough `extern` interrupts to dispatch \
all software tasks (need: {}; given: {})",
need, given
)
} else {
format!(
"not enough `extern` interrupts to dispatch \
all software tasks on this core (need: {}; given: {})",
need, given
)
};
return Err(parse::Error::new(first.unwrap().span(), &s));
} }
} }
if app.resources.iter().any(|(_, res)| res.expr.is_none()) { let mut device = None;
// Check that `init` returns `LateResources` if there's any declared late resource let mut monotonic = None;
if !app.init.returns_late_resources { let mut peripherals = None;
return Err(parse::Error::new(
app.init.span, for (k, v) in &app.args.custom {
"late resources have been specified so `init` must return `init::LateResources`", let ks = k.to_string();
));
} match &*ks {
} else if app.init.returns_late_resources { "device" => match v {
// If there are no late resources the signature should be `fn(init::Context)` CustomArg::Path(p) => device = Some(p),
if app.init.returns_late_resources {
return Err(parse::Error::new( _ => {
app.init.span, return Err(parse::Error::new(
"`init` signature must be `fn(init::Context)` if there are no late resources", k.span(),
)); "unexpected argument value; this should be a path",
));
}
},
"monotonic" => match v {
CustomArg::Path(p) => monotonic = Some(p),
_ => {
return Err(parse::Error::new(
k.span(),
"unexpected argument value; this should be a path",
));
}
},
"peripherals" => match v {
CustomArg::Bool(x) if app.args.cores == 1 => {
peripherals = if *x { Some(0) } else { None }
}
CustomArg::UInt(s) if app.args.cores != 1 => {
let x = s.parse::<u8>().ok();
peripherals = if x.is_some() && x.unwrap() < app.args.cores {
Some(x.unwrap())
} else {
return Err(parse::Error::new(
k.span(),
&format!(
"unexpected argument value; \
this should be an integer in the range 0..={}",
app.args.cores
),
));
}
}
_ => {
return Err(parse::Error::new(
k.span(),
if app.args.cores == 1 {
"unexpected argument value; this should be a boolean"
} else {
"unexpected argument value; this should be an integer"
},
));
}
},
_ => {
return Err(parse::Error::new(k.span(), "unexpected argument"));
}
} }
} }
// Check that all referenced tasks have been declared if !analysis.timer_queues.is_empty() && monotonic.is_none() {
for task in app
.idle
.as_ref()
.map(|idle| -> Box<dyn Iterator<Item = _>> {
Box::new(idle.args.schedule.iter().chain(&idle.args.spawn))
})
.unwrap_or_else(|| Box::new(iter::empty()))
.chain(&app.init.args.schedule)
.chain(&app.init.args.spawn)
.chain(
app.exceptions
.values()
.flat_map(|e| e.args.schedule.iter().chain(&e.args.spawn)),
)
.chain(
app.interrupts
.values()
.flat_map(|i| i.args.schedule.iter().chain(&i.args.spawn)),
)
.chain(
app.tasks
.values()
.flat_map(|t| t.args.schedule.iter().chain(&t.args.spawn)),
)
{
if !app.tasks.contains_key(task) {
return Err(parse::Error::new(
task.span(),
"this task has NOT been declared",
));
}
}
// Check that there are enough free interrupts to dispatch all tasks
let ndispatchers = app
.tasks
.values()
.map(|t| t.args.priority)
.collect::<HashSet<_>>()
.len();
if ndispatchers > app.free_interrupts.len() {
return Err(parse::Error::new( return Err(parse::Error::new(
Span::call_site(), Span::call_site(),
&*format!( "a `monotonic` timer must be specified to use the `schedule` API",
"{} free interrupt{} (`extern {{ .. }}`) {} required to dispatch all soft tasks",
ndispatchers,
if ndispatchers > 1 { "s" } else { "" },
if ndispatchers > 1 { "are" } else { "is" },
),
)); ));
} }
// Check that free interrupts are not being used if let Some(device) = device {
for (handler, interrupt) in &app.interrupts { Ok(Extra {
let name = interrupt.args.binds(handler); device,
monotonic,
if app.free_interrupts.contains_key(name) { peripherals,
return Err(parse::Error::new( })
name.span(), } else {
"free interrupts (`extern { .. }`) can't be used as interrupt handlers", Err(parse::Error::new(
)); Span::call_site(),
} "a `device` argument must be specified in `#[rtfm::app]`",
))
} }
Ok(())
} }

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,33 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use crate::{analyze::Analysis, check::Extra};
/// Generates the compile-time assertions that check that types implement the
/// `Send` / `Sync` traits, for a single core
pub fn codegen(core: u8, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream2> {
    let mut assertions = vec![];

    // assertions are generated per core, not once for all cores, because the user
    // could conditionally import a type only on some core
    // (e.g. `#[cfg(core = "0")] use some::Type;`)
    if let Some(send_types) = analysis.send_types.get(&core) {
        assertions.extend(
            send_types
                .iter()
                .map(|ty| quote!(rtfm::export::assert_send::<#ty>();)),
        );
    }

    if let Some(sync_types) = analysis.sync_types.get(&core) {
        assertions.extend(
            sync_types
                .iter()
                .map(|ty| quote!(rtfm::export::assert_sync::<#ty>();)),
        );
    }

    // if the `schedule` API is used in more than one core then we need to check
    // that the `monotonic` timer can be used in multi-core context
    let schedules_here = analysis.timer_queues.contains_key(&core);
    if schedules_here && analysis.timer_queues.len() > 1 {
        let monotonic = extra.monotonic();
        assertions.push(quote!(rtfm::export::assert_multicore::<#monotonic>();));
    }

    assertions
}

View file

@ -0,0 +1,189 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::ast::App;
use crate::{analyze::Analysis, check::Extra, codegen::util};
/// Generates task dispatchers
///
/// One interrupt handler is generated per (receiver core, dispatch priority);
/// it drains the ready queue(s) filled by `spawn` / the timer queue handler and
/// calls the corresponding task functions.
pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream2> {
    let mut items = vec![];

    for (&receiver, dispatchers) in &analysis.channels {
        // `extern` interrupts assigned to this core's dispatch priorities
        let interrupts = &analysis.interrupts[&receiver];

        for (&level, channels) in dispatchers {
            let mut stmts = vec![];

            // one channel (task enum + ready queue) per sender core
            for (&sender, channel) in channels {
                let cfg_sender = util::cfg_core(sender, app.args.cores);

                // enum variants: one per task dispatched over this channel,
                // preserving the task's `#[cfg]` attributes
                let variants = channel
                    .tasks
                    .iter()
                    .map(|name| {
                        let cfgs = &app.software_tasks[name].cfgs;

                        quote!(
                            #(#cfgs)*
                            #name
                        )
                    })
                    .collect::<Vec<_>>();

                let doc = format!(
                    "Software tasks spawned from core #{} to be dispatched at priority level {} by core #{}",
                    sender, level, receiver,
                );
                let t = util::spawn_t_ident(receiver, level, sender);
                items.push(quote!(
                    #[allow(non_camel_case_types)]
                    #[derive(Clone, Copy)]
                    #[doc = #doc]
                    enum #t {
                        #(#variants,)*
                    }
                ));

                let n = util::capacity_typenum(channel.capacity, true);
                let rq = util::rq_ident(receiver, level, sender);
                // same-core channels use the single-core queue (`SCRQ`) placed in
                // that core's `.bss`; cross-core channels use the multi-core queue
                // (`MCRQ`), placed in shared memory on heterogeneous targets
                let (rq_attr, rq_ty, rq_expr, section) = if sender == receiver {
                    (
                        cfg_sender.clone(),
                        quote!(rtfm::export::SCRQ<#t, #n>),
                        quote!(rtfm::export::Queue(unsafe {
                            rtfm::export::iQueue::u8_sc()
                        })),
                        util::link_section("bss", sender),
                    )
                } else {
                    let shared = if cfg!(feature = "heterogeneous") {
                        Some(quote!(#[rtfm::export::shared]))
                    } else {
                        None
                    };

                    (
                        shared,
                        quote!(rtfm::export::MCRQ<#t, #n>),
                        quote!(rtfm::export::Queue(rtfm::export::iQueue::u8())),
                        None,
                    )
                };

                let doc = format!(
                    "Queue of tasks sent by core #{} ready to be dispatched by core #{} at priority level {}",
                    sender,
                    receiver,
                    level
                );
                items.push(quote!(
                    #[doc = #doc]
                    #rq_attr
                    #section
                    static mut #rq: #rq_ty = #rq_expr;
                ));

                // if the producer side is contended, generate a resource proxy
                // (`Mutex` impl) so `spawn` can lock the ready queue
                if let Some(ceiling) = channel.ceiling {
                    items.push(quote!(
                        #cfg_sender
                        struct #rq<'a> {
                            priority: &'a rtfm::export::Priority,
                        }
                    ));

                    items.push(util::impl_mutex(
                        extra,
                        &[],
                        cfg_sender.as_ref(),
                        false,
                        &rq,
                        rq_ty,
                        ceiling,
                        quote!(&mut #rq),
                    ));
                }

                // match arms: pop the task's inputs (and `Instant`, when the
                // `schedule` API is in use), return the slot to the free queue,
                // then call the task function
                let arms = channel
                    .tasks
                    .iter()
                    .map(|name| {
                        let task = &app.software_tasks[name];
                        let cfgs = &task.cfgs;
                        let fq = util::fq_ident(name, sender);
                        let inputs = util::inputs_ident(name, sender);
                        let (_, tupled, pats, _) = util::regroup_inputs(&task.inputs);

                        let (let_instant, instant) = if app.uses_schedule(receiver) {
                            let instants = util::instants_ident(name, sender);

                            (
                                quote!(
                                    let instant =
                                        #instants.get_unchecked(usize::from(index)).as_ptr().read();
                                ),
                                quote!(, instant),
                            )
                        } else {
                            (quote!(), quote!())
                        };

                        let locals_new = if task.locals.is_empty() {
                            quote!()
                        } else {
                            quote!(#name::Locals::new(),)
                        };

                        quote!(
                            #(#cfgs)*
                            #t::#name => {
                                let #tupled =
                                    #inputs.get_unchecked(usize::from(index)).as_ptr().read();
                                #let_instant
                                #fq.split().0.enqueue_unchecked(index);
                                let priority = &rtfm::export::Priority::new(PRIORITY);
                                #name(
                                    #locals_new
                                    #name::Context::new(priority #instant)
                                    #(,#pats)*
                                )
                            }
                        )
                    })
                    .collect::<Vec<_>>();

                // drain this sender's ready queue
                stmts.push(quote!(
                    while let Some((task, index)) = #rq.split().1.dequeue() {
                        match task {
                            #(#arms)*
                        }
                    }
                ));
            }

            // the dispatcher itself: an interrupt handler running at `level`
            let doc = format!(
                "Interrupt handler used by core #{} to dispatch tasks at priority {}",
                receiver, level
            );
            let cfg_receiver = util::cfg_core(receiver, app.args.cores);
            let section = util::link_section("text", receiver);
            let interrupt = util::suffixed(&interrupts[&level].to_string(), receiver);
            items.push(quote!(
                #[allow(non_snake_case)]
                #[doc = #doc]
                #[no_mangle]
                #cfg_receiver
                #section
                unsafe fn #interrupt() {
                    /// The priority of this interrupt handler
                    const PRIORITY: u8 = #level;

                    rtfm::export::run(PRIORITY, || {
                        #(#stmts)*
                    });
                }
            ));
        }
    }

    items
}

View file

@ -0,0 +1,132 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use crate::{
analyze::Analysis,
check::Extra,
codegen::{locals, module, resources_struct, util},
};
/// Generate support code for hardware tasks (`#[exception]`s and `#[interrupt]`s)
pub fn codegen(
    app: &App,
    analysis: &Analysis,
    extra: &Extra,
) -> (
    // const_app_hardware_tasks -- interrupt handlers and `${task}Resources` constructors
    Vec<TokenStream2>,
    // root_hardware_tasks -- items that must be placed in the root of the crate:
    // - `${task}Locals` structs
    // - `${task}Resources` structs
    // - `${task}` modules
    Vec<TokenStream2>,
    // user_hardware_tasks -- the `#[task]` functions written by the user
    Vec<TokenStream2>,
) {
    let mut const_app = vec![];
    let mut root = vec![];
    let mut user_tasks = vec![];

    for (name, task) in &app.hardware_tasks {
        let core = task.args.core;
        let cfg_core = util::cfg_core(core, app.args.cores);

        // when the `schedule` API is used on this core, capture the arrival
        // `Instant` before running the task
        let (let_instant, instant) = if app.uses_schedule(core) {
            let m = extra.monotonic();

            (
                Some(quote!(let instant = <#m as rtfm::Monotonic>::now();)),
                Some(quote!(, instant)),
            )
        } else {
            (None, None)
        };

        let locals_new = if task.locals.is_empty() {
            quote!()
        } else {
            quote!(#name::Locals::new(),)
        };

        // NOTE(review): on homogeneous multi-core builds the handler symbol is
        // suffixed with the core number — presumably to keep the per-core
        // symbols distinct; confirm against `util::suffixed`
        let symbol = if cfg!(feature = "homogeneous") {
            util::suffixed(&task.args.binds.to_string(), core)
        } else {
            task.args.binds.clone()
        };
        let priority = task.args.priority;
        let section = util::link_section("text", core);
        // the actual interrupt/exception handler: runs the user function at
        // the task's static priority
        const_app.push(quote!(
            #[allow(non_snake_case)]
            #[no_mangle]
            #section
            #cfg_core
            unsafe fn #symbol() {
                const PRIORITY: u8 = #priority;

                #let_instant

                rtfm::export::run(PRIORITY, || {
                    crate::#name(
                        #locals_new
                        #name::Context::new(&rtfm::export::Priority::new(PRIORITY) #instant)
                    )
                });
            }
        ));

        let mut needs_lt = false;

        // `${task}Resources`
        if !task.args.resources.is_empty() {
            let (item, constructor) = resources_struct::codegen(
                Context::HardwareTask(name),
                priority,
                &mut needs_lt,
                app,
                analysis,
            );

            root.push(item);
            const_app.push(constructor);
        }

        root.push(module::codegen(
            Context::HardwareTask(name),
            needs_lt,
            app,
            extra,
        ));

        // `${task}Locals`
        let mut locals_pat = None;
        if !task.locals.is_empty() {
            let (struct_, pat) =
                locals::codegen(Context::HardwareTask(name), &task.locals, core, app);

            root.push(struct_);
            locals_pat = Some(pat);
        }

        let attrs = &task.attrs;
        let context = &task.context;
        let stmts = &task.stmts;
        let section = util::link_section("text", core);
        // XXX shouldn't this have a cfg_core?
        let locals_pat = locals_pat.iter();
        // re-emit the user-written task function with its original attributes
        user_tasks.push(quote!(
            #(#attrs)*
            #[allow(non_snake_case)]
            #section
            fn #name(#(#locals_pat,)* #context: #name::Context) {
                use rtfm::Mutex as _;

                #(#stmts)*
            }
        ));
    }

    (const_app, root, user_tasks)
}

View file

@ -0,0 +1,91 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use crate::{
analyze::Analysis,
check::Extra,
codegen::{locals, module, resources_struct, util},
};
/// Generates support code for `#[idle]` functions
///
/// When the core has no `#[idle]` function a wait-for-interrupt loop is
/// generated instead (see the `else` branch at the bottom).
pub fn codegen(
    core: u8,
    app: &App,
    analysis: &Analysis,
    extra: &Extra,
) -> (
    // const_app_idle -- the `${idle}Resources` constructor
    Option<TokenStream2>,
    // root_idle -- items that must be placed in the root of the crate:
    // - the `${idle}Locals` struct
    // - the `${idle}Resources` struct
    // - the `${idle}` module, which contains types like `${idle}::Context`
    Vec<TokenStream2>,
    // user_idle
    Option<TokenStream2>,
    // call_idle
    TokenStream2,
) {
    if let Some(idle) = app.idles.get(&core) {
        let mut needs_lt = false;
        let mut const_app = None;
        let mut root_idle = vec![];
        let mut locals_pat = None;
        let mut locals_new = None;

        // `${idle}Resources` -- note that `idle` runs at logical priority `0`
        if !idle.args.resources.is_empty() {
            let (item, constructor) =
                resources_struct::codegen(Context::Idle(core), 0, &mut needs_lt, app, analysis);

            root_idle.push(item);
            const_app = Some(constructor);
        }

        // `${idle}Locals`
        let name = &idle.name;
        if !idle.locals.is_empty() {
            let (locals, pat) = locals::codegen(Context::Idle(core), &idle.locals, core, app);

            locals_new = Some(quote!(#name::Locals::new()));
            locals_pat = Some(pat);
            root_idle.push(locals);
        }

        root_idle.push(module::codegen(Context::Idle(core), needs_lt, app, extra));

        // re-emit the user-written `idle` function with its original attributes
        let cfg_core = util::cfg_core(core, app.args.cores);
        let attrs = &idle.attrs;
        let context = &idle.context;
        let stmts = &idle.stmts;
        let section = util::link_section("text", core);
        let locals_pat = locals_pat.iter();
        let user_idle = Some(quote!(
            #(#attrs)*
            #[allow(non_snake_case)]
            #cfg_core
            #section
            fn #name(#(#locals_pat,)* #context: #name::Context) -> ! {
                use rtfm::Mutex as _;

                #(#stmts)*
            }
        ));

        // the expression `main` uses to enter `idle`
        let locals_new = locals_new.iter();
        let call_idle = quote!(#name(
            #(#locals_new,)*
            #name::Context::new(&rtfm::export::Priority::new(0))
        ));

        (const_app, root_idle, user_idle, call_idle)
    } else {
        // no `#[idle]` on this core: sleep (wait-for-interrupt) forever
        (
            None,
            vec![],
            None,
            quote!(loop {
                rtfm::export::wfi()
            }),
        )
    }
}

116
macros/src/codegen/init.rs Normal file
View file

@ -0,0 +1,116 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use crate::{
analyze::Analysis,
check::Extra,
codegen::{locals, module, resources_struct, util},
};
/// Generates support code for `#[init]` functions
///
/// Returns a 4-tuple (see the inline comments on the return type). All four
/// elements are `None`/empty when core `core` has no `#[init]` function.
pub fn codegen(
    core: u8,
    app: &App,
    analysis: &Analysis,
    extra: &Extra,
) -> (
    // const_app_init -- the `${init}Resources` constructor
    Option<TokenStream2>,
    // root_init -- items that must be placed in the root of the crate:
    // - the `${init}Locals` struct
    // - the `${init}Resources` struct
    // - the `${init}LateResources` struct
    // - the `${init}` module, which contains types like `${init}::Context`
    Vec<TokenStream2>,
    // user_init -- the `#[init]` function written by the user
    Option<TokenStream2>,
    // call_init -- the call to the user `#[init]` if there's one
    Option<TokenStream2>,
) {
    if let Some(init) = app.inits.get(&core) {
        let cfg_core = util::cfg_core(core, app.args.cores);
        let mut needs_lt = false;
        let name = &init.name;

        let mut root_init = vec![];

        // `-> ${init}::LateResources` return type, plus the `${init}LateResources`
        // struct, but only if this core initializes at least one late resource
        let ret = {
            let late_fields = analysis
                .late_resources
                .get(&core)
                .map(|resources| {
                    resources
                        .iter()
                        .map(|name| {
                            let ty = &app.late_resources[name].ty;

                            quote!(pub #name: #ty)
                        })
                        .collect::<Vec<_>>()
                })
                // NOTE `unwrap_or_default` avoids the eager `vec![]` allocation
                // of `unwrap_or(vec![])` (clippy `or_fun_call`)
                .unwrap_or_default();

            if !late_fields.is_empty() {
                let late_resources = util::late_resources_ident(&name);

                root_init.push(quote!(
                    /// Resources initialized at runtime
                    #cfg_core
                    #[allow(non_snake_case)]
                    pub struct #late_resources {
                        #(#late_fields),*
                    }
                ));

                Some(quote!(-> #name::LateResources))
            } else {
                None
            }
        };

        // `${init}Locals` struct + the pattern used to destructure it in the
        // user function's signature (only if `init` declares task-local statics)
        let mut locals_pat = None;
        let mut locals_new = None;
        if !init.locals.is_empty() {
            let (struct_, pat) = locals::codegen(Context::Init(core), &init.locals, core, app);

            locals_new = Some(quote!(#name::Locals::new()));
            locals_pat = Some(pat);
            root_init.push(struct_);
        }

        let context = &init.context;
        let attrs = &init.attrs;
        let stmts = &init.stmts;
        let section = util::link_section("text", core);
        // `.iter()` turns the `Option` into a 0-or-1 element repetition for `quote!`
        let locals_pat = locals_pat.iter();
        let user_init = Some(quote!(
            #(#attrs)*
            #cfg_core
            #[allow(non_snake_case)]
            #section
            fn #name(#(#locals_pat,)* #context: #name::Context) #ret {
                #(#stmts)*
            }
        ));

        // `${init}Resources` struct + its constructor (only if `init` takes resources)
        let mut const_app = None;
        if !init.args.resources.is_empty() {
            let (item, constructor) =
                resources_struct::codegen(Context::Init(core), 0, &mut needs_lt, app, analysis);

            root_init.push(item);
            const_app = Some(constructor);
        }

        let locals_new = locals_new.iter();
        let call_init =
            Some(quote!(let late = #name(#(#locals_new,)* #name::Context::new(core.into()));));

        root_init.push(module::codegen(Context::Init(core), needs_lt, app, extra));

        (const_app, root_init, user_init, call_init)
    } else {
        (None, vec![], None, None)
    }
}

View file

@ -0,0 +1,101 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{
ast::{App, Local},
Context, Core, Map,
};
use crate::codegen::util;
/// Generates a `${ctxt}Locals` struct -- one `static mut` per task-local
/// variable -- plus the pattern used to destructure it in the user function's
/// signature
///
/// Returns `(locals, pat)` where `locals` is the struct definition (to be
/// placed in the crate root) and `pat` is the `${ctxt}::Locals { .. }` pattern.
pub fn codegen(
    ctxt: Context,
    locals: &Map<Local>,
    core: Core,
    app: &App,
) -> (
    // locals
    TokenStream2,
    // pat
    TokenStream2,
) {
    assert!(!locals.is_empty());

    let runs_once = ctxt.runs_once();
    let ident = util::locals_ident(ctxt, app);

    let mut lt = None;
    let mut fields = vec![];
    let mut items = vec![];
    let mut values = vec![];
    let mut pats = vec![];
    let mut has_cfgs = false;

    for (name, local) in locals {
        // contexts that run at most once can hand out `&'static mut`
        // references; all other contexts must use the shorter lifetime `'a`
        let lt = if runs_once {
            quote!('static)
        } else {
            lt = Some(quote!('a));
            quote!('a)
        };

        let cfgs = &local.cfgs;
        has_cfgs |= !cfgs.is_empty();

        // in the "heterogeneous" multi-core build, `shared` locals go in
        // `.shared` memory; otherwise use the core-specific `.data` section
        let section = if local.shared && cfg!(feature = "heterogeneous") {
            Some(quote!(#[rtfm::export::shared]))
        } else {
            util::link_section("data", core)
        };

        let expr = &local.expr;
        let ty = &local.ty;
        fields.push(quote!(
            #(#cfgs)*
            #name: &#lt mut #ty
        ));
        items.push(quote!(
            #(#cfgs)*
            #section
            static mut #name: #ty = #expr
        ));
        values.push(quote!(
            #(#cfgs)*
            #name: &mut #name
        ));
        pats.push(quote!(
            #(#cfgs)*
            #name
        ));
    }

    // if every field can be `cfg`-ed away the struct could end up with an
    // unused `'a` parameter, which is an error; add a `PhantomData` field to
    // keep the lifetime used
    if lt.is_some() && has_cfgs {
        fields.push(quote!(__marker__: core::marker::PhantomData<&'a mut ()>));
        values.push(quote!(__marker__: core::marker::PhantomData));
    }

    let locals = quote!(
        #[allow(non_snake_case)]
        #[doc(hidden)]
        pub struct #ident<#lt> {
            #(#fields),*
        }

        impl<#lt> #ident<#lt> {
            #[inline(always)]
            unsafe fn new() -> Self {
                #(#items;)*

                #ident {
                    #(#values),*
                }
            }
        }
    );

    let ident = ctxt.ident(app);
    (
        locals,
        quote!(#ident::Locals { #(#pats,)* .. }: #ident::Locals),
    )
}

View file

@ -0,0 +1,328 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use crate::{check::Extra, codegen::util};
/// Generates the `${ctxt}` module, which contains the types (`Context`,
/// `Resources`, `Locals`, `Spawn`, `Schedule`, `LateResources`) a user
/// function interacts with
///
/// `resources_tick` indicates whether the `Resources` struct is generic over
/// the `'a` lifetime. Returns an empty token stream when the module would
/// contain no items.
pub fn codegen(ctxt: Context, resources_tick: bool, app: &App, extra: &Extra) -> TokenStream2 {
    let mut items = vec![];
    let mut fields = vec![];
    let mut values = vec![];

    let name = ctxt.ident(app);
    let core = ctxt.core(app);

    let mut needs_instant = false;
    let mut lt = None;
    // context-specific `Context` fields
    match ctxt {
        Context::Init(core) => {
            if app.uses_schedule(core) {
                let m = extra.monotonic();

                fields.push(quote!(
                    /// System start time = `Instant(0 /* cycles */)`
                    pub start: <#m as rtfm::Monotonic>::Instant
                ));

                values.push(quote!(start: <#m as rtfm::Monotonic>::zero()));

                // the SysTick is reserved for the timer queue when `schedule` is used
                fields.push(quote!(
                    /// Core (Cortex-M) peripherals minus the SysTick
                    pub core: rtfm::Peripherals
                ));
            } else {
                fields.push(quote!(
                    /// Core (Cortex-M) peripherals
                    pub core: rtfm::export::Peripherals
                ));
            }

            // device peripherals are handed to at most one core's `init`
            if extra.peripherals == Some(core) {
                let device = extra.device;

                fields.push(quote!(
                    /// Device peripherals
                    pub device: #device::Peripherals
                ));

                values.push(quote!(device: #device::Peripherals::steal()));
            }

            values.push(quote!(core));
        }

        Context::Idle(..) => {}

        Context::HardwareTask(..) => {
            if app.uses_schedule(core) {
                let m = extra.monotonic();

                fields.push(quote!(
                    /// Time at which this handler started executing
                    pub start: <#m as rtfm::Monotonic>::Instant
                ));

                values.push(quote!(start: instant));

                needs_instant = true;
            }
        }

        Context::SoftwareTask(..) => {
            if app.uses_schedule(core) {
                let m = extra.monotonic();

                fields.push(quote!(
                    /// The time at which this task was scheduled to run
                    pub scheduled: <#m as rtfm::Monotonic>::Instant
                ));

                values.push(quote!(scheduled: instant));

                needs_instant = true;
            }
        }
    }

    // re-export the crate-root `${ctxt}Locals` struct as `${ctxt}::Locals`
    if ctxt.has_locals(app) {
        let ident = util::locals_ident(ctxt, app);
        items.push(quote!(
            #[doc(inline)]
            pub use super::#ident as Locals;
        ));
    }

    // re-export `${ctxt}Resources` as `${ctxt}::Resources` and add the
    // `resources` field to `Context`
    if ctxt.has_resources(app) {
        let ident = util::resources_ident(ctxt, app);
        let lt = if resources_tick {
            lt = Some(quote!('a));
            Some(quote!('a))
        } else {
            None
        };

        items.push(quote!(
            #[doc(inline)]
            pub use super::#ident as Resources;
        ));

        fields.push(quote!(
            /// Resources this task has access to
            pub resources: Resources<#lt>
        ));

        // `init` constructs its `Resources` without a `priority` token
        let priority = if ctxt.is_init() {
            None
        } else {
            Some(quote!(priority))
        };
        values.push(quote!(resources: Resources::new(#priority)));
    }

    // `Schedule` struct + `schedule` field
    if ctxt.uses_schedule(app) {
        let doc = "Tasks that can be `schedule`-d from this context";
        if ctxt.is_init() {
            // `init`'s `Schedule` carries no priority token; it is only
            // prevented from crossing threads (`*mut ()` makes it `!Send`)
            items.push(quote!(
                #[doc = #doc]
                #[derive(Clone, Copy)]
                pub struct Schedule {
                    _not_send: core::marker::PhantomData<*mut ()>,
                }
            ));

            fields.push(quote!(
                #[doc = #doc]
                pub schedule: Schedule
            ));

            values.push(quote!(
                schedule: Schedule { _not_send: core::marker::PhantomData }
            ));
        } else {
            lt = Some(quote!('a));

            items.push(quote!(
                #[doc = #doc]
                #[derive(Clone, Copy)]
                pub struct Schedule<'a> {
                    priority: &'a rtfm::export::Priority,
                }

                impl<'a> Schedule<'a> {
                    #[doc(hidden)]
                    #[inline(always)]
                    pub unsafe fn priority(&self) -> &rtfm::export::Priority {
                        &self.priority
                    }
                }
            ));

            fields.push(quote!(
                #[doc = #doc]
                pub schedule: Schedule<'a>
            ));

            values.push(quote!(
                schedule: Schedule { priority }
            ));
        }
    }

    // `Spawn` struct + `spawn` field
    if ctxt.uses_spawn(app) {
        let doc = "Tasks that can be `spawn`-ed from this context";
        if ctxt.is_init() {
            fields.push(quote!(
                #[doc = #doc]
                pub spawn: Spawn
            ));

            items.push(quote!(
                #[doc = #doc]
                #[derive(Clone, Copy)]
                pub struct Spawn {
                    _not_send: core::marker::PhantomData<*mut ()>,
                }
            ));

            values.push(quote!(spawn: Spawn { _not_send: core::marker::PhantomData }));
        } else {
            lt = Some(quote!('a));

            fields.push(quote!(
                #[doc = #doc]
                pub spawn: Spawn<'a>
            ));

            let mut instant_method = None;
            if ctxt.is_idle() {
                items.push(quote!(
                    #[doc = #doc]
                    #[derive(Clone, Copy)]
                    pub struct Spawn<'a> {
                        priority: &'a rtfm::export::Priority,
                    }
                ));

                values.push(quote!(spawn: Spawn { priority }));
            } else {
                // tasks also carry an `instant` in `Spawn` when the `schedule`
                // API is in use
                let instant_field = if app.uses_schedule(core) {
                    let m = extra.monotonic();

                    needs_instant = true;
                    instant_method = Some(quote!(
                        pub unsafe fn instant(&self) -> <#m as rtfm::Monotonic>::Instant {
                            self.instant
                        }
                    ));
                    Some(quote!(instant: <#m as rtfm::Monotonic>::Instant,))
                } else {
                    None
                };

                items.push(quote!(
                    /// Tasks that can be spawned from this context
                    #[derive(Clone, Copy)]
                    pub struct Spawn<'a> {
                        #instant_field
                        priority: &'a rtfm::export::Priority,
                    }
                ));

                let _instant = if needs_instant {
                    Some(quote!(, instant))
                } else {
                    None
                };
                values.push(quote!(
                    spawn: Spawn { priority #_instant }
                ));
            }

            items.push(quote!(
                impl<'a> Spawn<'a> {
                    #[doc(hidden)]
                    #[inline(always)]
                    pub unsafe fn priority(&self) -> &rtfm::export::Priority {
                        self.priority
                    }

                    #instant_method
                }
            ));
        }
    }

    // re-export `${init}LateResources` as `${init}::LateResources`
    if let Context::Init(core) = ctxt {
        let init = &app.inits[&core];
        if init.returns_late_resources {
            let late_resources = util::late_resources_ident(&init.name);

            items.push(quote!(
                #[doc(inline)]
                pub use super::#late_resources as LateResources;
            ));
        }
    }

    let doc = match ctxt {
        Context::Idle(_) => "Idle loop",
        Context::Init(_) => "Initialization function",
        Context::HardwareTask(_) => "Hardware task",
        Context::SoftwareTask(_) => "Software task",
    };

    // arguments of `Context::new`: only `init` receives the core peripherals;
    // every other context receives a `priority` token instead
    let core = if ctxt.is_init() {
        if app.uses_schedule(core) {
            Some(quote!(core: rtfm::Peripherals,))
        } else {
            Some(quote!(core: rtfm::export::Peripherals,))
        }
    } else {
        None
    };

    let priority = if ctxt.is_init() {
        None
    } else {
        Some(quote!(priority: &#lt rtfm::export::Priority))
    };

    let instant = if needs_instant {
        let m = extra.monotonic();

        Some(quote!(, instant: <#m as rtfm::Monotonic>::Instant))
    } else {
        None
    };

    items.push(quote!(
        /// Execution context
        pub struct Context<#lt> {
            #(#fields,)*
        }

        impl<#lt> Context<#lt> {
            #[inline(always)]
            pub unsafe fn new(#core #priority #instant) -> Self {
                Context {
                    #(#values,)*
                }
            }
        }
    ));

    if !items.is_empty() {
        let cfg_core = util::cfg_core(ctxt.core(app), app.args.cores);
        quote!(
            #[allow(non_snake_case)]
            #[doc = #doc]
            #cfg_core
            pub mod #name {
                #(#items)*
            }
        )
    } else {
        quote!()
    }
}

View file

@ -0,0 +1,155 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use crate::{analyze::Analysis, check::Extra, codegen::util};
/// Generates code that runs after `#[init]` returns
///
/// Returns `(const_app, stmts)`: `static` barrier declarations for the `const
/// APP` block, and the statements that write late resources, synchronize
/// cores, and finally enable interrupts.
pub fn codegen(
    core: u8,
    analysis: &Analysis,
    extra: &Extra,
) -> (Vec<TokenStream2>, Vec<TokenStream2>) {
    let mut const_app = vec![];
    let mut stmts = vec![];

    // initialize late resources
    if let Some(late_resources) = analysis.late_resources.get(&core) {
        for name in late_resources {
            // if it's live
            if analysis.locations.get(name).is_some() {
                // `#name` is a `MaybeUninit` static; move `init`'s return value in
                stmts.push(quote!(#name.as_mut_ptr().write(late.#name);));
            }
        }
    }

    if analysis.timer_queues.is_empty() {
        // cross-initialization barriers -- notify *other* cores that their resources have been
        // initialized
        for (user, initializers) in &analysis.initialization_barriers {
            if !initializers.contains(&core) {
                continue;
            }

            let ib = util::init_barrier(*user);
            // in the "heterogeneous" build barriers must live in `.shared` memory
            let shared = if cfg!(feature = "heterogeneous") {
                Some(quote!(
                    #[rtfm::export::shared]
                ))
            } else {
                None
            };

            const_app.push(quote!(
                #shared
                static #ib: rtfm::export::Barrier = rtfm::export::Barrier::new();
            ));

            stmts.push(quote!(
                #ib.release();
            ));
        }

        // then wait until the other cores have initialized *our* resources
        if analysis.initialization_barriers.contains_key(&core) {
            let ib = util::init_barrier(core);

            stmts.push(quote!(
                #ib.wait();
            ));
        }

        // cross-spawn barriers: wait until other cores are ready to receive messages
        for (&receiver, senders) in &analysis.spawn_barriers {
            if senders.get(&core) == Some(&false) {
                let sb = util::spawn_barrier(receiver);

                stmts.push(quote!(
                    #sb.wait();
                ));
            }
        }
    } else {
        // if the `schedule` API is used then we'll synchronize all cores to leave the
        // `init`-ialization phase at the same time. In this case the rendezvous barrier makes the
        // cross-initialization and spawn barriers unnecessary

        let m = extra.monotonic();

        if analysis.timer_queues.len() == 1 {
            // reset the monotonic timer / counter
            stmts.push(quote!(
                <#m as rtfm::Monotonic>::reset();
            ));
        } else {
            // in the multi-core case we need a rendezvous (RV) barrier between *all* the cores that
            // use the `schedule` API; otherwise one of the cores could observe the before-reset
            // value of the monotonic counter
            // (this may be easier to implement with `AtomicU8.fetch_sub` but that API is not
            // available on ARMv6-M)

            // this core will reset the monotonic counter
            const FIRST: u8 = 0;

            if core == FIRST {
                // declare one RV barrier per `schedule`-using core and wait on
                // all of them before resetting the counter
                for &i in analysis.timer_queues.keys() {
                    let rv = util::rendezvous_ident(i);
                    let shared = if cfg!(feature = "heterogeneous") {
                        Some(quote!(
                            #[rtfm::export::shared]
                        ))
                    } else {
                        None
                    };

                    const_app.push(quote!(
                        #shared
                        static #rv: rtfm::export::Barrier = rtfm::export::Barrier::new();
                    ));

                    // wait until all the other cores have reached the RV point
                    if i != FIRST {
                        stmts.push(quote!(
                            #rv.wait();
                        ));
                    }
                }

                let rv = util::rendezvous_ident(core);
                stmts.push(quote!(
                    // the compiler fences are used to prevent `reset` from being re-ordering wrt to
                    // the atomic operations -- we don't know if `reset` contains load or store
                    // operations

                    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);

                    // reset the counter
                    <#m as rtfm::Monotonic>::reset();

                    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);

                    // now unblock all the other cores
                    #rv.release();
                ));
            } else {
                let rv = util::rendezvous_ident(core);

                // let the first core know that we have reached the RV point
                stmts.push(quote!(
                    #rv.release();
                ));

                let rv = util::rendezvous_ident(FIRST);

                // wait until the first core has reset the monotonic timer
                stmts.push(quote!(
                    #rv.wait();
                ));
            }
        }
    }

    // enable the interrupts -- this completes the `init`-ialization phase
    stmts.push(quote!(rtfm::export::interrupt::enable();));

    (const_app, stmts)
}

View file

@ -0,0 +1,159 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::ast::App;
use crate::{analyze::Analysis, check::Extra, codegen::util};
/// Generates code that runs before `#[init]`
///
/// The generated statements disable interrupts, fill the free queues, set
/// interrupt / exception / SysTick priorities, and synchronize the
/// cross-spawn barriers. Also returns `static` barrier declarations for the
/// `const APP` block.
pub fn codegen(
    core: u8,
    app: &App,
    analysis: &Analysis,
    extra: &Extra,
) -> (
    // `const_app_pre_init` -- `static` variables for barriers
    Vec<TokenStream2>,
    // `pre_init_stmts`
    Vec<TokenStream2>,
) {
    let mut const_app = vec![];
    let mut stmts = vec![];

    // disable interrupts -- `init` must run with interrupts disabled
    stmts.push(quote!(rtfm::export::interrupt::disable();));

    // populate this core `FreeQueue`s
    for (name, senders) in &analysis.free_queues {
        let task = &app.software_tasks[name];
        let cap = task.args.capacity;

        for &sender in senders.keys() {
            if sender == core {
                let fq = util::fq_ident(name, sender);

                // initially all `capacity` message slots are free
                stmts.push(quote!(
                    (0..#cap).for_each(|i| #fq.enqueue_unchecked(i));
                ));
            }
        }
    }

    stmts.push(quote!(
        // NOTE(transmute) to avoid debug_assertion in multi-core mode
        let mut core: rtfm::export::Peripherals = core::mem::transmute(());
    ));

    let device = extra.device;
    let nvic_prio_bits = quote!(#device::NVIC_PRIO_BITS);

    // unmask interrupts and set their priorities
    for (&priority, name) in analysis
        .interrupts
        .get(&core)
        .iter()
        .flat_map(|interrupts| *interrupts)
        .chain(app.hardware_tasks.values().flat_map(|task| {
            if !util::is_exception(&task.args.binds) {
                Some((&task.args.priority, &task.args.binds))
            } else {
                // we do exceptions in another pass
                None
            }
        }))
    {
        // compile time assert that this priority is supported by the device
        stmts.push(quote!(let _ = [(); ((1 << #nvic_prio_bits) - #priority as usize)];));

        // NOTE this also checks that the interrupt exists in the `Interrupt` enumeration
        let interrupt = util::interrupt_ident(core, app.args.cores);
        stmts.push(quote!(
            core.NVIC.set_priority(
                #device::#interrupt::#name,
                rtfm::export::logical2hw(#priority, #nvic_prio_bits),
            );
        ));

        // NOTE unmask the interrupt *after* setting its priority: changing the priority of a pended
        // interrupt is implementation defined
        stmts.push(quote!(rtfm::export::NVIC::unmask(#device::#interrupt::#name);));
    }

    // cross-spawn barriers: now that priorities have been set and the interrupts have been unmasked
    // we are ready to receive messages from *other* cores
    if analysis.spawn_barriers.contains_key(&core) {
        let sb = util::spawn_barrier(core);
        // in the "heterogeneous" build barriers must live in `.shared` memory
        let shared = if cfg!(feature = "heterogeneous") {
            Some(quote!(
                #[rtfm::export::shared]
            ))
        } else {
            None
        };

        const_app.push(quote!(
            #shared
            static #sb: rtfm::export::Barrier = rtfm::export::Barrier::new();
        ));

        // unblock cores that may send us a message
        stmts.push(quote!(
            #sb.release();
        ));
    }

    // set exception priorities
    for (name, priority) in app.hardware_tasks.values().filter_map(|task| {
        if util::is_exception(&task.args.binds) {
            Some((&task.args.binds, task.args.priority))
        } else {
            None
        }
    }) {
        // compile time assert that this priority is supported by the device
        stmts.push(quote!(let _ = [(); ((1 << #nvic_prio_bits) - #priority as usize)];));

        stmts.push(quote!(core.SCB.set_priority(
            rtfm::export::SystemHandler::#name,
            rtfm::export::logical2hw(#priority, #nvic_prio_bits),
        );));
    }

    // initialize the SysTick
    if let Some(tq) = analysis.timer_queues.get(&core) {
        let priority = tq.priority;

        // compile time assert that this priority is supported by the device
        stmts.push(quote!(let _ = [(); ((1 << #nvic_prio_bits) - #priority as usize)];));

        stmts.push(quote!(core.SCB.set_priority(
            rtfm::export::SystemHandler::SysTick,
            rtfm::export::logical2hw(#priority, #nvic_prio_bits),
        );));

        stmts.push(quote!(
            core.SYST.set_clock_source(rtfm::export::SystClkSource::Core);
            core.SYST.enable_counter();
            core.DCB.enable_trace();
        ));
    }

    // if there's no user `#[idle]` then optimize returning from interrupt handlers
    if app.idles.get(&core).is_none() {
        // Set SLEEPONEXIT bit to enter sleep mode when returning from ISR
        stmts.push(quote!(core.SCB.scr.modify(|r| r | 1 << 1);));
    }

    // cross-spawn barriers: wait until other cores are ready to receive messages
    for (&receiver, senders) in &analysis.spawn_barriers {
        // only block here if `init` can send messages to `receiver`
        if senders.get(&core) == Some(&true) {
            let sb = util::spawn_barrier(receiver);

            stmts.push(quote!(
                #sb.wait();
            ));
        }
    }

    (const_app, stmts)
}

View file

@ -0,0 +1,125 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{
analyze::{Location, Ownership},
ast::App,
};
use crate::{analyze::Analysis, check::Extra, codegen::util};
/// Generates `static [mut]` variables and resource proxies
///
/// Returns `(const_app, mod_resources)`: the `static [mut]` variables that
/// back each resource (plus `Mutex` impls for contended ones), and the
/// `resources` module containing the proxy structs.
pub fn codegen(
    app: &App,
    analysis: &Analysis,
    extra: &Extra,
) -> (
    // const_app -- the `static [mut]` variables behind the proxies
    Vec<TokenStream2>,
    // mod_resources -- the `resources` module
    TokenStream2,
) {
    let mut const_app = vec![];
    let mut mod_resources = vec![];

    for (name, res, expr, loc) in app.resources(analysis) {
        let cfgs = &res.cfgs;
        let ty = &res.ty;

        {
            // decide where the backing `static` lives: core-local `.data` for
            // resources owned by a single core, `.shared` memory otherwise
            let (loc_attr, section) = match loc {
                Location::Owned {
                    core,
                    cross_initialized: false,
                } => (
                    util::cfg_core(*core, app.args.cores),
                    util::link_section("data", *core),
                ),

                // shared `static`s and cross-initialized resources need to be in `.shared` memory
                _ => (
                    if cfg!(feature = "heterogeneous") {
                        Some(quote!(#[rtfm::export::shared]))
                    } else {
                        None
                    },
                    None,
                ),
            };

            // late resources (no initializer expression) are stored as `MaybeUninit`
            let (ty, expr) = if let Some(expr) = expr {
                (quote!(#ty), quote!(#expr))
            } else {
                (
                    quote!(core::mem::MaybeUninit<#ty>),
                    quote!(core::mem::MaybeUninit::uninit()),
                )
            };

            let attrs = &res.attrs;
            const_app.push(quote!(
                #[allow(non_upper_case_globals)]
                #(#attrs)*
                #(#cfgs)*
                #loc_attr
                #section
                static mut #name: #ty = #expr;
            ));
        }

        // contended resources additionally get a proxy struct + a `Mutex` impl
        if let Some(Ownership::Contended { ceiling }) = analysis.ownerships.get(name) {
            let cfg_core = util::cfg_core(loc.core().expect("UNREACHABLE"), app.args.cores);

            mod_resources.push(quote!(
                #[allow(non_camel_case_types)]
                #(#cfgs)*
                #cfg_core
                pub struct #name<'a> {
                    priority: &'a Priority,
                }

                #(#cfgs)*
                #cfg_core
                impl<'a> #name<'a> {
                    #[inline(always)]
                    pub unsafe fn new(priority: &'a Priority) -> Self {
                        #name { priority }
                    }

                    #[inline(always)]
                    pub unsafe fn priority(&self) -> &Priority {
                        self.priority
                    }
                }
            ));

            // late resources need `as_mut_ptr` to reach through the `MaybeUninit`
            let ptr = if expr.is_none() {
                quote!(#name.as_mut_ptr())
            } else {
                quote!(&mut #name)
            };

            const_app.push(util::impl_mutex(
                extra,
                cfgs,
                cfg_core.as_ref(),
                true,
                name,
                quote!(#ty),
                *ceiling,
                ptr,
            ));
        }
    }

    let mod_resources = if mod_resources.is_empty() {
        quote!()
    } else {
        quote!(mod resources {
            use rtfm::export::Priority;

            #(#mod_resources)*
        })
    };

    (const_app, mod_resources)
}

View file

@ -0,0 +1,182 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use crate::{analyze::Analysis, codegen::util};
/// Generates the `${ctxt}Resources` struct and its `new` constructor
///
/// `priority` is the static priority of the context (used to decide which
/// resources need a lock-based proxy). Sets `*needs_lt` when the struct ends
/// up generic over the `'a` lifetime. Returns `(item, constructor)`.
pub fn codegen(
    ctxt: Context,
    priority: u8,
    needs_lt: &mut bool,
    app: &App,
    analysis: &Analysis,
) -> (TokenStream2, TokenStream2) {
    let mut lt = None;

    let resources = match ctxt {
        Context::Init(core) => &app.inits[&core].args.resources,
        Context::Idle(core) => &app.idles[&core].args.resources,
        Context::HardwareTask(name) => &app.hardware_tasks[name].args.resources,
        Context::SoftwareTask(name) => &app.software_tasks[name].args.resources,
    };

    let mut fields = vec![];
    let mut values = vec![];
    let mut has_cfgs = false;

    for (name, access) in resources {
        let (res, expr) = app.resource(name).expect("UNREACHABLE");

        let cfgs = &res.cfgs;
        has_cfgs |= !cfgs.is_empty();

        // exclusive access -> `&mut` reference; shared access -> `&` reference
        let mut_ = if access.is_exclusive() {
            Some(quote!(mut))
        } else {
            None
        };
        let ty = &res.ty;

        if ctxt.is_init() {
            if !analysis.ownerships.contains_key(name) {
                // owned by `init`
                fields.push(quote!(
                    #(#cfgs)*
                    pub #name: &'static #mut_ #ty
                ));

                values.push(quote!(
                    #(#cfgs)*
                    #name: &#mut_ #name
                ));
            } else {
                // owned by someone else
                lt = Some(quote!('a));

                fields.push(quote!(
                    #(#cfgs)*
                    pub #name: &'a mut #ty
                ));

                values.push(quote!(
                    #(#cfgs)*
                    #name: &mut #name
                ));
            }
        } else {
            let ownership = &analysis.ownerships[name];

            if ownership.needs_lock(priority) {
                // contended at this priority: shared access gets a plain `&`;
                // exclusive access goes through the lock-based proxy
                if mut_.is_none() {
                    lt = Some(quote!('a));

                    fields.push(quote!(
                        #(#cfgs)*
                        pub #name: &'a #ty
                    ));
                } else {
                    // resource proxy
                    lt = Some(quote!('a));

                    fields.push(quote!(
                        #(#cfgs)*
                        pub #name: resources::#name<'a>
                    ));

                    values.push(quote!(
                        #(#cfgs)*
                        #name: resources::#name::new(priority)
                    ));

                    // proxies are constructed above; skip the direct-reference path below
                    continue;
                }
            } else {
                // no lock needed: direct reference; `'static` when the context
                // runs at most once, `'a` otherwise
                let lt = if ctxt.runs_once() {
                    quote!('static)
                } else {
                    lt = Some(quote!('a));
                    quote!('a)
                };

                if ownership.is_owned() || mut_.is_none() {
                    fields.push(quote!(
                        #(#cfgs)*
                        pub #name: &#lt #mut_ #ty
                    ));
                } else {
                    fields.push(quote!(
                        #(#cfgs)*
                        pub #name: &#lt mut #ty
                    ));
                }
            }

            // late resources are stored as `MaybeUninit` and must be reached
            // through `as_ptr` / `as_mut_ptr`
            let is_late = expr.is_none();
            if is_late {
                let expr = if mut_.is_some() {
                    quote!(&mut *#name.as_mut_ptr())
                } else {
                    quote!(&*#name.as_ptr())
                };

                values.push(quote!(
                    #(#cfgs)*
                    #name: #expr
                ));
            } else {
                values.push(quote!(
                    #(#cfgs)*
                    #name: &#mut_ #name
                ));
            }
        }
    }

    if lt.is_some() {
        *needs_lt = true;

        // the struct could end up empty due to `cfg`s leading to an error due to `'a` being unused
        if has_cfgs {
            fields.push(quote!(
                #[doc(hidden)]
                pub __marker__: core::marker::PhantomData<&'a ()>
            ));

            values.push(quote!(__marker__: core::marker::PhantomData))
        }
    }

    let core = ctxt.core(app);
    let cores = app.args.cores;
    let cfg_core = util::cfg_core(core, cores);
    let doc = format!("Resources `{}` has access to", ctxt.ident(app));
    let ident = util::resources_ident(ctxt, app);
    let item = quote!(
        #cfg_core
        #[allow(non_snake_case)]
        #[doc = #doc]
        pub struct #ident<#lt> {
            #(#fields,)*
        }
    );

    // `init` constructs its resources without a `priority` token
    let arg = if ctxt.is_init() {
        None
    } else {
        Some(quote!(priority: &#lt rtfm::export::Priority))
    };
    let constructor = quote!(
        #cfg_core
        impl<#lt> #ident<#lt> {
            #[inline(always)]
            unsafe fn new(#arg) -> Self {
                #ident {
                    #(#values,)*
                }
            }
        }
    );
    (item, constructor)
}

View file

@ -0,0 +1,99 @@
use std::collections::{BTreeMap, HashSet};
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::ast::App;
use crate::{
check::Extra,
codegen::{schedule_body, util},
};
/// Generates all `${ctxt}::Schedule` methods
///
/// For each (scheduler context, schedulable task) pair this emits a method on
/// the context's `Schedule` struct; the shared `schedule_${task}_S${sender}`
/// free functions are generated at most once per sender core.
pub fn codegen(app: &App, extra: &Extra) -> Vec<TokenStream2> {
    let mut items = vec![];

    // tracks, per sender core, which `schedule_${task}` functions have already
    // been emitted
    let mut seen = BTreeMap::<u8, HashSet<_>>::new();
    for (scheduler, schedulees) in app.schedule_callers() {
        let m = extra.monotonic();
        let instant = quote!(<#m as rtfm::Monotonic>::Instant);

        let sender = scheduler.core(app);
        let cfg_sender = util::cfg_core(sender, app.args.cores);
        let seen = seen.entry(sender).or_default();
        let mut methods = vec![];

        for name in schedulees {
            let schedulee = &app.software_tasks[name];
            let cfgs = &schedulee.cfgs;
            let (args, _, untupled, ty) = util::regroup_inputs(&schedulee.inputs);
            let args = &args;

            if scheduler.is_init() {
                // `init` uses a special `schedule` implementation; it doesn't use the
                // `schedule_${name}` functions which are shared by other contexts
                let body = schedule_body::codegen(scheduler, &name, app);

                let section = util::link_section("text", sender);
                methods.push(quote!(
                    #(#cfgs)*
                    #section
                    fn #name(&self, instant: #instant #(,#args)*) -> Result<(), #ty> {
                        #body
                    }
                ));
            } else {
                let schedule = util::schedule_ident(name, sender);

                if !seen.contains(name) {
                    // generate a `schedule_${name}_S${sender}` function
                    seen.insert(name);

                    let body = schedule_body::codegen(scheduler, &name, app);

                    let section = util::link_section("text", sender);
                    items.push(quote!(
                        #cfg_sender
                        #(#cfgs)*
                        #section
                        unsafe fn #schedule(
                            priority: &rtfm::export::Priority,
                            instant: #instant
                            #(,#args)*
                        ) -> Result<(), #ty> {
                            #body
                        }
                    ));
                }

                // the `Schedule` method is a thin wrapper over the shared function
                methods.push(quote!(
                    #(#cfgs)*
                    #[inline(always)]
                    fn #name(&self, instant: #instant #(,#args)*) -> Result<(), #ty> {
                        unsafe {
                            #schedule(self.priority(), instant #(,#untupled)*)
                        }
                    }
                ));
            }
        }

        // `init`'s `Schedule` struct is not generic over a lifetime
        let lt = if scheduler.is_init() {
            None
        } else {
            Some(quote!('a))
        };

        let scheduler = scheduler.ident(app);
        debug_assert!(!methods.is_empty());
        items.push(quote!(
            #cfg_sender
            impl<#lt> #scheduler::Schedule<#lt> {
                #(#methods)*
            }
        ));
    }

    items
}

View file

@ -0,0 +1,61 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtfm_syntax::{ast::App, Context};
use syn::Ident;
use crate::codegen::util;
/// Generates the body shared by all `schedule` implementations: grab a free
/// slot from the task's free queue, store the payload (and, if the receiver
/// uses `schedule`, the instant), then push a `NotReady` entry onto the
/// sender's timer queue.
pub fn codegen(scheduler: Context, name: &Ident, app: &App) -> TokenStream2 {
    let schedulee = &app.software_tasks[name];

    let sender = scheduler.core(app);
    let receiver = schedulee.args.core;

    let fq = util::fq_ident(name, sender);
    let tq = util::tq_ident(sender);

    // `init` has exclusive access to the queues; every other context must go
    // through the lock-based proxies
    let (fq_dequeue, tq_enqueue);
    if scheduler.is_init() {
        fq_dequeue = quote!(#fq.dequeue());
        tq_enqueue = quote!(#tq.enqueue_unchecked(nr););
    } else {
        fq_dequeue = quote!((#fq { priority }).lock(|fq| fq.split().1.dequeue()));
        tq_enqueue = quote!((#tq { priority }).lock(|tq| tq.enqueue_unchecked(nr)););
    }

    // the scheduled instant is only recorded when the receiver core uses the
    // `schedule` API
    let store_instant = if app.uses_schedule(receiver) {
        let instants = util::instants_ident(name, sender);

        Some(quote!(
            #instants.get_unchecked_mut(usize::from(index)).as_mut_ptr().write(instant);
        ))
    } else {
        None
    };

    let inputs = util::inputs_ident(name, sender);
    let (_, tupled, _, _) = util::regroup_inputs(&schedulee.inputs);
    let t = util::schedule_t_ident(sender);

    quote!(
        unsafe {
            use rtfm::Mutex as _;

            let input = #tupled;
            if let Some(index) = #fq_dequeue {
                #inputs.get_unchecked_mut(usize::from(index)).as_mut_ptr().write(input);

                #store_instant

                let nr = rtfm::export::NotReady {
                    instant,
                    index,
                    task: #t::#name,
                };

                #tq_enqueue

                Ok(())
            } else {
                // no free slot: hand the payload back to the caller
                Err(input)
            }
        }
    )
}

Some files were not shown because too many files have changed in this diff Show more