528: The great 0.6 docs update r=AfoHT a=korken89

Closes #530
Closes #527
Closes #487
Closes #461
Closes #448
Closes #440
Closes #422

Co-authored-by: Emil Fresk <emil.fresk@gmail.com>
This commit is contained in:
bors[bot] 2021-09-25 17:09:54 +00:00 committed by GitHub
commit f0c3199825
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
106 changed files with 1286 additions and 1429 deletions

View file

@ -132,13 +132,11 @@ jobs:
- name: Check the examples - name: Check the examples
if: matrix.target == 'thumbv7m-none-eabi' if: matrix.target == 'thumbv7m-none-eabi'
env:
V7: __v7
uses: actions-rs/cargo@v1 uses: actions-rs/cargo@v1
with: with:
use-cross: false use-cross: false
command: check command: check
args: --examples --target=${{ matrix.target }} --features ${{ env.V7 }} args: --examples --target=${{ matrix.target }}
# Verify the example output with run-pass tests # Verify the example output with run-pass tests
testexamples: testexamples:
@ -304,9 +302,15 @@ jobs:
args: --manifest-path macros/Cargo.toml --target=${{ matrix.target }} args: --manifest-path macros/Cargo.toml --target=${{ matrix.target }}
# Run test suite for thumbv7m # Run test suite for thumbv7m
testv7: tests:
name: testv7 name: tests
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
strategy:
matrix:
target:
- x86_64-unknown-linux-gnu
toolchain:
- stable
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v2
@ -334,56 +338,15 @@ jobs:
- name: Install Rust - name: Install Rust
uses: actions-rs/toolchain@v1 uses: actions-rs/toolchain@v1
with: with:
toolchain: stable toolchain: ${{ matrix.toolchain }}
target: thumbv7m-none-eabi target: ${{ matrix.target }}
override: true override: true
- uses: actions-rs/cargo@v1 - uses: actions-rs/cargo@v1
with: with:
use-cross: false use-cross: false
command: test command: test
args: --test tests --features __v7 args: --test tests --target=${{ matrix.target }}
# Run test suite for thumbv6m
testv6:
name: testv6
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Cache cargo dependencies
uses: actions/cache@v2
with:
path: |
- ~/.cargo/bin/
- ~/.cargo/registry/index/
- ~/.cargo/registry/cache/
- ~/.cargo/git/db/
key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.OS }}-cargo-
- name: Cache build output dependencies
uses: actions/cache@v2
with:
path: target
key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.OS }}-build-
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: thumbv6m-none-eabi
override: true
- uses: actions-rs/cargo@v1
with:
use-cross: false
command: test
args: --test tests
# Build documentation, check links # Build documentation, check links
docs: docs:
@ -506,8 +469,7 @@ jobs:
- testexamples - testexamples
- checkmacros - checkmacros
- testmacros - testmacros
- testv7 - tests
- testv6
- docs - docs
- mdbook - mdbook
# Only run this when pushing to master branch # Only run this when pushing to master branch
@ -624,8 +586,7 @@ jobs:
- testexamples - testexamples
- checkmacros - checkmacros
- testmacros - testmacros
- testv7 - tests
- testv6
- docs - docs
- mdbook - mdbook
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04

View file

@ -19,44 +19,21 @@ version = "0.6.0-alpha.5"
[lib] [lib]
name = "rtic" name = "rtic"
[[example]]
name = "periodic"
required-features = ["__v7"]
[[example]]
name = "pool"
required-features = ["__v7"]
[[example]]
name = "schedule"
required-features = ["__v7"]
[[example]]
name = "t-schedule"
required-features = ["__v7"]
[[example]]
name = "double_schedule"
required-features = ["__v7"]
[dependencies] [dependencies]
cortex-m = "0.7.0" cortex-m = "0.7.0"
cortex-m-rtic-macros = { path = "macros", version = "0.6.0-alpha.5" } cortex-m-rtic-macros = { path = "macros", version = "0.6.0-alpha.5" }
rtic-monotonic = "0.1.0-alpha.2" rtic-monotonic = "0.1.0-alpha.2"
rtic-core = "0.3.1" rtic-core = "0.3.1"
heapless = "0.7.5" heapless = "0.7.7"
bare-metal = "1.0.0" bare-metal = "1.0.0"
[dependencies.dwt-systick-monotonic]
version = "0.1.0-alpha.3"
optional = true
[build-dependencies] [build-dependencies]
version_check = "0.9" version_check = "0.9"
[dev-dependencies] [dev-dependencies]
lm3s6965 = "0.1.3" lm3s6965 = "0.1.3"
cortex-m-semihosting = "0.3.3" cortex-m-semihosting = "0.3.3"
systick-monotonic = "0.1.0-alpha.0"
[dev-dependencies.panic-semihosting] [dev-dependencies.panic-semihosting]
features = ["exit"] features = ["exit"]
@ -65,10 +42,6 @@ version = "0.5.2"
[target.x86_64-unknown-linux-gnu.dev-dependencies] [target.x86_64-unknown-linux-gnu.dev-dependencies]
trybuild = "1" trybuild = "1"
[features]
# used for testing this crate; do not use in applications
__v7 = ["dwt-systick-monotonic"]
[profile.release] [profile.release]
codegen-units = 1 codegen-units = 1
lto = true lto = true

View file

@ -3,13 +3,27 @@
[Preface](./preface.md) [Preface](./preface.md)
- [RTIC by example](./by-example.md) - [RTIC by example](./by-example.md)
- [The `app` attribute](./by-example/app.md) - [The `app`](./by-example/app.md)
- [App initialization](./by-example/app_init.md)
- [Resources](./by-example/resources.md) - [Resources](./by-example/resources.md)
- [Software tasks](./by-example/tasks.md) - [The background task](./by-example/app_idle.md)
- [Timer queue](./by-example/timer-queue.md) - [Defining tasks](./by-example/app_task.md)
- [Types, Send and Sync](./by-example/types-send-sync.md) - [Software tasks & `spawn`](./by-example/software_tasks.md)
- [Starting a new project](./by-example/new.md) - [Message passing & `capacity`](./by-example/message_passing.md)
- [Tips & tricks](./by-example/tips.md) - [Hardware tasks](./by-example/hardware_tasks.md)
- [Task priorities](./by-example/app_priorities.md)
- [Monotonic & `spawn_{at/after}`](./by-example/monotonic.md)
- [Starting a new project](./by-example/starting_a_project.md)
- [The minimal app](./by-example/app_minimal.md)
- [Tips & Tricks](./by-example/tips.md)
- [Implementing Monotonic](./by-example/tips_monotonic_impl.md)
- [Resource de-structure-ing](./by-example/tips_destructureing.md)
- [Using indirection](./by-example/tips_indirection.md)
- [`'static` super-powers](./by-example/tips_static_lifetimes.md)
- [Inspecting generated code](./by-example/tips_view_code.md)
- [Running tasks from RAM](./by-example/tips_from_ram.md)
<!-- - [`#[cfg(..)]` support](./by-example/tips.md) -->
- [Awesome RTIC examples](./awesome_rtic.md)
- [Migration Guides](./migration.md) - [Migration Guides](./migration.md)
- [v0.5.x to v0.6.x](./migration/migration_v5.md) - [v0.5.x to v0.6.x](./migration/migration_v5.md)
- [v0.4.x to v0.5.x](./migration/migration_v4.md) - [v0.4.x to v0.5.x](./migration/migration_v4.md)

View file

@ -0,0 +1 @@
# Awesome RTIC examples

View file

@ -4,7 +4,7 @@ This part of the book introduces the Real-Time Interrupt-driven Concurrency (RTI
to new users by walking them through examples of increasing complexity. to new users by walking them through examples of increasing complexity.
All examples in this part of the book can be found in the GitHub [repository] of All examples in this part of the book can be found in the GitHub [repository] of
the project, and most of the examples can be run on QEMU so no special hardware the project. The examples can be run on QEMU (emulating a Cortex M3 target) so no special hardware
is required to follow along. is required to follow along.
[repository]: https://github.com/rtic-rs/cortex-m-rtic [repository]: https://github.com/rtic-rs/cortex-m-rtic
@ -15,10 +15,3 @@ embedded development environment that includes QEMU.
[the embedded Rust book]: https://rust-embedded.github.io/book/intro/install.html [the embedded Rust book]: https://rust-embedded.github.io/book/intro/install.html
## Real World Examples
The following are examples of RTFM being used in real world projects.
### RTFM V0.4.2
- [etrombly/sandbox](https://github.com/etrombly/sandbox/tree/41d423bcdd0d8e42fd46b79771400a8ca349af55). A hardware zen garden that draws patterns in sand. Patterns are sent over serial using G-code.

View file

@ -1,17 +1,13 @@
# The `app` attribute # The `#[app]` attribute and an RTIC application
This is the smallest possible RTIC application: ## Requirements on the `app` attribute
``` rust
{{#include ../../../../examples/smallest.rs}}
```
All RTIC applications use the [`app`] attribute (`#[app(..)]`). This attribute All RTIC applications use the [`app`] attribute (`#[app(..)]`). This attribute
must be applied to a `mod`-item. The `app` attribute has a mandatory `device` must be applied to a `mod`-item containing the RTIC application. The `app`
attribute has a mandatory `device`
argument that takes a *path* as a value. This must be a full path pointing to a argument that takes a *path* as a value. This must be a full path pointing to a
*peripheral access crate* (PAC) generated using [`svd2rust`] **v0.14.x** or *peripheral access crate* (PAC) generated using [`svd2rust`] **v0.14.x** or
newer. More details can be found in the [Starting a new project](./new.md) newer.
section.
The `app` attribute will expand into a suitable entry point so it's not required The `app` attribute will expand into a suitable entry point so it's not required
to use the [`cortex_m_rt::entry`] attribute. to use the [`cortex_m_rt::entry`] attribute.
@ -20,143 +16,11 @@ to use the [`cortex_m_rt::entry`] attribute.
[`svd2rust`]: https://crates.io/crates/svd2rust [`svd2rust`]: https://crates.io/crates/svd2rust
[`cortex_m_rt::entry`]: ../../../api/cortex_m_rt_macros/attr.entry.html [`cortex_m_rt::entry`]: ../../../api/cortex_m_rt_macros/attr.entry.html
## `init` ## An RTIC application example
Within the `app` module the attribute expects to find an initialization To give a flavor of RTIC, the following example contains commonly used features. In the following sections we will go through each feature in detail.
function marked with the `init` attribute. This function must have
signature `fn(init::Context) -> (init::LateResources, init::Monotonics)`.
This initialization function will be the first part of the application to run.
The `init` function will run *with interrupts disabled* and has exclusive access
to Cortex-M where the `bare_metal::CriticalSection` token is available as `cs`.
And optionally, device specific peripherals through the `core` and `device` fields
of `init::Context`.
`static mut` variables declared at the beginning of `init` will be transformed
into `&'static mut` references that are safe to access. Notice, this feature may be deprecated in next release, see `task_local` resources.
[`rtic::Peripherals`]: ../../api/rtic/struct.Peripherals.html
The example below shows the types of the `core`, `device` and `cs` fields, and
showcases safe access to a `static mut` variable. The `device` field is only
available when the `peripherals` argument is set to `true` (default). In the rare case you want to implement an ultra-slim application you can explicitly set `peripherals` to `false`.
``` rust ``` rust
{{#include ../../../../examples/init.rs}} {{#include ../../../../examples/common.rs}}
``` ```
Running the example will print `init` to the console and then exit the QEMU
process.
``` console
$ cargo run --example init
{{#include ../../../../ci/expected/init.run}}
```
> **NOTE**: Remember to specify your chosen target device by passing a target
> triple to cargo (e.g `cargo run --example init --target thumbv7m-none-eabi`) or
> configure a device to be used by default when building the examples in `.cargo/config.toml`.
> In this case, we use a Cortex M3 emulated in QEMU so the target is `thumbv7m-none-eabi`.
> See [`Starting a new project`](./new.md) for more info.
## `idle`
A function marked with the `idle` attribute can optionally appear in the
module. This function is used as the special *idle task* and must have
signature `fn(idle::Context) - > !`.
When present, the runtime will execute the `idle` task after `init`. Unlike
`init`, `idle` will run *with interrupts enabled* and it's not allowed to return
so it must run forever.
When no `idle` function is declared, the runtime sets the [SLEEPONEXIT] bit and
then sends the microcontroller to sleep after running `init`.
[SLEEPONEXIT]: https://developer.arm.com/docs/100737/0100/Power-management/Sleep-mode/Sleep-on-exit-bit
Like in `init`, `static mut` variables will be transformed into `&'static mut`
references that are safe to access. Notice, this feature may be deprecated in the next release, see `task_local` resources.
The example below shows that `idle` runs after `init`.
**Note:** The `loop {}` in idle cannot be empty as this will crash the microcontroller due to
LLVM compiling empty loops to an `UDF` instruction in release mode. To avoid UB, the loop needs to imply a "side-effect" by inserting an assembly instruction (e.g., `WFI`) or a `continue`.
``` rust
{{#include ../../../../examples/idle.rs}}
```
``` console
$ cargo run --example idle
{{#include ../../../../ci/expected/idle.run}}
```
## Hardware tasks
To declare interrupt handlers the framework provides a `#[task]` attribute that
can be attached to functions. This attribute takes a `binds` argument whose
value is the name of the interrupt to which the handler will be bound to; the
function adorned with this attribute becomes the interrupt handler. Within the
framework these type of tasks are referred to as *hardware* tasks, because they
start executing in reaction to a hardware event.
The example below demonstrates the use of the `#[task]` attribute to declare an
interrupt handler. Like in the case of `#[init]` and `#[idle]` local `static
mut` variables are safe to use within a hardware task.
``` rust
{{#include ../../../../examples/hardware.rs}}
```
``` console
$ cargo run --example hardware
{{#include ../../../../ci/expected/hardware.run}}
```
So far all the RTIC applications we have seen look no different than the
applications one can write using only the `cortex-m-rt` crate. From this point
we start introducing features unique to RTIC.
## Priorities
The static priority of each handler can be declared in the `task` attribute
using the `priority` argument. Tasks can have priorities in the range `1..=(1 <<
NVIC_PRIO_BITS)` where `NVIC_PRIO_BITS` is a constant defined in the `device`
crate. When the `priority` argument is omitted, the priority is assumed to be
`1`. The `idle` task has a non-configurable static priority of `0`, the lowest priority.
> A higher number means a higher priority in RTIC, which is the opposite from what
> Cortex-M does in the NVIC peripheral.
> Explicitly, this means that number `10` has a **higher** priority than number `9`.
When several tasks are ready to be executed the one with highest static
priority will be executed first. Task prioritization can be observed in the
following scenario: an interrupt signal arrives during the execution of a low
priority task; the signal puts the higher priority task in the pending state.
The difference in priority results in the higher priority task preempting the
lower priority one: the execution of the lower priority task is suspended and
the higher priority task is executed to completion. Once the higher priority
task has terminated the lower priority task is resumed.
The following example showcases the priority based scheduling of tasks.
``` rust
{{#include ../../../../examples/preempt.rs}}
```
``` console
$ cargo run --example preempt
{{#include ../../../../ci/expected/preempt.run}}
```
Note that the task `gpiob` does *not* preempt task `gpioc` because its priority
is the *same* as `gpioc`'s. However, once `gpioc` returns, the execution of
task `gpiob` is prioritized over `gpioa` due to its higher priority. `gpioa`
is resumed only after `gpiob` returns.
One more note about priorities: choosing a priority higher than what the device
supports (that is `1 << NVIC_PRIO_BITS`) will result in a compile error. Due to
limitations in the language, the error message is currently far from helpful: it
will say something along the lines of "evaluation of constant value failed" and
the span of the error will *not* point out to the problematic interrupt value --
we are sorry about this!

View file

@ -0,0 +1,27 @@
# The background task `#[idle]`
A function marked with the `idle` attribute can optionally appear in the
module. This function is used as the special *idle task* and must have
signature `fn(idle::Context) -> !`.
When present, the runtime will execute the `idle` task after `init`. Unlike
`init`, `idle` will run *with interrupts enabled* and it's not allowed to return
so it must run forever.
When no `idle` function is declared, the runtime sets the [SLEEPONEXIT] bit and
then sends the microcontroller to sleep after running `init`.
[SLEEPONEXIT]: https://developer.arm.com/docs/100737/0100/power-management/sleep-mode/sleep-on-exit-bit
Like in `init`, locally declared resources will have `'static` lifetimes that are safe to access.
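For orientation, a minimal `idle` task inside the `#[rtic::app]` module might look like the sketch below (the `continue` gives the loop a side effect so it is not compiled into an empty loop):

``` rust
#[idle]
fn idle(_cx: idle::Context) -> ! {
    // Runs after `init`, with interrupts enabled, and never returns.
    loop {
        continue;
    }
}
```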
The example below shows that `idle` runs after `init`.
``` rust
{{#include ../../../../examples/idle.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example idle
{{#include ../../../../ci/expected/idle.run}}
```

View file

@ -0,0 +1,28 @@
# App initialization and `#[init]`
An RTIC application requires an `init` task to set up the system. The corresponding function must have the signature `fn(init::Context) -> (Shared, Local, init::Monotonics)`, where `Shared` and `Local` are the resource structures defined by the user.
On system reset, the `init` task is executed (after the optionally defined `pre-init` and internal RTIC initialization). The `init` task runs *with interrupts disabled* and has exclusive access to Cortex-M (the `bare_metal::CriticalSection` token is available as `cs`) while device specific peripherals are available through the `core` and `device` fields of `init::Context`.
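As a bare-bones sketch of that signature (assuming the `lm3s6965` PAC used throughout the book's examples, empty resource structs, and no monotonics; a real crate additionally needs a panic handler and a target configuration):

``` rust
#[rtic::app(device = lm3s6965, peripherals = true)]
mod app {
    #[shared]
    struct Shared {}

    #[local]
    struct Local {}

    #[init]
    fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
        // cx.core  : the Cortex-M core peripherals
        // cx.device: the PAC's device peripherals (available since `peripherals = true`)
        // cx.cs    : the `bare_metal::CriticalSection` token
        let _ = (cx.core, cx.device, cx.cs);
        (Shared {}, Local {}, init::Monotonics())
    }
}
```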
## Example
The example below shows the types of the `core`, `device` and `cs` fields, and showcases the use of a `local` variable with `'static` lifetime. As we will see later, such variables can be delegated from `init` to other tasks of the RTIC application.
The `device` field is only available when the `peripherals` argument is set to `true` (which is the default). In the rare case you want to implement an ultra-slim application you can explicitly set `peripherals` to `false`.
``` rust
{{#include ../../../../examples/init.rs}}
```
Running the example will print `init` to the console and then exit the QEMU process.
``` console
$ cargo run --target thumbv7m-none-eabi --example init
{{#include ../../../../ci/expected/init.run}}
```
> **NOTE**: You can choose the target device by passing a target
> triple to cargo (e.g. `cargo run --example init --target thumbv7m-none-eabi`) or
> configure a default target in `.cargo/config.toml`.
>
> For running the examples, we use a Cortex M3 emulated in QEMU so the target is `thumbv7m-none-eabi`.

View file

@ -0,0 +1,7 @@
# The minimal app
This is the smallest possible RTIC application:
``` rust
{{#include ../../../../examples/smallest.rs}}
```

View file

@ -0,0 +1,45 @@
# Task priorities
## Priorities
The static priority of each handler can be declared in the `task` attribute
using the `priority` argument. For Cortex-M, tasks can have priorities in the range `1..=(1 <<
NVIC_PRIO_BITS)` where `NVIC_PRIO_BITS` is a constant defined in the `device`
crate. When the `priority` argument is omitted, the priority is assumed to be
`1`. The `idle` task has a non-configurable static priority of `0`, the lowest priority.
> A higher number means a higher priority in RTIC, which is the opposite from what
> Cortex-M does in the NVIC peripheral.
> Explicitly, this means that number `10` has a **higher** priority than number `9`.
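Declaring a priority is a one-liner in the task attribute; the sketch below uses illustrative task names and shows fragments from inside the `#[rtic::app]` module (software tasks like these also need the app's `dispatchers` argument):

``` rust
#[task(priority = 2)]
fn high(_cx: high::Context) {
    // Preempts `low` if both are ready to run.
}

#[task] // `priority` omitted, so this task runs at the default priority 1
fn low(_cx: low::Context) {
    // ...
}
```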
When several tasks are ready to be executed the one with highest static
priority will be executed first. Task prioritization can be observed in the
following scenario: during the execution of a low
priority task a higher priority task is spawned; this puts the higher priority task in the pending state.
The difference in priority results in the higher priority task preempting the
lower priority one: the execution of the lower priority task is suspended and
the higher priority task is executed to completion. Once the higher priority
task has terminated the lower priority task is resumed.
The following example showcases the priority based scheduling of tasks.
``` rust
{{#include ../../../../examples/preempt.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example preempt
{{#include ../../../../ci/expected/preempt.run}}
```
Note that the task `bar` does *not* preempt task `baz` because its priority
is the *same* as `baz`'s. However, once `baz` returns, the execution of
task `bar` is prioritized over `foo` due to its higher priority. `foo`
is resumed only after `bar` returns.
One more note about priorities: choosing a priority higher than what the device
supports will result in a compile error. Due to
limitations in the language, the error message is currently far from helpful: it
will say something along the lines of "evaluation of constant value failed" and
the span of the error will *not* point to the problematic interrupt value --
we are sorry about this!

View file

@ -0,0 +1,7 @@
# Defining tasks with `#[task]`
Tasks, defined with `#[task]`, are the main mechanism for getting work done in RTIC. Every task can be spawned (now or later), can be sent messages (message passing), and can be given a priority for preemptive multitasking.
There are two kinds of tasks, software tasks and hardware tasks; the difference is that hardware tasks are bound to a specific interrupt vector in the MCU while software tasks are not. This means that if a hardware task is bound to, say, the UART's RX interrupt, the task will run every time a character is received.
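As a rough sketch of the difference (the interrupt and task names are illustrative, these are fragments from inside the `#[rtic::app]` module, and software tasks additionally need free interrupt vectors listed in the app's `dispatchers` argument):

``` rust
// A software task: not bound to any interrupt, started explicitly with `spawn`.
#[task]
fn handle_command(_cx: handle_command::Context) { /* ... */ }

// A hardware task: bound to an interrupt vector, runs each time that interrupt fires.
#[task(binds = UART0)]
fn on_uart0(_cx: on_uart0::Context) { /* ... */ }
```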
In the coming pages we will explore both tasks and the different options available.

View file

@ -0,0 +1,24 @@
# Hardware tasks
To declare interrupt handlers the `#[task]` attribute takes a `binds = InterruptName` argument whose
value is the name of the interrupt to which the handler will be bound; the
function with this attribute becomes the interrupt handler. Within the
framework these types of tasks are referred to as *hardware* tasks, because they
start executing in reaction to a hardware event.
Providing an interrupt name that does not exist will cause a compile error, which helps catch
accidental mistakes.
The example below demonstrates the use of the `#[task]` attribute to declare an
interrupt handler. Like in the case of `#[init]` and `#[idle]`, local `static
mut` variables are safe to use within a hardware task.
``` rust
{{#include ../../../../examples/hardware.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example hardware
{{#include ../../../../ci/expected/hardware.run}}
```

View file

@ -0,0 +1,14 @@
# Message passing & capacity
Software tasks support message passing: they can be spawned with an argument,
as in `foo::spawn(1)`, which runs the task `foo` with the argument `1`. The number of arguments is not
limited, as the following example shows:
``` rust
{{#include ../../../../examples/message_passing.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example message_passing
{{#include ../../../../ci/expected/message_passing.run}}
```
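The `capacity` mentioned in the title refers to how many messages can be queued for a task before it gets a chance to run (the default is 1); it is set per task with the `capacity` argument. A sketch with illustrative names and numbers:

``` rust
// Up to four `foo` messages can be outstanding at once.
#[task(capacity = 4)]
fn foo(_cx: foo::Context, x: u32) {
    // `x` is the message payload passed via `foo::spawn(x)`
    let _ = x;
}
```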

View file

@ -0,0 +1,55 @@
# Monotonic & spawn_{at/after}
Keeping track of time is an important concept in embedded systems, and being able to run tasks
based on time is very useful. For this use case the framework provides the static methods
`task::spawn_after(/* duration */)` and `task::spawn_at(/* specific time instant */)`.
Most of the time `spawn_after` is used, but `spawn_at` is available for the cases where spawns must
happen without drift or relative to a fixed baseline.
To support this, the `#[monotonic]` attribute is applied to a type alias definition.
This type alias must point to a type which implements the [`rtic_monotonic::Monotonic`] trait,
generally a timer which handles the timing of the system. One or more monotonics can be
used in the same system, for example a slow timer that wakes the system from sleep and another
that is used for high-granularity scheduling while the system is awake.
[`rtic_monotonic::Monotonic`]: https://docs.rs/rtic-monotonic
The attribute has one required parameter and two optional parameters: `binds`, `default` and
`priority` respectively. `binds = InterruptName` defines which interrupt vector is associated with
the timer's interrupt, `default = true` enables a shorthand API when spawning and accessing the
time (`monotonics::now()` vs `monotonics::MyMono::now()`), and `priority` sets the priority of the
interrupt vector.
> By default `priority` is set to the **maximum priority** of the system but a lower priority
> can be selected if a high priority task cannot take the jitter introduced by the scheduling.
> This can however introduce jitter and delays into the scheduling, making it a trade-off.
Finally, the monotonics must be initialized in `#[init]` and returned in the `init::Monotonics( ... )` tuple.
This moves the monotonics into the active state which makes it possible to use them.
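Put together, the declaration and initialization follow this shape (a sketch only: `MyTimer` and its constructor are placeholders for a real `Monotonic` implementation, and empty `Shared`/`Local` structs are assumed):

``` rust
#[monotonic(binds = SysTick, default = true)]
type MyMono = MyTimer; // any type implementing `rtic_monotonic::Monotonic`

#[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
    // Hypothetical constructor taking the SysTick peripheral.
    let mono = MyTimer::new(cx.core.SYST);
    (Shared {}, Local {}, init::Monotonics(mono))
}
```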
An example is provided below:
``` rust
{{#include ../../../../examples/schedule.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example schedule
{{#include ../../../../ci/expected/schedule.run}}
```
## Canceling or rescheduling a scheduled task
Tasks spawned using `task::spawn_after` and `task::spawn_at` return a `SpawnHandle`,
which can be used to cancel or reschedule the task before it runs.
If `cancel` or `reschedule_at`/`reschedule_after` returns an `Err` it means that the operation was
too late and that the task has already been sent for execution. The following example shows this in action:
``` rust
{{#include ../../../../examples/cancel-reschedule.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example cancel-reschedule
{{#include ../../../../ci/expected/cancel-reschedule.run}}
```

View file

@ -1,84 +0,0 @@
# Starting a new project
Now that you have learned about the main features of the RTIC framework you can
try it out on your hardware by following these instructions.
1. Instantiate the [`cortex-m-quickstart`] template.
[`cortex-m-quickstart`]: https://github.com/rust-embedded/cortex-m-quickstart#cortex-m-quickstart
``` console
$ # for example using `cargo-generate`
$ cargo generate \
--git https://github.com/rust-embedded/cortex-m-quickstart \
--name app
$ # follow the rest of the instructions
```
2. Add a peripheral access crate (PAC) that was generated using [`svd2rust`]
**v0.14.x**, or a board support crate that depends on one such PAC as a
dependency. Make sure that the `rt` feature of the crate is enabled.
[`svd2rust`]: https://crates.io/crates/svd2rust
In this example, I'll use the [`lm3s6965`] device crate. This device crate
doesn't have an `rt` Cargo feature; that feature is always enabled.
[`lm3s6965`]: https://crates.io/crates/lm3s6965
This device crate provides a linker script with the memory layout of the target
device so `memory.x` and `build.rs` need to be removed.
``` console
$ cargo add lm3s6965 --vers 0.1.3
$ rm memory.x build.rs
```
3. Add the `cortex-m-rtic` crate as a dependency.
``` console
$ cargo add cortex-m-rtic --allow-prerelease
```
4. Write your RTIC application.
Here I'll use the `init` example from the `cortex-m-rtic` crate.
The examples are found in the `examples` folder, and the contents
of `init.rs` is shown here:
``` console
{{#include ../../../../examples/init.rs}}
```
The `init` example uses the `lm3s6965` device. Remember to adjust the `device`
argument in the app macro attribute to match the path of your PAC crate, if
different, and add peripherals or other arguments if needed. Although aliases
can be used, this needs to be a full path (from the crate root). For many
devices, it is common for the HAL implementation crate (aliased as `hal`) or
Board Support crate to re-export the PAC as `pac`, leading to a pattern similar
to the below:
```rust
use abcd123_hal as hal;
//...
#[rtic::app(device = crate::hal::pac, peripherals = true, monotonic = rtic::cyccnt::CYCCNT)]
mod app { /*...*/ }
```
The `init` example also depends on the `panic-semihosting` crate:
``` console
$ cargo add panic-semihosting
```
5. Build it, flash it and run it.
``` console
$ # NOTE: I have uncommented the `runner` option in `.cargo/config`
$ cargo run
{{#include ../../../../ci/expected/init.run}}
```

View file

@ -1,112 +1,157 @@
# Resources # Resource usage
The framework provides an abstraction to share data between any of the contexts The RTIC framework manages shared and task local resources which allows data to be persistently
we saw in the previous section (task handlers, `init` and `idle`): resources. stored and safely accessed without the use of unsafe code.
Resources are data visible only to functions declared within the `#[app]` RTIC resources are visible only to functions declared within the `#[app]` module and the framework
module. The framework gives the user complete control over which context gives the user complete control (on a per-task basis) over resource accessibility.
can access which resource.
All resources are declared as *two* `struct`s within the `#[app]` module. System wide resources are declared as **two** `struct`'s within the `#[app]` module annotated with
Each field in these structures corresponds to a different resource. the attribute `#[local]` and `#[shared]` respectively. Each field in these structures corresponds
One `struct` must be annotated with the attribute `#[local]`. to a different resource (identified by field name). The difference between these two sets of
The other `struct` must be annotated with the attribute `#[shared]`. resources will be covered below.
The difference between these two sets of resources will be covered later.
Each context (task handler, `init` or `idle`) must declare the resources it Each task must declare the resources it intends to access in its corresponding metadata attribute
intends to access in its corresponding metadata attribute using either the using the `local` and `shared` arguments. Each argument takes a list of resource identifiers. The
`local` or `shared` argument. This argument takes a list of resource names as listed resources are made available to the context under the `local` and `shared` fields of the
its value. The listed resources are made available to the context under the `Context` structure.
`local` and `shared` fields of the `Context` structure.
All resources are initialized at runtime, after the `#[init]` function returns. The `init` task returns the initial values for the system wide (`#[shared]` and `#[local]`)
The `#[init]` function must return the initial values for all resources; hence its return type includes the types of the `#[shared]` and `#[local]` structs. resources, and the set of initialized timers used by the application. The monotonic timers will be
Because resources are uninitialized during the execution of the `#[init]` function, they cannot be accessed within the `#[init]` function. further discussed in [Monotonic & `spawn_{at/after}`](./monotonic.md).
The example application shown below contains two interrupt handlers. ## `#[local]` resources
Each handler has access to its own `#[local]` resource.
`#[local]` resources are locally accessible to a specific task, meaning that only that task can
access the resource and does so without locks or critical sections. This allows for the resources,
commonly drivers or large objects, to be initialized in `#[init]` and then be passed to a specific
task.
The example application shown below contains two tasks where each task has access to its own
`#[local]` resource; in addition, the `idle` task has its own `#[local]` resource as well.
``` rust ``` rust
{{#include ../../../../examples/resource.rs}} {{#include ../../../../examples/locals.rs}}
``` ```
``` console ``` console
$ cargo run --example resource $ cargo run --target thumbv7m-none-eabi --example locals
{{#include ../../../../ci/expected/resource.run}} {{#include ../../../../ci/expected/locals.run}}
``` ```
A `#[local]` resource cannot be accessed from outside the task it was associated to in a `#[task]` attribute. A `#[local]` resource cannot be accessed from outside the task it was associated to in a `#[task]` attribute.
Assigning the same `#[local]` resource to more than one task is a compile-time error. Assigning the same `#[local]` resource to more than one task is a compile-time error.
## `lock` ### Task local initialized resources
Critical sections are required to access `#[shared]` resources in a data race-free manner. A special use-case of local resources are the ones specified directly in the resource claim,
`#[task(local = [my_var: TYPE = INITIAL_VALUE, ...])]`. This allows for creating locals which do not need to be
initialized in `#[init]`.
Moreover local resources in `#[init]` and `#[idle]` have `'static` lifetimes, this is safe since both are not re-entrant.
The `shared` field of the passed `Context` implements the [`Mutex`] trait for each shared resource accessible to the task. In the example below the different uses and lifetimes are shown:
The only method on this trait, [`lock`], runs its closure argument in a critical section. ``` rust
{{#include ../../../../examples/declared_locals.rs}}
```
<!-- ``` console
$ cargo run --target thumbv7m-none-eabi --example declared_locals
{{#include ../../../../ci/expected/declared_locals.run}}
``` -->
## `#[shared]` resources and `lock`
Critical sections are required to access `#[shared]` resources in a data race-free manner and to
achieve this the `shared` field of the passed `Context` implements the [`Mutex`] trait for each
shared resource accessible to the task. This trait has only one method, [`lock`], which runs its
closure argument in a critical section.
[`Mutex`]: ../../../api/rtic/trait.Mutex.html [`Mutex`]: ../../../api/rtic/trait.Mutex.html
[`lock`]: ../../../api/rtic/trait.Mutex.html#method.lock [`lock`]: ../../../api/rtic/trait.Mutex.html#method.lock
The critical section created by the `lock` API is based on dynamic priorities: it temporarily raises the dynamic priority of the context to a *ceiling* priority that prevents other tasks from preempting the critical section. This synchronization protocol is known as the [Immediate Ceiling Priority Protocol The critical section created by the `lock` API is based on dynamic priorities: it temporarily
(ICPP)][icpp], and complies with [Stack Resource Policy(SRP)][srp] based scheduling of RTIC. raises the dynamic priority of the context to a *ceiling* priority that prevents other tasks from
preempting the critical section. This synchronization protocol is known as the
[Immediate Ceiling Priority Protocol (ICPP)][icpp], and complies with
[Stack Resource Policy (SRP)][srp] based scheduling of RTIC.
[icpp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol [icpp]: https://en.wikipedia.org/wiki/Priority_ceiling_protocol
[srp]: https://en.wikipedia.org/wiki/Stack_Resource_Policy [srp]: https://en.wikipedia.org/wiki/Stack_Resource_Policy
In the example below we have three interrupt handlers with priorities ranging from one to three. The two handlers with the lower priorities contend for the `shared` resource and need to lock the resource for accessing the data. The highest priority handler, which do not access the `shared` resource, is free to preempt the critical section created by the In the example below we have three interrupt handlers with priorities ranging from one to three.
lowest priority handler. The two handlers with the lower priorities contend for the `shared` resource and need to lock the
resource for accessing the data. The highest priority handler, which does not access the `shared`
resource, is free to preempt the critical section created by the lowest priority handler.
``` rust ``` rust
{{#include ../../../../examples/lock.rs}} {{#include ../../../../examples/lock.rs}}
``` ```
``` console ``` console
$ cargo run --example lock $ cargo run --target thumbv7m-none-eabi --example lock
{{#include ../../../../ci/expected/lock.run}} {{#include ../../../../ci/expected/lock.run}}
``` ```
## Multi-lock ## Multi-lock
As an extension to `lock`, and to reduce rightward drift, locks can be taken as tuples. The following examples shows this in use: As an extension to `lock`, and to reduce rightward drift, locks can be taken as tuples. The
following examples shows this in use:
``` rust ``` rust
{{#include ../../../../examples/multilock.rs}} {{#include ../../../../examples/multilock.rs}}
``` ```
``` console
$ cargo run --target thumbv7m-none-eabi --example multilock
{{#include ../../../../ci/expected/multilock.run}}
```
## Only shared (`&-`) access ## Only shared (`&-`) access
By default the framework assumes that all tasks require exclusive access (`&mut-`) to resources but it is possible to specify that a task only requires shared access (`&-`) to a resource using the `&resource_name` syntax in the `resources` list. By default the framework assumes that all tasks require exclusive access (`&mut-`) to resources but
it is possible to specify that a task only requires shared access (`&-`) to a resource using the
`&resource_name` syntax in the `shared` list.
The advantage of specifying shared access (`&-`) to a resource is that no locks are required to access the resource even if the resource is contended by several tasks running at different priorities. The downside is that the task only gets a shared reference (`&-`) to the resource, limiting the operations it can perform on it, but where a shared reference is enough this approach reduces the number of required locks. In addition to simple immutable data, this shared access can be useful where the resource type safely implements interior mutability, with The advantage of specifying shared access (`&-`) to a resource is that no locks are required to
appropriate locking or atomic operations of its own. access the resource even if the resource is contended by several tasks running at different
priorities. The downside is that the task only gets a shared reference (`&-`) to the resource,
limiting the operations it can perform on it, but where a shared reference is enough this approach
reduces the number of required locks. In addition to simple immutable data, this shared access can
be useful where the resource type safely implements interior mutability, with appropriate locking
or atomic operations of its own.
Note that in this release of RTIC it is not possible to request both exclusive access (`&mut-`) and shared access (`&-`) to the *same* resource from different tasks. Attempting to do so will result in a compile error. Note that in this release of RTIC it is not possible to request both exclusive access (`&mut-`)
and shared access (`&-`) to the *same* resource from different tasks. Attempting to do so will
result in a compile error.
In the example below a key (e.g. a cryptographic key) is loaded (or created) at runtime and then used from two tasks that run at different priorities without any kind of lock. In the example below a key (e.g. a cryptographic key) is loaded (or created) at runtime and then
used from two tasks that run at different priorities without any kind of lock.
``` rust ``` rust
{{#include ../../../../examples/only-shared-access.rs}} {{#include ../../../../examples/only-shared-access.rs}}
``` ```
``` console ``` console
$ cargo run --example only-shared-access $ cargo run --target thumbv7m-none-eabi --example only-shared-access
{{#include ../../../../ci/expected/only-shared-access.run}} {{#include ../../../../ci/expected/only-shared-access.run}}
``` ```
## Lock-free resource access of mutable resources ## Lock-free resource access of shared resources
A critical section is *not* required to access a `#[shared]` resource that's only accessed by tasks running at the *same* priority. A critical section is *not* required to access a `#[shared]` resource that's only accessed by tasks
In this case, you can opt out of the `lock` API by adding the `#[lock_free]` field-level attribute to the resource declaration (see example below). running at the *same* priority. In this case, you can opt out of the `lock` API by adding the
Note that this is merely a convenience: if you do use the `lock` API, at runtime the framework will *not* produce a critical section. `#[lock_free]` field-level attribute to the resource declaration (see example below). Note that
Also worth noting: using `#[lock_free]` on resources shared by tasks running at different priorities will result in a *compile-time* error -- not using the `lock` API would be a data race in that case. this is merely a convenience: if you do use the `lock` API, at runtime the framework will
**not** produce a critical section. Also worth noting: using `#[lock_free]` on resources shared by
tasks running at different priorities will result in a *compile-time* error -- not using the `lock`
API would be a data race in that case.
``` rust ``` rust
{{#include ../../../../examples/lock-free.rs}} {{#include ../../../../examples/lock-free.rs}}
``` ```
``` console ``` console
$ cargo run --example lock-free $ cargo run --target thumbv7m-none-eabi --example lock-free
{{#include ../../../../ci/expected/lock-free.run}} {{#include ../../../../ci/expected/lock-free.run}}
``` ```

View file

@ -0,0 +1,16 @@
# Software tasks & spawn
To declare tasks in the framework the `#[task]` attribute is used on a function.
By default these tasks are referred to as software tasks, as they do not have a direct coupling to
an interrupt handler. Software tasks can be spawned (started) using the `task_name::spawn()` static
method, which will run the task immediately, provided that no higher priority task is running.
This is exemplified in the following:
``` rust
{{#include ../../../../examples/spawn.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example spawn
{{#include ../../../../ci/expected/spawn.run}}
```
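Stripped to its essentials, the mechanism looks like the sketch below (the `lm3s6965` device and `SSI0` dispatcher are the ones assumed throughout the book's examples; a real crate also needs a panic handler):

``` rust
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app {
    #[shared]
    struct Shared {}

    #[local]
    struct Local {}

    #[init]
    fn init(_cx: init::Context) -> (Shared, Local, init::Monotonics) {
        foo::spawn().unwrap(); // `foo` runs as soon as `init` returns
        (Shared {}, Local {}, init::Monotonics())
    }

    #[task]
    fn foo(_cx: foo::Context) {
        // Runs outside of `init`, dispatched through the SSI0 vector.
    }
}
```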

View file

@ -0,0 +1,14 @@
# Starting a new project
When starting an RTIC project from scratch it is recommended to follow RTIC's [`defmt-app-template`].
[`defmt-app-template`]: https://github.com/rtic-rs/defmt-app-template
This will give you an RTIC application with support for RTT logging with [`defmt`] and stack overflow
protection using [`flip-link`]. There is also a multitude of examples provided by the community:
- [https://github.com/kalkyl/f411-rtic](https://github.com/kalkyl/f411-rtic)
- ... More to come
[`defmt`]: https://github.com/knurling-rs/defmt/
[`flip-link`]: https://github.com/knurling-rs/flip-link/

View file

@ -1,118 +0,0 @@
# Software tasks
In addition to hardware tasks, which are invoked by the hardware in response to
hardware events, RTIC also supports *software* tasks which can be spawned by the
application from any execution context.
Software tasks can also be assigned priorities and, under the hood, are
dispatched from interrupt handlers. RTIC requires that free interrupts are
declared in the `dispatchers` app argument when using software tasks; some of these free
interrupts will be used to dispatch the software tasks. An advantage of software
tasks over hardware tasks is that many tasks can be mapped to a single interrupt
handler.
Software tasks are also declared using the `task` attribute but the `binds`
argument must be omitted.
The example below showcases three software tasks that run at 2 different
priorities. The three software tasks are mapped to 2 interrupts handlers.
``` rust
{{#include ../../../../examples/task.rs}}
```
``` console
$ cargo run --example task
{{#include ../../../../ci/expected/task.run}}
```
## Message passing
The other advantage of software tasks is that messages can be passed to these
tasks when spawning them. The type of the message payload must be specified in
the signature of the task handler.
The example below showcases three tasks, two of them expect a message.
``` rust
{{#include ../../../../examples/message.rs}}
```
``` console
$ cargo run --example message
{{#include ../../../../ci/expected/message.run}}
```
## Capacity
RTIC does *not* perform any form of heap-based memory allocation. The memory
required to store messages is statically reserved. By default the framework
minimizes the memory footprint of the application so each task has a message
"capacity" of 1: meaning that at most one message can be posted to the task
before it gets a chance to run. This default can be overridden for each task
using the `capacity` argument. This argument takes a positive integer that
indicates how many messages the task message buffer can hold.
The example below sets the capacity of the software task `foo` to 4. If the
capacity is not specified then the second `spawn.foo` call in `UART0` would
fail (panic).
``` rust
{{#include ../../../../examples/capacity.rs}}
```
``` console
$ cargo run --example capacity
{{#include ../../../../ci/expected/capacity.run}}
```
## Error handling
The `spawn` API returns the `Err` variant when there's no space to send the
message. In most scenarios spawning errors are handled in one of two ways:
- Panicking, using `unwrap`, `expect`, etc. This approach is used to catch the
programmer error (i.e. bug) of selecting a capacity that was too small. When
this panic is encountered during testing choosing a bigger capacity and
recompiling the program may fix the issue but sometimes it's necessary to dig
deeper and perform a timing analysis of the application to check if the
platform can deal with peak payload or if the processor needs to be replaced
with a faster one.
- Ignoring the result. In soft real-time and non real-time applications it may
be OK to occasionally lose data or fail to respond to some events during event
bursts. In those scenarios silently letting a `spawn` call fail may be
acceptable.
It should be noted that retrying a `spawn` call is usually the wrong approach as
this operation will likely never succeed in practice. Because there are only
context switches towards *higher* priority tasks retrying the `spawn` call of a
lower priority task will never let the scheduler dispatch said task meaning that
its message buffer will never be emptied. This situation is depicted in the
following snippet:
``` rust
#[rtic::app(..)]
mod app {
#[init(spawn = [foo, bar])]
fn init(cx: init::Context) {
cx.spawn.foo().unwrap();
cx.spawn.bar().unwrap();
}
#[task(priority = 2, spawn = [bar])]
fn foo(cx: foo::Context) {
// ..
// the program will get stuck here
while cx.spawn.bar(payload).is_err() {
// retry the spawn call if it failed
}
}
#[task(priority = 1)]
fn bar(cx: bar::Context, payload: i32) {
// ..
}
}
```

View file

@ -1,113 +0,0 @@
# Timer queue
In contrast with the `spawn` API, which immediately spawns a software task onto
the scheduler, the `schedule` API can be used to schedule a task to run some
time in the future.
To use the `schedule` API a monotonic timer must be first defined using the
`monotonic` argument of the `#[app]` attribute. This argument takes a path to a
type that implements the [`Monotonic`] trait. The associated type, `Instant`, of
this trait represents a timestamp in arbitrary units and it's used extensively
in the `schedule` API -- it is suggested to model this type after [the one in
the standard library][std-instant].
Although not shown in the trait definition (due to limitations in the trait /
type system) the subtraction of two `Instant`s should return some `Duration`
type (see [`core::time::Duration`]) and this `Duration` type must implement the
`TryInto<u32>` trait. The implementation of this trait must convert the
`Duration` value, which uses some arbitrary unit of time, into the "system timer
(SYST) clock cycles" time unit. The result of the conversion must be a 32-bit
integer. If the result of the conversion doesn't fit in a 32-bit number then the
operation must return an error, any error type.
[`Monotonic`]: ../../../api/rtic/trait.Monotonic.html
[std-instant]: https://doc.rust-lang.org/std/time/struct.Instant.html
[`core::time::Duration`]: https://doc.rust-lang.org/core/time/struct.Duration.html
For ARMv7+ targets the `rtic` crate provides a `Monotonic` implementation based
on the built-in CYCle CouNTer (CYCCNT). Note that this is a 32-bit timer clocked
at the frequency of the CPU and as such it is not suitable for tracking time
spans in the order of seconds.
When scheduling a task the (user-defined) `Instant` at which the task should be
executed must be passed as the first argument of the `schedule` invocation.
Additionally, the chosen `monotonic` timer must be configured and initialized
during the `#[init]` phase. Note that this is *also* the case if you choose to
use the `CYCCNT` provided by the `cortex-m-rtic` crate.
The example below schedules two tasks from `init`: `foo` and `bar`. `foo` is
scheduled to run 8 million clock cycles in the future. Next, `bar` is scheduled
to run 4 million clock cycles in the future. Thus `bar` runs before `foo` since
it was scheduled to run first.
> **IMPORTANT**: The examples that use the `schedule` API or the `Instant`
> abstraction will **not** properly work on QEMU because the Cortex-M cycle
> counter functionality has not been implemented in `qemu-system-arm`.
``` rust
{{#include ../../../../examples/schedule.rs}}
```
Running the program on real hardware produces the following output in the
console:
``` text
{{#include ../../../../ci/expected/schedule.run}}
```
When the `schedule` API is being used the runtime internally uses the `SysTick`
interrupt handler and the system timer peripheral (`SYST`) so neither can be
used by the application. This is accomplished by changing the type of
`init::Context.core` from `cortex_m::Peripherals` to `rtic::Peripherals`. The
latter structure contains all the fields of the former minus the `SYST` one.
## Periodic tasks
Software tasks have access to the `Instant` at which they were scheduled to run
through the `scheduled` variable. This information and the `schedule` API can be
used to implement periodic tasks as shown in the example below.
``` rust
{{#include ../../../../examples/periodic.rs}}
```
This is the output produced by the example. Note that there is zero drift /
jitter even though `schedule.foo` was invoked at the *end* of `foo`. Using
`Instant::now` instead of `scheduled` would have resulted in drift / jitter.
``` text
{{#include ../../../../ci/expected/periodic.run}}
```
## Baseline
For the tasks scheduled from `init` we have exact information about their
`scheduled` time. For hardware tasks there's no `scheduled` time because these
tasks are asynchronous in nature. For hardware tasks the runtime provides a
`start` time, which indicates the time at which the task handler started
executing.
Note that `start` is **not** equal to the arrival time of the event that fired
the task. Depending on the priority of the task and the load of the system the
`start` time could be very far off from the event arrival time.
What do you think will be the value of `scheduled` for software tasks that are
*spawned* instead of scheduled? The answer is that spawned tasks inherit the
*baseline* time of the context that spawned it. The baseline of hardware tasks
is their `start` time, the baseline of software tasks is their `scheduled` time
and the baseline of `init` is the system start time or time zero
(`Instant::zero()`). `idle` doesn't really have a baseline but tasks spawned
from it will use `Instant::now()` as their baseline time.
The example below showcases the different meanings of the *baseline*.
``` rust
{{#include ../../../../examples/baseline.rs}}
```
Running the program on real hardware produces the following output in the console:
``` text
{{#include ../../../../ci/expected/baseline.run}}
```

View file

@ -1,176 +1,3 @@
# Tips & tricks # Tips & tricks
For complete RTIC examples see the [rtic-examples][rtic-examples] repository. In this section we will explore common tips & tricks related to using RTIC.
[rtic-examples]: https://github.com/rtic-rs/rtic-examples
## Generics
All resource proxies implement the `rtic::Mutex` trait.
If a resource does not implement this, one can wrap it in the [`rtic::Exclusive`]
newtype which does implement the `Mutex` trait. With the help of this newtype
one can write a generic function that operates on generic resources and call it
from different tasks to perform some operation on the same set of resources.
Here's one such example:
[`rtic::Exclusive`]: ../../../api/rtic/struct.Exclusive.html
``` rust
{{#include ../../../../examples/generics.rs}}
```
``` console
$ cargo run --example generics
{{#include ../../../../ci/expected/generics.run}}
```
## Conditional compilation
You can use conditional compilation (`#[cfg]`) on resources (the fields of
`#[resources] struct Resources`) and tasks (the `fn` items).
The effect of using `#[cfg]` attributes is that the resource / task
will *not* be available through the corresponding `Context` `struct`
if the condition doesn't hold.
The example below logs a message whenever the `foo` task is spawned, but only if
the program has been compiled using the `dev` profile.
``` rust
{{#include ../../../../examples/cfg.rs}}
```
``` console
$ cargo run --example cfg --release
$ cargo run --example cfg
{{#include ../../../../ci/expected/cfg.run}}
```
## Running tasks from RAM
The main goal of moving the specification of RTIC applications to attributes in
RTIC v0.4.0 was to allow inter-operation with other attributes. For example, the
`link_section` attribute can be applied to tasks to place them in RAM; this can
improve performance in some cases.
> **IMPORTANT**: In general, the `link_section`, `export_name` and `no_mangle`
> attributes are very powerful but also easy to misuse. Incorrectly using any of
> these attributes can cause undefined behavior; you should always prefer to use
> safe, higher level attributes around them like `cortex-m-rt`'s `interrupt` and
> `exception` attributes.
>
> In the particular case of RAM functions there's no
> safe abstraction for it in `cortex-m-rt` v0.6.5 but there's an [RFC] for
> adding a `ramfunc` attribute in a future release.
[RFC]: https://github.com/rust-embedded/cortex-m-rt/pull/100
The example below shows how to place the higher priority task, `bar`, in RAM.
``` rust
{{#include ../../../../examples/ramfunc.rs}}
```
Running this program produces the expected output.
``` console
$ cargo run --example ramfunc
{{#include ../../../../ci/expected/ramfunc.run}}
```
One can look at the output of `cargo-nm` to confirm that `bar` ended in RAM
(`0x2000_0000`), whereas `foo` ended in Flash (`0x0000_0000`).
``` console
$ cargo nm --example ramfunc --release | grep ' foo::'
{{#include ../../../../ci/expected/ramfunc.grep.foo}}
```
``` console
$ cargo nm --example ramfunc --release | grep ' bar::'
{{#include ../../../../ci/expected/ramfunc.grep.bar}}
```
## Indirection for faster message passing
Message passing always involves copying the payload from the sender into a
static variable and then from the static variable into the receiver. Thus
sending a large buffer, like a `[u8; 128]`, as a message involves two expensive
`memcpy`s. To minimize the message passing overhead one can use indirection:
instead of sending the buffer by value, one can send an owning pointer into the
buffer.
One can use a global allocator to achieve indirection (`alloc::Box`,
`alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.37.0,
or one can use a statically allocated memory pool like [`heapless::Pool`].
[`heapless::Pool`]: https://docs.rs/heapless/0.5.0/heapless/pool/index.html
Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
``` rust
{{#include ../../../../examples/pool.rs}}
```
``` console
$ cargo run --example pool
{{#include ../../../../ci/expected/pool.run}}
```
## Inspecting the expanded code
`#[rtic::app]` is a procedural macro that produces support code. If for some
reason you need to inspect the code generated by this macro you have two
options:
You can inspect the file `rtic-expansion.rs` inside the `target` directory. This
file contains the expansion of the `#[rtic::app]` item (not your whole program!)
of the *last built* (via `cargo build` or `cargo check`) RTIC application. The
expanded code is not pretty printed by default so you'll want to run `rustfmt`
on it before you read it.
``` console
$ cargo build --example foo
$ rustfmt target/rtic-expansion.rs
$ tail target/rtic-expansion.rs
```
``` rust
#[doc = r" Implementation details"]
mod app {
#[doc = r" Always include the device crate which contains the vector table"]
use lm3s6965 as _;
#[no_mangle]
unsafe extern "C" fn main() -> ! {
rtic::export::interrupt::disable();
let mut core: rtic::export::Peripherals = core::mem::transmute(());
core.SCB.scr.modify(|r| r | 1 << 1);
rtic::export::interrupt::enable();
loop {
rtic::export::wfi()
}
}
}
```
Or, you can use the [`cargo-expand`] sub-command. This sub-command will expand
*all* the macros, including the `#[rtic::app]` attribute, and modules in your
crate and print the output to the console.
[`cargo-expand`]: https://crates.io/crates/cargo-expand
``` console
$ # produces the same output as before
$ cargo expand --example smallest | tail
```
## Resource de-structure-ing
When a task takes multiple resources, splitting up the resource struct can help
readability. Here are two examples of how this can be done:
``` rust
{{#include ../../../../examples/destructure.rs}}
```
@ -0,0 +1,13 @@
# Resource de-structure-ing
When a task takes multiple resources, splitting up the resource struct can help
readability. Here are two examples of how this can be done:
``` rust
{{#include ../../../../examples/destructure.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example destructure
{{#include ../../../../ci/expected/destructure.run}}
```
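In essence, the two styles are: bind each field by hand, or destructure the generated
`SharedResources` struct in a single pattern. Condensed from the example above (resource and
task names follow that example):
``` rust
// Direct access of each field
#[task(shared = [&a, &b, &c])]
fn foo(cx: foo::Context) {
    let a = cx.shared.a;
    let b = cx.shared.b;
    let c = cx.shared.c;
    hprintln!("foo: a = {}, b = {}, c = {}", a, b, c).unwrap();
}

// De-structuring the generated `SharedResources` struct
#[task(shared = [&a, &b, &c])]
fn bar(cx: bar::Context) {
    let bar::SharedResources { a, b, c } = cx.shared;
    hprintln!("bar: a = {}, b = {}, c = {}", a, b, c).unwrap();
}
```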
@ -0,0 +1,45 @@
# Running tasks from RAM
The main goal of moving the specification of RTIC applications to attributes in
RTIC v0.4.0 was to allow inter-operation with other attributes. For example, the
`link_section` attribute can be applied to tasks to place them in RAM; this can
improve performance in some cases.
> **IMPORTANT**: In general, the `link_section`, `export_name` and `no_mangle`
> attributes are very powerful but also easy to misuse. Incorrectly using any of
> these attributes can cause undefined behavior; you should always prefer to use
> safe, higher level attributes around them like `cortex-m-rt`'s `interrupt` and
> `exception` attributes.
>
> In the particular case of RAM functions there's no
> safe abstraction for it in `cortex-m-rt` v0.6.5 but there's an [RFC] for
> adding a `ramfunc` attribute in a future release.
[RFC]: https://github.com/rust-embedded/cortex-m-rt/pull/100
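In practice this is just a matter of stacking attributes on top of the task definition. A minimal
sketch (the `.data.bar` section name and the `#[inline(never)]` hint are assumptions that mirror
the `ramfunc` example below):
``` rust
// run this task from RAM
#[inline(never)]
#[link_section = ".data.bar"]
#[task(priority = 2)]
fn bar(_: bar::Context) {
    // body as usual; only the placement of the function changes
}
```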
The example below shows how to place the higher priority task, `bar`, in RAM.
``` rust
{{#include ../../../../examples/ramfunc.rs}}
```
Running this program produces the expected output.
``` console
$ cargo run --target thumbv7m-none-eabi --example ramfunc
{{#include ../../../../ci/expected/ramfunc.run}}
```
One can look at the output of `cargo-nm` to confirm that `bar` ended up in RAM
(`0x2000_0000`), whereas `foo` ended up in Flash (`0x0000_0000`).
``` console
$ cargo nm --example ramfunc --release | grep ' foo::'
{{#include ../../../../ci/expected/ramfunc.grep.foo}}
```
``` console
$ cargo nm --example ramfunc --release | grep ' bar::'
{{#include ../../../../ci/expected/ramfunc.grep.bar}}
```
@ -0,0 +1,26 @@
# Using indirection for faster message passing
Message passing always involves copying the payload from the sender into a
static variable and then from the static variable into the receiver. Thus
sending a large buffer, like a `[u8; 128]`, as a message involves two expensive
`memcpy`s. To minimize the message passing overhead one can use indirection:
instead of sending the buffer by value, one can send an owning pointer into the
buffer.
One can use a global allocator to achieve indirection (`alloc::Box`,
`alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.37.0,
or one can use a statically allocated memory pool like [`heapless::Pool`].
[`heapless::Pool`]: https://docs.rs/heapless/0.5.0/heapless/pool/index.html
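Condensed, the pattern looks roughly like the sketch below: declare a pool, grow it once with
static memory in `#[init]`, and from then on allocate pointer-sized `Box`es that are cheap to send
as messages. This is a sketch assuming the `pool!` singleton API from `heapless`; the task names
`producer`/`consumer` are placeholders, and the complete RTIC example follows below.
``` rust
use heapless::{
    pool,
    pool::singleton::{Box, Pool},
};

// Declare a pool, `P`, of 128-byte buffers
pool!(P: [u8; 128]);

#[init(local = [memory: [u8; 512] = [0; 512]])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
    // Hand the pool some static memory to manage
    P::grow(cx.local.memory);
    (Shared {}, Local {}, init::Monotonics())
}

#[task]
fn producer(_: producer::Context) {
    // Allocate a buffer from the pool; `Box<P>` is pointer sized, so sending
    // it as a message avoids copying the whole 128-byte buffer
    let x: Box<P> = P::alloc().unwrap().init([0u8; 128]);
    consumer::spawn(x).unwrap();
}
```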
Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
``` rust
{{#include ../../../../examples/pool.rs}}
```
``` console
$ cargo run --target thumbv7m-none-eabi --example pool
{{#include ../../../../ci/expected/pool.run}}
```
@ -0,0 +1,59 @@
# Implementing a `Monotonic` timer for scheduling
The framework is very flexible in that it can utilize any timer which has compare-match and (optional)
overflow interrupts for scheduling. The only thing needed to make a timer usable with RTIC is to
implement the [`rtic_monotonic::Monotonic`] trait.
Implementing time handling that supports a vast range is generally **very** difficult, and in RTIC 0.5 it
was a common problem to implement it without getting stuck in weird special cases. Moreover, it was
difficult to understand the relation between time and the timers used for scheduling. From RTIC 0.6 we
have moved to [`embedded_time`] as the basis for all time-based operations and the abstraction of clocks.
This is why, from RTIC 0.6, it is almost trivial to implement the `Monotonic` trait and use any timer in a
system for scheduling.
The trait documents the requirements for each method; a small proof-of-concept (PoC) implementation is
provided below.
[`rtic_monotonic::Monotonic`]: https://docs.rs/rtic-monotonic/
[`embedded_time`]: https://docs.rs/embedded_time/
```rust
use rtic_monotonic::{embedded_time::clock::Error, Clock, Fraction, Instant, Monotonic};

/// Example wrapper struct for a timer
/// (`TIM2` is the timer peripheral type from the device PAC)
pub struct Timer<const FREQ: u32> {
    tim: TIM2,
}

impl<const FREQ: u32> Clock for Timer<FREQ> {
    const SCALING_FACTOR: Fraction = Fraction::new(1, FREQ);
    type T = u32;

    #[inline(always)]
    fn try_now(&self) -> Result<Instant<Self>, Error> {
        // Read the current value of the timer's counter register
        Ok(Instant::new(self.tim.cnt.read().bits()))
    }
}

impl<const FREQ: u32> Monotonic for Timer<FREQ> {
    unsafe fn reset(&mut self) {
        // Reset the timer counter
        self.tim.cnt.write(|w| w.bits(0));

        // Since reset is only called once, we use it to enable
        // the compare-match interrupt generation bit.
        self.tim.dier.modify(|_, w| w.cc1ie().set_bit());
    }

    // Use compare channel 1 for the Monotonic
    fn set_compare(&mut self, instant: &Instant<Self>) {
        self.tim
            .ccr1
            .write(|w| w.ccr().bits(instant.duration_since_epoch().integer()));
    }

    fn clear_compare_flag(&mut self) {
        self.tim.sr.modify(|_, w| w.cc1if().clear_bit());
    }
}
```
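Once the trait is implemented, the timer is registered with the application through the
`#[monotonic]` attribute, just like the `Systick` based monotonics used elsewhere in this book.
A sketch for the `TIM2` based timer above (the interrupt name and frequency are assumptions):
``` rust
#[monotonic(binds = TIM2, default = true)]
type MyMono = Timer<1_000_000>; // 1 MHz granularity
```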
@ -0,0 +1,24 @@
# 'static super-powers
As discussed earlier, `local` resources are given a `'static` lifetime in `#[init]` and `#[idle]`.
This can be used to allocate an object and then split it up, or to hand the pre-allocated object to a
task, driver or some other object.
This is very useful when memory needs to be allocated for drivers, such as USB drivers, or when using
data structures that can be split, such as [`heapless::spsc::Queue`].
In the following example a [`heapless::spsc::Queue`] is given to two different tasks for lock-free access
to the shared queue.
[`heapless::spsc::Queue`]: https://docs.rs/heapless/0.7.5/heapless/spsc/struct.Queue.html
``` rust
{{#include ../../../../examples/static.rs}}
```
Running this program produces the expected output.
``` console
$ cargo run --target thumbv7m-none-eabi --example static
{{#include ../../../../ci/expected/static.run}}
```
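The key step is in `#[init]`: because its locals have `'static` lifetime, the queue can be split
and the two halves handed out as resources. Condensed from the example above:
``` rust
#[init(local = [q: Queue<u32, 5> = Queue::new()])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
    // `q` lives for 'static, so the producer/consumer halves outlive `init`
    let (p, c) = cx.local.q.split();

    foo::spawn().unwrap();

    (Shared {}, Local { p, c }, init::Monotonics())
}
```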
@ -0,0 +1,48 @@
# Inspecting generated code
`#[rtic::app]` is a procedural macro that produces support code. If for some
reason you need to inspect the code generated by this macro you have two
options:
You can inspect the file `rtic-expansion.rs` inside the `target` directory. This
file contains the expansion of the `#[rtic::app]` item (not your whole program!)
of the *last built* (via `cargo build` or `cargo check`) RTIC application. The
expanded code is not pretty printed by default so you'll want to run `rustfmt`
on it before you read it.
``` console
$ cargo build --example foo
$ rustfmt target/rtic-expansion.rs
$ tail target/rtic-expansion.rs
```
``` rust
#[doc = r" Implementation details"]
mod app {
#[doc = r" Always include the device crate which contains the vector table"]
use lm3s6965 as _;
#[no_mangle]
unsafe extern "C" fn main() -> ! {
rtic::export::interrupt::disable();
let mut core: rtic::export::Peripherals = core::mem::transmute(());
core.SCB.scr.modify(|r| r | 1 << 1);
rtic::export::interrupt::enable();
loop {
rtic::export::wfi()
}
}
}
```
Or, you can use the [`cargo-expand`] sub-command. This sub-command will expand
*all* the macros, including the `#[rtic::app]` attribute, and modules in your
crate and print the output to the console.
[`cargo-expand`]: https://crates.io/crates/cargo-expand
``` console
$ # produces the same output as before
$ cargo expand --example smallest | tail
```
@ -1,51 +0,0 @@
# Types, Send and Sync
Every function within the `app` module has a `Context` structure as its
first parameter. All the fields of these structures have predictable,
non-anonymous types so you can write plain functions that take them as arguments.
The API reference specifies how these types are generated from the input. You
can also generate documentation for your binary crate (`cargo doc --bin <name>`);
in the documentation you'll find `Context` structs (e.g. `init::Context` and
`idle::Context`).
The example below shows the different types generated by the `app` attribute.
``` rust
{{#include ../../../../examples/types.rs}}
```
## `Send`
[`Send`] is a marker trait for "types that can be transferred across thread
boundaries", according to its definition in `core`. In the context of RTIC the
`Send` trait is only required where it's possible to transfer a value between
tasks that run at *different* priorities. This occurs in a few places: in
message passing, in shared resources and in the initialization of late
resources.
[`Send`]: https://doc.rust-lang.org/core/marker/trait.Send.html
The `app` attribute will enforce that `Send` is implemented where required so
you don't need to worry much about it. Currently all types that are passed need
to be `Send` in RTIC, however this restriction might be relaxed in the future.
## `Sync`
Similarly, [`Sync`] is a marker trait for "types for which it is safe to share
references between threads", according to its definition in `core`. In the
context of RTIC the `Sync` trait is only required where two, or more, tasks
that run at different priorities may get a shared reference (`&-`) to a
resource. This only occurs with shared access (`&-`) resources.
[`Sync`]: https://doc.rust-lang.org/core/marker/trait.Sync.html
The `app` attribute will enforce that `Sync` is implemented where required but
it's important to know where the `Sync` bound is not required: shared access
(`&-`) resources contended by tasks that run at the *same* priority.
The example below shows where a type that doesn't implement `Sync` can be used.
``` rust
{{#include ../../../../examples/not-sync.rs}}
```
@ -21,6 +21,6 @@ This is the documentation of v0.6.x of RTIC; for the documentation of version
* v0.5.x go [here](/0.5). * v0.5.x go [here](/0.5).
* v0.4.x go [here](/0.4). * v0.4.x go [here](/0.4).
{{#include ../../../README.md:7:46}} {{#include ../../../README.md:7:47}}
{{#include ../../../README.md:52:}} {{#include ../../../README.md:48:}}
@ -0,0 +1,3 @@
init
foo
bar
ci/expected/common.run Normal file
@ -0,0 +1,2 @@
foo: a = 0, b = 0, c = 0
bar: a = 0, b = 0, c = 0
@ -0,0 +1,4 @@
init
foo called
idle
foo called
@ -0,0 +1,2 @@
foo 1, 2
foo 2, 3
ci/expected/locals.run Normal file
@ -0,0 +1,3 @@
foo: local_to_foo = 1
bar: local_to_bar = 1
idle: local_to_idle = 1
@ -1,14 +1,2 @@
GPIOA/start foo = 1
GPIOA/counter = 1 bar = 2
GPIOA/end
GPIOB/start
GPIOB/counter = 2
GPIOB/end
GPIOA/start
GPIOA/counter = 3
GPIOA/end
GPIOB/start
GPIOB/counter = 4
GPIOB/end
GPIOA/start
GPIOA/counter = 5
@ -0,0 +1,3 @@
foo 1, 1
foo 1, 2
foo 2, 3
@ -1,4 +1 @@
Multiple single locks Multiple locks, s1: 1, s2: 1, s3: 1
Multiple single locks, s1: 1, s2: 1, s3: 1
Multilock!
Multiple single locks, s1: 2, s2: 2, s3: 2
@ -1,2 +1,2 @@
UART1(key = 0xdeadbeef) bar(key = 0xdeadbeef)
UART0(key = 0xdeadbeef) foo(key = 0xdeadbeef)
@ -1,3 +1,4 @@
foo(scheduled = Instant(8000000), now = Instant(8000196)) foo
foo(scheduled = Instant(16000000), now = Instant(16000196)) foo
foo(scheduled = Instant(24000000), now = Instant(24000196)) foo
foo
@ -1,2 +1,2 @@
bar(0x2000008c) bar(0x20000088)
foo(0x20000110) foo(0x2000010c)
@ -1,5 +1,5 @@
GPIOA - start foo - start
GPIOC - start baz - start
GPIOC - end baz - end
GPIOB bar
GPIOA - end foo - end
@ -0,0 +1,2 @@
UART0: shared = 1
UART1: shared = 2
@ -1,3 +1,4 @@
init @ Instant(0) init
bar @ Instant(4000236) foo
foo @ Instant(8000173) bar
baz
ci/expected/shared.run Normal file
@ -0,0 +1 @@
received message: 42
ci/expected/smallest.run Normal file
ci/expected/spawn.run Normal file
@ -0,0 +1,2 @@
init
foo
ci/expected/static.run Normal file
@ -0,0 +1,3 @@
received message: 1
received message: 2
received message: 3
ci/expected/t-binds.run Normal file
ci/expected/t-spawn.run Normal file
@ -24,6 +24,7 @@ impl BigStruct {
mod app { mod app {
use super::BigStruct; use super::BigStruct;
use core::mem::MaybeUninit; use core::mem::MaybeUninit;
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared { struct Shared {
@ -41,6 +42,8 @@ mod app {
&mut *cx.local.bs.as_mut_ptr() &mut *cx.local.bs.as_mut_ptr()
}; };
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
( (
Shared { Shared {
// assign the reference so we can use the resource // assign the reference so we can use the resource
@ -34,7 +34,7 @@ mod app {
rtic::pend(Interrupt::UART0); rtic::pend(Interrupt::UART0);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
@ -0,0 +1,74 @@
//! examples/cancel-reschedule.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app {
use cortex_m_semihosting::{debug, hprintln};
use rtic::time::duration::*;
use systick_monotonic::Systick;
#[monotonic(binds = SysTick, default = true)]
type MyMono = Systick<100>; // 100 Hz / 10 ms granularity
#[shared]
struct Shared {}
#[local]
struct Local {}
#[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let systick = cx.core.SYST;
// Initialize the monotonic
let mono = Systick::new(systick, 12_000_000);
hprintln!("init").ok();
// Schedule `foo` to run 1 second in the future
foo::spawn_after(1.seconds()).unwrap();
(
Shared {},
Local {},
init::Monotonics(mono), // Give the monotonic to RTIC
)
}
#[task]
fn foo(_: foo::Context) {
hprintln!("foo").ok();
// Schedule `bar` to run 2 seconds in the future (1 second after foo runs)
let spawn_handle = baz::spawn_after(2.seconds()).unwrap();
bar::spawn_after(1.seconds(), spawn_handle, false).unwrap(); // Change to true
}
#[task]
fn bar(_: bar::Context, baz_handle: baz::SpawnHandle, do_reschedule: bool) {
hprintln!("bar").ok();
if do_reschedule {
// Reschedule baz 2 seconds from now, instead of the original 1 second
// from now.
baz_handle.reschedule_after(2.seconds()).unwrap();
// Or baz_handle.reschedule_at(/* time */)
} else {
// Or cancel it
baz_handle.cancel().unwrap();
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
}
}
#[task]
fn baz(_: baz::Context) {
hprintln!("baz").ok();
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
}
}
@ -44,6 +44,6 @@ mod app {
fn bar(_: bar::Context) { fn bar(_: bar::Context) {
hprintln!("bar").unwrap(); hprintln!("bar").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
} }
@ -43,7 +43,7 @@ mod app {
#[idle] #[idle]
fn idle(_: idle::Context) -> ! { fn idle(_: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
examples/common.rs Normal file
@ -0,0 +1,101 @@
//! examples/common.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [SSI0, QEI0])]
mod app {
use cortex_m_semihosting::{debug, hprintln};
use rtic::time::duration::*;
use systick_monotonic::Systick; // Implements the `Monotonic` trait // Time helpers, such as `N.seconds()`
// A monotonic timer to enable scheduling in RTIC
#[monotonic(binds = SysTick, default = true)]
type MyMono = Systick<100>; // 100 Hz / 10 ms granularity
// Resources shared between tasks
#[shared]
struct Shared {
s1: u32,
s2: i32,
}
// Local resources to specific tasks (cannot be shared)
#[local]
struct Local {
l1: u8,
l2: i8,
}
#[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let systick = cx.core.SYST;
let mono = Systick::new(systick, 12_000_000);
// Spawn the task `foo` directly after `init` finishes
foo::spawn().unwrap();
// Spawn the task `bar` 1 second after `init` finishes, this is enabled
// by the `#[monotonic(..)]` above
bar::spawn_after(1.seconds()).unwrap();
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(
// Initialization of shared resources
Shared { s1: 0, s2: 1 },
// Initialization of task local resources
Local { l1: 2, l2: 3 },
// Move the monotonic timer to the RTIC run-time, this enables
// scheduling
init::Monotonics(mono),
)
}
// Background task, runs whenever no other tasks are running
#[idle]
fn idle(_: idle::Context) -> ! {
loop {
continue;
}
}
// Software task, not bound to a hardware interrupt.
// This task takes the task local resource `l1`
// The resources `s1` and `s2` are shared between all other tasks.
#[task(shared = [s1, s2], local = [l1])]
fn foo(_: foo::Context) {
// This task is only spawned once in `init`, hence this task will run
// only once
hprintln!("foo").ok();
}
// Software task, also not bound to a hardware interrupt
// This task takes the task local resource `l2`
// The resources `s1` and `s2` are shared between all other tasks.
#[task(shared = [s1, s2], local = [l2])]
fn bar(_: bar::Context) {
hprintln!("bar").ok();
// Run `bar` once per second
bar::spawn_after(1.seconds()).unwrap();
}
// Hardware task, bound to a hardware interrupt
// The resources `s1` and `s2` are shared between all other tasks.
#[task(binds = UART0, priority = 3, shared = [s1, s2])]
fn uart0_interrupt(_: uart0_interrupt::Context) {
// This task is bound to the interrupt `UART0` and will run
// whenever the interrupt fires
// Note that RTIC does NOT clear the interrupt flag, this is up to the
// user
hprintln!("UART0 interrupt!").ok();
}
}
@ -0,0 +1,46 @@
//! examples/declared_locals.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [UART0])]
mod app {
use cortex_m_semihosting::debug;
#[shared]
struct Shared {}
#[local]
struct Local {}
#[init(local = [a: u32 = 0])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
// Locals in `#[init]` have 'static lifetime
let _a: &'static mut u32 = cx.local.a;
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics())
}
#[idle(local = [a: u32 = 0])]
fn idle(cx: idle::Context) -> ! {
// Locals in `#[idle]` have 'static lifetime
let _a: &'static mut u32 = cx.local.a;
loop {}
}
#[task(local = [a: u32 = 0])]
fn foo(cx: foo::Context) {
// Locals in `#[task]`s have a local lifetime
let _a: &mut u32 = cx.local.a;
// error: explicit lifetime required in the type of `cx`
// let _a: &'static mut u32 = cx.local.a;
}
}
@ -7,14 +7,12 @@
use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [UART0])]
mod app { mod app {
use cortex_m_semihosting::hprintln; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {
// Some resources to work with
a: u32, a: u32,
b: u32, b: u32,
c: u32, c: u32,
@ -25,27 +23,33 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::UART0); foo::spawn().unwrap();
rtic::pend(Interrupt::UART1); bar::spawn().unwrap();
(Shared { a: 0, b: 0, c: 0 }, Local {}, init::Monotonics()) (Shared { a: 0, b: 0, c: 0 }, Local {}, init::Monotonics())
} }
#[idle]
fn idle(_: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop {}
}
// Direct destructure // Direct destructure
#[task(binds = UART0, shared = [&a, &b, &c])] #[task(shared = [&a, &b, &c])]
fn uart0(cx: uart0::Context) { fn foo(cx: foo::Context) {
let a = cx.shared.a; let a = cx.shared.a;
let b = cx.shared.b; let b = cx.shared.b;
let c = cx.shared.c; let c = cx.shared.c;
hprintln!("UART0: a = {}, b = {}, c = {}", a, b, c).unwrap(); hprintln!("foo: a = {}, b = {}, c = {}", a, b, c).unwrap();
} }
// De-structure-ing syntax // De-structure-ing syntax
#[task(binds = UART1, shared = [&a, &b, &c])] #[task(shared = [&a, &b, &c])]
fn uart1(cx: uart1::Context) { fn bar(cx: bar::Context) {
let uart1::SharedResources { a, b, c } = cx.shared; let bar::SharedResources { a, b, c } = cx.shared;
hprintln!("UART0: a = {}, b = {}, c = {}", a, b, c).unwrap(); hprintln!("bar: a = {}, b = {}, c = {}", a, b, c).unwrap();
} }
} }
@ -1,46 +0,0 @@
//! examples/double_schedule.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app {
use dwt_systick_monotonic::DwtSystick;
use rtic::time::duration::Seconds;
#[monotonic(binds = SysTick, default = true)]
type MyMono = DwtSystick<8_000_000>; // 8 MHz
#[shared]
struct Shared {}
#[local]
struct Local {}
#[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
task1::spawn().ok();
let mut dcb = cx.core.DCB;
let dwt = cx.core.DWT;
let systick = cx.core.SYST;
let mono = DwtSystick::new(&mut dcb, dwt, systick, 8_000_000);
(Shared {}, Local {}, init::Monotonics(mono))
}
#[task]
fn task1(_cx: task1::Context) {
task2::spawn_after(Seconds(1_u32)).ok();
}
#[task]
fn task2(_cx: task2::Context) {
task1::spawn_after(Seconds(1_u32)).ok();
}
}
@ -40,7 +40,7 @@ mod app {
rtic::pend(Interrupt::UART0); rtic::pend(Interrupt::UART0);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
@ -12,7 +12,7 @@ use panic_semihosting as _;
fn foo(_c: app::foo::Context, x: i32, y: u32) { fn foo(_c: app::foo::Context, x: i32, y: u32) {
hprintln!("foo {}, {}", x, y).unwrap(); hprintln!("foo {}, {}", x, y).unwrap();
if x == 2 { if x == 2 {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
app::foo::spawn(2, 3).unwrap(); app::foo::spawn(2, 3).unwrap();
} }
@ -39,7 +39,7 @@ mod app {
rtic::pend(Interrupt::UART1); rtic::pend(Interrupt::UART1);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
#[task(binds = UART1, priority = 2, shared = [shared], local = [state: u32 = 0])] #[task(binds = UART1, priority = 2, shared = [shared], local = [state: u32 = 0])]
@ -37,7 +37,7 @@ mod app {
rtic::pend(Interrupt::UART0); rtic::pend(Interrupt::UART0);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
@ -31,7 +31,7 @@ mod app {
hprintln!("idle").unwrap(); hprintln!("idle").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
@ -34,7 +34,7 @@ mod app {
hprintln!("init").unwrap(); hprintln!("init").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
examples/locals.rs Normal file
@ -0,0 +1,86 @@
//! examples/locals.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [UART0, UART1])]
mod app {
use cortex_m_semihosting::{debug, hprintln};
#[shared]
struct Shared {}
#[local]
struct Local {
local_to_foo: i64,
local_to_bar: i64,
local_to_idle: i64,
}
// `#[init]` cannot access locals from the `#[local]` struct as they are initialized here.
#[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
foo::spawn().unwrap();
bar::spawn().unwrap();
(
Shared {},
// initial values for the `#[local]` resources
Local {
local_to_foo: 0,
local_to_bar: 0,
local_to_idle: 0,
},
init::Monotonics(),
)
}
// `local_to_idle` can only be accessed from this context
#[idle(local = [local_to_idle])]
fn idle(cx: idle::Context) -> ! {
let local_to_idle = cx.local.local_to_idle;
*local_to_idle += 1;
hprintln!("idle: local_to_idle = {}", local_to_idle).unwrap();
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
// error: no `local_to_foo` field in `idle::LocalResources`
// _cx.local.local_to_foo += 1;
// error: no `local_to_bar` field in `idle::LocalResources`
// _cx.local.local_to_bar += 1;
loop {
cortex_m::asm::nop();
}
}
// `local_to_foo` can only be accessed from this context
#[task(local = [local_to_foo])]
fn foo(cx: foo::Context) {
let local_to_foo = cx.local.local_to_foo;
*local_to_foo += 1;
// error: no `local_to_bar` field in `foo::LocalResources`
// cx.local.local_to_bar += 1;
hprintln!("foo: local_to_foo = {}", local_to_foo).unwrap();
}
// `shared` can only be accessed from this context
#[task(local = [local_to_bar])]
fn bar(cx: bar::Context) {
let local_to_bar = cx.local.local_to_bar;
*local_to_bar += 1;
// error: no `local_to_foo` field in `bar::LocalResources`
// cx.local.local_to_foo += 1;
hprintln!("bar: local_to_bar = {}", local_to_bar).unwrap();
}
}
@ -7,10 +7,9 @@
use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [GPIOA])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {
@ -23,38 +22,28 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::GPIOA); foo::spawn().unwrap();
(Shared { counter: 0 }, Local {}, init::Monotonics()) (Shared { counter: 0 }, Local {}, init::Monotonics())
} }
#[task(binds = GPIOA, shared = [counter])] // <- same priority #[task(shared = [counter])] // <- same priority
fn gpioa(c: gpioa::Context) { fn foo(c: foo::Context) {
hprintln!("GPIOA/start").unwrap(); bar::spawn().unwrap();
rtic::pend(Interrupt::GPIOB);
*c.shared.counter += 1; // <- no lock API required *c.shared.counter += 1; // <- no lock API required
let counter = *c.shared.counter; let counter = *c.shared.counter;
hprintln!(" GPIOA/counter = {}", counter).unwrap(); hprintln!(" foo = {}", counter).unwrap();
if counter == 5 {
debug::exit(debug::EXIT_SUCCESS);
}
hprintln!("GPIOA/end").unwrap();
} }
#[task(binds = GPIOB, shared = [counter])] // <- same priority #[task(shared = [counter])] // <- same priority
fn gpiob(c: gpiob::Context) { fn bar(c: bar::Context) {
hprintln!("GPIOB/start").unwrap(); foo::spawn().unwrap();
rtic::pend(Interrupt::GPIOA);
*c.shared.counter += 1; // <- no lock API required *c.shared.counter += 1; // <- no lock API required
let counter = *c.shared.counter; let counter = *c.shared.counter;
hprintln!(" GPIOB/counter = {}", counter).unwrap(); hprintln!(" bar = {}", counter).unwrap();
if counter == 5 { debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
debug::exit(debug::EXIT_SUCCESS);
}
hprintln!("GPIOB/end").unwrap();
} }
} }
@ -7,10 +7,9 @@
use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [GPIOA, GPIOB, GPIOC])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {
@ -22,14 +21,14 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::GPIOA); foo::spawn().unwrap();
(Shared { shared: 0 }, Local {}, init::Monotonics()) (Shared { shared: 0 }, Local {}, init::Monotonics())
} }
// when omitted priority is assumed to be `1` // when omitted priority is assumed to be `1`
#[task(binds = GPIOA, shared = [shared])] #[task(shared = [shared])]
fn gpioa(mut c: gpioa::Context) { fn foo(mut c: foo::Context) {
hprintln!("A").unwrap(); hprintln!("A").unwrap();
// the lower priority task requires a critical section to access the data // the lower priority task requires a critical section to access the data
@ -37,24 +36,24 @@ mod app {
// data can only be modified within this critical section (closure) // data can only be modified within this critical section (closure)
*shared += 1; *shared += 1;
// GPIOB will *not* run right now due to the critical section // bar will *not* run right now due to the critical section
rtic::pend(Interrupt::GPIOB); bar::spawn().unwrap();
hprintln!("B - shared = {}", *shared).unwrap(); hprintln!("B - shared = {}", *shared).unwrap();
// GPIOC does not contend for `shared` so it's allowed to run now // baz does not contend for `shared` so it's allowed to run now
rtic::pend(Interrupt::GPIOC); baz::spawn().unwrap();
}); });
// critical section is over: GPIOB can now start // critical section is over: bar can now start
hprintln!("E").unwrap(); hprintln!("E").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
#[task(binds = GPIOB, priority = 2, shared = [shared])] #[task(priority = 2, shared = [shared])]
fn gpiob(mut c: gpiob::Context) { fn bar(mut c: bar::Context) {
// the higher priority task does still need a critical section // the higher priority task does still need a critical section
let shared = c.shared.shared.lock(|shared| { let shared = c.shared.shared.lock(|shared| {
*shared += 1; *shared += 1;
@ -65,8 +64,8 @@ mod app {
hprintln!("D - shared = {}", shared).unwrap(); hprintln!("D - shared = {}", shared).unwrap();
} }
#[task(binds = GPIOC, priority = 3)] #[task(priority = 3)]
fn gpioc(_: gpioc::Context) { fn baz(_: baz::Context) {
hprintln!("C").unwrap(); hprintln!("C").unwrap();
} }
} }
@ -44,7 +44,7 @@ mod app {
hprintln!("baz({}, {})", x, y).unwrap(); hprintln!("baz({}, {})", x, y).unwrap();
if x + y > 4 { if x + y > 4 {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
foo::spawn().unwrap(); foo::spawn().unwrap();

//! examples/spawn2.rs //! examples/message_passing.rs
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
@ -19,23 +19,19 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
foo::spawn(1, 1).unwrap();
foo::spawn(1, 2).unwrap(); foo::spawn(1, 2).unwrap();
foo::spawn(2, 3).unwrap();
assert!(foo::spawn(1, 4).is_err()); // The capacity of `foo` is reached
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
#[task] #[task(capacity = 3)]
fn foo(_c: foo::Context, x: i32, y: u32) { fn foo(_c: foo::Context, x: i32, y: u32) {
hprintln!("foo {}, {}", x, y).unwrap(); hprintln!("foo {}, {}", x, y).unwrap();
if x == 2 { if x == 2 {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
foo2::spawn(2).unwrap();
}
#[task]
fn foo2(_c: foo2::Context, x: i32) {
hprintln!("foo2 {}", x).unwrap();
foo::spawn(x, 0).unwrap();
} }
} }
@ -1,6 +1,4 @@
//! examples/mutlilock.rs //! examples/mutlilock.rs
//!
//! The multi-lock feature example.
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
@ -9,10 +7,9 @@
use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [GPIOA])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {
@ -26,7 +23,7 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::GPIOA); locks::spawn().unwrap();
( (
Shared { Shared {
@ -40,47 +37,20 @@ mod app {
} }
// when omitted priority is assumed to be `1` // when omitted priority is assumed to be `1`
#[task(binds = GPIOA, shared = [shared1, shared2, shared3])] #[task(shared = [shared1, shared2, shared3])]
fn locks(c: locks::Context) { fn locks(c: locks::Context) {
let mut s1 = c.shared.shared1; let s1 = c.shared.shared1;
let mut s2 = c.shared.shared2; let s2 = c.shared.shared2;
let mut s3 = c.shared.shared3; let s3 = c.shared.shared3;
hprintln!("Multiple single locks").unwrap();
s1.lock(|s1| {
s2.lock(|s2| {
s3.lock(|s3| {
*s1 += 1;
*s2 += 1;
*s3 += 1;
hprintln!(
"Multiple single locks, s1: {}, s2: {}, s3: {}",
*s1,
*s2,
*s3
)
.unwrap();
})
})
});
hprintln!("Multilock!").unwrap();
(s1, s2, s3).lock(|s1, s2, s3| { (s1, s2, s3).lock(|s1, s2, s3| {
*s1 += 1; *s1 += 1;
*s2 += 1; *s2 += 1;
*s3 += 1; *s3 += 1;
hprintln!( hprintln!("Multiple locks, s1: {}, s2: {}, s3: {}", *s1, *s2, *s3).unwrap();
"Multiple single locks, s1: {}, s2: {}, s3: {}",
*s1,
*s2,
*s3
)
.unwrap();
}); });
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
} }
@ -30,7 +30,7 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
( (
Shared { Shared {

//! examples/static.rs //! examples/only-shared-access.rs
#![deny(unsafe_code)] #![deny(unsafe_code)]
#![deny(warnings)] #![deny(warnings)]
@ -7,10 +7,9 @@
use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [UART0, UART1])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {
@ -22,22 +21,22 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::UART0); foo::spawn().unwrap();
rtic::pend(Interrupt::UART1); bar::spawn().unwrap();
(Shared { key: 0xdeadbeef }, Local {}, init::Monotonics()) (Shared { key: 0xdeadbeef }, Local {}, init::Monotonics())
} }
#[task(binds = UART0, shared = [&key])] #[task(shared = [&key])]
fn uart0(cx: uart0::Context) { fn foo(cx: foo::Context) {
let key: &u32 = cx.shared.key; let key: &u32 = cx.shared.key;
hprintln!("UART0(key = {:#x})", key).unwrap(); hprintln!("foo(key = {:#x})", key).unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
#[task(binds = UART1, priority = 2, shared = [&key])] #[task(priority = 2, shared = [&key])]
fn uart1(cx: uart1::Context) { fn bar(cx: bar::Context) {
hprintln!("UART1(key = {:#x})", cx.shared.key).unwrap(); hprintln!("bar(key = {:#x})", cx.shared.key).unwrap();
} }
} }
@ -10,11 +10,12 @@ use panic_semihosting as _;
// NOTE: does NOT work on QEMU! // NOTE: does NOT work on QEMU!
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])] #[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app { mod app {
use dwt_systick_monotonic::DwtSystick; use cortex_m_semihosting::{debug, hprintln};
use rtic::time::duration::Seconds; use rtic::time::duration::*;
use systick_monotonic::Systick;
#[monotonic(binds = SysTick, default = true)] #[monotonic(binds = SysTick, default = true)]
type MyMono = DwtSystick<8_000_000>; // 8 MHz type MyMono = Systick<100>; // 100 Hz / 10 ms granularity
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -24,20 +25,25 @@ mod app {
#[init] #[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) { fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut dcb = cx.core.DCB;
let dwt = cx.core.DWT;
let systick = cx.core.SYST; let systick = cx.core.SYST;
let mono = DwtSystick::new(&mut dcb, dwt, systick, 8_000_000); let mono = Systick::new(systick, 12_000_000);
foo::spawn_after(Seconds(1_u32)).unwrap(); foo::spawn_after(1.seconds()).unwrap();
(Shared {}, Local {}, init::Monotonics(mono)) (Shared {}, Local {}, init::Monotonics(mono))
} }
#[task] #[task(local = [cnt: u32 = 0])]
fn foo(_cx: foo::Context) { fn foo(cx: foo::Context) {
// Periodic hprintln!("foo").ok();
foo::spawn_after(Seconds(1_u32)).unwrap(); *cx.local.cnt += 1;
if *cx.local.cnt == 4 {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
}
// Periodic every 1 second
foo::spawn_after(1.seconds()).unwrap();
} }
} }

View file

@ -18,7 +18,7 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
assert!(cortex_m::Peripherals::take().is_none()); assert!(cortex_m::Peripherals::take().is_none());
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
@ -61,7 +61,7 @@ mod app {
// explicitly return the block to the pool // explicitly return the block to the pool
drop(x); drop(x);
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
#[task(priority = 2)] #[task(priority = 2)]

use panic_semihosting as _; use panic_semihosting as _;
use rtic::app; use rtic::app;
#[app(device = lm3s6965)] #[app(device = lm3s6965, dispatchers = [SSI0, QEI0])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -19,28 +18,28 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::GPIOA); foo::spawn().unwrap();
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
#[task(binds = GPIOA, priority = 1)] #[task(priority = 1)]
fn gpioa(_: gpioa::Context) { fn foo(_: foo::Context) {
hprintln!("GPIOA - start").unwrap(); hprintln!("foo - start").unwrap();
rtic::pend(Interrupt::GPIOC); baz::spawn().unwrap();
hprintln!("GPIOA - end").unwrap(); hprintln!("foo - end").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
#[task(binds = GPIOB, priority = 2)] #[task(priority = 2)]
fn gpiob(_: gpiob::Context) { fn bar(_: bar::Context) {
hprintln!(" GPIOB").unwrap(); hprintln!(" bar").unwrap();
} }
#[task(binds = GPIOC, priority = 2)] #[task(priority = 2)]
fn gpioc(_: gpioc::Context) { fn baz(_: baz::Context) {
hprintln!(" GPIOC - start").unwrap(); hprintln!(" baz - start").unwrap();
rtic::pend(Interrupt::GPIOB); bar::spawn().unwrap();
hprintln!(" GPIOC - end").unwrap(); hprintln!(" baz - end").unwrap();
} }
} }
@ -36,7 +36,7 @@ mod app {
fn foo(_: foo::Context) { fn foo(_: foo::Context) {
hprintln!("foo").unwrap(); hprintln!("foo").unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
// run this task from RAM // run this task from RAM

// `shared` cannot be accessed from this context // `shared` cannot be accessed from this context
#[idle] #[idle]
fn idle(_cx: idle::Context) -> ! { fn idle(_cx: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
// error: no `shared` field in `idle::Context` // error: no `shared` field in `idle::Context`
// _cx.shared.shared += 1; // _cx.shared.shared += 1;

//! examples/resource.rs
#![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
use panic_semihosting as _;
#[rtic::app(device = lm3s6965)]
mod app {
use cortex_m_semihosting::{debug, hprintln};
use lm3s6965::Interrupt;
#[shared]
struct Shared {}
#[local]
struct Local {
local_to_uart0: i64,
local_to_uart1: i64,
}
#[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
rtic::pend(Interrupt::UART0);
rtic::pend(Interrupt::UART1);
(
Shared {},
// initial values for the `#[local]` resources
Local {
local_to_uart0: 0,
local_to_uart1: 0,
},
init::Monotonics(),
)
}
// `#[local]` resources cannot be accessed from this context
#[idle]
fn idle(_cx: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS);
// error: no `local` field in `idle::Context`
// _cx.local.local_to_uart0 += 1;
// error: no `local` field in `idle::Context`
// _cx.local.local_to_uart1 += 1;
loop {
cortex_m::asm::nop();
}
}
// `local_to_uart0` can only be accessed from this context
// defaults to priority 1
#[task(binds = UART0, local = [local_to_uart0])]
fn uart0(cx: uart0::Context) {
*cx.local.local_to_uart0 += 1;
let local_to_uart0 = cx.local.local_to_uart0;
// error: no `local_to_uart1` field in `uart0::LocalResources`
// cx.local.local_to_uart1 += 1;
hprintln!("UART0: local_to_uart0 = {}", local_to_uart0).unwrap();
}
// `shared` can only be accessed from this context
// explicitly set to priority 2
#[task(binds = UART1, local = [local_to_uart1], priority = 2)]
fn uart1(cx: uart1::Context) {
*cx.local.local_to_uart1 += 1;
let local_to_uart1 = cx.local.local_to_uart1;
// error: no `local_to_uart0` field in `uart1::LocalResources`
// cx.local.local_to_uart0 += 1;
hprintln!("UART1: local_to_uart1 = {}", local_to_uart1).unwrap();
}
}
@ -7,17 +7,14 @@
use panic_semihosting as _; use panic_semihosting as _;
// NOTE: does NOT work on QEMU!
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])] #[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app { mod app {
use cortex_m_semihosting::hprintln; use cortex_m_semihosting::{debug, hprintln};
use dwt_systick_monotonic::DwtSystick; use rtic::time::duration::*;
use rtic::time::duration::Seconds; use systick_monotonic::Systick;
const MONO_HZ: u32 = 8_000_000; // 8 MHz
#[monotonic(binds = SysTick, default = true)] #[monotonic(binds = SysTick, default = true)]
type MyMono = DwtSystick<MONO_HZ>; type MyMono = Systick<100>; // 100 Hz / 10 ms granularity
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -27,30 +24,42 @@ mod app {
#[init] #[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) { fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut dcb = cx.core.DCB;
let dwt = cx.core.DWT;
let systick = cx.core.SYST; let systick = cx.core.SYST;
let mono = DwtSystick::new(&mut dcb, dwt, systick, 8_000_000); // Initialize the monotonic
let mono = Systick::new(systick, 12_000_000);
hprintln!("init").ok(); hprintln!("init").ok();
// Schedule `foo` to run 1 second in the future // Schedule `foo` to run 1 second in the future
foo::spawn_after(Seconds(1_u32)).ok(); foo::spawn_after(1.seconds()).unwrap();
// Schedule `bar` to run 2 seconds in the future (
bar::spawn_after(Seconds(2_u32)).ok(); Shared {},
Local {},
(Shared {}, Local {}, init::Monotonics(mono)) init::Monotonics(mono), // Give the monotonic to RTIC
)
} }
#[task] #[task]
fn foo(_: foo::Context) { fn foo(_: foo::Context) {
hprintln!("foo").ok(); hprintln!("foo").ok();
// Schedule `bar` to run 2 seconds in the future (1 second after foo runs)
bar::spawn_after(1.seconds()).unwrap();
} }
#[task] #[task]
fn bar(_: bar::Context) { fn bar(_: bar::Context) {
hprintln!("bar").ok(); hprintln!("bar").ok();
// Schedule `baz` to run 1 seconds from now, but with a specific time instant.
baz::spawn_at(monotonics::now() + 1.seconds()).unwrap();
}
#[task]
fn baz(_: baz::Context) {
hprintln!("baz").ok();
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
} }
@ -36,7 +36,7 @@ mod app {
if let Some(byte) = c.shared.c.lock(|c| c.dequeue()) { if let Some(byte) = c.shared.c.lock(|c| c.dequeue()) {
hprintln!("received message: {}", byte).unwrap(); hprintln!("received message: {}", byte).unwrap();
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} else { } else {
rtic::pend(Interrupt::UART0); rtic::pend(Interrupt::UART0);
} }
@ -8,6 +8,8 @@ use rtic::app;
#[app(device = lm3s6965)] #[app(device = lm3s6965)]
mod app { mod app {
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -16,6 +18,7 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
} }
@ -19,17 +19,16 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
foo::spawn(1, 2).unwrap(); hprintln!("init").unwrap();
foo::spawn().unwrap();
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
#[task()] #[task]
fn foo(_c: foo::Context, x: i32, y: u32) { fn foo(_: foo::Context) {
hprintln!("foo {}, {}", x, y).unwrap(); hprintln!("foo").unwrap();
if x == 2 {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
}
foo::spawn(2, 3).unwrap();
} }
} }

use panic_semihosting as _; use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965, dispatchers = [UART0])]
mod app { mod app {
use cortex_m_semihosting::{debug, hprintln}; use cortex_m_semihosting::{debug, hprintln};
use heapless::spsc::{Consumer, Producer, Queue}; use heapless::spsc::{Consumer, Producer, Queue};
use lm3s6965::Interrupt;
#[shared] #[shared]
struct Shared { struct Shared {}
#[local]
struct Local {
p: Producer<'static, u32, 5>, p: Producer<'static, u32, 5>,
c: Consumer<'static, u32, 5>, c: Consumer<'static, u32, 5>,
} }
#[local]
struct Local {}
#[init(local = [q: Queue<u32, 5> = Queue::new()])] #[init(local = [q: Queue<u32, 5> = Queue::new()])]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) { fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
// q has 'static life-time so after the split and return of `init`
// it will continue to exist and be allocated
let (p, c) = cx.local.q.split(); let (p, c) = cx.local.q.split();
(Shared { p, c }, Local {}, init::Monotonics()) foo::spawn().unwrap();
(Shared {}, Local { p, c }, init::Monotonics())
} }
#[idle(shared = [c])] #[idle(local = [c])]
fn idle(mut c: idle::Context) -> ! { fn idle(c: idle::Context) -> ! {
loop { loop {
if let Some(byte) = c.shared.c.lock(|c| c.dequeue()) { // Lock-free access to the same underlying queue!
hprintln!("received message: {}", byte).unwrap(); if let Some(data) = c.local.c.dequeue() {
hprintln!("received message: {}", data).unwrap();
debug::exit(debug::EXIT_SUCCESS); // Run foo until data
if data == 3 {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} else { } else {
rtic::pend(Interrupt::UART0); foo::spawn().unwrap();
}
} }
} }
} }
#[task(binds = UART0, shared = [p], local = [kalle: u32 = 0])] #[task(local = [p, state: u32 = 0])]
fn uart0(mut c: uart0::Context) { fn foo(c: foo::Context) {
*c.local.kalle += 1; *c.local.state += 1;
c.shared.p.lock(|p| p.enqueue(42).unwrap());
// Lock-free access to the same underlying queue!
c.local.p.enqueue(*c.local.state).unwrap();
} }
} }
@ -9,6 +9,8 @@ use panic_semihosting as _;
#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965)]
mod app { mod app {
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -17,6 +19,8 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }

#[rtic::app(device = lm3s6965)] #[rtic::app(device = lm3s6965)]
mod app { mod app {
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared { struct Shared {
// A conditionally compiled resource behind feature_x // A conditionally compiled resource behind feature_x
@ -19,6 +21,8 @@ mod app {
#[init] #[init]
fn init(_: init::Context) -> (Shared, Local, init::Monotonics) { fn init(_: init::Context) -> (Shared, Local, init::Monotonics) {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
( (
Shared { Shared {
#[cfg(feature = "feature_x")] #[cfg(feature = "feature_x")]

#[task(binds = UART0)] #[task(binds = UART0)]
fn taskmain(_: taskmain::Context) { fn taskmain(_: taskmain::Context) {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
} }
} }
@ -22,7 +22,7 @@ mod app {
#[idle] #[idle]
fn taskmain(_: taskmain::Context) -> ! { fn taskmain(_: taskmain::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
} }
@ -15,6 +15,7 @@ pub struct NotSend {
mod app { mod app {
use super::NotSend; use super::NotSend;
use core::marker::PhantomData; use core::marker::PhantomData;
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared { struct Shared {
@ -39,6 +40,7 @@ mod app {
#[idle(shared = [x, y])] #[idle(shared = [x, y])]
fn idle(_: idle::Context) -> ! { fn idle(_: idle::Context) -> ! {
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
} }
@ -9,11 +9,12 @@ use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])] #[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app { mod app {
use dwt_systick_monotonic::DwtSystick; use cortex_m_semihosting::debug;
use rtic::time::duration::Seconds; use rtic::time::duration::Seconds;
use systick_monotonic::Systick;
#[monotonic(binds = SysTick, default = true)] #[monotonic(binds = SysTick, default = true)]
type MyMono = DwtSystick<8_000_000>; // 8 MHz type MyMono = Systick<100>; // 100 Hz / 10 ms granularity
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -23,12 +24,17 @@ mod app {
#[init] #[init]
fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) { fn init(cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut dcb = cx.core.DCB;
let dwt = cx.core.DWT;
let systick = cx.core.SYST; let systick = cx.core.SYST;
let mono = DwtSystick::new(&mut dcb, dwt, systick, 8_000_000); let mono = Systick::new(systick, 12_000_000);
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics(mono))
}
#[idle]
fn idle(_: idle::Context) -> ! {
// Task without message passing // Task without message passing
// Not default // Not default
@ -120,11 +126,6 @@ mod app {
let handle: Result<baz::SpawnHandle, (u32, u32)> = baz::spawn_after(Seconds(1_u32), 0, 1); let handle: Result<baz::SpawnHandle, (u32, u32)> = baz::spawn_after(Seconds(1_u32), 0, 1);
let _: Result<(u32, u32), ()> = handle.unwrap().cancel(); let _: Result<(u32, u32), ()> = handle.unwrap().cancel();
(Shared {}, Local {}, init::Monotonics(mono))
}
#[idle]
fn idle(_: idle::Context) -> ! {
loop { loop {
cortex_m::asm::nop(); cortex_m::asm::nop();
} }
@ -9,6 +9,8 @@ use panic_semihosting as _;
#[rtic::app(device = lm3s6965, dispatchers = [SSI0])] #[rtic::app(device = lm3s6965, dispatchers = [SSI0])]
mod app { mod app {
use cortex_m_semihosting::debug;
#[shared] #[shared]
struct Shared {} struct Shared {}
@ -21,6 +23,8 @@ mod app {
let _: Result<(), u32> = bar::spawn(0); let _: Result<(), u32> = bar::spawn(0);
let _: Result<(), (u32, u32)> = baz::spawn(0, 1); let _: Result<(), (u32, u32)> = baz::spawn(0, 1);
debug::exit(debug::EXIT_SUCCESS); // Exit QEMU simulator
(Shared {}, Local {}, init::Monotonics()) (Shared {}, Local {}, init::Monotonics())
} }
Some files were not shown because too many files have changed in this diff.