diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..80fc334
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,4 @@
+[target.thumbv7m-none-eabi]
+# used to run the qemu_test.rs example with QEMU
+runner = "qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel"
+rustflags = ["-C", "link-arg=-Tlink.x"]
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index d810925..bb51ea1 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1 +1 @@
-* @rust-embedded/cortex-m
\ No newline at end of file
+* @rust-embedded/libs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4d594f7..cc72680 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,30 +1,92 @@
 on:
-  push: # Run CI for all branches except GitHub merge queue tmp branches
-    branches-ignore:
-      - "gh-readonly-queue/**"
   pull_request: # Run CI for PRs on any branch
   merge_group: # Run CI for the GitHub merge queue
+  workflow_dispatch: # Run CI when manually requested
+  schedule:
+    # Run every week at 8am UTC Saturday
+    - cron: '0 8 * * SAT'

 name: Continuous integration

 jobs:
-  ci:
+  check:
     runs-on: ubuntu-latest
+    env: {"RUSTFLAGS": "-D warnings"}
     strategy:
       matrix:
         target:
           - thumbv6m-none-eabi
           - thumbv7m-none-eabi
+        toolchain:
+          - stable
+          - nightly
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
         with:
-          profile: minimal
-          toolchain: stable
-          target: ${{ matrix.target }}
-          override: true
-      - uses: actions-rs/cargo@v1
+          targets: ${{ matrix.target }}
+          toolchain: ${{ matrix.toolchain }}
+      - run: cargo check --target=${{ matrix.target }} --example global_alloc
+      - if: ${{ matrix.toolchain == 'nightly' }}
+        run: cargo check --target=${{ matrix.target }} --examples --all-features
+      - uses: imjohnbo/issue-bot@v3
+        if: |
+          failure()
+          && github.event_name == 'schedule'
         with:
-          command: check
-          args: --target=${{ matrix.target }} --examples
+          title: CI Failure
+          labels: ci
+          body: |
+            Scheduled CI run failed. Details:
+            https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          targets: thumbv7m-none-eabi
+          toolchain: nightly
+      - name: Install QEMU
+        run: |
+          sudo apt update
+          sudo apt install qemu-system-arm
+      - run: qemu-system-arm --version
+      - run: cargo run --target thumbv7m-none-eabi --example llff_integration_test --all-features
+      - run: cargo run --target thumbv7m-none-eabi --example tlsf_integration_test --all-features
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: clippy
+          toolchain: nightly
+          targets: thumbv6m-none-eabi
+      - run: cargo clippy --all-features --examples --target=thumbv6m-none-eabi -- --deny warnings
+
+  format:
+    name: Format
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: rustfmt
+      - run: cargo fmt -- --check
+
+  rustdoc:
+    name: rustdoc
+    runs-on: ubuntu-latest
+    env: {"RUSTDOCFLAGS": "-D warnings"}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@nightly
+      - name: rustdoc
+        run: cargo rustdoc --all-features
diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml
deleted file mode 100644
index 0fe9b23..0000000
--- a/.github/workflows/clippy.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-on:
-  push: # Run CI for all branches except GitHub merge queue tmp branches
-    branches-ignore:
-      - "gh-readonly-queue/**"
-  pull_request: # Run CI for PRs on any branch
-  merge_group: # Run CI for the GitHub merge queue
-
-name: Clippy check
-
-jobs:
-  clippy:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-        if: github.event_name == 'pull_request_target'
-        with:
-          ref: refs/pull/${{ github.event.number }}/head
-      - uses: actions/checkout@v2
-        if: github.event_name != 'pull_request_target'
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-          components: clippy
-      - uses: actions-rs/clippy-check@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
deleted file mode 100644
index baaa18c..0000000
--- a/.github/workflows/cron.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-on:
-  schedule:
-    # Run every week at 8am UTC Saturday.
-    - cron: '0 8 * * SAT'
-
-name: Cron CI
-
-jobs:
-  ci-cron:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          target: thumbv6m-none-eabi
-          override: true
-      - uses: actions-rs/cargo@v1
-        with:
-          command: check
-          args: --examples --target thumbv6m-none-eabi
-      - uses: imjohnbo/issue-bot@v2
-        if: failure()
-        with:
-          title: CI Failure
-          labels: ci
-          body: |
-            Scheduled CI run failed. Details:
-            https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/rustfmt.yml b/.github/workflows/rustfmt.yml
deleted file mode 100644
index 714f503..0000000
--- a/.github/workflows/rustfmt.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-on:
-  push: # Run CI for all branches except GitHub merge queue tmp branches
-    branches-ignore:
-      - "gh-readonly-queue/**"
-  pull_request: # Run CI for PRs on any branch
-  merge_group: # Run CI for the GitHub merge queue
-
-name: Code formatting check
-
-jobs:
-  rustfmt:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
-          components: rustfmt
-      - uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12fd89e..0921f25 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,38 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 ## [Unreleased]

+### Added
+
+- Added an `init` macro to make initialization easier.
+
+### Changed
+
+- The `Heap::init` methods now panic if they're called more than once or with `size == 0`.
+
+## [v0.6.0] - 2024-09-01
+
+### Added
+
+- Added a Two-Level Segregated Fit heap with the `tlsf` feature.
+
+### Changed
+
+- The `Heap` struct has been renamed to `LlffHeap` and requires the `llff` feature.
+- Updated the Rust edition from 2018 to 2021.
+
+## [v0.5.1] - 2023-11-04
+
+### Added
+
+- Implemented [`Allocator`] for `Heap` with the `allocator_api` crate feature.
+  This feature requires a nightly toolchain for the unstable [`allocator_api`]
+  compiler feature.
+
+[`Allocator`]: https://doc.rust-lang.org/core/alloc/trait.Allocator.html
+[`allocator_api`]: https://doc.rust-lang.org/beta/unstable-book/library-features/allocator-api.html
+
+### Changed
+
 - Updated `linked_list_allocator` dependency to 0.10.5, which allows
   compiling with stable rust.
@@ -109,7 +141,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/).

 - Initial version of the allocator

-[Unreleased]: https://github.com/rust-embedded/embedded-alloc/compare/v0.5.0...HEAD
+[Unreleased]: https://github.com/rust-embedded/embedded-alloc/compare/v0.6.0...HEAD
+[v0.6.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.5.1...v0.6.0
+[v0.5.1]: https://github.com/rust-embedded/embedded-alloc/compare/v0.5.0...v0.5.1
 [v0.5.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.3...v0.5.0
 [v0.4.3]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.2...v0.4.3
 [v0.4.2]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.1...v0.4.2
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 3ab76c6..7a47646 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -2,7 +2,7 @@

 ## Conduct

-**Contact**: [Cortex-M team](https://github.com/rust-embedded/wg#the-cortex-m-team)
+**Contact**: [Libs team](https://github.com/rust-embedded/wg#the-libs-team)

 * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
 * On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
@@ -10,7 +10,7 @@
 * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
 * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
 * We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
-* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Cortex-M team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
+* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Libs team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
 * Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.

 ## Moderation
@@ -34,4 +34,4 @@ The enforcement policies listed above apply to all official embedded WG venues;

 *Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).*

-[team]: https://github.com/rust-embedded/wg#the-cortex-m-team
+[team]: https://github.com/rust-embedded/wg#the-libs-team
diff --git a/Cargo.toml b/Cargo.toml
index 3009541..d23dfe6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,7 +10,7 @@ description = "A heap allocator for embedded systems"
 repository = "https://github.com/rust-embedded/embedded-alloc"
 documentation = "https://docs.rs/embedded-alloc"
 readme = "README.md"
-edition = "2018"
+edition = "2021"

 keywords = [
     "allocator",
@@ -21,15 +21,41 @@ keywords = [
 ]
 license = "MIT OR Apache-2.0"
 name = "embedded-alloc"
-version = "0.5.0"
+version = "0.6.0"
+
+[features]
+default = ["llff", "tlsf"]
+allocator_api = []
+
+# Use the Two-Level Segregated Fit allocator
+tlsf = ["rlsf", "const-default"]
+# Use the LinkedList first-fit allocator
+llff = ["linked_list_allocator"]

 [dependencies]
 critical-section = "1.0"
-
-[dependencies.linked_list_allocator]
-default-features = false
-version = "0.10.5"
+linked_list_allocator = { version = "0.10.5", default-features = false, optional = true }
+rlsf = { version = "0.2.1", default-features = false, optional = true }
+const-default = { version = "1.0.0", default-features = false, optional = true }

 [dev-dependencies]
 cortex-m = { version = "0.7.6", features = ["critical-section-single-core"] }
 cortex-m-rt = "0.7"
+cortex-m-semihosting = "0.5"
+panic-semihosting = { version = "0.6", features = ["exit"] }
+
+[[example]]
+name = "allocator_api"
+required-features = ["allocator_api", "llff"]
+
+[[example]]
+name = "llff_integration_test"
+required-features = ["allocator_api", "llff"]
+
+[[example]]
+name = "tlsf_integration_test"
+required-features = ["allocator_api", "tlsf"]
+
+[[example]]
+name = "global_alloc"
+required-features = ["llff"]
diff --git a/README.md b/README.md
index a7ea481..868ae51 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ Starting with Rust 1.68, this crate can be used as a global allocator on stable
 extern crate alloc;

 use cortex_m_rt::entry;
-use embedded_alloc::Heap;
+use embedded_alloc::LlffHeap as Heap;

 #[global_allocator]
 static HEAP: Heap = Heap::empty();
@@ -31,11 +31,15 @@ static HEAP: Heap = Heap::empty();
 #[entry]
 fn main() -> ! {
     // Initialize the allocator BEFORE you use it
+    unsafe {
+        embedded_alloc::init!(HEAP, 1024);
+    }
+
+    // Alternatively, you can write the code directly to meet specific requirements.
     {
         use core::mem::MaybeUninit;
         const HEAP_SIZE: usize = 1024;
         static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
-        unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) }
+        unsafe { HEAP.init(&raw mut HEAP_MEM as usize, HEAP_SIZE) }
     }

     // now the allocator is ready types like Box, Vec can be used.
@@ -48,17 +52,25 @@ For a full usage example, see [`examples/global_alloc.rs`](https://github.com/ru

 For this to work, an implementation of [`critical-section`](https://github.com/rust-embedded/critical-section) must be provided.

-For simple use cases you may enable the `critical-section-single-core` feature in the [cortex-m](https://github.com/rust-embedded/cortex-m) crate.
+For simple use cases with Cortex-M CPUs you may enable the `critical-section-single-core` feature in the [cortex-m](https://github.com/rust-embedded/cortex-m) crate.
 Please refer to the documentation of [`critical-section`](https://docs.rs/critical-section) for further guidance.

+## Features
+
+There are two heaps available to use:
+
+* `llff`: Provides `LlffHeap`, a Linked List First Fit heap.
+* `tlsf`: Provides `TlsfHeap`, a Two-Level Segregated Fit heap.
+
+The best heap to use will depend on your application; see [#78](https://github.com/rust-embedded/embedded-alloc/pull/78) for more discussion.
 ## License

 Licensed under either of

 - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
-  http://www.apache.org/licenses/LICENSE-2.0)
-- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+  <http://www.apache.org/licenses/LICENSE-2.0>)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)

 at your option.
diff --git a/examples/allocator_api.rs b/examples/allocator_api.rs
new file mode 100644
index 0000000..10f5261
--- /dev/null
+++ b/examples/allocator_api.rs
@@ -0,0 +1,35 @@
+#![feature(allocator_api)]
+#![no_std]
+#![no_main]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use core::mem::MaybeUninit;
+use core::panic::PanicInfo;
+use cortex_m_rt::entry;
+use embedded_alloc::LlffHeap as Heap;
+
+// This is not used, but as of 2023-10-29 allocator_api cannot be used without
+// a global heap
+#[global_allocator]
+static HEAP: Heap = Heap::empty();
+
+#[entry]
+fn main() -> ! {
+    const HEAP_SIZE: usize = 16;
+    static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
+    let heap: Heap = Heap::empty();
+    unsafe { heap.init(&raw mut HEAP_MEM as usize, HEAP_SIZE) }
+
+    let mut xs = Vec::new_in(heap);
+    xs.push(1);
+
+    #[allow(clippy::empty_loop)]
+    loop { /* .. */ }
+}
+
+#[panic_handler]
+fn panic(_: &PanicInfo) -> ! {
+    loop {}
+}
diff --git a/examples/global_alloc.rs b/examples/global_alloc.rs
index 63a16e7..b47f0ef 100644
--- a/examples/global_alloc.rs
+++ b/examples/global_alloc.rs
@@ -6,7 +6,10 @@ extern crate alloc;
 use alloc::vec::Vec;
 use core::panic::PanicInfo;
 use cortex_m_rt::entry;
-use embedded_alloc::Heap;
+// Linked-List First Fit Heap allocator (feature = "llff")
+use embedded_alloc::LlffHeap as Heap;
+// Two-Level Segregated Fit Heap allocator (feature = "tlsf")
+// use embedded_alloc::TlsfHeap as Heap;

 #[global_allocator]
 static HEAP: Heap = Heap::empty();
@@ -14,16 +17,14 @@ static HEAP: Heap = Heap::empty();
 #[entry]
 fn main() -> ! {
     // Initialize the allocator BEFORE you use it
-    {
-        use core::mem::MaybeUninit;
-        const HEAP_SIZE: usize = 1024;
-        static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
-        unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) }
+    unsafe {
+        embedded_alloc::init!(HEAP, 1024);
     }

     let mut xs = Vec::new();
     xs.push(1);

+    #[allow(clippy::empty_loop)]
     loop { /* .. */ }
 }
diff --git a/examples/llff_integration_test.rs b/examples/llff_integration_test.rs
new file mode 100644
index 0000000..5b7f4b2
--- /dev/null
+++ b/examples/llff_integration_test.rs
@@ -0,0 +1,86 @@
+//! This is a very basic smoke test that runs in QEMU
+//! Reference the QEMU section of the [Embedded Rust Book] for more information
+//!
+//! This only tests integration of the allocator on an embedded target.
+//! Comprehensive allocator tests are located in the allocator dependency.
+//!
+//! After toolchain installation this test can be run with:
+//!
+//! ```bash
+//! cargo +nightly run --target thumbv7m-none-eabi --example llff_integration_test --all-features
+//! ```
+//!
+//! [Embedded Rust Book]: https://docs.rust-embedded.org/book/intro/index.html
+
+#![feature(allocator_api)]
+#![no_main]
+#![no_std]
+
+extern crate alloc;
+extern crate panic_semihosting;
+
+use alloc::vec::Vec;
+use core::mem::{size_of, MaybeUninit};
+use cortex_m_rt::entry;
+use cortex_m_semihosting::{debug, hprintln};
+use embedded_alloc::LlffHeap as Heap;
+
+#[global_allocator]
+static HEAP: Heap = Heap::empty();
+
+fn test_global_heap() {
+    assert_eq!(HEAP.used(), 0);
+
+    let mut xs: Vec<i32> = alloc::vec![1];
+    xs.push(2);
+    xs.extend(&[3, 4]);
+
+    // do not optimize xs
+    core::hint::black_box(&mut xs);
+
+    assert_eq!(xs.as_slice(), &[1, 2, 3, 4]);
+    assert_eq!(HEAP.used(), size_of::<i32>() * xs.len());
+}
+
+fn test_allocator_api() {
+    // small local heap
+    const HEAP_SIZE: usize = 16;
+    let mut heap_mem: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
+    let local_heap: Heap = Heap::empty();
+    unsafe { local_heap.init(&raw mut heap_mem as usize, HEAP_SIZE) }
+
+    assert_eq!(local_heap.used(), 0);
+
+    let mut v: Vec<u16, Heap> = Vec::new_in(local_heap);
+    v.push(0xCAFE);
+    v.extend(&[0xDEAD, 0xFEED]);
+
+    // do not optimize v
+    core::hint::black_box(&mut v);
+
+    assert_eq!(v.as_slice(), &[0xCAFE, 0xDEAD, 0xFEED]);
+}
+
+#[entry]
+fn main() -> ! {
+    unsafe {
+        embedded_alloc::init!(HEAP, 1024);
+    }
+
+    #[allow(clippy::type_complexity)]
+    let tests: &[(fn() -> (), &'static str)] = &[
+        (test_global_heap, "test_global_heap"),
+        (test_allocator_api, "test_allocator_api"),
+    ];
+
+    for (test_fn, test_name) in tests {
+        hprintln!("{}: start", test_name);
+        test_fn();
+        hprintln!("{}: pass", test_name);
+    }
+
+    // exit QEMU with a success status
+    debug::exit(debug::EXIT_SUCCESS);
+    #[allow(clippy::empty_loop)]
+    loop {}
+}
diff --git a/examples/tlsf_integration_test.rs b/examples/tlsf_integration_test.rs
new file mode 100644
index 0000000..591b7d3
--- /dev/null
+++ b/examples/tlsf_integration_test.rs
@@ -0,0 +1,104 @@
+//! This is a very basic smoke test that runs in QEMU
+//! Reference the QEMU section of the [Embedded Rust Book] for more information
+//!
+//! This only tests integration of the allocator on an embedded target.
+//! Comprehensive allocator tests are located in the allocator dependency.
+//!
+//! After toolchain installation this test can be run with:
+//!
+//! ```bash
+//! cargo +nightly run --target thumbv7m-none-eabi --example tlsf_integration_test --all-features
+//! ```
+//!
+//! [Embedded Rust Book]: https://docs.rust-embedded.org/book/intro/index.html
+
+#![feature(allocator_api)]
+#![no_main]
+#![no_std]
+
+extern crate alloc;
+extern crate panic_semihosting;
+
+use alloc::collections::LinkedList;
+use core::mem::MaybeUninit;
+use cortex_m_rt::entry;
+use cortex_m_semihosting::{debug, hprintln};
+use embedded_alloc::TlsfHeap as Heap;
+
+#[global_allocator]
+static HEAP: Heap = Heap::empty();
+const HEAP_SIZE: usize = 30 * 1024;
+
+fn test_global_heap() {
+    const ELEMS: usize = 250;
+
+    let mut allocated = LinkedList::new();
+    for _ in 0..ELEMS {
+        allocated.push_back(0);
+    }
+    for i in 0..ELEMS {
+        allocated.push_back(i as i32);
+    }
+
+    assert_eq!(allocated.len(), 2 * ELEMS);
+
+    for _ in 0..ELEMS {
+        allocated.pop_front();
+    }
+
+    for i in 0..ELEMS {
+        assert_eq!(allocated.pop_front().unwrap(), i as i32);
+    }
+}
+
+fn test_allocator_api() {
+    // small local heap
+    const HEAP_SIZE: usize = 256;
+    let mut heap_mem: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
+    let local_heap: Heap = Heap::empty();
+    unsafe { local_heap.init(heap_mem.as_mut_ptr() as usize, HEAP_SIZE) }
+
+    const ELEMS: usize = 2;
+
+    let mut allocated = LinkedList::new_in(local_heap);
+    for _ in 0..ELEMS {
+        allocated.push_back(0);
+    }
+    for i in 0..ELEMS {
+        allocated.push_back(i as i32);
+    }
+
+    assert_eq!(allocated.len(), 2 * ELEMS);
+
+    for _ in 0..ELEMS {
+        allocated.pop_front();
+    }
+
+    for i in 0..ELEMS {
+        assert_eq!(allocated.pop_front().unwrap(), i as i32);
+    }
+}
+
+#[entry]
+fn main() -> ! {
+    unsafe {
+        embedded_alloc::init!(HEAP, HEAP_SIZE);
+    }
+
+    #[allow(clippy::type_complexity)]
+    let tests: &[(fn() -> (), &'static str)] = &[
+        (test_global_heap, "test_global_heap"),
+        (test_allocator_api, "test_allocator_api"),
+    ];
+
+    for (test_fn, test_name) in tests {
+        hprintln!("{}: start", test_name);
+        test_fn();
+        hprintln!("{}: pass", test_name);
+    }
+
+    // exit QEMU with a success status
+    debug::exit(debug::EXIT_SUCCESS);
+    #[allow(clippy::empty_loop)]
+    loop {}
+}
diff --git a/memory.x b/memory.x
new file mode 100644
index 0000000..367c5c8
--- /dev/null
+++ b/memory.x
@@ -0,0 +1,6 @@
+MEMORY
+{
+  /* These values correspond to the LM3S6965, one of the few devices QEMU can emulate */
+  FLASH : ORIGIN = 0x00000000, LENGTH = 256K
+  RAM : ORIGIN = 0x20000000, LENGTH = 64K
+}
diff --git a/src/lib.rs b/src/lib.rs
index 27a9a7d..0b6bd29 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,90 +1,66 @@
 #![doc = include_str!("../README.md")]
 #![no_std]
+#![cfg_attr(feature = "allocator_api", feature(allocator_api, alloc_layout_extra))]
+#![warn(missing_docs)]

-use core::alloc::{GlobalAlloc, Layout};
-use core::cell::RefCell;
-use core::ptr::{self, NonNull};
+#[cfg(feature = "llff")]
+mod llff;
+#[cfg(feature = "tlsf")]
+mod tlsf;

-use critical_section::Mutex;
-use linked_list_allocator::Heap as LLHeap;
+#[cfg(feature = "llff")]
+pub use llff::Heap as LlffHeap;
+#[cfg(feature = "tlsf")]
+pub use tlsf::Heap as TlsfHeap;

-pub struct Heap {
-    heap: Mutex<RefCell<LLHeap>>,
-}
-
-impl Heap {
-    /// Crate a new UNINITIALIZED heap allocator
-    ///
-    /// You must initialize this heap using the
-    /// [`init`](Self::init) method before using the allocator.
-    pub const fn empty() -> Heap {
-        Heap {
-            heap: Mutex::new(RefCell::new(LLHeap::empty())),
-        }
-    }
-
-    /// Initializes the heap
-    ///
-    /// This function must be called BEFORE you run any code that makes use of the
-    /// allocator.
-    ///
-    /// `start_addr` is the address where the heap will be located.
-    ///
-    /// `size` is the size of the heap in bytes.
-    ///
-    /// Note that:
-    ///
-    /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
-    ///   be the smallest address used.
-    ///
-    /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
-    ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
-    ///   addresses `0x31000` and larger.
-    ///
-    /// # Safety
-    ///
-    /// Obey these or Bad Stuff will happen.
-    ///
-    /// - This function must be called exactly ONCE.
-    /// - `size > 0`
-    pub unsafe fn init(&self, start_addr: usize, size: usize) {
-        critical_section::with(|cs| {
-            self.heap
-                .borrow(cs)
-                .borrow_mut()
-                .init(start_addr as *mut u8, size);
-        });
-    }
-
-    /// Returns an estimate of the amount of bytes in use.
-    pub fn used(&self) -> usize {
-        critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().used())
-    }
-
-    /// Returns an estimate of the amount of bytes available.
-    pub fn free(&self) -> usize {
-        critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().free())
-    }
-}
-
-unsafe impl GlobalAlloc for Heap {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        critical_section::with(|cs| {
-            self.heap
-                .borrow(cs)
-                .borrow_mut()
-                .allocate_first_fit(layout)
-                .ok()
-                .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
-        })
-    }
-
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        critical_section::with(|cs| {
-            self.heap
-                .borrow(cs)
-                .borrow_mut()
-                .deallocate(NonNull::new_unchecked(ptr), layout)
-        });
-    }
+/// Initialize the global heap.
+///
+/// This macro creates a static, uninitialized memory buffer of the specified size and
+/// initializes the heap instance with that buffer.
+///
+/// # Parameters
+///
+/// - `$heap:ident`: The identifier of the global heap instance to initialize.
+/// - `$size:expr`: An expression evaluating to a `usize` that specifies the size of the
+///   static memory buffer in bytes. It must be **greater than zero**.
+///
+/// # Safety
+///
+/// This macro must be called first, before any operations on the heap, and **only once**.
+/// It internally calls `Heap::init(...)` on the heap,
+/// so `Heap::init(...)` should not be called directly if this macro is used.
+///
+/// # Panics
+///
+/// This macro will panic if either of the following are true:
+///
+/// - this macro is called more than ONCE.
+/// - `size == 0`.
+///
+/// # Example
+///
+/// ```rust
+/// use cortex_m_rt::entry;
+/// use embedded_alloc::LlffHeap as Heap;
+///
+/// #[global_allocator]
+/// static HEAP: Heap = Heap::empty();
+///
+/// #[entry]
+/// fn main() -> ! {
+///     // Initialize the allocator BEFORE you use it
+///     unsafe {
+///         embedded_alloc::init!(HEAP, 1024);
+///     }
+///     let mut xs = Vec::new();
+///     // ...
+/// }
+/// ```
+#[macro_export]
+macro_rules! init {
+    ($heap:ident, $size:expr) => {
+        static mut HEAP_MEM: [::core::mem::MaybeUninit<u8>; $size] =
+            [::core::mem::MaybeUninit::uninit(); $size];
+        $heap.init(&raw mut HEAP_MEM as usize, $size)
+    };
 }
diff --git a/src/llff.rs b/src/llff.rs
new file mode 100644
index 0000000..aae2485
--- /dev/null
+++ b/src/llff.rs
@@ -0,0 +1,127 @@
+use core::alloc::{GlobalAlloc, Layout};
+use core::cell::RefCell;
+use core::ptr::{self, NonNull};
+
+use critical_section::Mutex;
+use linked_list_allocator::Heap as LLHeap;
+
+/// A linked list first fit heap.
+pub struct Heap {
+    heap: Mutex<RefCell<(LLHeap, bool)>>,
+}
+
+impl Heap {
+    /// Create a new UNINITIALIZED heap allocator
+    ///
+    /// You must initialize this heap using the
+    /// [`init`](Self::init) method before using the allocator.
+    pub const fn empty() -> Heap {
+        Heap {
+            heap: Mutex::new(RefCell::new((LLHeap::empty(), false))),
+        }
+    }
+
+    /// Initializes the heap
+    ///
+    /// This function must be called BEFORE you run any code that makes use of the
+    /// allocator.
+    ///
+    /// `start_addr` is the address where the heap will be located.
+    ///
+    /// `size` is the size of the heap in bytes.
+    ///
+    /// Note that:
+    ///
+    /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
+    ///   be the smallest address used.
+    ///
+    /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
+    ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
+    ///   addresses `0x31000` and larger.
+    ///
+    /// # Safety
+    ///
+    /// This function is safe if the following invariants hold:
+    ///
+    /// - `start_addr` points to valid memory.
+    /// - `size` is correct.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if either of the following are true:
+    ///
+    /// - this function is called more than ONCE.
+    /// - `size == 0`.
+    pub unsafe fn init(&self, start_addr: usize, size: usize) {
+        assert!(size > 0);
+        critical_section::with(|cs| {
+            let mut heap = self.heap.borrow_ref_mut(cs);
+            assert!(!heap.1);
+            heap.1 = true;
+            heap.0.init(start_addr as *mut u8, size);
+        });
+    }
+
+    /// Returns an estimate of the amount of bytes in use.
+    pub fn used(&self) -> usize {
+        critical_section::with(|cs| self.heap.borrow_ref_mut(cs).0.used())
+    }
+
+    /// Returns an estimate of the amount of bytes available.
+    pub fn free(&self) -> usize {
+        critical_section::with(|cs| self.heap.borrow_ref_mut(cs).0.free())
+    }
+
+    fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow_ref_mut(cs)
+                .0
+                .allocate_first_fit(layout)
+                .ok()
+        })
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow_ref_mut(cs)
+                .0
+                .deallocate(NonNull::new_unchecked(ptr), layout)
+        });
+    }
+}
+
+unsafe impl GlobalAlloc for Heap {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.alloc(layout)
+            .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.dealloc(ptr, layout);
+    }
+}
+
+#[cfg(feature = "allocator_api")]
+mod allocator_api {
+    use super::*;
+    use core::alloc::{AllocError, Allocator};
+
+    unsafe impl Allocator for Heap {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            match layout.size() {
+                0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+                size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
+                    Ok(NonNull::slice_from_raw_parts(allocation, size))
+                }),
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            if layout.size() != 0 {
+                self.dealloc(ptr.as_ptr(), layout);
+            }
+        }
+    }
+}
diff --git a/src/tlsf.rs b/src/tlsf.rs
new file mode 100644
index 0000000..37935d1
--- /dev/null
+++ b/src/tlsf.rs
@@ -0,0 +1,115 @@
+use core::alloc::{GlobalAlloc, Layout};
+use core::cell::RefCell;
+use core::ptr::{self, NonNull};
+
+use const_default::ConstDefault;
+use critical_section::Mutex;
+use rlsf::Tlsf;
+
+type TlsfHeap = Tlsf<'static, usize, usize, { usize::BITS as usize }, { usize::BITS as usize }>;
+
+/// A two-level segregated fit heap.
+pub struct Heap {
+    heap: Mutex<RefCell<(TlsfHeap, bool)>>,
+}
+
+impl Heap {
+    /// Create a new UNINITIALIZED heap allocator
+    ///
+    /// You must initialize this heap using the
+    /// [`init`](Self::init) method before using the allocator.
+    pub const fn empty() -> Heap {
+        Heap {
+            heap: Mutex::new(RefCell::new((ConstDefault::DEFAULT, false))),
+        }
+    }
+
+    /// Initializes the heap
+    ///
+    /// This function must be called BEFORE you run any code that makes use of the
+    /// allocator.
+    ///
+    /// `start_addr` is the address where the heap will be located.
+    ///
+    /// `size` is the size of the heap in bytes.
+    ///
+    /// Note that:
+    ///
+    /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
+    ///   be the smallest address used.
+    ///
+    /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
+    ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
+    ///   addresses `0x31000` and larger.
+    ///
+    /// # Safety
+    ///
+    /// This function is safe if the following invariants hold:
+    ///
+    /// - `start_addr` points to valid memory.
+    /// - `size` is correct.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if either of the following are true:
+    ///
+    /// - this function is called more than ONCE.
+    /// - `size == 0`.
+    pub unsafe fn init(&self, start_addr: usize, size: usize) {
+        assert!(size > 0);
+        critical_section::with(|cs| {
+            let mut heap = self.heap.borrow_ref_mut(cs);
+            assert!(!heap.1);
+            heap.1 = true;
+            let block: &[u8] = core::slice::from_raw_parts(start_addr as *const u8, size);
+            heap.0.insert_free_block_ptr(block.into());
+        });
+    }
+
+    fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
+        critical_section::with(|cs| self.heap.borrow_ref_mut(cs).0.allocate(layout))
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow_ref_mut(cs)
+                .0
+                .deallocate(NonNull::new_unchecked(ptr), layout.align())
+        })
+    }
+}
+
+unsafe impl GlobalAlloc for Heap {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.alloc(layout)
+            .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.dealloc(ptr, layout)
+    }
+}
+
+#[cfg(feature = "allocator_api")]
+mod allocator_api {
+    use super::*;
+    use core::alloc::{AllocError, Allocator};
+
+    unsafe impl Allocator for Heap {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            match layout.size() {
+                0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+                size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
+                    Ok(NonNull::slice_from_raw_parts(allocation, size))
+                }),
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            if layout.size() != 0 {
+                self.dealloc(ptr.as_ptr(), layout);
+            }
+        }
+    }
+}
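Usage note (not part of the patch): the new `init!` macro together with `LlffHeap`'s `used()` and `free()` accessors makes it easy to sanity-check heap consumption on target. Below is a minimal sketch under assumptions not fixed by this diff — the 4 KiB heap size and the semihosting output are illustrative only, and a `critical-section` provider (for example the cortex-m crate's `critical-section-single-core` feature) is assumed to be enabled:

```rust
#![no_std]
#![no_main]

extern crate alloc;
extern crate panic_semihosting;

use alloc::vec::Vec;
use cortex_m_rt::entry;
use cortex_m_semihosting::hprintln;
use embedded_alloc::LlffHeap as Heap;

#[global_allocator]
static HEAP: Heap = Heap::empty();

#[entry]
fn main() -> ! {
    // Hand the allocator a 4 KiB static buffer exactly once, before first use.
    unsafe {
        embedded_alloc::init!(HEAP, 4096);
    }

    // Any allocation now goes through the global heap.
    let mut samples: Vec<u32> = Vec::with_capacity(64);
    samples.extend(0..64);

    // `used()` and `free()` take the critical section internally and return estimates.
    hprintln!("heap used: {} B, free: {} B", HEAP.used(), HEAP.free());

    #[allow(clippy::empty_loop)]
    loop {}
}
```

Note that the `tlsf` heap in this patch does not expose `used()`/`free()`, so this sketch applies to the `llff` feature only.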