Sophie

Sophie

distrib > Mageia > 7 > armv7hl > media > core-release > by-pkgid > 0c2243f8a1696816431e7210e991fa52 > files > 12767

rust-doc-1.35.0-1.mga7.armv7hl.rpm

<!DOCTYPE HTML>
<html lang="en" class="sidebar-visible no-js">
    <head>
        <!-- Book generated using mdBook -->
        <meta charset="UTF-8">
        <title>The Embedded Rust Book</title>
        <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
        <meta name="description" content="">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <meta name="theme-color" content="#ffffff" />

        <link rel="shortcut icon" href="favicon.png">
        <link rel="stylesheet" href="css/variables.css">
        <link rel="stylesheet" href="css/general.css">
        <link rel="stylesheet" href="css/chrome.css">
        <link rel="stylesheet" href="css/print.css" media="print">

        <!-- Fonts -->
        <link rel="stylesheet" href="FontAwesome/css/font-awesome.css">
        <link href="https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800" rel="stylesheet" type="text/css">
        <link href="https://fonts.googleapis.com/css?family=Source+Code+Pro:500" rel="stylesheet" type="text/css">

        <!-- Highlight.js Stylesheets -->
        <link rel="stylesheet" href="highlight.css">
        <link rel="stylesheet" href="tomorrow-night.css">
        <link rel="stylesheet" href="ayu-highlight.css">

        <!-- Custom theme stylesheets -->
        

        
    </head>
    <body class="light">
        <!-- Provide site root to javascript -->
        <script type="text/javascript">
            // Site root relative to this page; prepended to asset URLs by the
            // book's scripts (empty string = this page lives at the root).
            var path_to_root = "";
            // Theme applied when no 'mdbook-theme' preference is stored.
            var default_theme = "light";
        </script>

        <!-- Work around some values being stored in localStorage wrapped in quotes -->
        <script type="text/javascript">
            // Older mdBook versions stored these values JSON-encoded, i.e.
            // wrapped in double quotes; strip the quotes so later scripts see
            // the bare value. Each key is null-checked and handled
            // independently: previously a missing 'mdbook-theme' made
            // theme.startsWith throw on null, and the swallowed TypeError
            // also skipped the 'mdbook-sidebar' fix-up below.
            try {
                var theme = localStorage.getItem('mdbook-theme');
                var sidebar = localStorage.getItem('mdbook-sidebar');

                if (theme && theme.startsWith('"') && theme.endsWith('"')) {
                    localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
                }

                if (sidebar && sidebar.startsWith('"') && sidebar.endsWith('"')) {
                    localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
                }
            } catch (e) { } // localStorage may be unavailable (private mode / disabled)
        </script>

        <!-- Set the theme before any content is loaded, prevents flash -->
        <script type="text/javascript">
            // Apply the stored theme (or the default) before any content is
            // rendered, preventing a flash of the wrong colour scheme.
            var theme;
            try {
                theme = localStorage.getItem('mdbook-theme');
            } catch (e) { }
            // `== null` matches both null (key absent) and undefined
            // (localStorage threw above and theme was never assigned).
            if (theme == null) { theme = default_theme; }
            document.body.className = theme;
            document.querySelector('html').className = theme + ' js';
        </script>

        <!-- Hide / unhide sidebar before it is displayed -->
        <script type="text/javascript">
            // Decide sidebar visibility before first paint to avoid a flash.
            // Narrow viewports always start with the sidebar hidden; wide ones
            // honour the stored preference, defaulting to visible.
            var html = document.querySelector('html');
            var sidebar = 'hidden';
            if (document.body.clientWidth >= 1080) {
                // Seed with 'hidden' so an inaccessible localStorage keeps
                // the sidebar hidden, exactly as before.
                var stored = 'hidden';
                try {
                    stored = localStorage.getItem('mdbook-sidebar');
                } catch (e) { }
                sidebar = stored || 'visible';
            }
            html.classList.remove('sidebar-visible');
            html.classList.add('sidebar-' + sidebar);
        </script>

        <nav id="sidebar" class="sidebar" aria-label="Table of contents">
            <ol class="chapter"><li><a href="intro/index.html"><strong aria-hidden="true">1.</strong> Introduction</a></li><li><ol class="section"><li><a href="intro/hardware.html"><strong aria-hidden="true">1.1.</strong> Hardware</a></li><li><a href="intro/no-std.html"><strong aria-hidden="true">1.2.</strong> no_std</a></li><li><a href="intro/tooling.html"><strong aria-hidden="true">1.3.</strong> Tooling</a></li><li><a href="intro/install.html"><strong aria-hidden="true">1.4.</strong> Installation</a></li><li><ol class="section"><li><a href="intro/install/linux.html"><strong aria-hidden="true">1.4.1.</strong> Linux</a></li><li><a href="intro/install/macos.html"><strong aria-hidden="true">1.4.2.</strong> MacOS</a></li><li><a href="intro/install/windows.html"><strong aria-hidden="true">1.4.3.</strong> Windows</a></li><li><a href="intro/install/verify.html"><strong aria-hidden="true">1.4.4.</strong> Verify Installation</a></li></ol></li></ol></li><li><a href="start/index.html"><strong aria-hidden="true">2.</strong> Getting started</a></li><li><ol class="section"><li><a href="start/qemu.html"><strong aria-hidden="true">2.1.</strong> QEMU</a></li><li><a href="start/hardware.html"><strong aria-hidden="true">2.2.</strong> Hardware</a></li><li><a href="start/registers.html"><strong aria-hidden="true">2.3.</strong> Memory-mapped Registers</a></li><li><a href="start/semihosting.html"><strong aria-hidden="true">2.4.</strong> Semihosting</a></li><li><a href="start/panicking.html"><strong aria-hidden="true">2.5.</strong> Panicking</a></li><li><a href="start/exceptions.html"><strong aria-hidden="true">2.6.</strong> Exceptions</a></li><li><a href="start/interrupts.html"><strong aria-hidden="true">2.7.</strong> Interrupts</a></li><li><a href="start/io.html"><strong aria-hidden="true">2.8.</strong> IO</a></li></ol></li><li><a href="peripherals/index.html"><strong aria-hidden="true">3.</strong> Peripherals</a></li><li><ol class="section"><li><a 
href="peripherals/a-first-attempt.html"><strong aria-hidden="true">3.1.</strong> A first attempt in Rust</a></li><li><a href="peripherals/borrowck.html"><strong aria-hidden="true">3.2.</strong> The Borrow Checker</a></li><li><a href="peripherals/singletons.html"><strong aria-hidden="true">3.3.</strong> Singletons</a></li></ol></li><li><a href="static-guarantees/index.html"><strong aria-hidden="true">4.</strong> Static Guarantees</a></li><li><ol class="section"><li><a href="static-guarantees/typestate-programming.html"><strong aria-hidden="true">4.1.</strong> Typestate Programming</a></li><li><a href="static-guarantees/state-machines.html"><strong aria-hidden="true">4.2.</strong> Peripherals as State Machines</a></li><li><a href="static-guarantees/design-contracts.html"><strong aria-hidden="true">4.3.</strong> Design Contracts</a></li><li><a href="static-guarantees/zero-cost-abstractions.html"><strong aria-hidden="true">4.4.</strong> Zero Cost Abstractions</a></li></ol></li><li><a href="portability/index.html"><strong aria-hidden="true">5.</strong> Portability</a></li><li><a href="concurrency/index.html"><strong aria-hidden="true">6.</strong> Concurrency</a></li><li><a href="collections/index.html"><strong aria-hidden="true">7.</strong> Collections</a></li><li><a href="c-tips/index.html"><strong aria-hidden="true">8.</strong> Tips for embedded C developers</a></li><li><a href="interoperability/index.html"><strong aria-hidden="true">9.</strong> Interoperability</a></li><li><ol class="section"><li><a href="interoperability/c-with-rust.html"><strong aria-hidden="true">9.1.</strong> A little C with your Rust</a></li><li><a href="interoperability/rust-with-c.html"><strong aria-hidden="true">9.2.</strong> A little Rust with your C</a></li></ol></li><li><a href="unsorted/index.html"><strong aria-hidden="true">10.</strong> Unsorted topics</a></li><li><ol class="section"><li><a href="unsorted/speed-vs-size.html"><strong aria-hidden="true">10.1.</strong> Optimizations: The 
speed size tradeoff</a></li></ol></li></ol>
        </nav>

        <div id="page-wrapper" class="page-wrapper">

            <div class="page">
                
                <div id="menu-bar" class="menu-bar">
                    <div id="menu-bar-sticky-container">
                        <div class="left-buttons">
                            <button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
                                <i class="fa fa-bars"></i>
                            </button>
                            <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
                                <i class="fa fa-paint-brush"></i>
                            </button>
                            <ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
                                <li role="none"><button role="menuitem" class="theme" id="light">Light (default)</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
                                <li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
                            </ul>
                            
                            <button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
                                <i class="fa fa-search"></i>
                            </button>
                            
                        </div>

                        <h1 class="menu-title">The Embedded Rust Book</h1> 

                        <div class="right-buttons">
                            <a href="print.html" title="Print this book" aria-label="Print this book">
                                <i id="print-button" class="fa fa-print"></i>
                            </a>
                            
                        </div>
                    </div>
                </div>

                
                <div id="search-wrapper" class="hidden">
                    <form id="searchbar-outer" class="searchbar-outer">
                        <input type="search" name="search" id="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
                    </form>
                    <div id="searchresults-outer" class="searchresults-outer hidden">
                        <div id="searchresults-header" class="searchresults-header"></div>
                        <ul id="searchresults">
                        </ul>
                    </div>
                </div>
                

                <!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
                <script type="text/javascript">
                    // Reflect the sidebar state (global `sidebar`, computed by the
                    // inline script above) in ARIA attributes, and keep links in a
                    // hidden sidebar out of the keyboard tab order.
                    var sidebarVisible = sidebar === 'visible';
                    document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebarVisible);
                    document.getElementById('sidebar').setAttribute('aria-hidden', !sidebarVisible);
                    var sidebarLinks = document.querySelectorAll('#sidebar a');
                    Array.prototype.forEach.call(sidebarLinks, function (link) {
                        link.setAttribute('tabIndex', sidebarVisible ? 0 : -1);
                    });
                </script>

                <div id="content" class="content">
                    <main>
                        <a class="header" href="#introduction" id="introduction"><h1>Introduction</h1></a>
<p>Welcome to The Embedded Rust Book: An introductory book about using the Rust
Programming Language on &quot;Bare Metal&quot; embedded systems, such as Microcontrollers.</p>
<a class="header" href="#who-embedded-rust-is-for" id="who-embedded-rust-is-for"><h2>Who Embedded Rust is For</h2></a>
<p>Embedded Rust is for everyone who wants to do embedded programming while taking advantage of the higher-level concepts and safety guarantees the Rust language provides.
(See also <a href="https://doc.rust-lang.org/book/ch00-00-introduction.html">Who Rust Is For</a>)</p>
<a class="header" href="#scope" id="scope"><h2>Scope</h2></a>
<p>The goals of this book are:</p>
<ul>
<li>
<p>Get developers up to speed with embedded Rust development. i.e. How to set
up a development environment.</p>
</li>
<li>
<p>Share <em>current</em> best practices about using Rust for embedded development. i.e.
How to best use Rust language features to write more correct embedded
software.</p>
</li>
<li>
<p>Serve as a cookbook in some cases. e.g. How do I mix C and Rust in a single
project?</p>
</li>
</ul>
<p>This book tries to be as general as possible but to make things easier for both
the readers and the writers it uses the ARM Cortex-M architecture in all its
examples. However, the book doesn't assume that the reader is familiar with this
particular architecture and explains details particular to this architecture
where required.</p>
<a class="header" href="#who-this-book-is-for" id="who-this-book-is-for"><h2>Who This Book is For</h2></a>
<p>This book caters towards people with either some embedded background or some Rust background, however we believe
everybody curious about embedded Rust programming can get something out of this book. For those without any prior knowledge
we suggest you read the &quot;Assumptions and Prerequisites&quot; section and catch up on missing knowledge to get more out of the book
and improve your reading experience. You can check out the &quot;Other Resources&quot; section to find resources on topics
you might want to catch up on.</p>
<a class="header" href="#assumptions-and-prerequisites" id="assumptions-and-prerequisites"><h3>Assumptions and Prerequisites</h3></a>
<ul>
<li>You are comfortable using the Rust Programming Language, and have written,
run, and debugged Rust applications on a desktop environment. You should also
be familiar with the idioms of the <a href="https://doc.rust-lang.org/edition-guide/">2018 edition</a> as this book targets
Rust 2018.</li>
</ul>
<ul>
<li>You are comfortable developing and debugging embedded systems in another
language such as C, C++, or Ada, and are familiar with concepts such as:
<ul>
<li>Cross Compilation</li>
<li>Memory Mapped Peripherals</li>
<li>Interrupts</li>
<li>Common interfaces such as I2C, SPI, Serial, etc.</li>
</ul>
</li>
</ul>
<a class="header" href="#other-resources" id="other-resources"><h3>Other Resources</h3></a>
<p>If you are unfamiliar with anything mentioned above or if you want more information about a specific topic mentioned in this book you might find some of these resources helpful.</p>
<table><thead><tr><th> Topic        </th><th> Resource </th><th> Description </th></tr></thead><tbody>
<tr><td> Rust         </td><td> <a href="https://doc.rust-lang.org/book/">Rust Book</a> </td><td> If you are not yet comfortable with Rust, we highly suggest reading this book. </td></tr>
<tr><td> Rust, Embedded </td><td> <a href="https://docs.rust-embedded.org">Embedded Rust Bookshelf</a> </td><td> Here you can find several other resources provided by Rust's Embedded Working Group. </td></tr>
<tr><td> Rust, Embedded </td><td> <a href="https://docs.rust-embedded.org/embedonomicon/">Embedonomicon</a> </td><td> The nitty gritty details when doing embedded programming in Rust. </td></tr>
<tr><td> Rust, Embedded </td><td> <a href="https://docs.rust-embedded.org/faq.html">embedded FAQ</a> </td><td> Frequently asked questions about Rust in an embedded context. </td></tr>
<tr><td> Interrupts </td><td> <a href="https://en.wikipedia.org/wiki/Interrupt">Interrupt</a> </td><td> - </td></tr>
<tr><td> Memory-mapped IO/Peripherals </td><td> <a href="https://en.wikipedia.org/wiki/Memory-mapped_I/O">Memory-mapped I/O</a> </td><td> - </td></tr>
<tr><td> SPI, UART, RS232, USB, I2C, TTL </td><td> <a href="https://electronics.stackexchange.com/questions/37814/usart-uart-rs232-usb-spi-i2c-ttl-etc-what-are-all-of-these-and-how-do-th">Stack Exchange about SPI, UART, and other interfaces</a> </td><td> - </td></tr>
</tbody></table>
<a class="header" href="#how-to-use-this-book" id="how-to-use-this-book"><h2>How to Use This Book</h2></a>
<p>This book generally assumes that you’re reading it front-to-back. Later
chapters build on concepts in earlier chapters, and earlier chapters may
not dig into details on a topic, revisiting the topic in a later chapter.</p>
<p>This book will be using the <a href="http://www.st.com/en/evaluation-tools/stm32f3discovery.html">STM32F3DISCOVERY</a> development board from
STMicroelectronics for the majority of the examples contained within. This board
is based on the ARM Cortex-M architecture, and while basic functionality is
the same across most CPUs based on this architecture, peripherals and other
implementation details of Microcontrollers are different between different
vendors, and often even different between Microcontroller families from the same
vendor.</p>
<p>For this reason, we suggest purchasing the <a href="http://www.st.com/en/evaluation-tools/stm32f3discovery.html">STM32F3DISCOVERY</a> development board
for the purpose of following the examples in this book.</p>
<a class="header" href="#contributing-to-this-book" id="contributing-to-this-book"><h2>Contributing to This Book</h2></a>
<p>The work on this book is coordinated in <a href="https://github.com/rust-embedded/book">this repository</a> and is mainly
developed by the <a href="https://github.com/rust-embedded/wg#the-resources-team">resources team</a>.</p>
<p>If you have trouble following the instructions in this book or find that some
section of the book is not clear enough or hard to follow then that's a bug and
it should be reported in <a href="https://github.com/rust-embedded/book/issues/">the issue tracker</a> of this book.</p>
<p>Pull requests fixing typos and adding new content are very welcome!</p>
<a class="header" href="#meet-your-hardware" id="meet-your-hardware"><h1>Meet Your Hardware</h1></a>
<p>Let's get familiar with the hardware we'll be working with.</p>
<a class="header" href="#stm32f3discovery-the-f3" id="stm32f3discovery-the-f3"><h2>STM32F3DISCOVERY (the &quot;F3&quot;)</h2></a>
<p align="center">
<img title="F3" src="../assets/f3.jpg">
</p>
<p>What does this board contain?</p>
<ul>
<li>
<p>A <a href="https://www.st.com/en/microcontrollers/stm32f303vc.html">STM32F303VCT6</a> microcontroller. This microcontroller has</p>
<ul>
<li>
<p>A single-core ARM Cortex-M4F processor with hardware support for single-precision floating point
operations and a maximum clock frequency of 72 MHz.</p>
</li>
<li>
<p>256 KiB of &quot;Flash&quot; memory. (1 KiB = 1024 bytes)</p>
</li>
<li>
<p>48 KiB of RAM.</p>
</li>
<li>
<p>A variety of integrated peripherals such as timers, I2C, SPI and USART.</p>
</li>
<li>
<p>General purpose Input Output (GPIO) and other types of pins accessible through the two rows of headers alongside the board.</p>
</li>
<li>
<p>A USB interface accessible through the USB port labeled &quot;USB USER&quot;.</p>
</li>
</ul>
</li>
<li>
<p>An <a href="https://en.wikipedia.org/wiki/Accelerometer">accelerometer</a> as part of the <a href="https://www.st.com/en/mems-and-sensors/lsm303dlhc.html">LSM303DLHC</a> chip.</p>
</li>
<li>
<p>A <a href="https://en.wikipedia.org/wiki/Magnetometer">magnetometer</a> as part of the <a href="https://www.st.com/en/mems-and-sensors/lsm303dlhc.html">LSM303DLHC</a> chip.</p>
</li>
<li>
<p>A <a href="https://en.wikipedia.org/wiki/Gyroscope">gyroscope</a> as part of the <a href="https://www.pololu.com/file/0J563/L3GD20.pdf">L3GD20</a> chip.</p>
</li>
<li>
<p>8 user LEDs arranged in the shape of a compass.</p>
</li>
<li>
<p>A second microcontroller: a <a href="https://www.st.com/en/microcontrollers/stm32f103cb.html">STM32F103</a>. This microcontroller is actually part of an on-board programmer / debugger and is connected to the USB port named &quot;USB ST-LINK&quot;.</p>
</li>
</ul>
<p>For a more detailed list of features and further specifications of the board take a look at the <a href="https://www.st.com/en/evaluation-tools/stm32f3discovery.html">STMicroelectronics</a> website.</p>
<p>A word of caution: be careful if you want to apply external signals to the board. The microcontroller STM32F303VCT6 pins take a nominal voltage of 3.3 volts. For further information consult the <a href="https://www.st.com/resource/en/datasheet/stm32f303vc.pdf">6.2 Absolute maximum ratings section in the manual</a></p>
<a class="header" href="#a-no_std-rust-environment" id="a-no_std-rust-environment"><h1>A <code>no_std</code> Rust Environment</h1></a>
<p>The term Embedded Programming is used for a wide range of different classes of programming.
Ranging from programming 8-Bit MCUs (like the <a href="https://www.st.com/resource/en/datasheet/st72325j6.pdf">ST72325xx</a>)
with just a few KB of RAM and ROM, up to systems like the Raspberry Pi
(<a href="https://en.wikipedia.org/wiki/Raspberry_Pi#Specifications">Model B 3+</a>) which has a 32/64-bit
4-core Cortex-A53 @ 1.4 GHz and 1GB of RAM. Different restrictions/limitations will apply when writing code
depending on what kind of target and use case you have.</p>
<p>There are two general Embedded Programming classifications:</p>
<a class="header" href="#hosted-environments" id="hosted-environments"><h2>Hosted Environments</h2></a>
<p>These kinds of environments are close to a normal PC environment.
What this means is that you are provided with a System Interface <a href="https://en.wikipedia.org/wiki/POSIX">E.G. POSIX</a>
that provides you with primitives to interact with various systems, such as file systems, networking, memory management, threads, etc.
Standard libraries in turn usually depend on these primitives to implement their functionality.
You may also have some sort of sysroot and restrictions on RAM/ROM-usage, and perhaps some
special HW or I/Os. Overall it feels like coding on a special-purpose PC environment.</p>
<a class="header" href="#bare-metal-environments" id="bare-metal-environments"><h2>Bare Metal Environments</h2></a>
<p>In a bare metal environment no code has been loaded before your program.
Without the software provided by an OS we can not load the standard library.
Instead the program, along with the crates it uses, can only use the hardware (bare metal) to run.
To prevent Rust from loading the standard library use <code>no_std</code>.
The platform-agnostic parts of the standard library are available through <a href="https://doc.rust-lang.org/core/">libcore</a>.
libcore also excludes things which are not always desirable in an embedded environment.
One of these things is a memory allocator for dynamic memory allocation.
If you require this or any other functionalities there are often crates which provide these.</p>
<a class="header" href="#the-libstd-runtime" id="the-libstd-runtime"><h3>The libstd Runtime</h3></a>
<p>As mentioned before using <a href="https://doc.rust-lang.org/std/">libstd</a> requires some sort of system integration, but this is not only because
<a href="https://doc.rust-lang.org/std/">libstd</a> is just providing a common way of accessing OS abstractions, it also provides a runtime.
This runtime, among other things, takes care of setting up stack overflow protection, processing command line arguments,
and spawning the main thread before a program's main function is invoked. This runtime also won't be available in a <code>no_std</code> environment.</p>
<a class="header" href="#summary" id="summary"><h2>Summary</h2></a>
<p><code>#![no_std]</code> is a crate-level attribute that indicates that the crate will link to the core-crate instead of the std-crate.
The <a href="https://doc.rust-lang.org/core/">libcore</a> crate in turn is a platform-agnostic subset of the std crate
which makes no assumptions about the system the program will run on.
As such, it provides APIs for language primitives like floats, strings and slices, as well as APIs that expose processor features
like atomic operations and SIMD instructions. However it lacks APIs for anything that involves platform integration.
Because of these properties no_std and <a href="https://doc.rust-lang.org/core/">libcore</a> code can be used for any kind of
bootstrapping (stage 0) code like bootloaders, firmware or kernels.</p>
<a class="header" href="#overview" id="overview"><h3>Overview</h3></a>
<table><thead><tr><th> feature                                                   </th><th> no_std </th><th> std </th></tr></thead><tbody>
<tr><td> heap (dynamic memory)                                     </td><td>   *    </td><td>  ✓  </td></tr>
<tr><td> collections (Vec, HashMap, etc)                           </td><td>  **    </td><td>  ✓  </td></tr>
<tr><td> stack overflow protection                                 </td><td>   ✘    </td><td>  ✓  </td></tr>
<tr><td> runs init code before main                                </td><td>   ✘    </td><td>  ✓  </td></tr>
<tr><td> libstd available                                          </td><td>   ✘    </td><td>  ✓  </td></tr>
<tr><td> libcore available                                         </td><td>   ✓    </td><td>  ✓  </td></tr>
<tr><td> writing firmware, kernel, or bootloader code              </td><td>   ✓    </td><td>  ✘  </td></tr>
</tbody></table>
<p>* Only if you use the <code>alloc</code> crate and use a suitable allocator like <a href="https://github.com/rust-embedded/alloc-cortex-m">alloc-cortex-m</a>.</p>
<p>** Only if you use the <code>collections</code> crate and configure a global default allocator.</p>
<a class="header" href="#see-also" id="see-also"><h2>See Also</h2></a>
<ul>
<li><a href="https://github.com/rust-lang/rfcs/blob/master/text/1184-stabilize-no_std.md">RFC-1184</a></li>
</ul>
<a class="header" href="#tooling" id="tooling"><h1>Tooling</h1></a>
<p>Dealing with microcontrollers involves using several different tools as we'll be
dealing with an architecture different than your laptop's and we'll have to run
and debug programs on a <em>remote</em> device.</p>
<p>We'll use all the tools listed below. Any recent version should work when a
minimum version is not specified, but we have listed the versions we have
tested.</p>
<ul>
<li>Rust 1.31, 1.31-beta, or a newer toolchain PLUS ARM Cortex-M compilation
support.</li>
<li><a href="https://github.com/rust-embedded/cargo-binutils"><code>cargo-binutils</code></a> ~0.1.4</li>
<li><a href="https://www.qemu.org/"><code>qemu-system-arm</code></a>. Tested versions: 3.0.0</li>
<li>OpenOCD &gt;=0.8. Tested versions: v0.9.0 and v0.10.0</li>
<li>GDB with ARM support. Version 7.12 or newer highly recommended. Tested
versions: 7.10, 7.11, 7.12 and 8.1</li>
<li><a href="https://github.com/ashleygwilliams/cargo-generate"><code>cargo-generate</code></a> or <code>git</code>.
These tools are optional but will make it easier to follow along with the book.</li>
</ul>
<p>The text below explains why we are using these tools. Installation instructions
can be found on the next page.</p>
<a class="header" href="#cargo-generate-or-git" id="cargo-generate-or-git"><h2><code>cargo-generate</code> OR <code>git</code></h2></a>
<p>Bare metal programs are non-standard (<code>no_std</code>) Rust programs that require some
adjustments to the linking process in order to get the memory layout of the program
right. This requires some additional files (like linker scripts) and
settings (like linker flags). We have packaged those for you in a template
such that you only need to fill in the missing information (such as the project name and the
characteristics of your target hardware).</p>
<p>Our template is compatible with <code>cargo-generate</code>: a Cargo subcommand for
creating new Cargo projects from templates. You can also download the
template using <code>git</code>, <code>curl</code>, <code>wget</code>, or your web browser.</p>
<a class="header" href="#cargo-binutils" id="cargo-binutils"><h2><code>cargo-binutils</code></h2></a>
<p><code>cargo-binutils</code> is a collection of Cargo subcommands that make it easy to use
the LLVM tools that are shipped with the Rust toolchain. These tools include the
LLVM versions of <code>objdump</code>, <code>nm</code> and <code>size</code> and are used for inspecting
binaries.</p>
<p>The advantage of using these tools over GNU binutils is that (a) installing the
LLVM tools is the same one-command installation (<code>rustup component add llvm-tools-preview</code>) regardless of your OS and (b) tools like <code>objdump</code> support
all the architectures that <code>rustc</code> supports -- from ARM to x86_64 -- because
they both share the same LLVM backend.</p>
<a class="header" href="#qemu-system-arm" id="qemu-system-arm"><h2><code>qemu-system-arm</code></h2></a>
<p>QEMU is an emulator. In this case we use the variant that can fully emulate ARM
systems. We use QEMU to run embedded programs on the host. Thanks to this you
can follow some parts of this book even if you don't have any hardware with you!</p>
<a class="header" href="#gdb" id="gdb"><h2>GDB</h2></a>
<p>A debugger is a very important component of embedded development as you may not
always have the luxury to log stuff to the host console. In some cases, you may
not even have LEDs to blink on your hardware!</p>
<p>In general, LLDB works as well as GDB when it comes to debugging but we haven't
found an LLDB counterpart to GDB's <code>load</code> command, which uploads the program to
the target hardware, so currently we recommend that you use GDB.</p>
<a class="header" href="#openocd" id="openocd"><h2>OpenOCD</h2></a>
<p>GDB isn't able to communicate directly with the ST-Link debugging hardware on
your STM32F3DISCOVERY development board. It needs a translator and the Open
On-Chip Debugger, OpenOCD, is that translator. OpenOCD is a program that runs
on your laptop/PC and translates between GDB's TCP/IP based remote debug
protocol and ST-Link's USB based protocol.</p>
<p>OpenOCD also performs other important work as part of its translation for the
debugging of the ARM Cortex-M based microcontroller on your STM32F3DISCOVERY
development board:</p>
<ul>
<li>It knows how to interact with the memory mapped registers used by the ARM
CoreSight debug peripheral. It is these CoreSight registers that allow for:
<ul>
<li>Breakpoint/Watchpoint manipulation</li>
<li>Reading and writing of the CPU registers</li>
<li>Detecting when the CPU has been halted for a debug event</li>
<li>Continuing CPU execution after a debug event has been encountered</li>
<li>etc.</li>
</ul>
</li>
<li>It also knows how to erase and write to the microcontroller's FLASH</li>
</ul>
<a class="header" href="#installing-the-tools" id="installing-the-tools"><h1>Installing the tools</h1></a>
<p>This page contains OS-agnostic installation instructions for a few of the tools:</p>
<a class="header" href="#rust-toolchain" id="rust-toolchain"><h3>Rust Toolchain</h3></a>
<p>Install rustup by following the instructions at <a href="https://rustup.rs">https://rustup.rs</a>.</p>
<p><strong>NOTE</strong> Make sure you have a compiler version equal to or newer than <code>1.31</code>. <code>rustc -V</code> should return a date newer than the one shown below.</p>
<pre><code class="language-console">$ rustc -V
rustc 1.31.1 (b6c32da9b 2018-12-18)
</code></pre>
<p>For bandwidth and disk usage concerns the default installation only supports
native compilation. To add cross compilation support for the ARM Cortex-M
architecture choose one of the following compilation targets. Use the last one
for the STM32F3DISCOVERY board and follow along with the book.</p>
<p>Cortex M0 M0+</p>
<pre><code class="language-console">$ rustup target add thumbv6m-none-eabi
</code></pre>
<p>Cortex M3</p>
<pre><code class="language-console">$ rustup target add thumbv7m-none-eabi
</code></pre>
<p>Cortex M4 M7 without FPU</p>
<pre><code class="language-console">$ rustup target add thumbv7em-none-eabi
</code></pre>
<p>Cortex M4 M7 with FPU &lt;-- STM32F3DISCOVERY</p>
<pre><code class="language-console">$ rustup target add thumbv7em-none-eabihf
</code></pre>
<a class="header" href="#cargo-binutils-1" id="cargo-binutils-1"><h3><code>cargo-binutils</code></h3></a>
<pre><code class="language-console">$ cargo install cargo-binutils

$ rustup component add llvm-tools-preview
</code></pre>
<a class="header" href="#os-specific-instructions" id="os-specific-instructions"><h3>OS-Specific Instructions</h3></a>
<p>Now follow the instructions specific to the OS you are using:</p>
<ul>
<li><a href="intro/install/linux.html">Linux</a></li>
<li><a href="intro/install/windows.html">Windows</a></li>
<li><a href="intro/install/macos.html">macOS</a></li>
</ul>
<a class="header" href="#linux" id="linux"><h1>Linux</h1></a>
<p>Here are the installation commands for a few Linux distributions.</p>
<a class="header" href="#packages" id="packages"><h2>Packages</h2></a>
<ul>
<li>Ubuntu 18.04 or newer / Debian stretch or newer</li>
</ul>
<blockquote>
<p><strong>NOTE</strong> <code>gdb-multiarch</code> is the GDB command you'll use to debug your ARM
Cortex-M programs</p>
</blockquote>
<!-- Debian stretch -->
<!-- GDB 7.12 -->
<!-- OpenOCD 0.9.0 -->
<!-- QEMU 2.8.1 -->
<!-- Ubuntu 18.04 -->
<!-- GDB 8.1 -->
<!-- OpenOCD 0.10.0 -->
<!-- QEMU 2.11.1 -->
<pre><code class="language-console">$ sudo apt install \
  gdb-multiarch \
  openocd \
  qemu-system-arm
</code></pre>
<ul>
<li>Ubuntu 14.04 and 16.04</li>
</ul>
<blockquote>
<p><strong>NOTE</strong> <code>arm-none-eabi-gdb</code> is the GDB command you'll use to debug your ARM
Cortex-M programs</p>
</blockquote>
<!-- Ubuntu 14.04 -->
<!-- GDB 7.6 (!) -->
<!-- OpenOCD 0.7.0 (?) -->
<!-- QEMU 2.0.0 (?) -->
<pre><code class="language-console">$ sudo apt install \
  gdb-arm-none-eabi \
  openocd \
  qemu-system-arm
</code></pre>
<ul>
<li>Fedora 27 or newer</li>
</ul>
<blockquote>
<p><strong>NOTE</strong> <code>arm-none-eabi-gdb</code> is the GDB command you'll use to debug your ARM
Cortex-M programs</p>
</blockquote>
<!-- Fedora 27 -->
<!-- GDB 7.6 (!) -->
<!-- OpenOCD 0.10.0 -->
<!-- QEMU 2.10.2 -->
<pre><code class="language-console">$ sudo dnf install \
  arm-none-eabi-gdb \
  openocd \
  qemu-system-arm
</code></pre>
<ul>
<li>Arch Linux</li>
</ul>
<blockquote>
<p><strong>NOTE</strong> <code>arm-none-eabi-gdb</code> is the GDB command you'll use to debug ARM
Cortex-M programs</p>
</blockquote>
<pre><code class="language-console">$ sudo pacman -S \
  arm-none-eabi-gdb \
  qemu-arch-extra \
  openocd

</code></pre>
<a class="header" href="#udev-rules" id="udev-rules"><h2>udev rules</h2></a>
<p>These rules let you use OpenOCD with the Discovery board without root privilege.</p>
<p>Create this file in <code>/etc/udev/rules.d</code> with the contents shown below.</p>
<pre><code class="language-console">$ cat /etc/udev/rules.d/70-st-link.rules
</code></pre>
<pre><code class="language-text"># STM32F3DISCOVERY rev A/B - ST-LINK/V2
ATTRS{idVendor}==&quot;0483&quot;, ATTRS{idProduct}==&quot;3748&quot;, TAG+=&quot;uaccess&quot;

# STM32F3DISCOVERY rev C+ - ST-LINK/V2-1
ATTRS{idVendor}==&quot;0483&quot;, ATTRS{idProduct}==&quot;374b&quot;, TAG+=&quot;uaccess&quot;
</code></pre>
<p>Then reload all the udev rules with:</p>
<pre><code class="language-console">$ sudo udevadm control --reload-rules
</code></pre>
<p>If you had the board plugged into your laptop, unplug it and then plug it in again.</p>
<p>You can check the permissions by running these commands:</p>
<pre><code class="language-console">$ lsusb
(..)
Bus 001 Device 018: ID 0483:374b STMicroelectronics ST-LINK/V2.1
(..)
</code></pre>
<p>Take note of the bus and device numbers. Use those numbers in the following
command:</p>
<pre><code class="language-console">$ # the format of the path is /dev/bus/usb/&lt;bus&gt;/&lt;device&gt;
$ ls -l /dev/bus/usb/001/018
crw-------+ 1 root root 189, 17 Sep 13 12:34 /dev/bus/usb/001/018
$ getfacl /dev/bus/usb/001/018 | grep user
user::rw-
user:you:rw-
</code></pre>
<p>The <code>+</code> appended to permissions indicates the existence of an extended
permission. The <code>getfacl</code> command tells the user <code>you</code> can make use of
this device.</p>
<p>Now, go to the <a href="intro/install/verify.html">next section</a>.</p>
<a class="header" href="#macos" id="macos"><h1>macOS</h1></a>
<p>All the tools can be installed using <a href="http://brew.sh/">Homebrew</a>:</p>
<pre><code class="language-console">$ # GDB
$ brew install armmbed/formulae/arm-none-eabi-gcc

$ # OpenOCD
$ brew install openocd

$ # QEMU
$ brew install qemu
</code></pre>
<p>That's all! Go to the <a href="intro/install/verify.html">next section</a>.</p>
<a class="header" href="#windows" id="windows"><h1>Windows</h1></a>
<a class="header" href="#arm-none-eabi-gdb" id="arm-none-eabi-gdb"><h2><code>arm-none-eabi-gdb</code></h2></a>
<p>ARM provides <code>.exe</code> installers for Windows. Grab one from <a href="https://developer.arm.com/open-source/gnu-toolchain/gnu-rm/downloads">here</a>, and follow the instructions.
Just before the installation process finishes tick/select the &quot;Add path to environment variable&quot;
option. Then verify that the tools are in your <code>%PATH%</code>:</p>
<pre><code class="language-console">$ arm-none-eabi-gdb -v
GNU gdb (GNU Tools for Arm Embedded Processors 7-2018-q2-update) 8.1.0.20180315-git
(..)
</code></pre>
<a class="header" href="#openocd-1" id="openocd-1"><h2>OpenOCD</h2></a>
<p>There's no official binary release of OpenOCD for Windows but there are unofficial releases
available <a href="https://github.com/gnu-mcu-eclipse/openocd/releases">here</a>. Grab the 0.10.x zipfile and extract it somewhere on your drive (I
recommend <code>C:\OpenOCD</code> but with the drive letter that makes sense to you) then update your <code>%PATH%</code>
environment variable to include the following path: <code>C:\OpenOCD\bin</code> (or the path that you used
before).</p>
<p>Verify that OpenOCD is in your <code>%PATH%</code> with:</p>
<pre><code class="language-console">$ openocd -v
Open On-Chip Debugger 0.10.0
(..)
</code></pre>
<a class="header" href="#qemu" id="qemu"><h2>QEMU</h2></a>
<p>Grab QEMU from <a href="https://www.qemu.org/download/#windows">the official website</a>.</p>
<a class="header" href="#st-link-usb-driver" id="st-link-usb-driver"><h2>ST-LINK USB driver</h2></a>
<p>You'll also need to install <a href="http://www.st.com/en/embedded-software/stsw-link009.html">this USB driver</a> or OpenOCD won't work. Follow the installer
instructions and make sure you install the right version (32-bit or 64-bit) of the driver.</p>
<p>That's all! Go to the <a href="intro/install/verify.html">next section</a>.</p>
<a class="header" href="#verify-installation" id="verify-installation"><h1>Verify Installation</h1></a>
<p>In this section we check that some of the required tools / drivers have been
correctly installed and configured.</p>
<p>Connect your laptop / PC to the discovery board using a micro USB cable. The
discovery board has two USB connectors; use the one labeled &quot;USB ST-LINK&quot; that
sits on the center of the edge of the board.</p>
<p>Also check that the ST-LINK header is populated. See the picture below; the
ST-LINK header is circled in red.</p>
<p align="center">
<img title="Connected discovery board" src="../../assets/verify.jpeg">
</p>
<p>Now run the following command:</p>
<pre><code class="language-console">$ openocd -f interface/stlink-v2-1.cfg -f target/stm32f3x.cfg
</code></pre>
<p>You should get the following output and the program should block the console:</p>
<pre><code class="language-text">Open On-Chip Debugger 0.10.0
Licensed under GNU GPL v2
For bug reports, read
        http://openocd.org/doc/doxygen/bugs.html
Info : auto-selecting first available session transport &quot;hla_swd&quot;. To override use 'transport select &lt;transport&gt;'.
adapter speed: 1000 kHz
adapter_nsrst_delay: 100
Info : The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD
none separate
Info : Unable to match requested speed 1000 kHz, using 950 kHz
Info : Unable to match requested speed 1000 kHz, using 950 kHz
Info : clock speed 950 kHz
Info : STLINK v2 JTAG v27 API v2 SWIM v15 VID 0x0483 PID 0x374B
Info : using stlink api v2
Info : Target voltage: 2.919881
Info : stm32f3x.cpu: hardware has 6 breakpoints, 4 watchpoints
</code></pre>
<p>The contents may not match exactly but you should get the last line about
breakpoints and watchpoints. If you got it then terminate the OpenOCD process
and move to the <a href="intro/install/../hardware.html">next section</a>.</p>
<p>If you didn't get the &quot;breakpoints&quot; line then try the following command.</p>
<pre><code class="language-console">$ openocd -f interface/stlink-v2.cfg -f target/stm32f3x.cfg
</code></pre>
<p>If that command works that means you got an old hardware revision of the
discovery board. That won't be a problem but commit that fact to memory as
you'll need to configure things a bit differently later on. You can move to the
<a href="intro/install/../hardware.html">next section</a>.</p>
<p>If neither command worked as a normal user then try to run them with root
permission (e.g. <code>sudo openocd ..</code>). If the commands do work with root
permission then check that the <a href="intro/install/linux.html#udev-rules">udev rules</a> have been correctly set.</p>
<p>If you have reached this point and OpenOCD is not working please open <a href="https://github.com/rust-embedded/book/issues">an issue</a>
and we'll help you out!</p>
<a class="header" href="#getting-started" id="getting-started"><h1>Getting Started</h1></a>
<p>In this section we'll walk you through the process of writing, building,
flashing and debugging embedded programs. You will be able to try most of the
examples without any special hardware as we will show you the basics using
QEMU, a popular open-source hardware emulator. The only section where hardware
is required is, naturally enough, the <a href="start/./hardware.html">Hardware</a> section,
where we use OpenOCD to program an <a href="http://www.st.com/en/evaluation-tools/stm32f3discovery.html">STM32F3DISCOVERY</a>.</p>
<a class="header" href="#qemu-1" id="qemu-1"><h1>QEMU</h1></a>
<p>We'll start writing a program for the <a href="http://www.ti.com/product/LM3S6965">LM3S6965</a>, a Cortex-M3 microcontroller.
We have chosen this as our initial target because it <a href="https://wiki.qemu.org/Documentation/Platforms/ARM#Supported_in_qemu-system-arm">can be emulated</a> using QEMU
so you don't need to fiddle with hardware in this section and we can focus on
the tooling and the development process.</p>
<a class="header" href="#creating-a-non-standard-rust-program" id="creating-a-non-standard-rust-program"><h2>Creating a non standard Rust program</h2></a>
<p>We'll use the <a href="https://github.com/rust-embedded/cortex-m-quickstart"><code>cortex-m-quickstart</code></a> project template to generate a new
project from it.</p>
<ul>
<li>Using <code>cargo-generate</code></li>
</ul>
<pre><code class="language-console">cargo generate --git https://github.com/rust-embedded/cortex-m-quickstart
</code></pre>
<pre><code class="language-text"> Project Name: app
 Creating project called `app`...
 Done! New project created /tmp/app
</code></pre>
<pre><code class="language-console">cd app
</code></pre>
<ul>
<li>Using <code>git</code></li>
</ul>
<p>Clone the repository</p>
<pre><code class="language-console">git clone https://github.com/rust-embedded/cortex-m-quickstart app
cd app
</code></pre>
<p>And then fill in the placeholders in the <code>Cargo.toml</code> file</p>
<pre><code class="language-toml">[package]
authors = [&quot;{{authors}}&quot;] # &quot;{{authors}}&quot; -&gt; &quot;John Smith&quot;
edition = &quot;2018&quot;
name = &quot;{{project-name}}&quot; # &quot;{{project-name}}&quot; -&gt; &quot;awesome-app&quot;
version = &quot;0.1.0&quot;

# ..

[[bin]]
name = &quot;{{project-name}}&quot; # &quot;{{project-name}}&quot; -&gt; &quot;awesome-app&quot;
test = false
bench = false
</code></pre>
<ul>
<li>Using neither</li>
</ul>
<p>Grab the latest snapshot of the <code>cortex-m-quickstart</code> template and extract it.</p>
<p>Using the command line:</p>
<pre><code class="language-console">curl -LO https://github.com/rust-embedded/cortex-m-quickstart/archive/master.zip
unzip master.zip
mv cortex-m-quickstart-master app
cd app
</code></pre>
<p>Or you can browse to <a href="https://github.com/rust-embedded/cortex-m-quickstart"><code>cortex-m-quickstart</code></a>, click the green &quot;Clone or
download&quot; button and then click &quot;Download ZIP&quot;.</p>
<p>Then fill in the placeholders in the <code>Cargo.toml</code> file as done in the second
part of the &quot;Using <code>git</code>&quot; version.</p>
<p><strong>IMPORTANT</strong> We'll use the name &quot;app&quot; for the project name in this tutorial.
Whenever you see the word &quot;app&quot; you should replace it with the name you selected
for your project. Or, you could also name your project &quot;app&quot; and avoid the
substitutions.</p>
<p>For convenience here are the most important parts of the source code in <code>src/main.rs</code>:</p>
<pre><code class="language-rust ignore">#![no_std]
#![no_main]

extern crate panic_halt;

use cortex_m_rt::entry;

#[entry]
fn main() -&gt; ! {
    loop {
        // your code goes here
    }
}
</code></pre>
<p>This program is a bit different from a standard Rust program so let's take a
closer look.</p>
<p><code>#![no_std]</code> indicates that this program will <em>not</em> link to the standard crate,
<code>std</code>. Instead it will link to its subset: the <code>core</code> crate.</p>
<p><code>#![no_main]</code> indicates that this program won't use the standard <code>main</code>
interface that most Rust programs use. The main (no pun intended) reason to go
with <code>no_main</code> is that using the <code>main</code> interface in <code>no_std</code> context requires
nightly.</p>
<p><code>extern crate panic_halt;</code>. This crate provides a <code>panic_handler</code> that defines
the panicking behavior of the program. We will cover this in more detail in the
<a href="start/panicking.html">Panicking</a> chapter of the book.</p>
<p><a href="https://docs.rs/cortex-m-rt-macros/latest/cortex_m_rt_macros/attr.entry.html"><code>#[entry]</code></a> is an attribute provided by the <a href="https://crates.io/crates/cortex-m-rt"><code>cortex-m-rt</code></a> crate that's used
to mark the entry point of the program. As we are not using the standard <code>main</code>
interface we need another way to indicate the entry point of the program and
that'd be <code>#[entry]</code>.</p>
<p><code>fn main() -&gt; !</code>. Our program will be the <em>only</em> process running on the target
hardware so we don't want it to end! We use a <a href="https://doc.rust-lang.org/rust-by-example/fn/diverging.html">divergent function</a> (the <code>-&gt; !</code>
bit in the function signature) to ensure at compile time that'll be the case.</p>
<a class="header" href="#cross-compiling" id="cross-compiling"><h3>Cross compiling</h3></a>
<p>The next step is to <em>cross</em> compile the program for the Cortex-M3 architecture.
That's as simple as running <code>cargo build --target $TRIPLE</code> if you know what the
compilation target (<code>$TRIPLE</code>) should be. Luckily, the <code>.cargo/config</code> in the
template has the answer:</p>
<pre><code class="language-console">tail -n6 .cargo/config
</code></pre>
<pre><code class="language-toml">[build]
# Pick ONE of these compilation targets
# target = &quot;thumbv6m-none-eabi&quot;    # Cortex-M0 and Cortex-M0+
target = &quot;thumbv7m-none-eabi&quot;    # Cortex-M3
# target = &quot;thumbv7em-none-eabi&quot;   # Cortex-M4 and Cortex-M7 (no FPU)
# target = &quot;thumbv7em-none-eabihf&quot; # Cortex-M4F and Cortex-M7F (with FPU)
</code></pre>
<p>To cross compile for the Cortex-M3 architecture we have to use
<code>thumbv7m-none-eabi</code>. This compilation target has been set as the default so the
two commands below do the same:</p>
<pre><code class="language-console">cargo build --target thumbv7m-none-eabi
cargo build
</code></pre>
<a class="header" href="#inspecting" id="inspecting"><h3>Inspecting</h3></a>
<p>Now we have a non-native ELF binary in <code>target/thumbv7m-none-eabi/debug/app</code>. We
can inspect it using <code>cargo-binutils</code>.</p>
<p>With <code>cargo-readobj</code> we can print the ELF headers to confirm that this is an ARM
binary.</p>
<pre><code class="language-console">cargo readobj --bin app -- -file-headers
</code></pre>
<p>Note that:</p>
<ul>
<li><code>--bin app</code> is sugar for inspecting the binary at <code>target/$TRIPLE/debug/app</code></li>
<li><code>--bin app</code> will also (re)compile the binary, if necessary</li>
</ul>
<pre><code class="language-text">ELF Header:
  Magic:   7f 45 4c 46 01 01 01 00 00 00 00 00 00 00 00 00
  Class:                             ELF32
  Data:                              2's complement, little endian
  Version:                           1 (current)
  OS/ABI:                            UNIX - System V
  ABI Version:                       0x0
  Type:                              EXEC (Executable file)
  Machine:                           ARM
  Version:                           0x1
  Entry point address:               0x405
  Start of program headers:          52 (bytes into file)
  Start of section headers:          153204 (bytes into file)
  Flags:                             0x5000200
  Size of this header:               52 (bytes)
  Size of program headers:           32 (bytes)
  Number of program headers:         2
  Size of section headers:           40 (bytes)
  Number of section headers:         19
  Section header string table index: 18
</code></pre>
<p><code>cargo-size</code> can print the size of the linker sections of the binary.</p>
<blockquote>
<p><strong>NOTE</strong> this output assumes that rust-embedded/cortex-m-rt#111 has been
merged</p>
</blockquote>
<pre><code class="language-console">cargo size --bin app --release -- -A
</code></pre>
<p>we use <code>--release</code> to inspect the optimized version</p>
<pre><code class="language-text">app  :
section             size        addr
.vector_table       1024         0x0
.text                 92       0x400
.rodata                0       0x45c
.data                  0  0x20000000
.bss                   0  0x20000000
.debug_str          2958         0x0
.debug_loc            19         0x0
.debug_abbrev        567         0x0
.debug_info         4929         0x0
.debug_ranges         40         0x0
.debug_macinfo         1         0x0
.debug_pubnames     2035         0x0
.debug_pubtypes     1892         0x0
.ARM.attributes       46         0x0
.debug_frame         100         0x0
.debug_line          867         0x0
Total              14570
</code></pre>
<blockquote>
<p>A refresher on ELF linker sections</p>
<ul>
<li><code>.text</code> contains the program instructions</li>
<li><code>.rodata</code> contains constant values like strings</li>
<li><code>.data</code> contains statically allocated variables whose initial values are
<em>not</em> zero</li>
<li><code>.bss</code> also contains statically allocated variables whose initial values
<em>are</em> zero</li>
<li><code>.vector_table</code> is a <em>non</em>-standard section that we use to store the vector
(interrupt) table</li>
<li><code>.ARM.attributes</code> and the <code>.debug_*</code> sections contain metadata and will
<em>not</em> be loaded onto the target when flashing the binary.</li>
</ul>
</blockquote>
<p><strong>IMPORTANT</strong>: ELF files contain metadata like debug information so their <em>size
on disk</em> does <em>not</em> accurately reflect the space the program will occupy when
flashed on a device. <em>Always</em> use <code>cargo-size</code> to check how big a binary really
is.</p>
<p><code>cargo-objdump</code> can be used to disassemble the binary.</p>
<pre><code class="language-console">cargo objdump --bin app --release -- -disassemble -no-show-raw-insn -print-imm-hex
</code></pre>
<blockquote>
<p><strong>NOTE</strong> this output can differ on your system. New versions of rustc, LLVM
and libraries can generate different assembly. We truncated some of the instructions
to keep the snippet small.</p>
</blockquote>
<pre><code class="language-text">app:  file format ELF32-arm-little

Disassembly of section .text:
main:
     400: bl  #0x256
     404: b #-0x4 &lt;main+0x4&gt;

Reset:
     406: bl  #0x24e
     40a: movw  r0, #0x0
     &lt; .. truncated any more instructions .. &gt;

DefaultHandler_:
     656: b #-0x4 &lt;DefaultHandler_&gt;

UsageFault:
     657: strb  r7, [r4, #0x3]

DefaultPreInit:
     658: bx  lr

__pre_init:
     659: strb  r7, [r0, #0x1]

__nop:
     65a: bx  lr

HardFaultTrampoline:
     65c: mrs r0, msp
     660: b #-0x2 &lt;HardFault_&gt;

HardFault_:
     662: b #-0x4 &lt;HardFault_&gt;

HardFault:
     663: &lt;unknown&gt;
</code></pre>
<a class="header" href="#running" id="running"><h3>Running</h3></a>
<p>Next, let's see how to run an embedded program on QEMU! This time we'll use the
<code>hello</code> example which actually does something.</p>
<p>For convenience here's the source code of <code>examples/hello.rs</code>:</p>
<pre><code class="language-rust ignore">//! Prints &quot;Hello, world!&quot; on the host console using semihosting

#![no_main]
#![no_std]

extern crate panic_halt;

use cortex_m_rt::entry;
use cortex_m_semihosting::{debug, hprintln};

#[entry]
fn main() -&gt; ! {
    hprintln!(&quot;Hello, world!&quot;).unwrap();

    // exit QEMU
    // NOTE do not run this on hardware; it can corrupt OpenOCD state
    debug::exit(debug::EXIT_SUCCESS);

    loop {}
}
</code></pre>
<p>This program uses something called semihosting to print text to the <em>host</em>
console. When using real hardware this requires a debug session but when using
QEMU this Just Works.</p>
<p>Let's start by compiling the example:</p>
<pre><code class="language-console">cargo build --example hello
</code></pre>
<p>The output binary will be located at
<code>target/thumbv7m-none-eabi/debug/examples/hello</code>.</p>
<p>To run this binary on QEMU run the following command:</p>
<pre><code class="language-console">qemu-system-arm \
  -cpu cortex-m3 \
  -machine lm3s6965evb \
  -nographic \
  -semihosting-config enable=on,target=native \
  -kernel target/thumbv7m-none-eabi/debug/examples/hello
</code></pre>
<pre><code class="language-text">Hello, world!
</code></pre>
<p>The command should successfully exit (exit code = 0) after printing the text. On
*nix you can check that with the following command:</p>
<pre><code class="language-console">echo $?
</code></pre>
<pre><code class="language-text">0
</code></pre>
<p>Let me break down that long QEMU command for you:</p>
<ul>
<li>
<p><code>qemu-system-arm</code>. This is the QEMU emulator. There are a few variants of
these QEMU binaries; this one does full <em>system</em> emulation of <em>ARM</em> machines
hence the name.</p>
</li>
<li>
<p><code>-cpu cortex-m3</code>. This tells QEMU to emulate a Cortex-M3 CPU. Specifying the
CPU model lets us catch some miscompilation errors: for example, running a
program compiled for the Cortex-M4F, which has a hardware FPU, will make QEMU
error during its execution.</p>
</li>
<li>
<p><code>-machine lm3s6965evb</code>. This tells QEMU to emulate the LM3S6965EVB, an
evaluation board that contains an LM3S6965 microcontroller.</p>
</li>
<li>
<p><code>-nographic</code>. This tells QEMU to not launch its GUI.</p>
</li>
<li>
<p><code>-semihosting-config (..)</code>. This tells QEMU to enable semihosting. Semihosting
lets the emulated device, among other things, use the host stdout, stderr and
stdin and create files on the host.</p>
</li>
<li>
<p><code>-kernel $file</code>. This tells QEMU which binary to load and run on the emulated
machine.</p>
</li>
</ul>
<p>Typing out that long QEMU command is too much work! We can set a custom runner
to simplify the process. <code>.cargo/config</code> has a commented out runner that invokes
QEMU; let's uncomment it:</p>
<pre><code class="language-console">head -n3 .cargo/config
</code></pre>
<pre><code class="language-toml">[target.thumbv7m-none-eabi]
# uncomment this to make `cargo run` execute programs on QEMU
runner = &quot;qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel&quot;
</code></pre>
<p>This runner only applies to the <code>thumbv7m-none-eabi</code> target, which is our
default compilation target. Now <code>cargo run</code> will compile the program and run it
on QEMU:</p>
<pre><code class="language-console">cargo run --example hello --release
</code></pre>
<pre><code class="language-text">   Compiling app v0.1.0 (file:///tmp/app)
    Finished release [optimized + debuginfo] target(s) in 0.26s
     Running `qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel target/thumbv7m-none-eabi/release/examples/hello`
Hello, world!
</code></pre>
<a class="header" href="#debugging" id="debugging"><h3>Debugging</h3></a>
<p>Debugging is critical to embedded development. Let's see how it's done.</p>
<p>Debugging an embedded device involves <em>remote</em> debugging as the program that we
want to debug won't be running on the machine that's running the debugger
program (GDB or LLDB).</p>
<p>Remote debugging involves a client and a server. In a QEMU setup, the client
will be a GDB (or LLDB) process and the server will be the QEMU process that's
also running the embedded program.</p>
<p>In this section we'll use the <code>hello</code> example we already compiled.</p>
<p>The first debugging step is to launch QEMU in debugging mode:</p>
<pre><code class="language-console">qemu-system-arm \
  -cpu cortex-m3 \
  -machine lm3s6965evb \
  -nographic \
  -semihosting-config enable=on,target=native \
  -gdb tcp::3333 \
  -S \
  -kernel target/thumbv7m-none-eabi/debug/examples/hello
</code></pre>
<p>This command won't print anything to the console and will block the terminal. We
have passed two extra flags this time:</p>
<ul>
<li>
<p><code>-gdb tcp::3333</code>. This tells QEMU to wait for a GDB connection on TCP
port 3333.</p>
</li>
<li>
<p><code>-S</code>. This tells QEMU to freeze the machine at startup. Without this the
program would have reached the end of main before we had a chance to launch
the debugger!</p>
</li>
</ul>
<p>Next we launch GDB in another terminal and tell it to load the debug symbols of
the example:</p>
<pre><code class="language-console">gdb-multiarch -q target/thumbv7m-none-eabi/debug/examples/hello
</code></pre>
<p><strong>NOTE</strong>: you might need another version of gdb instead of <code>gdb-multiarch</code> depending
on which one you installed in the installation chapter. This could also be
<code>arm-none-eabi-gdb</code> or just <code>gdb</code>.</p>
<p>Then within the GDB shell we connect to QEMU, which is waiting for a connection
on TCP port 3333.</p>
<pre><code class="language-console">target remote :3333
</code></pre>
<pre><code class="language-text">Remote debugging using :3333
Reset () at $REGISTRY/cortex-m-rt-0.6.1/src/lib.rs:473
473     pub unsafe extern &quot;C&quot; fn Reset() -&gt; ! {
</code></pre>
<p>You'll see that the process is halted and that the program counter is pointing
to a function named <code>Reset</code>. That is the reset handler: what Cortex-M cores
execute upon booting.</p>
<p>This reset handler will eventually call our main function. Let's skip all the
way there using a breakpoint and the <code>continue</code> command:</p>
<pre><code class="language-console">break main
</code></pre>
<pre><code class="language-text">Breakpoint 1 at 0x400: file examples/panic.rs, line 29.
</code></pre>
<pre><code class="language-console">continue
</code></pre>
<pre><code class="language-text">Continuing.

Breakpoint 1, main () at examples/hello.rs:17
17          let mut stdout = hio::hstdout().unwrap();
</code></pre>
<p>We are now close to the code that prints &quot;Hello, world!&quot;. Let's move forward
using the <code>next</code> command.</p>
<pre><code class="language-console">next
</code></pre>
<pre><code class="language-text">18          writeln!(stdout, &quot;Hello, world!&quot;).unwrap();
</code></pre>
<pre><code class="language-console">next
</code></pre>
<pre><code class="language-text">20          debug::exit(debug::EXIT_SUCCESS);
</code></pre>
<p>At this point you should see &quot;Hello, world!&quot; printed on the terminal that's
running <code>qemu-system-arm</code>.</p>
<pre><code class="language-text">$ qemu-system-arm (..)
Hello, world!
</code></pre>
<p>Calling <code>next</code> again will terminate the QEMU process.</p>
<pre><code class="language-console">next
</code></pre>
<pre><code class="language-text">[Inferior 1 (Remote target) exited normally]
</code></pre>
<p>You can now exit the GDB session.</p>
<pre><code class="language-console">quit
</code></pre>
<a class="header" href="#hardware" id="hardware"><h1>Hardware</h1></a>
<p>By now you should be somewhat familiar with the tooling and the development
process. In this section we'll switch to real hardware; the process will remain
largely the same. Let's dive in.</p>
<a class="header" href="#know-your-hardware" id="know-your-hardware"><h2>Know your hardware</h2></a>
<p>Before we begin you need to identify some characteristics of the target device
as these will be used to configure the project:</p>
<ul>
<li>
<p>The ARM core. e.g. Cortex-M3.</p>
</li>
<li>
<p>Does the ARM core include an FPU? Cortex-M4<strong>F</strong> and Cortex-M7<strong>F</strong> cores do.</p>
</li>
<li>
<p>How much Flash memory and RAM does the target device have? e.g. 256 KiB of
Flash and 32 KiB of RAM.</p>
</li>
<li>
<p>Where are Flash memory and RAM mapped in the address space? e.g. RAM is
commonly located at address <code>0x2000_0000</code>.</p>
</li>
</ul>
<p>You can find this information in the data sheet or the reference manual of your
device.</p>
<p>In this section we'll be using our reference hardware, the STM32F3DISCOVERY.
This board contains an STM32F303VCT6 microcontroller. This microcontroller has:</p>
<ul>
<li>
<p>A Cortex-M4F core that includes a single precision FPU</p>
</li>
<li>
<p>256 KiB of Flash located at address 0x0800_0000.</p>
</li>
<li>
<p>40 KiB of RAM located at address 0x2000_0000. (There's another RAM region but
for simplicity we'll ignore it).</p>
</li>
</ul>
<a class="header" href="#configuring" id="configuring"><h2>Configuring</h2></a>
<p>We'll start from scratch with a fresh template instance. Refer to the
<a href="start/qemu.html">previous section on QEMU</a> for a refresher on how to do this without
<code>cargo-generate</code>.</p>
<pre><code class="language-console">$ cargo generate --git https://github.com/rust-embedded/cortex-m-quickstart
 Project Name: app
 Creating project called `app`...
 Done! New project created /tmp/app

 $ cd app
</code></pre>
<p>Step number one is to set a default compilation target in <code>.cargo/config</code>.</p>
<pre><code class="language-console">$ tail -n5 .cargo/config
</code></pre>
<pre><code class="language-toml"># Pick ONE of these compilation targets
# target = &quot;thumbv6m-none-eabi&quot;    # Cortex-M0 and Cortex-M0+
# target = &quot;thumbv7m-none-eabi&quot;    # Cortex-M3
# target = &quot;thumbv7em-none-eabi&quot;   # Cortex-M4 and Cortex-M7 (no FPU)
target = &quot;thumbv7em-none-eabihf&quot; # Cortex-M4F and Cortex-M7F (with FPU)
</code></pre>
<p>We'll use <code>thumbv7em-none-eabihf</code> as that covers the Cortex-M4F core.</p>
<p>The second step is to enter the memory region information into the <code>memory.x</code>
file.</p>
<pre><code class="language-console">$ cat memory.x
/* Linker script for the STM32F303VCT6 */
MEMORY
{
  /* NOTE 1 K = 1 KiB = 1024 bytes */
  FLASH : ORIGIN = 0x08000000, LENGTH = 256K
  RAM : ORIGIN = 0x20000000, LENGTH = 40K
}
</code></pre>
<p>Make sure the <code>debug::exit()</code> call is commented out or removed, it is used
only for running in QEMU.</p>
<pre><code class="language-rust ignore">#[entry]
fn main() -&gt; ! {
    hprintln!(&quot;Hello, world!&quot;).unwrap();

    // exit QEMU
    // NOTE do not run this on hardware; it can corrupt OpenOCD state
    // debug::exit(debug::EXIT_SUCCESS);

    loop {}
}
</code></pre>
<p>You can now cross compile programs using <code>cargo build</code>
and inspect the binaries using <code>cargo-binutils</code> as you did before. The
<code>cortex-m-rt</code> crate handles all the magic required to get your chip running,
as, helpfully, pretty much all Cortex-M CPUs boot in the same fashion.</p>
<pre><code class="language-console">$ cargo build --example hello
</code></pre>
<a class="header" href="#debugging-1" id="debugging-1"><h2>Debugging</h2></a>
<p>Debugging will look a bit different. In fact, the first steps can look different
depending on the target device. In this section we'll show the steps required to
debug a program running on the STM32F3DISCOVERY. This is meant to serve as a
reference; for device specific information about debugging check out <a href="https://github.com/rust-embedded/debugonomicon">the
Debugonomicon</a>.</p>
<p>As before we'll do remote debugging and the client will be a GDB process. This
time, however, the server will be OpenOCD.</p>
<p>As done during the <a href="start/../intro/install/verify.html">verify</a> section connect the discovery board to your laptop /
PC and check that the ST-LINK header is populated.</p>
<p>On a terminal run <code>openocd</code> to connect to the ST-LINK on the discovery board.
Run this command from the root of the template; <code>openocd</code> will pick up the
<code>openocd.cfg</code> file which indicates which interface file and target file to use.</p>
<pre><code class="language-console">$ cat openocd.cfg
</code></pre>
<pre><code class="language-text"># Sample OpenOCD configuration for the STM32F3DISCOVERY development board

# Depending on the hardware revision you got you'll have to pick ONE of these
# interfaces. At any time only one interface should be commented out.

# Revision C (newer revision)
source [find interface/stlink-v2-1.cfg]

# Revision A and B (older revisions)
# source [find interface/stlink-v2.cfg]

source [find target/stm32f3x.cfg]
</code></pre>
<blockquote>
<p><strong>NOTE</strong> If you found out that you have an older revision of the discovery
board during the <a href="start/../intro/install/verify.html">verify</a> section then you should modify the <code>openocd.cfg</code>
file at this point to use <code>interface/stlink-v2.cfg</code>.</p>
</blockquote>
<pre><code class="language-console">$ openocd
Open On-Chip Debugger 0.10.0
Licensed under GNU GPL v2
For bug reports, read
        http://openocd.org/doc/doxygen/bugs.html
Info : auto-selecting first available session transport &quot;hla_swd&quot;. To override use 'transport select &lt;transport&gt;'.
adapter speed: 1000 kHz
adapter_nsrst_delay: 100
Info : The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD
none separate
Info : Unable to match requested speed 1000 kHz, using 950 kHz
Info : Unable to match requested speed 1000 kHz, using 950 kHz
Info : clock speed 950 kHz
Info : STLINK v2 JTAG v27 API v2 SWIM v15 VID 0x0483 PID 0x374B
Info : using stlink api v2
Info : Target voltage: 2.913879
Info : stm32f3x.cpu: hardware has 6 breakpoints, 4 watchpoints
</code></pre>
<p>On another terminal run GDB, also from the root of the template.</p>
<pre><code class="language-console">$ &lt;gdb&gt; -q target/thumbv7em-none-eabihf/debug/examples/hello
</code></pre>
<p>Next connect GDB to OpenOCD, which is waiting for a TCP connection on port 3333.</p>
<pre><code class="language-console">(gdb) target remote :3333
Remote debugging using :3333
0x00000000 in ?? ()
</code></pre>
<p>Now proceed to <em>flash</em> (load) the program onto the microcontroller using the
<code>load</code> command.</p>
<pre><code class="language-console">(gdb) load
Loading section .vector_table, size 0x400 lma 0x8000000
Loading section .text, size 0x1e70 lma 0x8000400
Loading section .rodata, size 0x61c lma 0x8002270
Start address 0x800144e, load size 10380
Transfer rate: 17 KB/sec, 3460 bytes/write.
</code></pre>
<p>The program is now loaded. This program uses semihosting so before we do any
semihosting call we have to tell OpenOCD to enable semihosting. You can send
commands to OpenOCD using the <code>monitor</code> command.</p>
<pre><code class="language-console">(gdb) monitor arm semihosting enable
semihosting is enabled
</code></pre>
<blockquote>
<p>You can see all the OpenOCD commands by invoking the <code>monitor help</code> command.</p>
</blockquote>
<p>Like before we can skip all the way to <code>main</code> using a breakpoint and the
<code>continue</code> command.</p>
<pre><code class="language-console">(gdb) break main
Breakpoint 1 at 0x8000d18: file examples/hello.rs, line 15.

(gdb) continue
Continuing.
Note: automatically using hardware breakpoints for read-only addresses.

Breakpoint 1, main () at examples/hello.rs:15
15          let mut stdout = hio::hstdout().unwrap();
</code></pre>
<p>Advancing the program with <code>next</code> should produce the same results as before.</p>
<pre><code class="language-console">(gdb) next
16          writeln!(stdout, &quot;Hello, world!&quot;).unwrap();

(gdb) next
19          debug::exit(debug::EXIT_SUCCESS);
</code></pre>
<p>At this point you should see &quot;Hello, world!&quot; printed on the OpenOCD console,
among other stuff.</p>
<pre><code class="language-console">$ openocd
(..)
Info : halted: PC: 0x08000e6c
Hello, world!
Info : halted: PC: 0x08000d62
Info : halted: PC: 0x08000d64
Info : halted: PC: 0x08000d66
Info : halted: PC: 0x08000d6a
Info : halted: PC: 0x08000a0c
Info : halted: PC: 0x08000d70
Info : halted: PC: 0x08000d72
</code></pre>
<p>Issuing another <code>next</code> will make the processor execute <code>debug::exit</code>. This acts
as a breakpoint and halts the process:</p>
<pre><code class="language-console">(gdb) next

Program received signal SIGTRAP, Trace/breakpoint trap.
0x0800141a in __syscall ()
</code></pre>
<p>It also causes this to be printed to the OpenOCD console:</p>
<pre><code class="language-console">$ openocd
(..)
Info : halted: PC: 0x08001188
semihosting: *** application exited ***
Warn : target not halted
Warn : target not halted
target halted due to breakpoint, current mode: Thread
xPSR: 0x21000000 pc: 0x08000d76 msp: 0x20009fc0, semihosting
</code></pre>
<p>However, the process running on the microcontroller has not terminated and you
can resume it using <code>continue</code> or a similar command.</p>
<p>You can now exit GDB using the <code>quit</code> command.</p>
<pre><code class="language-console">(gdb) quit
</code></pre>
<p>Debugging now requires a few more steps so we have packed all those steps into a
single GDB script named <code>openocd.gdb</code>.</p>
<pre><code class="language-console">$ cat openocd.gdb
</code></pre>
<pre><code class="language-text">target remote :3333

# print demangled symbols
set print asm-demangle on

# detect unhandled exceptions, hard faults and panics
break DefaultHandler
break HardFault
break rust_begin_unwind

monitor arm semihosting enable

load

# start the process but immediately halt the processor
stepi
</code></pre>
<p>Now running <code>&lt;gdb&gt; -x openocd.gdb $program</code> will immediately connect GDB to
OpenOCD, enable semihosting, load the program and start the process.</p>
<p>Alternatively, you can turn <code>&lt;gdb&gt; -x openocd.gdb</code> into a custom runner to make
<code>cargo run</code> build a program <em>and</em> start a GDB session. This runner is included
in <code>.cargo/config</code> but it's commented out.</p>
<pre><code class="language-console">$ head -n10 .cargo/config
</code></pre>
<pre><code class="language-toml">[target.thumbv7m-none-eabi]
# uncomment this to make `cargo run` execute programs on QEMU
# runner = &quot;qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb -nographic -semihosting-config enable=on,target=native -kernel&quot;

[target.'cfg(all(target_arch = &quot;arm&quot;, target_os = &quot;none&quot;))']
# uncomment ONE of these three options to make `cargo run` start a GDB session
# which option to pick depends on your system
runner = &quot;arm-none-eabi-gdb -x openocd.gdb&quot;
# runner = &quot;gdb-multiarch -x openocd.gdb&quot;
# runner = &quot;gdb -x openocd.gdb&quot;
</code></pre>
<pre><code class="language-console">$ cargo run --example hello
(..)
Loading section .vector_table, size 0x400 lma 0x8000000
Loading section .text, size 0x1e70 lma 0x8000400
Loading section .rodata, size 0x61c lma 0x8002270
Start address 0x800144e, load size 10380
Transfer rate: 17 KB/sec, 3460 bytes/write.
(gdb)
</code></pre>
<a class="header" href="#memory-mapped-registers" id="memory-mapped-registers"><h1>Memory Mapped Registers</h1></a>
<p>Embedded systems can only get so far by executing normal Rust code and moving data around in RAM. If we want to get any information into or out of our system (be that blinking an LED, detecting a button press or communicating with an off-chip peripheral on some sort of bus) we're going to have to dip into the world of Peripherals and their 'memory mapped registers'.</p>
<p>You may well find that the code you need to access the peripherals in your micro-controller has already been written, at one of the following levels:</p>
<ul>
<li>Micro-architecture Crate - This sort of crate handles any useful routines common to the processor core your microcontroller is using, as well as any peripherals that are common to all micro-controllers that use that particular type of processor core. For example the <a href="https://crates.io/crates/cortex-m">cortex-m</a> crate gives you functions to enable and disable interrupts, which are the same for all Cortex-M based micro-controllers. It also gives you access to the 'SysTick' peripheral included with all Cortex-M based micro-controllers.</li>
<li>Peripheral Access Crate (PAC) - This sort of crate is a thin wrapper over the various memory-mapped registers defined for the particular part-number of micro-controller you are using. For example, <a href="https://crates.io/crates/tm4c123x">tm4c123x</a> for the Texas Instruments Tiva-C TM4C123 series, or <a href="https://crates.io/crates/stm32f30x">stm32f30x</a> for the ST-Micro STM32F30x series. Here, you'll be interacting with the registers directly, following each peripheral's operating instructions given in your micro-controller's Technical Reference Manual.</li>
<li>HAL Crate - These crates offer a more user-friendly API for your particular processor, often by implementing some common traits defined in <a href="https://crates.io/crates/embedded-hal">embedded-hal</a>. For example, this crate might offer a <code>Serial</code> struct, with a constructor that takes an appropriate set of GPIO pins and a baud rate, and offers some sort of <code>write_byte</code> function for sending data. See the chapter on <a href="start/../portability/index.html">Portability</a> for more information on <a href="https://crates.io/crates/embedded-hal">embedded-hal</a>.</li>
<li>Board Crate - These crates go one step further than a HAL Crate by pre-configuring various peripherals and GPIO pins to suit the specific developer kit or board you are using, such as <a href="https://crates.io/crates/f3">F3</a> for the STM32F3DISCOVERY board.</li>
</ul>
<a class="header" href="#starting-at-the-bottom" id="starting-at-the-bottom"><h2>Starting at the bottom</h2></a>
<p>Let's look at the SysTick peripheral that's common to all Cortex-M based micro-controllers. We can find a pretty low-level API in the <a href="https://crates.io/crates/cortex-m">cortex-m</a> crate, and we can use it like this:</p>
<pre><code class="language-rust ignore">use cortex_m::peripheral::{syst, Peripherals};
use cortex_m_rt::entry;

#[entry]
fn main() -&gt; ! {
    let mut peripherals = Peripherals::take().unwrap();
    let mut systick = peripherals.SYST;
    systick.set_clock_source(syst::SystClkSource::Core);
    systick.set_reload(1_000);
    systick.clear_current();
    systick.enable_counter();
    while !systick.has_wrapped() {
        // Loop
    }

    loop {}
}
</code></pre>
<p>The functions on the <code>SYST</code> struct map pretty closely to the functionality defined by the ARM Technical Reference Manual for this peripheral. There's nothing in this API about 'delaying for X milliseconds' - we have to crudely implement that ourselves using a <code>while</code> loop. Note that we can't access our <code>SYST</code> struct until we have called <code>Peripherals::take()</code> - this is a special routine that guarantees that there is only one <code>SYST</code> structure in our entire program. For more on that, see the <a href="start/../peripherals/index.html">Peripherals</a> section.</p>
<a class="header" href="#using-a-peripheral-access-crate-pac" id="using-a-peripheral-access-crate-pac"><h2>Using a Peripheral Access Crate (PAC)</h2></a>
<p>We won't get very far with our embedded software development if we restrict ourselves to only the basic peripherals included with every Cortex-M. At some point, we're going to need to write some code that's specific to the particular micro-controller we're using. In this example, let's assume we have a Texas Instruments TM4C123 - a middling 80MHz Cortex-M4 with 256 KiB of Flash. We're going to pull in the <a href="https://crates.io/crates/tm4c123x">tm4c123x</a> crate to make use of this chip.</p>
<pre><code class="language-rust ignore">#![no_std]
#![no_main]

extern crate panic_halt; // panic handler

use cortex_m_rt::entry;
use tm4c123x;

#[entry]
pub fn init() -&gt; (Delay, Leds) {
    let cp = cortex_m::Peripherals::take().unwrap();
    let p = tm4c123x::Peripherals::take().unwrap();

    let pwm = p.PWM0;
    pwm.ctl.write(|w| w.globalsync0().clear_bit());
    // Mode = 1 =&gt; Count up/down mode
    pwm._2_ctl.write(|w| w.enable().set_bit().mode().set_bit());
    pwm._2_gena.write(|w| w.actcmpau().zero().actcmpad().one());
    // 528 cycles (264 up and down) = 4 loops per video line (2112 cycles)
    pwm._2_load.write(|w| unsafe { w.load().bits(263) });
    pwm._2_cmpa.write(|w| unsafe { w.compa().bits(64) });
    pwm.enable.write(|w| w.pwm4en().set_bit());
}

</code></pre>
<p>We've accessed the <code>PWM0</code> peripheral in exactly the same way as we accessed the <code>SYST</code> peripheral earlier, except we called <code>tm4c123x::Peripherals::take()</code>. As this crate was auto-generated using <a href="https://crates.io/crates/svd2rust">svd2rust</a>, the access functions for our register fields take a closure, rather than a numeric argument. While this looks like a lot of code, the Rust compiler can use it to perform a bunch of checks for us, but then generate machine-code which is pretty close to hand-written assembler! Where the auto-generated code isn't able to determine that all possible arguments to a particular accessor function are valid (for example, if the SVD defines the register as 32-bit but doesn't say if some of those 32-bit values have a special meaning), then the function is marked as <code>unsafe</code>. We can see this in the example above when setting the <code>load</code> and <code>compa</code> sub-fields using the <code>bits()</code> function.</p>
<a class="header" href="#reading" id="reading"><h3>Reading</h3></a>
<p>The <code>read()</code> function returns an object which gives read-only access to the various sub-fields within this register, as defined by the manufacturer's SVD file for this chip. You can find all the functions available on special <code>R</code> return type for this particular register, in this particular peripheral, on this particular chip, in the <a href="https://docs.rs/tm4c123x/0.7.0/tm4c123x/pwm0/ctl/struct.R.html">tm4c123x documentation</a>.</p>
<pre><code class="language-rust ignore">if pwm.ctl.read().globalsync0().is_set() {
    // Do a thing
}
</code></pre>
<a class="header" href="#writing" id="writing"><h3>Writing</h3></a>
<p>The <code>write()</code> function takes a closure with a single argument. Typically we call this <code>w</code>. This argument then gives read-write access to the various sub-fields within this register, as defined by the manufacturer's SVD file for this chip. Again, you can find all the functions available on the 'w' for this particular register, in this particular peripheral, on this particular chip, in the <a href="https://docs.rs/tm4c123x/0.7.0/tm4c123x/pwm0/ctl/struct.W.html">tm4c123x documentation</a>. Note that all of the sub-fields that we do not set will be set to a default value for us - any existing content in the register will be lost.</p>
<pre><code class="language-rust ignore">pwm.ctl.write(|w| w.globalsync0().clear_bit());
</code></pre>
<a class="header" href="#modifying" id="modifying"><h3>Modifying</h3></a>
<p>If we wish to change only one particular sub-field in this register and leave the other sub-fields unchanged, we can use the <code>modify</code> function. This function takes a closure with two arguments - one for reading and one for writing. Typically we call these <code>r</code> and <code>w</code> respectively. The <code>r</code> argument can be used to inspect the current contents of the register, and the <code>w</code> argument can be used to modify the register contents.</p>
<pre><code class="language-rust ignore">pwm.ctl.modify(|r, w| w.globalsync0().clear_bit());
</code></pre>
<p>The <code>modify</code> function really shows the power of closures here. In C, we'd have to read into some temporary value, modify the correct bits and then write the value back. This means there's considerable scope for error:</p>
<pre><code class="language-C">uint32_t temp = pwm0.ctl.read();
temp |= PWM0_CTL_GLOBALSYNC0;
pwm0.ctl.write(temp);
uint32_t temp2 = pwm0.enable.read();
temp2 |= PWM0_ENABLE_PWM4EN;
pwm0.enable.write(temp); // Uh oh! Wrong variable!
</code></pre>
<a class="header" href="#using-a-hal-crate" id="using-a-hal-crate"><h2>Using a HAL crate</h2></a>
<p>The HAL crate for a chip typically works by implementing a custom Trait for the raw structures exposed by the PAC. Often this trait will define a function called <code>constrain()</code> for single peripherals or <code>split()</code> for things like GPIO ports with multiple pins. This function will consume the underlying raw peripheral structure and return a new object with a higher-level API. This API may also do things like have the Serial port <code>new</code> function require a borrow on some <code>Clock</code> structure, which can only be generated by calling the function which configures the PLLs and sets up all the clock frequencies. In this way, it is statically impossible to create a Serial port object without first having configured the clock rates, or for the Serial port object to mis-convert the baud rate into clock ticks. Some crates even define special traits for the states each GPIO pin can be in, requiring the user to put a pin into the correct state (say, by selecting the appropriate Alternate Function Mode) before passing the pin into Peripheral. All with no run-time cost!</p>
<p>Let's see an example:</p>
<pre><code class="language-rust ignore">#![no_std]
#![no_main]

extern crate panic_halt; // panic handler

use cortex_m_rt::entry;
use tm4c123x_hal as hal;
use tm4c123x_hal::prelude::*;
use tm4c123x_hal::serial::{NewlineMode, Serial};
use tm4c123x_hal::sysctl;

#[entry]
fn main() -&gt; ! {
    let p = hal::Peripherals::take().unwrap();
    let cp = hal::CorePeripherals::take().unwrap();

    // Wrap up the SYSCTL struct into an object with a higher-layer API
    let mut sc = p.SYSCTL.constrain();
    // Pick our oscillation settings
    sc.clock_setup.oscillator = sysctl::Oscillator::Main(
        sysctl::CrystalFrequency::_16mhz,
        sysctl::SystemClock::UsePll(sysctl::PllOutputFrequency::_80_00mhz),
    );
    // Configure the PLL with those settings
    let clocks = sc.clock_setup.freeze();

    // Wrap up the GPIO_PORTA struct into an object with a higher-layer API.
    // Note it needs to borrow `sc.power_control` so it can power up the GPIO
    // peripheral automatically.
    let mut porta = p.GPIO_PORTA.split(&amp;sc.power_control);

    // Activate the UART.
    let uart = Serial::uart0(
        p.UART0,
        // The transmit pin
        porta
            .pa1
            .into_af_push_pull::&lt;hal::gpio::AF1&gt;(&amp;mut porta.control),
        // The receive pin
        porta
            .pa0
            .into_af_push_pull::&lt;hal::gpio::AF1&gt;(&amp;mut porta.control),
        // No RTS or CTS required
        (),
        (),
        // The baud rate
        115200_u32.bps(),
        // Output handling
        NewlineMode::SwapLFtoCRLF,
        // We need the clock rates to calculate the baud rate divisors
        &amp;clocks,
        // We need this to power up the UART peripheral
        &amp;sc.power_control,
    );

    loop {
        writeln!(uart, &quot;Hello, World!\r\n&quot;).unwrap();
    }
}
</code></pre>
<a class="header" href="#semihosting" id="semihosting"><h1>Semihosting</h1></a>
<p>Semihosting is a mechanism that lets embedded devices do I/O on the host and is
mainly used to log messages to the host console. Semihosting requires a debug
session and pretty much nothing else (no extra wires!) so it's super convenient
to use. The downside is that it's super slow: each write operation can take
several milliseconds depending on the hardware debugger (e.g. ST-Link) you use.</p>
<p>The <a href="https://crates.io/crates/cortex-m-semihosting"><code>cortex-m-semihosting</code></a> crate provides an API to do semihosting operations
on Cortex-M devices. The program below is the semihosting version of &quot;Hello,
world!&quot;:</p>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

extern crate panic_halt;

use cortex_m_rt::entry;
use cortex_m_semihosting::hprintln;

#[entry]
fn main() -&gt; ! {
    hprintln!(&quot;Hello, world!&quot;).unwrap();

    loop {}
}
</code></pre>
<p>If you run this program on hardware you'll see the &quot;Hello, world!&quot; message
within the OpenOCD logs.</p>
<pre><code class="language-console">$ openocd
(..)
Hello, world!
(..)
</code></pre>
<p>You do need to enable semihosting in OpenOCD from GDB first:</p>
<pre><code class="language-console">(gdb) monitor arm semihosting enable
semihosting is enabled
</code></pre>
<p>QEMU understands semihosting operations so the above program will also work with
<code>qemu-system-arm</code> without having to start a debug session. Note that you'll
need to pass the <code>-semihosting-config</code> flag to QEMU to enable semihosting
support; these flags are already included in the <code>.cargo/config</code> file of the
template.</p>
<pre><code class="language-console">$ # this program will block the terminal
$ cargo run
     Running `qemu-system-arm (..)
Hello, world!
</code></pre>
<p>There's also an <code>exit</code> semihosting operation that can be used to terminate the
QEMU process. Important: do <strong>not</strong> use <code>debug::exit</code> on hardware; this function
can corrupt your OpenOCD session and you will not be able to debug more programs
until you restart it.</p>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

extern crate panic_halt;

use cortex_m_rt::entry;
use cortex_m_semihosting::debug;

#[entry]
fn main() -&gt; ! {
    let roses = &quot;blue&quot;;

    if roses == &quot;red&quot; {
        debug::exit(debug::EXIT_SUCCESS);
    } else {
        debug::exit(debug::EXIT_FAILURE);
    }

    loop {}
}
</code></pre>
<pre><code class="language-console">$ cargo run
     Running `qemu-system-arm (..)

$ echo $?
1
</code></pre>
<p>One last tip: you can set the panicking behavior to <code>exit(EXIT_FAILURE)</code>. This
will let you write <code>no_std</code> run-pass tests that you can run on QEMU.</p>
<p>For convenience, the <code>panic-semihosting</code> crate has an &quot;exit&quot; feature that when
enabled invokes <code>exit(EXIT_FAILURE)</code> after logging the panic message to the host
stderr.</p>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

extern crate panic_semihosting; // features = [&quot;exit&quot;]

use cortex_m_rt::entry;
use cortex_m_semihosting::debug;

#[entry]
fn main() -&gt; ! {
    let roses = &quot;blue&quot;;

    assert_eq!(roses, &quot;red&quot;);

    loop {}
}
</code></pre>
<pre><code class="language-console">$ cargo run
     Running `qemu-system-arm (..)
panicked at 'assertion failed: `(left == right)`
  left: `&quot;blue&quot;`,
 right: `&quot;red&quot;`', examples/hello.rs:15:5

$ echo $?
1
</code></pre>
<a class="header" href="#panicking" id="panicking"><h1>Panicking</h1></a>
<p>Panicking is a core part of the Rust language. Built-in operations like indexing
are runtime checked for memory safety. When out of bounds indexing is attempted
this results in a panic.</p>
<p>In the standard library panicking has a defined behavior: it unwinds the stack
of the panicking thread, unless the user opted for aborting the program on
panics.</p>
<p>In programs without standard library, however, the panicking behavior is left
undefined. A behavior can be chosen by declaring a <code>#[panic_handler]</code> function.
This function must appear exactly <em>once</em> in the dependency graph of a program,
and must have the following signature: <code>fn(&amp;PanicInfo) -&gt; !</code>, where <a href="https://doc.rust-lang.org/core/panic/struct.PanicInfo.html"><code>PanicInfo</code></a>
is a struct containing information about the location of the panic.</p>
<p>Given that embedded systems range from user facing to safety critical (cannot
crash) there's no one size fits all panicking behavior but there are plenty of
commonly used behaviors. These common behaviors have been packaged into crates
that define the <code>#[panic_handler]</code> function. Some examples include:</p>
<ul>
<li><a href="https://crates.io/crates/panic-abort"><code>panic-abort</code></a>. A panic causes the abort instruction to be executed.</li>
<li><a href="https://crates.io/crates/panic-halt"><code>panic-halt</code></a>. A panic causes the program, or the current thread, to halt by
entering an infinite loop.</li>
<li><a href="https://crates.io/crates/panic-itm"><code>panic-itm</code></a>. The panicking message is logged using the ITM, an ARM Cortex-M
specific peripheral.</li>
<li><a href="https://crates.io/crates/panic-semihosting"><code>panic-semihosting</code></a>. The panicking message is logged to the host using the
semihosting technique.</li>
</ul>
<p>You may be able to find even more crates searching for the <a href="https://crates.io/keywords/panic-handler"><code>panic-handler</code></a>
keyword on crates.io.</p>
<p>A program can pick one of these behaviors simply by linking to the corresponding
crate. The fact that the panicking behavior is expressed in the source of
an application as a single line of code is not only useful as documentation but
can also be used to change the panicking behavior according to the compilation
profile. For example:</p>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

// dev profile: easier to debug panics; can put a breakpoint on `rust_begin_unwind`
#[cfg(debug_assertions)]
extern crate panic_halt;

// release profile: minimize the binary size of the application
#[cfg(not(debug_assertions))]
extern crate panic_abort;

// ..
</code></pre>
<p>In this example the crate links to the <code>panic-halt</code> crate when built with the
dev profile (<code>cargo build</code>), but links to the <code>panic-abort</code> crate when built
with the release profile (<code>cargo build --release</code>).</p>
<a class="header" href="#an-example" id="an-example"><h2>An example</h2></a>
<p>Here's an example that tries to index an array beyond its length. The operation
results in a panic.</p>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

extern crate panic_semihosting;

use cortex_m_rt::entry;

#[entry]
fn main() -&gt; ! {
    let xs = [0, 1, 2];
    let i = xs.len() + 1;
    let _y = xs[i]; // out of bounds access

    loop {}
}
</code></pre>
<p>This example chose the <code>panic-semihosting</code> behavior which prints the panic
message to the host console using semihosting.</p>
<pre><code class="language-console">$ cargo run
     Running `qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb (..)
panicked at 'index out of bounds: the len is 3 but the index is 4', src/main.rs:12:13
</code></pre>
<p>You can try changing the behavior to <code>panic-halt</code> and confirm that no message is
printed in that case.</p>
<a class="header" href="#exceptions" id="exceptions"><h1>Exceptions</h1></a>
<p>Exceptions, and interrupts, are a hardware mechanism by which the processor
handles asynchronous events and fatal errors (e.g. executing an invalid
instruction). Exceptions imply preemption and involve exception handlers,
subroutines executed in response to the signal that triggered the event.</p>
<p>The <code>cortex-m-rt</code> crate provides an <a href="https://docs.rs/cortex-m-rt-macros/latest/cortex_m_rt_macros/attr.exception.html"><code>exception</code></a> attribute to declare exception
handlers.</p>
<pre><code class="language-rust ignore">// Exception handler for the SysTick (System Timer) exception
#[exception]
fn SysTick() {
    // ..
}
</code></pre>
<p>Other than the <code>exception</code> attribute exception handlers look like plain
functions but there's one more difference: <code>exception</code> handlers can <em>not</em> be
called by software. Following the previous example, the statement <code>SysTick();</code>
would result in a compilation error.</p>
<p>This behavior is pretty much intended and it's required to provide a feature:
<code>static mut</code> variables declared <em>inside</em> <code>exception</code> handlers are <em>safe</em> to use.</p>
<pre><code class="language-rust ignore">#[exception]
fn SysTick() {
    static mut COUNT: u32 = 0;

    // `COUNT` has type `&amp;mut u32` and it's safe to use
    *COUNT += 1;
}
</code></pre>
<p>As you may know, using <code>static mut</code> variables in a function makes it
<a href="https://en.wikipedia.org/wiki/Reentrancy_(computing)"><em>non-reentrant</em></a>. It's undefined behavior to call a non-reentrant function,
directly or indirectly, from more than one exception / interrupt handler or from
<code>main</code> and one or more exception / interrupt handlers.</p>
<p>Safe Rust must never result in undefined behavior so non-reentrant functions
must be marked as <code>unsafe</code>. Yet I just told you that <code>exception</code> handlers can safely
use <code>static mut</code> variables. How is this possible? This is possible because
<code>exception</code> handlers can <em>not</em> be called by software thus reentrancy is not
possible.</p>
<a class="header" href="#a-complete-example" id="a-complete-example"><h2>A complete example</h2></a>
<p>Here's an example that uses the system timer to raise a <code>SysTick</code> exception
roughly every second. The <code>SysTick</code> exception handler keeps track of how many
times it has been called in the <code>COUNT</code> variable and then prints the value of
<code>COUNT</code> to the host console using semihosting.</p>
<blockquote>
<p><strong>NOTE</strong>: You can run this example on any Cortex-M device; you can also run it
on QEMU</p>
</blockquote>
<pre><code class="language-rust ignore">#![deny(unsafe_code)]
#![no_main]
#![no_std]

extern crate panic_halt;

use core::fmt::Write;

use cortex_m::peripheral::syst::SystClkSource;
use cortex_m_rt::{entry, exception};
use cortex_m_semihosting::{
    debug,
    hio::{self, HStdout},
};

#[entry]
fn main() -&gt; ! {
    let p = cortex_m::Peripherals::take().unwrap();
    let mut syst = p.SYST;

    // configures the system timer to trigger a SysTick exception every second
    syst.set_clock_source(SystClkSource::Core);
    // this is configured for the LM3S6965 which has a default CPU clock of 12 MHz
    syst.set_reload(12_000_000);
    syst.enable_counter();
    syst.enable_interrupt();

    loop {}
}

#[exception]
fn SysTick() {
    static mut COUNT: u32 = 0;
    static mut STDOUT: Option&lt;HStdout&gt; = None;

    *COUNT += 1;

    // Lazy initialization
    if STDOUT.is_none() {
        *STDOUT = hio::hstdout().ok();
    }

    if let Some(hstdout) = STDOUT.as_mut() {
        write!(hstdout, &quot;{}&quot;, *COUNT).ok();
    }

    // IMPORTANT omit this `if` block if running on real hardware or your
    // debugger will end in an inconsistent state
    if *COUNT == 9 {
        // This will terminate the QEMU process
        debug::exit(debug::EXIT_SUCCESS);
    }
}
</code></pre>
<pre><code class="language-console">$ tail -n5 Cargo.toml
</code></pre>
<pre><code class="language-toml">[dependencies]
cortex-m = &quot;0.5.7&quot;
cortex-m-rt = &quot;0.6.3&quot;
panic-halt = &quot;0.2.0&quot;
cortex-m-semihosting = &quot;0.3.1&quot;
</code></pre>
<pre><code class="language-console">$ cargo run --release
     Running `qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb (..)
123456789
</code></pre>
<p>If you run this on the Discovery board you'll see the output on the OpenOCD
console. Also, the program will <em>not</em> stop when the count reaches 9.</p>
<a class="header" href="#the-default-exception-handler" id="the-default-exception-handler"><h2>The default exception handler</h2></a>
<p>What the <code>exception</code> attribute actually does is <em>override</em> the default exception
handler for a specific exception. If you don't override the handler for a
particular exception it will be handled by the <code>DefaultHandler</code> function, which
defaults to:</p>
<pre><code class="language-rust ignore">fn DefaultHandler() {
    loop {}
}
</code></pre>
<p>This function is provided by the <code>cortex-m-rt</code> crate and marked as
<code>#[no_mangle]</code> so you can put a breakpoint on &quot;DefaultHandler&quot; and catch
<em>unhandled</em> exceptions.</p>
<p>It's possible to override this <code>DefaultHandler</code> using the <code>exception</code> attribute:</p>
<pre><code class="language-rust ignore">#[exception]
fn DefaultHandler(irqn: i16) {
    // custom default handler
}
</code></pre>
<p>The <code>irqn</code> argument indicates which exception is being serviced. A negative
value indicates that a Cortex-M exception is being serviced; and zero or a
positive value indicates that a device specific exception, AKA interrupt, is
being serviced.</p>
<a class="header" href="#the-hard-fault-handler" id="the-hard-fault-handler"><h2>The hard fault handler</h2></a>
<p>The <code>HardFault</code> exception is a bit special. This exception is fired when the
program enters an invalid state so its handler can <em>not</em> return as that could
result in undefined behavior. Also, the runtime crate does a bit of work before
the user defined <code>HardFault</code> handler is invoked to improve debuggability.</p>
<p>The result is that the <code>HardFault</code> handler must have the following signature:
<code>fn(&amp;ExceptionFrame) -&gt; !</code>. The argument of the handler is a pointer to
registers that were pushed into the stack by the exception. These registers are
a snapshot of the processor state at the moment the exception was triggered and
are useful to diagnose a hard fault.</p>
<p>Here's an example that performs an illegal operation: a read to a nonexistent
memory location.</p>
<blockquote>
<p><strong>NOTE</strong>: This program won't work, i.e. it won't crash, on QEMU because
<code>qemu-system-arm -machine lm3s6965evb</code> doesn't check memory loads and will
happily return <code>0</code> on reads to invalid memory.</p>
</blockquote>
<pre><code class="language-rust ignore">#![no_main]
#![no_std]

extern crate panic_halt;

use core::fmt::Write;
use core::ptr;

use cortex_m_rt::{entry, exception, ExceptionFrame};
use cortex_m_semihosting::hio;

#[entry]
fn main() -&gt; ! {
    // read a nonexistent memory location
    unsafe {
        ptr::read_volatile(0x3FFF_FFFE as *const u32);
    }

    loop {}
}

#[exception]
fn HardFault(ef: &amp;ExceptionFrame) -&gt; ! {
    if let Ok(mut hstdout) = hio::hstdout() {
        writeln!(hstdout, &quot;{:#?}&quot;, ef).ok();
    }

    loop {}
}
</code></pre>
<p>The <code>HardFault</code> handler prints the <code>ExceptionFrame</code> value. If you run this
you'll see something like this on the OpenOCD console.</p>
<pre><code class="language-console">$ openocd
(..)
ExceptionFrame {
    r0: 0x3ffffffe,
    r1: 0x00f00000,
    r2: 0x20000000,
    r3: 0x00000000,
    r12: 0x00000000,
    lr: 0x080008f7,
    pc: 0x0800094a,
    xpsr: 0x61000000
}
</code></pre>
<p>The <code>pc</code> value is the value of the Program Counter at the time of the exception
and it points to the instruction that triggered the exception.</p>
<p>If you look at the disassembly of the program:</p>
<pre><code class="language-console">$ cargo objdump --bin app --release -- -d -no-show-raw-insn -print-imm-hex
(..)
ResetTrampoline:
 8000942:       movw    r0, #0xfffe
 8000946:       movt    r0, #0x3fff
 800094a:       ldr     r0, [r0]
 800094c:       b       #-0x4 &lt;ResetTrampoline+0xa&gt;
</code></pre>
<p>You can look up the value of the program counter <code>0x0800094a</code> in the disassembly.
You'll see that a load operation (<code>ldr r0, [r0]</code> ) caused the exception.
The <code>r0</code> field of <code>ExceptionFrame</code> will tell you the value of register <code>r0</code>
was <code>0x3fff_fffe</code> at that time.</p>
<a class="header" href="#interrupts" id="interrupts"><h1>Interrupts</h1></a>
<p>Interrupts differ from exceptions in a variety of ways but their operation and
use is largely similar and they are also handled by the same interrupt
controller. Whereas exceptions are defined by the Cortex-M architecture,
interrupts are always vendor (and often even chip) specific implementations,
both in naming and functionality.</p>
<p>Interrupts do allow for a lot of flexibility which needs to be accounted for
when attempting to use them in an advanced way. We will not cover those uses in
this book, however it is a good idea to keep the following in mind:</p>
<ul>
<li>Interrupts have programmable priorities which determine their handlers' execution order</li>
<li>Interrupts can nest and preempt, i.e. execution of an interrupt handler might be interrupted by another higher-priority interrupt</li>
<li>In general the reason causing the interrupt to trigger needs to be cleared to prevent re-entering the interrupt handler endlessly</li>
</ul>
<p>The general initialization steps at runtime are always the same:</p>
<ul>
<li>Setup the peripheral(s) to generate interrupts requests at the desired occasions</li>
<li>Set the desired priority of the interrupt handler in the interrupt controller</li>
<li>Enable the interrupt handler in the interrupt controller</li>
</ul>
<p>Similarly to exceptions, the <code>cortex-m-rt</code> crate provides an <a href="https://docs.rs/cortex-m-rt-macros/0.1.5/cortex_m_rt_macros/attr.interrupt.html"><code>interrupt</code></a>
attribute to declare interrupt handlers. The available interrupts (and
their position in the interrupt handler table) are usually automatically
generated via <code>svd2rust</code> from a SVD description.</p>
<pre><code class="language-rust ignore">// Interrupt handler for the Timer2 interrupt
#[interrupt]
fn TIM2() {
    // ..
    // Clear reason for the generated interrupt request
}
</code></pre>
<p>Interrupt handlers look like plain functions (except for the lack of arguments)
similar to exception handlers. However they can not be called directly by other
parts of the firmware due to the special calling conventions. It is however
possible to generate interrupt requests in software to trigger a diversion
to the interrupt handler.</p>
<p>Similar to exception handlers it is also possible to declare <code>static mut</code>
variables inside the interrupt handlers for <em>safe</em> state keeping.</p>
<pre><code class="language-rust ignore">#[interrupt]
fn TIM2() {
    static mut COUNT: u32 = 0;

    // `COUNT` has type `&amp;mut u32` and it's safe to use
    *COUNT += 1;
}
</code></pre>
<p>For a more detailed description about the mechanisms demonstrated here please
refer to the <a href="start/./exceptions.html">exceptions section</a>.</p>
<a class="header" href="#io" id="io"><h1>IO</h1></a>
<blockquote>
<p><strong>TODO</strong> Cover memory mapped I/O using registers.</p>
</blockquote>
<a class="header" href="#peripherals" id="peripherals"><h1>Peripherals</h1></a>
<a class="header" href="#what-are-peripherals" id="what-are-peripherals"><h2>What are Peripherals?</h2></a>
<p>Most Microcontrollers have more than just a CPU, RAM, or Flash Memory - they contain sections of silicon which are used for interacting with systems outside of the microcontroller, as well as directly and indirectly interacting with their surroundings in the world via sensors, motor controllers, or human interfaces such as a display or keyboard. These components are collectively known as Peripherals.</p>
<p>These peripherals are useful because they allow a developer to offload processing to them, avoiding having to handle everything in software. Similar to how a desktop developer would offload graphics processing to a video card, embedded developers can offload some tasks to peripherals allowing the CPU to spend its time doing something else important, or doing nothing in order to save power.</p>
<p>If you look at the main circuit board in an old-fashioned home computer from the 1970s or 1980s (and actually, the desktop PCs of yesterday are not so far removed from the embedded systems of today) you would expect to see:</p>
<ul>
<li>A processor</li>
<li>A RAM chip</li>
<li>A ROM chip</li>
<li>An I/O controller</li>
</ul>
<p>The RAM chip, ROM chip and I/O controller (the peripheral in this system) would be joined to the processor through a series of parallel traces known as a 'bus'. This bus carries address information, which selects which device on the bus the processor wishes to communicate with, and a data bus which carries the actual data. In our embedded microcontrollers, the same principles apply - it's just that everything is packed on to a single piece of silicon.</p>
<p>However, unlike graphics cards, which typically have a Software API like Vulkan, Metal, or OpenGL, peripherals are exposed to our Microcontroller with a hardware interface, which is mapped to a chunk of the memory.</p>
<a class="header" href="#linear-and-real-memory-space" id="linear-and-real-memory-space"><h2>Linear and Real Memory Space</h2></a>
<p>On a microcontroller, writing some data to some other arbitrary address, such as <code>0x4000_0000</code> or <code>0x0000_0000</code>, may also be a completely valid action.</p>
<p>On a desktop system, access to memory is tightly controlled by the MMU, or Memory Management Unit. This component has two major responsibilities: enforcing access permission to sections of memory (preventing one process from reading or modifying the memory of another process); and re-mapping segments of the physical memory to virtual memory ranges used in software. Microcontrollers do not typically have an MMU, and instead only use real physical addresses in software.</p>
<p>Although 32 bit microcontrollers have a real and linear address space from <code>0x0000_0000</code> to <code>0xFFFF_FFFF</code>, they generally only use a few hundred kilobytes of that range for actual memory. This leaves a significant amount of address space remaining. In earlier chapters, we were talking about RAM being located at address <code>0x2000_0000</code>. If our RAM was 64 KiB long (i.e. with a maximum address of 0xFFFF) then addresses <code>0x2000_0000</code> to <code>0x2000_FFFF</code> would correspond to our RAM. When we write to a variable which lives at address <code>0x2000_1234</code>, what happens internally is that some logic detects the upper portion of the address (0x2000 in this example) and then activates the RAM so that it can act upon the lower portion of the address (0x1234 in this case). On a Cortex-M we also have our Flash ROM mapped in at address <code>0x0000_0000</code> up to, say, address <code>0x0007_FFFF</code> (if we have a 512 KiB Flash ROM). Rather than ignore all remaining space between these two regions, Microcontroller designers instead mapped the interface for peripherals in certain memory locations. This ends up looking something like this:</p>
<p><img src="../assets/nrf52-memory-map.png" alt="" /></p>
<p><a href="http://infocenter.nordicsemi.com/pdf/nRF52832_PS_v1.1.pdf">Nordic nRF52832 Datasheet (pdf)</a></p>
<a class="header" href="#memory-mapped-peripherals" id="memory-mapped-peripherals"><h2>Memory Mapped Peripherals</h2></a>
<p>Interaction with these peripherals is simple at a first glance - write the right data to the correct address. For example, sending a 32 bit word over a serial port could be as direct as writing that 32 bit word to a certain memory address. The Serial Port Peripheral would then take over and send out the data automatically.</p>
<p>Configuration of these peripherals works similarly. Instead of calling a function to configure a peripheral, a chunk of memory is exposed which serves as the hardware API. Write <code>0x8000_0000</code> to a SPI Frequency Configuration Register, and the SPI port will send data at 8 Megabits per second. Write <code>0x0200_0000</code> to the same address, and the SPI port will send data at 125 Kilobits per second. These configuration registers look a little bit like this:</p>
<p><img src="../assets/nrf52-spi-frequency-register.png" alt="" /></p>
<p><a href="http://infocenter.nordicsemi.com/pdf/nRF52832_PS_v1.1.pdf">Nordic nRF52832 Datasheet (pdf)</a></p>
<p>This interface is how interactions with the hardware are made, no matter what language is used, whether that language is Assembly, C, or Rust.</p>
<a class="header" href="#a-first-attempt" id="a-first-attempt"><h1>A First Attempt</h1></a>
<a class="header" href="#the-registers" id="the-registers"><h2>The Registers</h2></a>
<p>Let's look at the 'SysTick' peripheral - a simple timer which comes with every Cortex-M processor core. Typically you'll be looking these up in the chip manufacturer's data sheet or <em>Technical Reference Manual</em>, but this example is common to all ARM Cortex-M cores, let's look in the <a href="http://infocenter.arm.com/help/topic/com.arm.doc.dui0553a/Babieigh.html">ARM reference manual</a>. We see there are four registers:</p>
<table><thead><tr><th> Offset </th><th> Name        </th><th> Description                 </th><th> Width  </th></tr></thead><tbody>
<tr><td> 0x00   </td><td> SYST_CSR    </td><td> Control and Status Register </td><td> 32 bits</td></tr>
<tr><td> 0x04   </td><td> SYST_RVR    </td><td> Reload Value Register       </td><td> 32 bits</td></tr>
<tr><td> 0x08   </td><td> SYST_CVR    </td><td> Current Value Register      </td><td> 32 bits</td></tr>
<tr><td> 0x0C   </td><td> SYST_CALIB  </td><td> Calibration Value Register  </td><td> 32 bits</td></tr>
</tbody></table>
<a class="header" href="#the-c-approach" id="the-c-approach"><h2>The C Approach</h2></a>
<p>In Rust, we can represent a collection of registers in exactly the same way as we do in C - with a <code>struct</code>.</p>
<pre><code class="language-rust ignore">#[repr(C)]
struct SysTick {
    pub csr: u32,
    pub rvr: u32,
    pub cvr: u32,
    pub calib: u32,
}
</code></pre>
<p>The qualifier <code>#[repr(C)]</code> tells the Rust compiler to lay this structure out like a C compiler would. That's very important, as Rust allows structure fields to be re-ordered, while C does not. You can imagine the debugging we'd have to do if these fields were silently re-arranged by the compiler! With this qualifier in place, we have our four 32-bit fields which correspond to the table above. But of course, this <code>struct</code> is of no use by itself - we need a variable.</p>
<pre><code class="language-rust ignore">let systick = 0xE000_E010 as *mut SysTick;
let time = unsafe { (*systick).cvr };
</code></pre>
<a class="header" href="#volatile-accesses" id="volatile-accesses"><h2>Volatile Accesses</h2></a>
<p>Now, there are a couple of problems with the approach above.</p>
<ol>
<li>We have to use unsafe every time we want to access our Peripheral.</li>
<li>We've got no way of specifying which registers are read-only or read-write.</li>
<li>Any piece of code anywhere in your program could access the hardware
through this structure.</li>
<li>Most importantly, it doesn't actually work...</li>
</ol>
<p>Now, the problem is that compilers are clever. If you make two writes to the same piece of RAM, one after the other, the compiler can notice this and just skip the first write entirely. In C, we can mark variables as <code>volatile</code> to ensure that every read or write occurs as intended. In Rust, we instead mark the <em>accesses</em> as volatile, not the variable.</p>
<pre><code class="language-rust ignore">let systick = unsafe { &amp;mut *(0xE000_E010 as *mut SysTick) };
let time = unsafe { core::ptr::read_volatile(&amp;mut systick.cvr) };
</code></pre>
<p>So, we've fixed one of our four problems, but now we have even more <code>unsafe</code> code! Fortunately, there's a third party crate which can help - <a href="https://crates.io/crates/volatile_register"><code>volatile_register</code></a>.</p>
<pre><code class="language-rust ignore">use volatile_register::{RW, RO};

#[repr(C)]
struct SysTick {
    pub csr: RW&lt;u32&gt;,
    pub rvr: RW&lt;u32&gt;,
    pub cvr: RW&lt;u32&gt;,
    pub calib: RO&lt;u32&gt;,
}

fn get_systick() -&gt; &amp;'static mut SysTick {
    unsafe { &amp;mut *(0xE000_E010 as *mut SysTick) }
}

fn get_time() -&gt; u32 {
    let systick = get_systick();
    systick.cvr.read()
}
</code></pre>
<p>Now, the volatile accesses are performed automatically through the <code>read</code> and <code>write</code> methods. It's still <code>unsafe</code> to perform writes, but to be fair, hardware is a bunch of mutable state and there's no way for the compiler to know whether these writes are actually safe, so this is a good default position.</p>
<a class="header" href="#the-rusty-wrapper" id="the-rusty-wrapper"><h2>The Rusty Wrapper</h2></a>
<p>We need to wrap this <code>struct</code> up into a higher-layer API that is safe for our users to call. As the driver author, we manually verify the unsafe code is correct, and then present a safe API for our users so they don't have to worry about it (provided they trust us to get it right!).</p>
<p>One example might be:</p>
<pre><code class="language-rust ignore">use volatile_register::{RW, RO};

pub struct SystemTimer {
    p: &amp;'static mut RegisterBlock
}

#[repr(C)]
struct RegisterBlock {
    pub csr: RW&lt;u32&gt;,
    pub rvr: RW&lt;u32&gt;,
    pub cvr: RW&lt;u32&gt;,
    pub calib: RO&lt;u32&gt;,
}

impl SystemTimer {
    pub fn new() -&gt; SystemTimer {
        SystemTimer {
            p: unsafe { &amp;mut *(0xE000_E010 as *mut RegisterBlock) }
        }
    }

    pub fn get_time(&amp;self) -&gt; u32 {
        self.p.cvr.read()
    }

    pub fn set_reload(&amp;mut self, reload_value: u32) {
        unsafe { self.p.rvr.write(reload_value) }
    }
}

pub fn example_usage() -&gt; String {
    let mut st = SystemTimer::new();
    st.set_reload(0x00FF_FFFF);
    format!(&quot;Time is now 0x{:08x}&quot;, st.get_time())
}
</code></pre>
<p>Now, the problem with this approach is that the following code is perfectly acceptable to the compiler:</p>
<pre><code class="language-rust ignore">fn thread1() {
    let mut st = SystemTimer::new();
    st.set_reload(2000);
}

fn thread2() {
    let mut st = SystemTimer::new();
    st.set_reload(1000);
}
</code></pre>
<p>Our <code>&amp;mut self</code> argument to the <code>set_reload</code> function checks that there are no other references to <em>that</em> particular <code>SystemTimer</code> struct, but it doesn't stop the user creating a second <code>SystemTimer</code> which points to the exact same peripheral! Code written in this fashion will work if the author is diligent enough to spot all of these 'duplicate' driver instances, but once the code is spread out over multiple modules, drivers, developers, and days, it gets easier and easier to make these kinds of mistakes.</p>
<a class="header" href="#mutable-global-state" id="mutable-global-state"><h2>Mutable Global State</h2></a>
<p>Unfortunately, hardware is basically nothing but mutable global state, which can feel very frightening for a Rust developer. Hardware exists independently from the structures of the code we write, and can be modified at any time by the real world.</p>
<a class="header" href="#what-should-our-rules-be" id="what-should-our-rules-be"><h2>What should our rules be?</h2></a>
<p>How can we reliably interact with these peripherals?</p>
<ol>
<li>Always use <code>volatile</code> methods to read or write to peripheral memory, as it can change at any time</li>
<li>In software, we should be able to share any number of read-only accesses to these peripherals</li>
<li>If some software should have read-write access to a peripheral, it should hold the only reference to that peripheral</li>
</ol>
<a class="header" href="#the-borrow-checker" id="the-borrow-checker"><h2>The Borrow Checker</h2></a>
<p>The last two of these rules sound suspiciously similar to what the Borrow Checker does already!</p>
<p>Imagine if we could pass around ownership of these peripherals, or offer immutable or mutable references to them?</p>
<p>Well, we can, but for the Borrow Checker, we need to have exactly one instance of each peripheral, so Rust can handle this correctly. Well, luckily in the hardware, there is only one instance of any given peripheral, but how can we expose that in the structure of our code?</p>
<a class="header" href="#singletons" id="singletons"><h1>Singletons</h1></a>
<blockquote>
<p>In software engineering, the singleton pattern is a software design pattern that restricts the instantiation of a class to one object.</p>
<p><em>Wikipedia: <a href="https://en.wikipedia.org/wiki/Singleton_pattern">Singleton Pattern</a></em></p>
</blockquote>
<a class="header" href="#but-why-cant-we-just-use-global-variables" id="but-why-cant-we-just-use-global-variables"><h2>But why can't we just use global variable(s)?</h2></a>
<p>We could make everything a public static, like this</p>
<pre><code class="language-rust ignore">static mut THE_SERIAL_PORT: SerialPort = SerialPort;

fn main() {
    let _ = unsafe {
        THE_SERIAL_PORT.read_speed();
    };
}
</code></pre>
<p>But this has a few problems. It is a mutable global variable, and in Rust, these are always unsafe to interact with. These variables are also visible across your whole program, which means the borrow checker is unable to help you track references and ownership of these variables.</p>
<a class="header" href="#how-do-we-do-this-in-rust" id="how-do-we-do-this-in-rust"><h2>How do we do this in Rust?</h2></a>
<p>Instead of just making our peripheral a global variable, we might instead decide to make a global variable, in this case called <code>PERIPHERALS</code>, which contains an <code>Option&lt;T&gt;</code> for each of our peripherals.</p>
<pre><code class="language-rust ignore">struct Peripherals {
    serial: Option&lt;SerialPort&gt;,
}
impl Peripherals {
    fn take_serial(&amp;mut self) -&gt; SerialPort {
        let p = replace(&amp;mut self.serial, None);
        p.unwrap()
    }
}
static mut PERIPHERALS: Peripherals = Peripherals {
    serial: Some(SerialPort),
};
</code></pre>
<p>This structure allows us to obtain a single instance of our peripheral. If we try to call <code>take_serial()</code> more than once, our code will panic!</p>
<pre><code class="language-rust ignore">fn main() {
    let serial_1 = unsafe { PERIPHERALS.take_serial() };
    // This panics!
    // let serial_2 = unsafe { PERIPHERALS.take_serial() };
}
</code></pre>
<p>Although interacting with this structure is <code>unsafe</code>, once we have the <code>SerialPort</code> it contained, we no longer need to use <code>unsafe</code>, or the <code>PERIPHERALS</code> structure at all.</p>
<p>This has a small runtime overhead because we must wrap the <code>SerialPort</code> structure in an option, and we'll need to call <code>take_serial()</code> once, however this small up-front cost allows us to leverage the borrow checker throughout the rest of our program.</p>
<a class="header" href="#existing-library-support" id="existing-library-support"><h2>Existing library support</h2></a>
<p>Although we created our own <code>Peripherals</code> structure above, it is not necessary to do this for your code. The <code>cortex_m</code> crate contains a macro called <code>singleton!()</code> that will perform this action for you.</p>
<pre><code class="language-rust ignore">#[macro_use(singleton)]
extern crate cortex_m;

fn main() {
    // OK if `main` is executed only once
    let x: &amp;'static mut bool =
        singleton!(: bool = false).unwrap();
}
</code></pre>
<p><a href="https://docs.rs/cortex-m/latest/cortex_m/macro.singleton.html">cortex_m docs</a></p>
<p>Additionally, if you use <code>cortex-m-rtfm</code>, the entire process of defining and obtaining these peripherals are abstracted for you, and you are instead handed a <code>Peripherals</code> structure that contains a non-<code>Option&lt;T&gt;</code> version of all of the items you define.</p>
<pre><code class="language-rust ignore">// cortex-m-rtfm v0.3.x
app! {
    resources: {
        static RX: Rx&lt;USART1&gt;;
        static TX: Tx&lt;USART1&gt;;
    }
}
fn init(p: init::Peripherals) -&gt; init::LateResources {
    // Note that this is now an owned value, not a reference
    let usart1: USART1 = p.device.USART1;
}
</code></pre>
<p><a href="https://blog.japaric.io/rtfm-v3/">japaric.io rtfm v3</a></p>
<a class="header" href="#but-why" id="but-why"><h2>But why?</h2></a>
<p>But how do these Singletons make a noticeable difference in how our Rust code works?</p>
<pre><code class="language-rust ignore">impl SerialPort {
    const SER_PORT_SPEED_REG: *mut u32 = 0x4000_1000 as _;

    fn read_speed(
        &amp;self // &lt;------ This is really, really important
    ) -&gt; u32 {
        unsafe {
            ptr::read_volatile(Self::SER_PORT_SPEED_REG)
        }
    }
}
</code></pre>
<p>There are two important factors in play here:</p>
<ul>
<li>Because we are using a singleton, there is only one way or place to obtain a <code>SerialPort</code> structure</li>
<li>To call the <code>read_speed()</code> method, we must have ownership or a reference to a <code>SerialPort</code> structure</li>
</ul>
<p>These two factors put together means that it is only possible to access the hardware if we have appropriately satisfied the borrow checker, meaning that at no point do we have multiple mutable references to the same hardware!</p>
<pre><code class="language-rust ignore">fn main() {
    // missing reference to `self`! Won't work.
    // SerialPort::read_speed();

    let serial_1 = unsafe { PERIPHERALS.take_serial() };

    // you can only read what you have access to
    let _ = serial_1.read_speed();
}
</code></pre>
<a class="header" href="#treat-your-hardware-like-data" id="treat-your-hardware-like-data"><h2>Treat your hardware like data</h2></a>
<p>Additionally, because some references are mutable, and some are immutable, it becomes possible to see whether a function or method could potentially modify the state of the hardware. For example,</p>
<p>This is allowed to change hardware settings:</p>
<pre><code class="language-rust ignore">fn setup_spi_port(
    spi: &amp;mut SpiPort,
    cs_pin: &amp;mut GpioPin
) -&gt; Result&lt;()&gt; {
    // ...
}
</code></pre>
<p>This isn't:</p>
<pre><code class="language-rust ignore">fn read_button(gpio: &amp;GpioPin) -&gt; bool {
    // ...
}
</code></pre>
<p>This allows us to enforce whether code should or should not make changes to hardware at <strong>compile time</strong>, rather than at runtime. As a note, this generally only works across one application, but for bare metal systems, our software will be compiled into a single application, so this is not usually a restriction.</p>
<a class="header" href="#static-guarantees" id="static-guarantees"><h1>Static Guarantees</h1></a>
<p>Rust's type system prevents data races at compile time (see <a href="https://doc.rust-lang.org/core/marker/trait.Send.html"><code>Send</code></a> and
<a href="https://doc.rust-lang.org/core/marker/trait.Sync.html"><code>Sync</code></a> traits). The type system can also be used to check other properties at
compile time; reducing the need for runtime checks in some cases.</p>
<p>When applied to embedded programs these <em>static checks</em> can be used, for
example, to enforce that configuration of I/O interfaces is done properly. For
instance, one can design an API where it is only possible to initialize a serial
interface by first configuring the pins that will be used by the interface.</p>
<p>One can also statically check that operations, like setting a pin low, can only
be performed on correctly configured peripherals. For example, trying to change
the output state of a pin configured in floating input mode would raise a
compile error.</p>
<p>And, as seen in the previous chapter, the concept of ownership can be applied
to peripherals to ensure that only certain parts of a program can modify a
peripheral. This <em>access control</em> makes software easier to reason about
compared to the alternative of treating peripherals as global mutable state.</p>
<a class="header" href="#typestate-programming" id="typestate-programming"><h1>Typestate Programming</h1></a>
<p>The concept of <a href="https://en.wikipedia.org/wiki/Typestate_analysis">typestates</a> describes the encoding of information about the current state of an object into the type of that object. Although this can sound a little arcane, if you have used the <a href="https://doc.rust-lang.org/1.0.0/style/ownership/builders.html">Builder Pattern</a> in Rust, you have already started using Typestate Programming!</p>
<pre><pre class="playpen"><code class="language-rust">#[derive(Debug)]
struct Foo {
    inner: u32,
}

struct FooBuilder {
    a: u32,
    b: u32,
}

impl FooBuilder {
    pub fn new(starter: u32) -&gt; Self {
        Self {
            a: starter,
            b: starter,
        }
    }

    pub fn double_a(self) -&gt; Self {
        Self {
            a: self.a * 2,
            b: self.b,
        }
    }

    pub fn into_foo(self) -&gt; Foo {
        Foo {
            inner: self.a + self.b,
        }
    }
}

fn main() {
    let x = FooBuilder::new(10)
        .double_a()
        .into_foo();

    println!(&quot;{:#?}&quot;, x);
}
</code></pre></pre>
<p>In this example, there is no direct way to create a <code>Foo</code> object. We must create a <code>FooBuilder</code>, and properly initialize it before we can obtain the <code>Foo</code> object we want.</p>
<p>This minimal example encodes two states:</p>
<ul>
<li><code>FooBuilder</code>, which represents an &quot;unconfigured&quot;, or &quot;configuration in process&quot; state</li>
<li><code>Foo</code>, which represents a &quot;configured&quot;, or &quot;ready to use&quot; state.</li>
</ul>
<a class="header" href="#strong-types" id="strong-types"><h2>Strong Types</h2></a>
<p>Because Rust has a <a href="https://en.wikipedia.org/wiki/Strong_and_weak_typing">Strong Type System</a>, there is no easy way to magically create an instance of <code>Foo</code>, or to turn a <code>FooBuilder</code> into a <code>Foo</code> without calling the <code>into_foo()</code> method. Additionally, calling the <code>into_foo()</code> method consumes the original <code>FooBuilder</code> structure, meaning it can not be reused without the creation of a new instance.</p>
<p>This allows us to represent the states of our system as types, and to include the necessary actions for state transitions into the methods that exchange one type for another. By creating a <code>FooBuilder</code>, and exchanging it for a <code>Foo</code> object, we have walked through the steps of a basic state machine.</p>
<a class="header" href="#peripherals-as-state-machines" id="peripherals-as-state-machines"><h1>Peripherals as State Machines</h1></a>
<p>The peripherals of a microcontroller can be thought of as set of state machines. For example, the configuration of a simplified <a href="https://en.wikipedia.org/wiki/General-purpose_input/output">GPIO pin</a> could be represented as the following tree of states:</p>
<ul>
<li>Disabled</li>
<li>Enabled
<ul>
<li>Configured as Output
<ul>
<li>Output: High</li>
<li>Output: Low</li>
</ul>
</li>
<li>Configured as Input
<ul>
<li>Input: High Resistance</li>
<li>Input: Pulled Low</li>
<li>Input: Pulled High</li>
</ul>
</li>
</ul>
</li>
</ul>
<p>If the peripheral starts in the <code>Disabled</code> mode, to move to the <code>Input: High Resistance</code> mode, we must perform the following steps:</p>
<ol>
<li>Disabled</li>
<li>Enabled</li>
<li>Configured as Input</li>
<li>Input: High Resistance</li>
</ol>
<p>If we wanted to move from <code>Input: High Resistance</code> to <code>Input: Pulled Low</code>, we must perform the following steps:</p>
<ol>
<li>Input: High Resistance</li>
<li>Input: Pulled Low</li>
</ol>
<p>Similarly, if we want to move a GPIO pin from configured as <code>Input: Pulled Low</code> to <code>Output: High</code>, we must perform the following steps:</p>
<ol>
<li>Input: Pulled Low</li>
<li>Configured as Input</li>
<li>Configured as Output</li>
<li>Output: High</li>
</ol>
<a class="header" href="#hardware-representation" id="hardware-representation"><h2>Hardware Representation</h2></a>
<p>Typically the states listed above are set by writing values to given registers mapped to a GPIO peripheral. Let's define an imaginary GPIO Configuration Register to illustrate this:</p>
<table><thead><tr><th align="right"> Name         </th><th align="right"> Bit Number(s) </th><th align="right"> Value </th><th align="right"> Meaning   </th><th align="right"> Notes </th></tr></thead><tbody>
<tr><td align="right"> enable       </td><td align="right"> 0             </td><td align="right"> 0     </td><td align="right"> disabled  </td><td align="right"> Disables the GPIO </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> enabled   </td><td align="right"> Enables the GPIO </td></tr>
<tr><td align="right"> direction    </td><td align="right"> 1             </td><td align="right"> 0     </td><td align="right"> input     </td><td align="right"> Sets the direction to Input </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> output    </td><td align="right"> Sets the direction to Output </td></tr>
<tr><td align="right"> input_mode   </td><td align="right"> 2..3          </td><td align="right"> 00    </td><td align="right"> hi-z      </td><td align="right"> Sets the input as high resistance </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 01    </td><td align="right"> pull-low  </td><td align="right"> Input pin is pulled low </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 10    </td><td align="right"> pull-high </td><td align="right"> Input pin is pulled high </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 11    </td><td align="right"> n/a       </td><td align="right"> Invalid state. Do not set </td></tr>
<tr><td align="right"> output_mode  </td><td align="right"> 4             </td><td align="right"> 0     </td><td align="right"> set-low   </td><td align="right"> Output pin is driven low </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> set-high  </td><td align="right"> Output pin is driven high </td></tr>
<tr><td align="right"> input_status </td><td align="right"> 5             </td><td align="right"> x     </td><td align="right"> in-val    </td><td align="right"> 0 if input is &lt; 1.5v, 1 if input &gt;= 1.5v </td></tr>
</tbody></table>
<p>We <em>could</em> expose the following structure in Rust to control this GPIO:</p>
<pre><code class="language-rust ignore">/// GPIO interface
struct GpioConfig {
    /// GPIO Configuration structure generated by svd2rust
    periph: GPIO_CONFIG,
}

impl GpioConfig {
    pub fn set_enable(&amp;mut self, is_enabled: bool) {
        self.periph.modify(|_r, w| {
            w.enable().set_bit(is_enabled)
        });
    }

    pub fn set_direction(&amp;mut self, is_output: bool) {
        self.periph.modify(|_r, w| {
            w.direction().set_bit(is_output)
        });
    }

    pub fn set_input_mode(&amp;mut self, variant: InputMode) {
        self.periph.modify(|_r, w| {
            w.input_mode().variant(variant)
        });
    }

    pub fn set_output_mode(&amp;mut self, is_high: bool) {
        self.periph.modify(|_r, w| {
            w.output_mode().set_bit(is_high)
        });
    }

    pub fn get_input_status(&amp;self) -&gt; bool {
        self.periph.read().input_status().bit_is_set()
    }
}
</code></pre>
<p>However, this would allow us to modify certain registers that do not make sense. For example, what happens if we set the <code>output_mode</code> field when our GPIO is configured as an input?</p>
<p>In general, use of this structure would allow us to reach states not defined by our state machine above: e.g. an output that is pulled low, or an input that is set high. For some hardware, this may not matter. On other hardware, it could cause unexpected or undefined behavior!</p>
<p>Although this interface is convenient to write, it doesn't enforce the design contracts set out by our hardware implementation.</p>
<a class="header" href="#design-contracts" id="design-contracts"><h1>Design Contracts</h1></a>
<p>In our last chapter, we wrote an interface that <em>didn't</em> enforce design contracts. Let's take another look at our imaginary GPIO configuration register:</p>
<table><thead><tr><th align="right"> Name         </th><th align="right"> Bit Number(s) </th><th align="right"> Value </th><th align="right"> Meaning   </th><th align="right"> Notes </th></tr></thead><tbody>
<tr><td align="right"> enable       </td><td align="right"> 0             </td><td align="right"> 0     </td><td align="right"> disabled  </td><td align="right"> Disables the GPIO </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> enabled   </td><td align="right"> Enables the GPIO </td></tr>
<tr><td align="right"> direction    </td><td align="right"> 1             </td><td align="right"> 0     </td><td align="right"> input     </td><td align="right"> Sets the direction to Input </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> output    </td><td align="right"> Sets the direction to Output </td></tr>
<tr><td align="right"> input_mode   </td><td align="right"> 2..3          </td><td align="right"> 00    </td><td align="right"> hi-z      </td><td align="right"> Sets the input as high resistance </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 01    </td><td align="right"> pull-low  </td><td align="right"> Input pin is pulled low </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 10    </td><td align="right"> pull-high </td><td align="right"> Input pin is pulled high </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 11    </td><td align="right"> n/a       </td><td align="right"> Invalid state. Do not set </td></tr>
<tr><td align="right"> output_mode  </td><td align="right"> 4             </td><td align="right"> 0     </td><td align="right"> set-low   </td><td align="right"> Output pin is driven low </td></tr>
<tr><td align="right">              </td><td align="right">               </td><td align="right"> 1     </td><td align="right"> set-high  </td><td align="right"> Output pin is driven high </td></tr>
<tr><td align="right"> input_status </td><td align="right"> 5             </td><td align="right"> x     </td><td align="right"> in-val    </td><td align="right"> 0 if input is &lt; 1.5v, 1 if input &gt;= 1.5v </td></tr>
</tbody></table>
<p>If we instead checked the state before making use of the underlying hardware, enforcing our design contracts at runtime, we might write code that looks like this instead:</p>
<pre><code class="language-rust ignore">/// GPIO interface
struct GpioConfig {
    /// GPIO Configuration structure generated by svd2rust
    periph: GPIO_CONFIG,
}

impl GpioConfig {
    pub fn set_enable(&amp;mut self, is_enabled: bool) {
        self.periph.modify(|_r, w| {
            w.enable().set_bit(is_enabled)
        });
    }

    pub fn set_direction(&amp;mut self, is_output: bool) -&gt; Result&lt;(), ()&gt; {
        if self.periph.read().enable().bit_is_clear() {
            // Must be enabled to set direction
            return Err(());
        }

        self.periph.modify(|_r, w| {
            w.direction().set_bit(is_output)
        });

        Ok(())
    }

    pub fn set_input_mode(&amp;mut self, variant: InputMode) -&gt; Result&lt;(), ()&gt; {
        if self.periph.read().enable().bit_is_clear() {
            // Must be enabled to set input mode
            return Err(());
        }

        if self.periph.read().direction().bit_is_set() {
            // Direction must be input
            return Err(());
        }

        self.periph.modify(|_r, w| {
            w.input_mode().variant(variant)
        });

        Ok(())
    }

    pub fn set_output_status(&amp;mut self, is_high: bool) -&gt; Result&lt;(), ()&gt; {
        if self.periph.read().enable().bit_is_clear() {
            // Must be enabled to set output status
            return Err(());
        }

        if self.periph.read().direction().bit_is_clear() {
            // Direction must be output
            return Err(());
        }

        self.periph.modify(|_r, w| {
            w.output_mode().set_bit(is_high)
        });

        Ok(())
    }

    pub fn get_input_status(&amp;self) -&gt; Result&lt;bool, ()&gt; {
        if self.periph.read().enable().bit_is_clear() {
            // Must be enabled to get status
            return Err(());
        }

        if self.periph.read().direction().bit_is_set() {
            // Direction must be input
            return Err(());
        }

        Ok(self.periph.read().input_status().bit_is_set())
    }
}
</code></pre>
<p>Because we need to enforce the restrictions on the hardware, we end up doing a lot of runtime checking which wastes time and resources, and this code will be much less pleasant for the developer to use.</p>
<a class="header" href="#type-states" id="type-states"><h2>Type States</h2></a>
<p>But what if instead, we used Rust's type system to enforce the state transition rules? Take this example:</p>
<pre><code class="language-rust ignore">/// GPIO interface
struct GpioConfig&lt;ENABLED, DIRECTION, MODE&gt; {
    /// GPIO Configuration structure generated by svd2rust
    periph: GPIO_CONFIG,
    enabled: ENABLED,
    direction: DIRECTION,
    mode: MODE,
}

// Type states for the type parameters in GpioConfig
struct Disabled;
struct Enabled;
struct Output;
struct Input;
struct PulledLow;
struct PulledHigh;
struct HighZ;
struct DontCare;

/// These functions may be used on any GPIO Pin
impl&lt;EN, DIR, IN_MODE&gt; GpioConfig&lt;EN, DIR, IN_MODE&gt; {
    pub fn into_disabled(self) -&gt; GpioConfig&lt;Disabled, DontCare, DontCare&gt; {
        self.periph.modify(|_r, w| w.enable.disabled());
        GpioConfig {
            periph: self.periph,
            enabled: Disabled,
            direction: DontCare,
            mode: DontCare,
        }
    }

    pub fn into_enabled_input(self) -&gt; GpioConfig&lt;Enabled, Input, HighZ&gt; {
        self.periph.modify(|_r, w| {
            w.enable.enabled()
             .direction.input()
             .input_mode.high_z()
        });
        GpioConfig {
            periph: self.periph,
            enabled: Enabled,
            direction: Input,
            mode: HighZ,
        }
    }

    pub fn into_enabled_output(self) -&gt; GpioConfig&lt;Enabled, Output, DontCare&gt; {
        self.periph.modify(|_r, w| {
            w.enable.enabled()
             .direction.output()
             .output_mode.set_high()
        });
        GpioConfig {
            periph: self.periph,
            enabled: Enabled,
            direction: Output,
            mode: DontCare,
        }
    }
}

/// This function may be used on an Output Pin
impl GpioConfig&lt;Enabled, Output, DontCare&gt; {
    pub fn set_bit(&amp;mut self, set_high: bool) {
        self.periph.modify(|_r, w| w.output_mode.set_bit(set_high));
    }
}

/// These methods may be used on any enabled input GPIO
impl&lt;IN_MODE&gt; GpioConfig&lt;Enabled, Input, IN_MODE&gt; {
    pub fn bit_is_set(&amp;self) -&gt; bool {
        self.periph.read().input_status.bit_is_set()
    }

    pub fn into_input_high_z(self) -&gt; GpioConfig&lt;Enabled, Input, HighZ&gt; {
        self.periph.modify(|_r, w| w.input_mode().high_z());
        GpioConfig {
            periph: self.periph,
            enabled: Enabled,
            direction: Input,
            mode: HighZ,
        }
    }

    pub fn into_input_pull_down(self) -&gt; GpioConfig&lt;Enabled, Input, PulledLow&gt; {
        self.periph.modify(|_r, w| w.input_mode().pull_low());
        GpioConfig {
            periph: self.periph,
            enabled: Enabled,
            direction: Input,
            mode: PulledLow,
        }
    }

    pub fn into_input_pull_up(self) -&gt; GpioConfig&lt;Enabled, Input, PulledHigh&gt; {
        self.periph.modify(|_r, w| w.input_mode().pull_high());
        GpioConfig {
            periph: self.periph,
            enabled: Enabled,
            direction: Input,
            mode: PulledHigh,
        }
    }
}
</code></pre>
<p>Now let's see what the code using this would look like:</p>
<pre><code class="language-rust ignore">/*
 * Example 1: Unconfigured to High-Z input
 */
let pin: GpioConfig&lt;Disabled, _, _&gt; = get_gpio();

// Can't do this, pin isn't enabled!
// pin.into_input_pull_down();

// Now turn the pin from unconfigured to a high-z input
let input_pin = pin.into_enabled_input();

// Read from the pin
let pin_state = input_pin.bit_is_set();

// Can't do this, input pins don't have this interface!
// input_pin.set_bit(true);

/*
 * Example 2: High-Z input to Pulled Low input
 */
let pulled_low = input_pin.into_input_pull_down();
let pin_state = pulled_low.bit_is_set();

/*
 * Example 3: Pulled Low input to Output, set high
 */
let output_pin = pulled_low.into_enabled_output();
output_pin.set_bit(true);

// Can't do this, output pins don't have this interface!
// output_pin.into_input_pull_down();
</code></pre>
<p>This is definitely a convenient way to store the state of the pin, but why do it this way? Why is this better than storing the state as an <code>enum</code> inside of our <code>GpioConfig</code> structure?</p>
<a class="header" href="#compile-time-functional-safety" id="compile-time-functional-safety"><h2>Compile Time Functional Safety</h2></a>
<p>Because we are enforcing our design constraints entirely at compile time, this incurs no runtime cost. It is impossible to set an output mode when you have a pin in an input mode. Instead, you must walk through the states by converting it to an output pin, and then setting the output mode. Because of this, there is no runtime penalty due to checking the current state before executing a function.</p>
<p>Also, because these states are enforced by the type system, there is no longer room for errors by consumers of this interface. If they try to perform an illegal state transition, the code will not compile!</p>
<a class="header" href="#zero-cost-abstractions" id="zero-cost-abstractions"><h1>Zero Cost Abstractions</h1></a>
<p>Type states are also an excellent example of Zero Cost Abstractions - the ability to move certain behaviors to compile time execution or analysis. These type states contain no actual data, and are instead used as markers. Since they contain no data, they have no actual representation in memory at runtime:</p>
<pre><code class="language-rust ignore">use core::mem::size_of;

let _ = size_of::&lt;Enabled&gt;();    // == 0
let _ = size_of::&lt;Input&gt;();      // == 0
let _ = size_of::&lt;PulledHigh&gt;(); // == 0
let _ = size_of::&lt;GpioConfig&lt;Enabled, Input, PulledHigh&gt;&gt;(); // == 0
</code></pre>
<a class="header" href="#zero-sized-types" id="zero-sized-types"><h2>Zero Sized Types</h2></a>
<pre><code class="language-rust ignore">struct Enabled;
</code></pre>
<p>Structures defined like this are called Zero Sized Types, as they contain no actual data. Although these types act &quot;real&quot; at compile time - you can copy them, move them, take references to them, etc. - the optimizer will completely strip them away.</p>
<p>In this snippet of code:</p>
<pre><code class="language-rust ignore">pub fn into_input_high_z(self) -&gt; GpioConfig&lt;Enabled, Input, HighZ&gt; {
    self.periph.modify(|_r, w| w.input_mode().high_z());
    GpioConfig {
        periph: self.periph,
        enabled: Enabled,
        direction: Input,
        mode: HighZ,
    }
}
</code></pre>
<p>The GpioConfig we return never exists at runtime. Calling this function will generally boil down to a single assembly instruction - storing a constant register value to a register location. This means that the type state interface we've developed is a zero cost abstraction - it uses no more CPU, RAM, or code space tracking the state of <code>GpioConfig</code>, and renders to the same machine code as a direct register access.</p>
<a class="header" href="#nesting" id="nesting"><h2>Nesting</h2></a>
<p>In general, these abstractions may be nested as deeply as you would like. As long as all components used are zero sized types, the whole structure will not exist at runtime.</p>
<p>For complex or deeply nested structures, it may be tedious to define all possible combinations of state. In these cases, macros may be used to generate all implementations.</p>
<a class="header" href="#portability" id="portability"><h1>Portability</h1></a>
<p>In embedded environments portability is a very important topic: Every vendor and even each family from a single manufacturer offers different peripherals and capabilities and similarly the ways to interact with the peripherals will vary.</p>
<p>A common way to equalize such differences is via a layer called Hardware Abstraction layer or <strong>HAL</strong>.</p>
<blockquote>
<p>Hardware abstractions are sets of routines in software that emulate some platform-specific details, giving programs direct access to the hardware resources.</p>
<p>They often allow programmers to write device-independent, high performance applications by providing standard operating system (OS) calls to hardware.</p>
<p><em>Wikipedia: <a href="https://en.wikipedia.org/wiki/Hardware_abstraction">Hardware Abstraction Layer</a></em></p>
</blockquote>
<p>Embedded systems are a bit special in this regard since we typically do not have operating systems and user installable software but firmware images which are compiled as a whole as well as a number of other constraints. So while the traditional approach as defined by Wikipedia could potentially work it is likely not the most productive approach to ensure portability.</p>
<p>How do we do this in Rust? Enter <strong>embedded-hal</strong>...</p>
<a class="header" href="#what-is-embedded-hal" id="what-is-embedded-hal"><h2>What is embedded-hal?</h2></a>
<p>In a nutshell it is a set of traits which define implementation contracts between <strong>HAL implementations</strong>, <strong>drivers</strong> and <strong>applications (or firmwares)</strong>. Those contracts include both capabilities (i.e. if a trait is implemented for a certain type, the <strong>HAL implementation</strong> provides a certain capability) and methods (i.e. if you can construct a type implementing a trait it is guaranteed that you have the methods specified in the trait available).</p>
<p>A typical layering might look like this:</p>
<p><img src="../assets/rust_layers.svg" alt="" /></p>
<p>Some of the defined traits in <strong>embedded-hal</strong> are:</p>
<ul>
<li>GPIO (input and output pins)</li>
<li>Serial communication</li>
<li>I2C</li>
<li>SPI</li>
<li>Timers/Countdowns</li>
<li>Analog Digital Conversion</li>
</ul>
<p>The main reason for having the <strong>embedded-hal</strong> traits and crates implementing and using them is to keep complexity in check. If you consider that an application might have to implement the use of the peripheral in the hardware as well as the application and potentially drivers for additional hardware components, then it should be easy to see that the re-usability is very limited. Expressed mathematically, if <strong>M</strong> is the number of peripheral HAL implementations and <strong>N</strong> the number of drivers, then if we were to reinvent the wheel for every application we would end up with <strong>M*N</strong> implementations, while using the <em>API</em> provided by the <strong>embedded-hal</strong> traits makes the implementation complexity approach <strong>M+N</strong>. Of course there are additional benefits to be had, such as less trial-and-error due to well-defined and ready-to-use APIs.</p>
<a class="header" href="#users-of-the-embedded-hal" id="users-of-the-embedded-hal"><h2>Users of the embedded-hal</h2></a>
<p>As said above there are three main users of the HAL:</p>
<a class="header" href="#hal-implementation" id="hal-implementation"><h3>HAL implementation</h3></a>
<p>A HAL implementation provides the interfacing between the hardware and the users of the HAL traits. Typical implementations consist of three parts:</p>
<ul>
<li>One or more hardware specific types</li>
<li>Functions to create and initialize such a type, often providing various configuration options (speed, operation mode, use pins, etc.)</li>
<li>one or more <code>trait</code> <code>impl</code> of <strong>embedded-hal</strong> traits for that type</li>
</ul>
<p>Such a <strong>HAL implementation</strong> can come in various flavours:</p>
<ul>
<li>Via low-level hardware access, e.g. via registers</li>
<li>Via operating system, e.g. by using the <code>sysfs</code> under Linux</li>
<li>Via adapter, e.g. a mock of types for unit testing</li>
<li>Via driver for hardware adapters, e.g. I2C multiplexer or GPIO expander</li>
</ul>
<a class="header" href="#driver" id="driver"><h3>Driver</h3></a>
<p>A driver implements a set of custom functionality for an internal or external component, connected to a peripheral implementing the embedded-hal traits. Typical examples for such drivers include various sensors (temperature, magnetometer, accelerometer, light), display devices (LED arrays, LCD displays) and actuators (motors, transmitters).</p>
<p>A driver has to be initialized with an instance of type that implements a certain <code>trait</code> of the embedded-hal which is ensured via trait bound and provides its own type instance with a custom set of methods allowing to interact with the driven device.</p>
<a class="header" href="#application" id="application"><h3>Application</h3></a>
<p>The application binds the various parts together and ensures that the desired functionality is achieved. When porting between different systems, this is the part which requires the most adaptation efforts, since the application needs to correctly initialize the real hardware via the HAL implementation and the initialisation of different hardware differs, sometimes drastically so. Also the user choice often plays a big role, since components can be physically connected to different terminals, hardware buses sometimes need external hardware to match the configuration or there are different trade-offs to be made in the use of internal peripherals (e.g. multiple timers with different capabilities are available or peripherals conflict with others).</p>
<a class="header" href="#concurrency" id="concurrency"><h1>Concurrency</h1></a>
<p>Concurrency happens whenever different parts of your program might execute
at different times or out of order. In an embedded context, this includes:</p>
<ul>
<li>interrupt handlers, which run whenever the associated interrupt happens,</li>
<li>various forms of multithreading, where your microprocessor regularly swaps
between parts of your program,</li>
<li>and in some systems, multiple-core microprocessors, where each core can be
independently running a different part of your program at the same time.</li>
</ul>
<p>Since many embedded programs need to deal with interrupts, concurrency will
usually come up sooner or later, and it's also where many subtle and difficult
bugs can occur. Luckily, Rust provides a number of abstractions and safety
guarantees to help us write correct code.</p>
<a class="header" href="#no-concurrency" id="no-concurrency"><h2>No Concurrency</h2></a>
<p>The simplest concurrency for an embedded program is no concurrency: your
software consists of a single main loop which just keeps running, and there
are no interrupts at all. Sometimes this is perfectly suited to the problem
at hand! Typically your loop will read some inputs, perform some processing,
and write some outputs.</p>
<pre><code class="language-rust ignore">#[entry]
fn main() {
    let peripherals = setup_peripherals();
    loop {
        let inputs = read_inputs(&amp;peripherals);
        let outputs = process(inputs);
        write_outputs(&amp;peripherals, outputs);
    }
}
</code></pre>
<p>Since there's no concurrency, there's no need to worry about sharing data
between parts of your program or synchronising access to peripherals. If
you can get away with such a simple approach this can be a great solution.</p>
<a class="header" href="#global-mutable-data" id="global-mutable-data"><h2>Global Mutable Data</h2></a>
<p>Unlike non-embedded Rust, we will not usually have the luxury of creating
heap allocations and passing references to that data into a newly-created
thread. Instead our interrupt handlers might be called at any time and must
know how to access whatever shared memory we are using. At the lowest level,
this means we must have <em>statically allocated</em> mutable memory, which
both the interrupt handler and the main code can refer to.</p>
<p>In Rust, such <a href="https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html#accessing-or-modifying-a-mutable-static-variable"><code>static mut</code></a> variables are always unsafe to read or write,
because without taking special care, you might trigger a race condition,
where your access to the variable is interrupted halfway through by an
interrupt which also accesses that variable.</p>
<p>For an example of how this behaviour can cause subtle errors in your code,
consider an embedded program which counts rising edges of some input signal
in each one-second period (a frequency counter):</p>
<pre><code class="language-rust ignore">static mut COUNTER: u32 = 0;

#[entry]
fn main() -&gt; ! {
    set_timer_1hz();
    let mut last_state = false;
    loop {
        let state = read_signal_level();
        if state &amp;&amp; !last_state {
            // DANGER - Not actually safe! Could cause data races.
            unsafe { COUNTER += 1 };
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    unsafe { COUNTER = 0; }
}
</code></pre>
<p>Each second, the timer interrupt sets the counter back to 0. Meanwhile, the
main loop continually measures the signal, and increments the counter when
it sees a change from low to high. We've had to use <code>unsafe</code> to access
<code>COUNTER</code>, as it's <code>static mut</code>, and that means we're promising the compiler
we won't cause any undefined behaviour. Can you spot the race condition? The
increment on <code>COUNTER</code> is <em>not</em> guaranteed to be atomic — in fact, on most
embedded platforms, it will be split into a load, then the increment, then
a store. If the interrupt fired after the load but before the store, the
reset back to 0 would be ignored after the interrupt returns — and we would
count twice as many transitions for that period.</p>
<a class="header" href="#critical-sections" id="critical-sections"><h2>Critical Sections</h2></a>
<p>So, what can we do about data races? A simple approach is to use <em>critical
sections</em>, a context where interrupts are disabled. By wrapping the access to
<code>COUNTER</code> in <code>main</code> in a critical section, we can be sure the timer interrupt
will not fire until we're finished incrementing <code>COUNTER</code>:</p>
<pre><code class="language-rust ignore">static mut COUNTER: u32 = 0;

#[entry]
fn main() -&gt; ! {
    set_timer_1hz();
    let mut last_state = false;
    loop {
        let state = read_signal_level();
        if state &amp;&amp; !last_state {
            // New critical section ensures synchronised access to COUNTER
            cortex_m::interrupt::free(|_| {
                unsafe { COUNTER += 1 };
            });
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    unsafe { COUNTER = 0; }
}
</code></pre>
<p>In this example we use <code>cortex_m::interrupt::free</code>, but other platforms will
have similar mechanisms for executing code in a critical section. This is also
the same as disabling interrupts, running some code, and then re-enabling
interrupts.</p>
<p>Note we didn't need to put a critical section inside the timer interrupt,
for two reasons:</p>
<ul>
<li>Writing 0 to <code>COUNTER</code> can't be affected by a race since we don't read it</li>
<li>It will never be interrupted by the <code>main</code> thread anyway</li>
</ul>
<p>If <code>COUNTER</code> was being shared by multiple interrupt handlers that might
<em>preempt</em> each other, then each one might require a critical section as well.</p>
<p>This solves our immediate problem, but we're still left writing a lot of
<code>unsafe</code> code which we need to carefully reason about, and we might be using
critical sections needlessly — which comes at a cost to overhead and interrupt
latency and jitter.</p>
<p>It's worth noting that while a critical section guarantees no interrupts will
fire, it does not provide an exclusivity guarantee on multi-core systems!  The
other core could be happily accessing the same memory as your core, even
without interrupts. You will need stronger synchronisation primitives if you
are using multiple cores.</p>
<a class="header" href="#atomic-access" id="atomic-access"><h2>Atomic Access</h2></a>
<p>On some platforms, atomic instructions are available, which provide guarantees
about read-modify-write operations. Specifically for Cortex-M, <code>thumbv6</code>
(Cortex-M0) does not provide atomic instructions, while <code>thumbv7</code> (Cortex-M3
and above) do. These instructions give an alternative to the heavy-handed
disabling of all interrupts: we can attempt the increment, it will succeed most
of the time, but if it was interrupted it will automatically retry the entire
increment operation. These atomic operations are safe even across multiple
cores.</p>
<pre><code class="language-rust ignore">use core::sync::atomic::{AtomicUsize, Ordering};

static COUNTER: AtomicUsize = AtomicUsize::new(0);

#[entry]
fn main() -&gt; ! {
    set_timer_1hz();
    let mut last_state = false;
    loop {
        let state = read_signal_level();
        if state &amp;&amp; !last_state {
            // Use `fetch_add` to atomically add 1 to COUNTER
            COUNTER.fetch_add(1, Ordering::Relaxed);
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    // Use `store` to write 0 directly to COUNTER
    COUNTER.store(0, Ordering::Relaxed)
}
</code></pre>
<p>This time <code>COUNTER</code> is a safe <code>static</code> variable. Thanks to the <code>AtomicUsize</code>
type <code>COUNTER</code> can be safely modified from both the interrupt handler and the
main thread without disabling interrupts. When possible, this is a better
solution — but it may not be supported on your platform.</p>
<p>A note on <a href="https://doc.rust-lang.org/core/sync/atomic/enum.Ordering.html"><code>Ordering</code></a>: this affects how the compiler and hardware may reorder
instructions, and also has consequences on cache visibility. Assuming that the
target is a single core platform <code>Relaxed</code> is sufficient and the most efficient
choice in this particular case. Stricter ordering will cause the compiler to
emit memory barriers around the atomic operations; depending on what you're
using atomics for you may or may not need this! The precise details of the
atomic model are complicated and best described elsewhere.</p>
<p>For more details on atomics and ordering, see the <a href="https://doc.rust-lang.org/nomicon/atomics.html">nomicon</a>.</p>
<a class="header" href="#abstractions-send-and-sync" id="abstractions-send-and-sync"><h2>Abstractions, Send, and Sync</h2></a>
<p>None of the above solutions are especially satisfactory. They require <code>unsafe</code>
blocks which must be very carefully checked and are not ergonomic. Surely we
can do better in Rust!</p>
<p>We can abstract our counter into a safe interface which can be safely used
anywhere else in our code. For this example we'll use the critical-section
counter, but you could do something very similar with atomics.</p>
<pre><code class="language-rust ignore">use core::cell::UnsafeCell;
use cortex_m::interrupt;

// Our counter is just a wrapper around UnsafeCell&lt;u32&gt;, which is the heart
// of interior mutability in Rust. By using interior mutability, we can have
// COUNTER be `static` instead of `static mut`, but still able to mutate
// its counter value.
struct CSCounter(UnsafeCell&lt;u32&gt;);

const CS_COUNTER_INIT: CSCounter = CSCounter(UnsafeCell::new(0));

impl CSCounter {
    pub fn reset(&amp;self, _cs: &amp;interrupt::CriticalSection) {
        // By requiring a CriticalSection be passed in, we know we must
        // be operating inside a CriticalSection, and so can confidently
        // use this unsafe block (required to call UnsafeCell::get).
        unsafe { *self.0.get() = 0 };
    }

    pub fn increment(&amp;self, _cs: &amp;interrupt::CriticalSection) {
        unsafe { *self.0.get() += 1 };
    }
}

// Required to allow static CSCounter. See explanation below.
unsafe impl Sync for CSCounter {}

// COUNTER is no longer `mut` as it uses interior mutability;
// therefore it also no longer requires unsafe blocks to access.
static COUNTER: CSCounter = CS_COUNTER_INIT;

#[entry]
fn main() -&gt; ! {
    set_timer_1hz();
    let mut last_state = false;
    loop {
        let state = read_signal_level();
        if state &amp;&amp; !last_state {
            // No unsafe here!
            interrupt::free(|cs| COUNTER.increment(cs));
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    // We do need to enter a critical section here just to obtain a valid
    // cs token, even though we know no other interrupt could pre-empt
    // this one.
    interrupt::free(|cs| COUNTER.reset(cs));

    // We could use unsafe code to generate a fake CriticalSection if we
    // really wanted to, avoiding the overhead:
    // let cs = unsafe { interrupt::CriticalSection::new() };
}
</code></pre>
<p>We've moved our <code>unsafe</code> code to inside our carefully-planned abstraction,
and now our application code does not contain any <code>unsafe</code> blocks.</p>
<p>This design requires the application pass a <code>CriticalSection</code> token in:
these tokens are only safely generated by <code>interrupt::free</code>, so by requiring
one be passed in, we ensure we are operating inside a critical section, without
having to actually do the lock ourselves. This guarantee is provided statically
by the compiler: there won't be any runtime overhead associated with <code>cs</code>.
If we had multiple counters, they could all be given the same <code>cs</code>, without
requiring multiple nested critical sections.</p>
<p>This also brings up an important topic for concurrency in Rust: the
<a href="https://doc.rust-lang.org/nomicon/send-and-sync.html"><code>Send</code> and <code>Sync</code></a> traits. To summarise the Rust book, a type is Send
when it can safely be moved to another thread, while it is Sync when
it can be safely shared between multiple threads. In an embedded context,
we consider interrupts to be executing in a separate thread to the application
code, so variables accessed by both an interrupt and the main code must be
Sync.</p>
<p>For most types in Rust, both of these traits are automatically derived for you
by the compiler. However, because <code>CSCounter</code> contains an <a href="https://doc.rust-lang.org/core/cell/struct.UnsafeCell.html"><code>UnsafeCell</code></a>, it is
not Sync, and therefore we could not make a <code>static CSCounter</code>: <code>static</code>
variables <em>must</em> be Sync, since they can be accessed by multiple threads.</p>
<p>To tell the compiler we have taken care that the <code>CSCounter</code> is in fact safe
to share between threads, we implement the Sync trait explicitly. As with the
previous use of critical sections, this is only safe on single-core platforms:
with multiple cores you would need to go to greater lengths to ensure safety.</p>
<a class="header" href="#mutexes" id="mutexes"><h2>Mutexes</h2></a>
<p>We've created a useful abstraction specific to our counter problem, but
there are many common abstractions used for concurrency.</p>
<p>One such <em>synchronisation primitive</em> is a mutex, short for mutual exclusion.
These constructs ensure exclusive access to a variable, such as our counter. A
thread can attempt to <em>lock</em> (or <em>acquire</em>) the mutex, and either succeeds
immediately, or blocks waiting for the lock to be acquired, or returns an error
that the mutex could not be locked. While that thread holds the lock, it is
granted access to the protected data. When the thread is done, it <em>unlocks</em> (or
<em>releases</em>) the mutex, allowing another thread to lock it. In Rust, we would
usually implement the unlock using the <a href="https://doc.rust-lang.org/core/ops/trait.Drop.html"><code>Drop</code></a> trait to ensure it is always
released when the mutex goes out of scope.</p>
<p>Using a mutex with interrupt handlers can be tricky: it is not normally
acceptable for the interrupt handler to block, and it would be especially
disastrous for it to block waiting for the main thread to release a lock,
since we would then <em>deadlock</em> (the main thread will never release the lock
because execution stays in the interrupt handler). Deadlocking is not
considered unsafe: it is possible even in safe Rust.</p>
<p>To avoid this behaviour entirely, we could implement a mutex which requires
a critical section to lock, just like our counter example. So long as the
critical section must last as long as the lock, we can be sure we have
exclusive access to the wrapped variable without even needing to track
the lock/unlock state of the mutex.</p>
<p>This is in fact done for us in the <code>cortex_m</code> crate! We could have written
our counter using it:</p>
<pre><code class="language-rust ignore">use core::cell::Cell;
use cortex_m::interrupt::Mutex;

static COUNTER: Mutex&lt;Cell&lt;u32&gt;&gt; = Mutex::new(Cell::new(0));

#[entry]
fn main() -&gt; ! {
    set_timer_1hz();
    let mut last_state = false;
    loop {
        let state = read_signal_level();
        if state &amp;&amp; !last_state {
            interrupt::free(|cs|
                COUNTER.borrow(cs).set(COUNTER.borrow(cs).get() + 1));
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    // We still need to enter a critical section here to satisfy the Mutex.
    interrupt::free(|cs| COUNTER.borrow(cs).set(0));
}
</code></pre>
<p>We're now using <a href="https://doc.rust-lang.org/core/cell/struct.Cell.html"><code>Cell</code></a>, which along with its sibling <code>RefCell</code> is used to
provide safe interior mutability. We've already seen <code>UnsafeCell</code> which is
the bottom layer of interior mutability in Rust: it allows you to obtain
multiple mutable references to its value, but only with unsafe code. A <code>Cell</code>
is like an <code>UnsafeCell</code> but it provides a safe interface: it only permits
taking a copy of the current value or replacing it, not taking a reference,
and since it is not Sync, it cannot be shared between threads. These
constraints mean it's safe to use, but we couldn't use it directly in a
<code>static</code> variable as a <code>static</code> must be Sync.</p>
<p>So why does the example above work? The <code>Mutex&lt;T&gt;</code> implements Sync for any
<code>T</code> which is Send — such as a <code>Cell</code>. It can do this safely because it only
gives access to its contents during a critical section. We're therefore able
to get a safe counter with no unsafe code at all!</p>
<p>This is great for simple types like the <code>u32</code> of our counter, but what about
more complex types which are not Copy? An extremely common example in an
embedded context is a peripheral struct, which generally are not Copy.
For that we can turn to <code>RefCell</code>.</p>
<a class="header" href="#sharing-peripherals" id="sharing-peripherals"><h2>Sharing Peripherals</h2></a>
<p>Device crates generated using <code>svd2rust</code> and similar abstractions provide
safe access to peripherals by enforcing that only one instance of the
peripheral struct can exist at a time. This ensures safety, but makes it
difficult to access a peripheral from both the main thread and an interrupt
handler.</p>
<p>To safely share peripheral access, we can use the <code>Mutex</code> we saw before. We'll
also need to use <a href="https://doc.rust-lang.org/core/cell/struct.RefCell.html"><code>RefCell</code></a>, which uses a runtime check to ensure only one
reference to a peripheral is given out at a time. This has more overhead than
the plain <code>Cell</code>, but since we are giving out references rather than copies,
we must be sure only one exists at a time.</p>
<p>Finally, we'll also have to account for somehow moving the peripheral into
the shared variable after it has been initialised in the main code. To do
this we can use the <code>Option</code> type, initialised to <code>None</code> and later set to
the instance of the peripheral.</p>
<pre><code class="language-rust ignore">use core::cell::RefCell;
use cortex_m::interrupt::{self, Mutex};
use stm32f4::stm32f405;

static MY_GPIO: Mutex&lt;RefCell&lt;Option&lt;stm32f405::GPIOA&gt;&gt;&gt; =
    Mutex::new(RefCell::new(None));

#[entry]
fn main() -&gt; ! {
    // Obtain the peripheral singletons and configure it.
    // This example is from an svd2rust-generated crate, but
    // most embedded device crates will be similar.
    let dp = stm32f405::Peripherals::take().unwrap();
    let gpioa = &amp;dp.GPIOA;

    // Some sort of configuration function.
    // Assume it sets PA0 to an input and PA1 to an output.
    configure_gpio(gpioa);

    // Store the GPIOA in the mutex, moving it.
    interrupt::free(|cs| MY_GPIO.borrow(cs).replace(Some(dp.GPIOA)));
    // We can no longer use `gpioa` or `dp.GPIOA`, and instead have to
    // access it via the mutex.

    // Be careful to enable the interrupt only after setting MY_GPIO:
    // otherwise the interrupt might fire while it still contains None,
    // and as-written (with `unwrap()`), it would panic.
    set_timer_1hz();
    let mut last_state = false;
    loop {
        // We'll now read state as a digital input, via the mutex
        let state = interrupt::free(|cs| {
            let gpioa = MY_GPIO.borrow(cs).borrow();
            gpioa.as_ref().unwrap().idr.read().idr0().bit_is_set()
        });

        if state &amp;&amp; !last_state {
            // Set PA1 high if we've seen a rising edge on PA0.
            interrupt::free(|cs| {
                let gpioa = MY_GPIO.borrow(cs).borrow();
                gpioa.as_ref().unwrap().odr.modify(|_, w| w.odr1().set_bit());
            });
        }
        last_state = state;
    }
}

#[interrupt]
fn timer() {
    // This time in the interrupt we'll just clear PA0.
    interrupt::free(|cs| {
        // We can use `unwrap()` because we know the interrupt wasn't enabled
        // until after MY_GPIO was set; otherwise we should handle the potential
        // for a None value.
        let gpioa = MY_GPIO.borrow(cs).borrow();
        gpioa.as_ref().unwrap().odr.modify(|_, w| w.odr1().clear_bit());
    });
}
</code></pre>
<p>That's quite a lot to take in, so let's break down the important lines.</p>
<pre><code class="language-rust ignore">static MY_GPIO: Mutex&lt;RefCell&lt;Option&lt;stm32f405::GPIOA&gt;&gt;&gt; =
    Mutex::new(RefCell::new(None));
</code></pre>
<p>Our shared variable is now a <code>Mutex</code> around a <code>RefCell</code> which contains an
<code>Option</code>. The <code>Mutex</code> ensures we only have access during a critical section,
and therefore makes the variable Sync, even though a plain <code>RefCell</code> would not
be Sync. The <code>RefCell</code> gives us interior mutability with references, which
we'll need to use our <code>GPIOA</code>. The <code>Option</code> lets us initialise this variable
to something empty, and only later actually move the variable in. We cannot
access the peripheral singleton statically, only at runtime, so this is
required.</p>
<pre><code class="language-rust ignore">interrupt::free(|cs| MY_GPIO.borrow(cs).replace(Some(dp.GPIOA)));
</code></pre>
<p>Inside a critical section we can call <code>borrow()</code> on the mutex, which gives us
a reference to the <code>RefCell</code>. We then call <code>replace()</code> to move our new value
into the <code>RefCell</code>.</p>
<pre><code class="language-rust ignore">interrupt::free(|cs| {
    let gpioa = MY_GPIO.borrow(cs).borrow();
    gpioa.as_ref().unwrap().odr.modify(|_, w| w.odr1().set_bit());
});
</code></pre>
<p>Finally we use <code>MY_GPIO</code> in a safe and concurrent fashion. The critical section
prevents the interrupt firing as usual, and lets us borrow the mutex.  The
<code>RefCell</code> then gives us an <code>&amp;Option&lt;GPIOA&gt;</code>, and tracks how long it remains
borrowed - once that reference goes out of scope, the <code>RefCell</code> will be updated
to indicate it is no longer borrowed.</p>
<p>Since we can't move the <code>GPIOA</code> out of the <code>&amp;Option</code>, we need to convert it to
an <code>&amp;Option&lt;&amp;GPIOA&gt;</code> with <code>as_ref()</code>, which we can finally <code>unwrap()</code> to obtain
the <code>&amp;GPIOA</code> which lets us modify the peripheral.</p>
<p>Whew! This is safe, but it is also a little unwieldy. Is there anything else
we can do?</p>
<a class="header" href="#rtfm" id="rtfm"><h2>RTFM</h2></a>
<p>One alternative is the <a href="https://github.com/japaric/cortex-m-rtfm">RTFM framework</a>, short for Real Time For the Masses. It
enforces static priorities and tracks accesses to <code>static mut</code> variables
(&quot;resources&quot;) to statically ensure that shared resources are always accessed
safely, without requiring the overhead of always entering critical sections and
using reference counting (as in <code>RefCell</code>). This has a number of advantages such
as guaranteeing no deadlocks and giving extremely low time and memory overhead.</p>
<p>The framework also includes other features like message passing, which reduces
the need for explicit shared state, and the ability to schedule tasks to run at
a given time, which can be used to implement periodic tasks. Check out <a href="https://japaric.github.io/cortex-m-rtfm/book/">the
documentation</a> for more information!</p>
<a class="header" href="#real-time-operating-systems" id="real-time-operating-systems"><h2>Real Time Operating Systems</h2></a>
<p>Another common model for embedded concurrency is the real-time operating system
(RTOS). While currently less well explored in Rust, they are widely used in
traditional embedded development. Open source examples include <a href="https://freertos.org/">FreeRTOS</a> and
<a href="http://chibios.org/">ChibiOS</a>. These RTOSs provide support for running multiple application threads
which the CPU swaps between, either when the threads yield control (called
cooperative multitasking) or based on a regular timer or interrupts (preemptive
multitasking). The RTOS typically provides mutexes and other synchronisation
primitives, and often interoperates with hardware features such as DMA engines.</p>
<p>At the time of writing there are not many Rust RTOS examples to point to,
but it's an interesting area so watch this space!</p>
<a class="header" href="#multiple-cores" id="multiple-cores"><h2>Multiple Cores</h2></a>
<p>It is becoming more common to have two or more cores in embedded processors,
which adds an extra layer of complexity to concurrency. All the examples using
a critical section (including the <code>cortex_m::interrupt::Mutex</code>) assume the only
other execution thread is the interrupt thread, but on a multi-core system
that's no longer true. Instead, we'll need synchronisation primitives designed
for multiple cores (also called SMP, for symmetric multi-processing).</p>
<p>These typically use the atomic instructions we saw earlier, since the
processing system will ensure that atomicity is maintained over all cores.</p>
<p>Covering these topics in detail is currently beyond the scope of this book,
but the general patterns are the same as for the single-core case.</p>
<a class="header" href="#collections" id="collections"><h1>Collections</h1></a>
<p>Eventually you'll want to use dynamic data structures (AKA collections) in your
program. <code>std</code> provides a set of common collections: <a href="https://doc.rust-lang.org/std/vec/struct.Vec.html"><code>Vec</code></a>, <a href="https://doc.rust-lang.org/std/string/struct.String.html"><code>String</code></a>,
<a href="https://doc.rust-lang.org/std/collections/struct.HashMap.html"><code>HashMap</code></a>, etc. All the collections implemented in <code>std</code> use a global dynamic
memory allocator (AKA the heap).</p>
<p>As <code>core</code> is, by definition, free of memory allocations these implementations
are not available there, but they can be found in the <em>unstable</em> <code>alloc</code> crate
that's shipped with the compiler.</p>
<p>If you need collections, a heap allocated implementation is not your only
option. You can also use <em>fixed capacity</em> collections; one such implementation
can be found in the <a href="https://crates.io/crates/heapless"><code>heapless</code></a> crate.</p>
<p>In this section, we'll explore and compare these two implementations.</p>
<a class="header" href="#using-alloc" id="using-alloc"><h2>Using <code>alloc</code></h2></a>
<p>The <code>alloc</code> crate is shipped with the standard Rust distribution. To import the
crate you can directly <code>use</code> it <em>without</em> declaring it as a dependency in your
<code>Cargo.toml</code> file.</p>
<pre><code class="language-rust ignore">#![feature(alloc)]

extern crate alloc;

use alloc::vec::Vec;
</code></pre>
<p>To be able to use any collection you'll first need to use the <code>global_allocator</code>
attribute to declare the global allocator your program will use. It's required
that the allocator you select implements the <a href="https://doc.rust-lang.org/core/alloc/trait.GlobalAlloc.html"><code>GlobalAlloc</code></a> trait.</p>
<p>For completeness and to keep this section as self-contained as possible we'll
implement a simple bump pointer allocator and use that as the global allocator.
However, we <em>strongly</em> suggest you use a battle tested allocator from crates.io
in your program instead of this allocator.</p>
<pre><code class="language-rust ignore">// Bump pointer allocator implementation

extern crate cortex_m;

use core::alloc::{GlobalAlloc, Layout};
use core::cell::UnsafeCell;
use core::ptr;

use cortex_m::interrupt;

// Bump pointer allocator for *single* core systems
struct BumpPointerAlloc {
    head: UnsafeCell&lt;usize&gt;,
    end: usize,
}

unsafe impl Sync for BumpPointerAlloc {}

unsafe impl GlobalAlloc for BumpPointerAlloc {
    unsafe fn alloc(&amp;self, layout: Layout) -&gt; *mut u8 {
        // `interrupt::free` is a critical section that makes our allocator safe
        // to use from within interrupts
        interrupt::free(|_| {
            let head = self.head.get();

            let size = layout.size();
            let align = layout.align();
            let res = *head % align;
            let start = if res == 0 { *head } else { *head + align - res };
            if start + size &gt; self.end {
                // a null pointer signals an Out Of Memory condition
                ptr::null_mut()
            } else {
                *head = start + size;
                start as *mut u8
            }
        })
    }

    unsafe fn dealloc(&amp;self, _: *mut u8, _: Layout) {
        // this allocator never deallocates memory
    }
}

// Declaration of the global memory allocator
// NOTE the user must ensure that the memory region `[0x2000_0100, 0x2000_0200]`
// is not used by other parts of the program
#[global_allocator]
static HEAP: BumpPointerAlloc = BumpPointerAlloc {
    head: UnsafeCell::new(0x2000_0100),
    end: 0x2000_0200,
};
</code></pre>
<p>Apart from selecting a global allocator the user will also have to define how
Out Of Memory (OOM) errors are handled using the <em>unstable</em>
<code>alloc_error_handler</code> attribute.</p>
<pre><code class="language-rust ignore">#![feature(alloc_error_handler)]

use cortex_m::asm;

#[alloc_error_handler]
fn on_oom(_layout: Layout) -&gt; ! {
    asm::bkpt();

    loop {}
}
</code></pre>
<p>Once all that is in place, the user can finally use the collections in <code>alloc</code>.</p>
<pre><code class="language-rust ignore">#[entry]
fn main() -&gt; ! {
    let mut xs = Vec::new();

    xs.push(42);
    assert_eq!(xs.pop(), Some(42));

    loop {
        // ..
    }
}
</code></pre>
<p>If you have used the collections in the <code>std</code> crate then these will be familiar
as they are exact same implementation.</p>
<a class="header" href="#using-heapless" id="using-heapless"><h2>Using <code>heapless</code></h2></a>
<p><code>heapless</code> requires no setup as its collections don't depend on a global memory
allocator. Just <code>use</code> its collections and proceed to instantiate them:</p>
<pre><code class="language-rust ignore">extern crate heapless; // v0.4.x

use heapless::Vec;
use heapless::consts::*;

#[entry]
fn main() -&gt; ! {
    let mut xs: Vec&lt;_, U8&gt; = Vec::new();

    xs.push(42).unwrap();
    assert_eq!(xs.pop(), Some(42));
}
</code></pre>
<p>You'll note two differences between these collections and the ones in <code>alloc</code>.</p>
<p>First, you have to declare upfront the capacity of the collection. <code>heapless</code>
collections never reallocate and have fixed capacities; this capacity is part of
the type signature of the collection. In this case we have declared that <code>xs</code>
has a capacity of 8 elements; that is, the vector can, at most, hold 8 elements.
This is indicated by the <code>U8</code> (see <a href="https://crates.io/crates/typenum"><code>typenum</code></a>) in the type signature.</p>
<p>Second, the <code>push</code> method, and many other methods, return a <code>Result</code>. Since the
<code>heapless</code> collections have fixed capacity all operations that insert elements
into the collection can potentially fail. The API reflects this problem by
returning a <code>Result</code> indicating whether the operation succeeded or not. In
contrast, <code>alloc</code> collections will reallocate themselves on the heap to increase
their capacity.</p>
<p>As of version v0.4.x all <code>heapless</code> collections store all their elements inline.
This means that an operation like <code>let x = heapless::Vec::new();</code> will allocate
the collection on the stack, but it's also possible to allocate the collection
on a <code>static</code> variable, or even on the heap (<code>Box&lt;Vec&lt;_, _&gt;&gt;</code>).</p>
<a class="header" href="#trade-offs" id="trade-offs"><h2>Trade-offs</h2></a>
<p>Keep these in mind when choosing between heap allocated, relocatable collections
and fixed capacity collections.</p>
<a class="header" href="#out-of-memory-and-error-handling" id="out-of-memory-and-error-handling"><h3>Out Of Memory and error handling</h3></a>
<p>With heap allocations Out Of Memory is always a possibility and can occur in
any place where a collection may need to grow: for example, all
<code>alloc::Vec.push</code> invocations can potentially generate an OOM condition. Thus
some operations can <em>implicitly</em> fail. Some <code>alloc</code> collections expose
<code>try_reserve</code> methods that let you check for potential OOM conditions when
growing the collection but you need be proactive about using them.</p>
<p>If you exclusively use <code>heapless</code> collections and you don't use a memory
allocator for anything else then an OOM condition is impossible. Instead, you'll
have to deal with collections running out of capacity on a case by case basis.
That is, you'll have to deal with <em>all</em> the <code>Result</code>s returned by methods like
<code>Vec.push</code>.</p>
<p>OOM failures can be harder to debug than say <code>unwrap</code>-ing on all <code>Result</code>s
returned by <code>heapless::Vec.push</code> because the observed location of failure may
<em>not</em> match with the location of the cause of the problem. For example, even
<code>vec.reserve(1)</code> can trigger an OOM if the allocator is nearly exhausted because
some other collection was leaking memory (memory leaks are possible in safe
Rust).</p>
<a class="header" href="#memory-usage" id="memory-usage"><h3>Memory usage</h3></a>
<p>Reasoning about memory usage of heap allocated collections is hard because the
capacity of long lived collections can change at runtime. Some operations may
implicitly reallocate the collection increasing its memory usage, and some
collections expose methods like <code>shrink_to_fit</code> that can potentially reduce the
memory used by the collection -- ultimately, it's up to the allocator to decide
whether to actually shrink the memory allocation or not. Additionally, the
allocator may have to deal with memory fragmentation which can increase the
<em>apparent</em> memory usage.</p>
<p>On the other hand if you exclusively use fixed capacity collections, store
most of them in <code>static</code> variables and set a maximum size for the call stack
then the linker will detect if you try to use more memory than what's physically
available.</p>
<p>Furthermore, fixed capacity collections allocated on the stack will be reported
by <a href="https://doc.rust-lang.org/beta/unstable-book/compiler-flags/emit-stack-sizes.html"><code>-Z emit-stack-sizes</code></a> flag which means that tools that analyze stack usage
(like <a href="https://crates.io/crates/stack-sizes"><code>stack-sizes</code></a>) will include them in their analysis.</p>
<p>However, fixed capacity collections can <em>not</em> be shrunk which can result in
lower load factors (the ratio between the size of the collection and its
capacity) than what relocatable collections can achieve.</p>
<a class="header" href="#worst-case-execution-time-wcet" id="worst-case-execution-time-wcet"><h3>Worst Case Execution Time (WCET)</h3></a>
<p>If you are building time sensitive applications or hard real time applications then
you care, maybe a lot, about the worst case execution time of the different
parts of your program.</p>
<p>The <code>alloc</code> collections can reallocate so the WCET of operations that may grow
the collection will also include the time it takes to reallocate the collection,
which itself depends on the <em>runtime</em> capacity of the collection. This makes it
hard to determine the WCET of, for example, the <code>alloc::Vec.push</code> operation as
it depends on both the allocator being used and its runtime capacity.</p>
<p>On the other hand fixed capacity collections never reallocate so all operations
have a predictable execution time. For example, <code>heapless::Vec.push</code> executes in
constant time.</p>
<a class="header" href="#ease-of-use" id="ease-of-use"><h3>Ease of use</h3></a>
<p><code>alloc</code> requires setting up a global allocator whereas <code>heapless</code> does not.
However, <code>heapless</code> requires you to pick the capacity of each collection that
you instantiate.</p>
<p>The <code>alloc</code> API will be familiar to virtually every Rust developer. The
<code>heapless</code> API tries to closely mimic the <code>alloc</code> API but it will never be
exactly the same due to its explicit error handling -- some developers may feel
the explicit error handling is excessive or too cumbersome.</p>
<a class="header" href="#tips-for-embedded-c-developers" id="tips-for-embedded-c-developers"><h1>Tips for embedded C developers</h1></a>
<p>This chapter collects a variety of tips that might be useful to experienced
embedded C developers looking to start writing Rust. It will especially
highlight how things you might already be used to in C are different in Rust.</p>
<a class="header" href="#preprocessor" id="preprocessor"><h2>Preprocessor</h2></a>
<p>In embedded C it is very common to use the preprocessor for a variety of
purposes, such as:</p>
<ul>
<li>Compile-time selection of code blocks with <code>#ifdef</code></li>
<li>Compile-time array sizes and computations</li>
<li>Macros to simplify common patterns (to avoid function call overhead)</li>
</ul>
<p>In Rust there is no preprocessor, and so many of these use cases are addressed
differently. In the rest of this section we cover various alternatives to
using the preprocessor.</p>
<a class="header" href="#compile-time-code-selection" id="compile-time-code-selection"><h3>Compile-Time Code Selection</h3></a>
<p>The closest match to <code>#ifdef ... #endif</code> in Rust are <a href="https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section">Cargo features</a>. These
are a little more formal than the C preprocessor: all possible features are
explicitly listed per crate, and can only be either on or off. Features are
turned on when you list a crate as a dependency, and are additive: if any crate
in your dependency tree enables a feature for another crate, that feature will
be enabled for all users of that crate.</p>
<p>For example, you might have a crate which provides a library of signal
processing primitives. Each one might take some extra time to compile or
declare some large table of constants which you'd like to avoid. You could
declare a Cargo feature for each component in your <code>Cargo.toml</code>:</p>
<pre><code class="language-toml">[features]
FIR = []
IIR = []
</code></pre>
<p>Then, in your code, use <code>#[cfg(feature=&quot;FIR&quot;)]</code> to control what is included.</p>
<pre><pre class="playpen"><code class="language-rust">
# #![allow(unused_variables)]
#fn main() {
/// In your top-level lib.rs

#[cfg(feature=&quot;FIR&quot;)]
pub mod fir;

#[cfg(feature=&quot;IIR&quot;)]
pub mod iir;
#}</code></pre></pre>
<p>You can similarly include code blocks only if a feature is <em>not</em> enabled, or if
any combination of features are or are not enabled.</p>
<p>Additionally, Rust provides a number of automatically-set conditions you can
use, such as <code>target_arch</code> to select different code based on architecture. For
full details of the conditional compilation support, refer to the
<a href="https://doc.rust-lang.org/reference/conditional-compilation.html">conditional compilation</a> chapter of the Rust reference.</p>
<p>The conditional compilation will only apply to the next statement or block. If
a block can not be used in the current scope then the <code>cfg</code> attribute will
need to be used multiple times.  It's worth noting that most of the time it is
better to simply include all the code and allow the compiler to remove dead
code when optimising: it's simpler for you and your users, and in general the
compiler will do a good job of removing unused code.</p>
<a class="header" href="#compile-time-sizes-and-computation" id="compile-time-sizes-and-computation"><h3>Compile-Time Sizes and Computation</h3></a>
<p>Rust supports <code>const fn</code>, functions which are guaranteed to be evaluable at
compile-time and can therefore be used where constants are required, such as
in the size of arrays. This can be used alongside features mentioned above,
for example:</p>
<pre><pre class="playpen"><code class="language-rust">
# #![allow(unused_variables)]
#fn main() {
const fn array_size() -&gt; usize {
    #[cfg(feature=&quot;use_more_ram&quot;)]
    { 1024 }
    #[cfg(not(feature=&quot;use_more_ram&quot;))]
    { 128 }
}

static BUF: [u32; array_size()] = [0u32; array_size()];
#}</code></pre></pre>
<p>These are new to stable Rust as of 1.31, so documentation is still sparse. The
functionality available to <code>const fn</code> is also very limited at the time of
writing; in future Rust releases it is expected to expand on what is permitted
in a <code>const fn</code>.</p>
<a class="header" href="#macros" id="macros"><h3>Macros</h3></a>
<p>Rust provides an extremely powerful <a href="https://doc.rust-lang.org/book/ch19-06-macros.html">macro system</a>. While the C preprocessor
operates almost directly on the text of your source code, the Rust macro system
operates at a higher level. There are two varieties of Rust macro: <em>macros by
example</em> and <em>procedural macros</em>. The former are simpler and most common; they
look like function calls and can expand to a complete expression, statement,
item, or pattern. Procedural macros are more complex but permit extremely
powerful additions to the Rust language: they can transform arbitrary Rust
syntax into new Rust syntax.</p>
<p>In general, where you might have used a C preprocessor macro, you probably want
to see if a macro-by-example can do the job instead. They can be defined in
your crate and easily used by your own crate or exported for other users. Be
aware that since they must expand to complete expressions, statements, items,
or patterns, some use cases of C preprocessor macros will not work, for example
a macro that expands to part of a variable name or an incomplete set of items
in a list.</p>
<p>As with Cargo features, it is worth considering if you even need the macro. In
many cases a regular function is easier to understand and will be inlined to
the same code as a macro. The <code>#[inline]</code> and <code>#[inline(always)]</code> <a href="https://doc.rust-lang.org/reference/attributes.html#inline-attribute">attributes</a>
give you further control over this process, although care should be taken here
as well — the compiler will automatically inline functions from the same crate
where appropriate, so forcing it to do so inappropriately might actually lead
to decreased performance.</p>
<p>Explaining the entire Rust macro system is out of scope for this tips page, so
you are encouraged to consult the Rust documentation for full details.</p>
<a class="header" href="#build-system" id="build-system"><h2>Build System</h2></a>
<p>Most Rust crates are built using Cargo (although it is not required). This
takes care of many difficult problems with traditional build systems. However,
you may wish to customise the build process. Cargo provides <a href="https://doc.rust-lang.org/cargo/reference/build-scripts.html"><code>build.rs</code>
scripts</a> for this purpose. They are Rust scripts which can interact with the
Cargo build system as required.</p>
<p>Common use cases for build scripts include:</p>
<ul>
<li>provide build-time information, for example statically embedding the build
date or Git commit hash into your executable</li>
<li>generate linker scripts at build time depending on selected features or other
logic</li>
<li>change the Cargo build configuration</li>
<li>add extra static libraries to link against</li>
</ul>
<p>At present there is no support for post-build scripts, which you might
traditionally have used for tasks like automatic generation of binaries from
the build objects or printing build information.</p>
<a class="header" href="#cross-compiling-1" id="cross-compiling-1"><h3>Cross-Compiling</h3></a>
<p>Using Cargo for your build system also simplifies cross-compiling. In most
cases it suffices to tell Cargo <code>--target thumbv6m-none-eabi</code> and find a
suitable executable in <code>target/thumbv6m-none-eabi/debug/myapp</code>.</p>
<p>For platforms not natively supported by Rust, you will need to build <code>libcore</code>
for that target yourself. On such platforms, <a href="https://github.com/japaric/xargo">Xargo</a> can be used as a stand-in
for Cargo which automatically builds <code>libcore</code> for you.</p>
<a class="header" href="#iterators-vs-array-access" id="iterators-vs-array-access"><h2>Iterators vs Array Access</h2></a>
<p>In C you are probably used to accessing arrays directly by their index:</p>
<pre><code class="language-c">int16_t arr[16];
int i;
for(i=0; i&lt;sizeof(arr)/sizeof(arr[0]); i++) {
    process(arr[i]);
}
</code></pre>
<p>In Rust this is an anti-pattern: indexed access can be slower (as it needs to
be bounds checked) and may prevent various compiler optimisations. This is an
important distinction and worth repeating: Rust will check for out-of-bounds
access on manual array indexing to guarantee memory safety, while C will
happily index outside the array.</p>
<p>Instead, use iterators:</p>
<pre><code class="language-rust ignore">let arr = [0u16; 16];
for element in arr.iter() {
    process(*element);
}
</code></pre>
<p>Iterators provide a powerful array of functionality you would have to implement
manually in C, such as chaining, zipping, enumerating, finding the min or max,
summing, and more. Iterator methods can also be chained, giving very readable
data processing code.</p>
<p>See the <a href="https://doc.rust-lang.org/book/ch13-02-iterators.html">Iterators in the Book</a> and <a href="https://doc.rust-lang.org/core/iter/trait.Iterator.html">Iterator documentation</a> for more details.</p>
<a class="header" href="#references-vs-pointers" id="references-vs-pointers"><h2>References vs Pointers</h2></a>
<p>In Rust, pointers (called <a href="https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer"><em>raw pointers</em></a>) exist but are only used in specific
circumstances, as dereferencing them is always considered <code>unsafe</code> -- Rust
cannot provide its usual guarantees about what might be behind the pointer.</p>
<p>In most cases, we instead use <em>references</em>, indicated by the <code>&amp;</code> symbol, or
<em>mutable references</em>, indicated by <code>&amp;mut</code>. References behave similarly to
pointers, in that they can be dereferenced to access the underlying values, but
they are a key part of Rust's ownership system: Rust will strictly enforce that
you may only have one mutable reference <em>or</em> multiple non-mutable references to
the same value at any given time.</p>
<p>In practice this means you have to be more careful about whether you need
mutable access to data: where in C the default is mutable and you must be
explicit about <code>const</code>, in Rust the opposite is true.</p>
<p>One situation where you might still use raw pointers is interacting directly
with hardware (for example, writing a pointer to a buffer into a DMA peripheral
register), and they are also used under the hood for all peripheral access
crates to allow you to read and write memory-mapped registers.</p>
<a class="header" href="#volatile-access" id="volatile-access"><h2>Volatile Access</h2></a>
<p>In C, individual variables may be marked <code>volatile</code>, indicating to the compiler
that the value in the variable may change between accesses. Volatile variables
are commonly used in an embedded context for memory-mapped registers.</p>
<p>In Rust, instead of marking a variable as <code>volatile</code>, we use specific methods
to perform volatile access: <a href="https://doc.rust-lang.org/core/ptr/fn.read_volatile.html"><code>core::ptr::read_volatile</code></a> and
<a href="https://doc.rust-lang.org/core/ptr/fn.write_volatile.html"><code>core::ptr::write_volatile</code></a>. These methods take a <code>*const T</code> or a <code>*mut T</code>
(<em>raw pointers</em>, as discussed above) and perform a volatile read or write.</p>
<p>For example, in C you might write:</p>
<pre><code class="language-c">volatile bool signalled = false;

void ISR() {
    // Signal that the interrupt has occurred
    signalled = true;
}

void driver() {
    while(true) {
        // Sleep until signalled
        while(!signalled) { WFI(); }
        // Reset signalled indicator
        signalled = false;
        // Perform some task that was waiting for the interrupt
        run_task();
    }
}
</code></pre>
<p>The equivalent in Rust would use volatile methods on each access:</p>
<pre><code class="language-rust ignore">static mut SIGNALLED: bool = false;

#[interrupt]
fn ISR() {
    // Signal that the interrupt has occurred
    // (In real code, you should consider a higher level primitive,
    //  such as an atomic type).
    unsafe { core::ptr::write_volatile(&amp;mut SIGNALLED, true) };
}

fn driver() {
    loop {
        // Sleep until signalled
        while unsafe { !core::ptr::read_volatile(&amp;SIGNALLED) } {}
        // Reset signalled indicator
        unsafe { core::ptr::write_volatile(&amp;mut SIGNALLED, false) };
        // Perform some task that was waiting for the interrupt
        run_task();
    }
}
</code></pre>
<p>A few things are worth noting in the code sample:</p>
<ul>
<li>We can pass <code>&amp;mut SIGNALLED</code> into the function requiring <code>*mut T</code>, since
<code>&amp;mut T</code> automatically converts to a <code>*mut T</code> (and the same for <code>*const T</code>)</li>
<li>We need <code>unsafe</code> blocks for the <code>read_volatile</code>/<code>write_volatile</code> methods,
since they are <code>unsafe</code> functions. It is the programmer's responsibility
to ensure safe use: see the methods' documentation for further details.</li>
</ul>
<p>It is rare to require these functions directly in your code, as they will
usually be taken care of for you by higher-level libraries. For memory mapped
peripherals, the peripheral access crates will implement volatile access
automatically, while for concurrency primitives there are better abstractions
available (see the <a href="c-tips/../concurrency/index.html">Concurrency chapter</a>).</p>
<a class="header" href="#packed-and-aligned-types" id="packed-and-aligned-types"><h2>Packed and Aligned Types</h2></a>
<p>In embedded C it is common to tell the compiler a variable must have a certain
alignment or a struct must be packed rather than aligned, usually to meet
specific hardware or protocol requirements.</p>
<p>In Rust this is controlled by the <code>repr</code> attribute on a struct or union. The
default representation provides no guarantees of layout, so should not be used
for code that interoperates with hardware or C. The compiler may re-order
struct members or insert padding and the behaviour may change with future
versions of Rust.</p>
<pre><pre class="playpen"><code class="language-rust">struct Foo {
    x: u16,
    y: u8,
    z: u16,
}

fn main() {
    let v = Foo { x: 0, y: 0, z: 0 };
    println!(&quot;{:p} {:p} {:p}&quot;, &amp;v.x, &amp;v.y, &amp;v.z);
}

// 0x7ffecb3511d0 0x7ffecb3511d4 0x7ffecb3511d2
// Note ordering has been changed to x, z, y to improve packing.
</code></pre></pre>
<p>To ensure layouts that are interoperable with C, use <code>repr(C)</code>:</p>
<pre><pre class="playpen"><code class="language-rust">#[repr(C)]
struct Foo {
    x: u16,
    y: u8,
    z: u16,
}

fn main() {
    let v = Foo { x: 0, y: 0, z: 0 };
    println!(&quot;{:p} {:p} {:p}&quot;, &amp;v.x, &amp;v.y, &amp;v.z);
}

// 0x7fffd0d84c60 0x7fffd0d84c62 0x7fffd0d84c64
// Ordering is preserved and the layout will not change over time.
// `z` is two-byte aligned so a byte of padding exists between `y` and `z`.
</code></pre></pre>
<p>To ensure a packed representation, use <code>repr(packed)</code>:</p>
<pre><pre class="playpen"><code class="language-rust">#[repr(packed)]
struct Foo {
    x: u16,
    y: u8,
    z: u16,
}

fn main() {
    let v = Foo { x: 0, y: 0, z: 0 };
    // Unsafe is required to borrow a field of a packed struct.
    unsafe { println!(&quot;{:p} {:p} {:p}&quot;, &amp;v.x, &amp;v.y, &amp;v.z) };
}

// 0x7ffd33598490 0x7ffd33598492 0x7ffd33598493
// No padding has been inserted between `y` and `z`, so now `z` is unaligned.
</code></pre></pre>
<p>Note that using <code>repr(packed)</code> also sets the alignment of the type to <code>1</code>.</p>
<p>Finally, to specify a specific alignment, use <code>repr(align(n))</code>, where <code>n</code> is
the number of bytes to align to (and must be a power of two):</p>
<pre><pre class="playpen"><code class="language-rust">#[repr(C)]
#[repr(align(4096))]
struct Foo {
    x: u16,
    y: u8,
    z: u16,
}

fn main() {
    let v = Foo { x: 0, y: 0, z: 0 };
    let u = Foo { x: 0, y: 0, z: 0 };
    println!(&quot;{:p} {:p} {:p}&quot;, &amp;v.x, &amp;v.y, &amp;v.z);
    println!(&quot;{:p} {:p} {:p}&quot;, &amp;u.x, &amp;u.y, &amp;u.z);
}

// 0x7ffec909a000 0x7ffec909a002 0x7ffec909a004
// 0x7ffec909b000 0x7ffec909b002 0x7ffec909b004
// The two instances `u` and `v` have been placed on 4096-byte alignments,
// evidenced by the `000` at the end of their addresses.
</code></pre></pre>
<p>Note we can combine <code>repr(C)</code> with <code>repr(align(n))</code> to obtain an aligned and
C-compatible layout. It is not permissible to combine <code>repr(align(n))</code> with
<code>repr(packed)</code>, since <code>repr(packed)</code> sets the alignment to <code>1</code>. It is also not
permissible for a <code>repr(packed)</code> type to contain a <code>repr(align(n))</code> type.</p>
<p>For further details on type layouts, refer to the <a href="https://doc.rust-lang.org/reference/type-layout.html">type layout</a> chapter of the
Rust Reference.</p>
<a class="header" href="#other-resources-1" id="other-resources-1"><h2>Other Resources</h2></a>
<ul>
<li>In this book:
<ul>
<li><a href="c-tips/../interoperability/c-with-rust.html">A little C with your Rust</a></li>
<li><a href="c-tips/../interoperability/rust-with-c.html">A little Rust with your C</a></li>
</ul>
</li>
<li><a href="https://docs.rust-embedded.org/faq.html">The Rust Embedded FAQs</a></li>
<li><a href="http://blahg.josefsipek.net/?p=580">Rust Pointers for C Programmers</a></li>
<li><a href="https://github.com/diwic/reffers-rs/blob/master/docs/Pointers.md">I used to use pointers - now what?</a></li>
</ul>
<a class="header" href="#interoperability" id="interoperability"><h1>Interoperability</h1></a>
<p>Interoperability between Rust and C code is always dependent
on transforming data between the two languages.
For this purpose there are two dedicated modules
in the <code>stdlib</code> called
<a href="https://doc.rust-lang.org/std/ffi/index.html"><code>std::ffi</code></a> and
<a href="https://doc.rust-lang.org/std/os/raw/index.html"><code>std::os::raw</code></a>.</p>
<p><code>std::os::raw</code> deals with low-level primitive types that can
be converted implicitly by the compiler
because the memory layout between Rust and C
is similar enough or the same.</p>
<p><code>std::ffi</code> provides some utility for converting more complex
types such as Strings, mapping both <code>&amp;str</code> and <code>String</code>
to C-types that are easier and safer to handle.</p>
<p>Neither of these modules are available in <code>core</code>, but you can find a <code>#![no_std]</code>
compatible version of <code>std::ffi::{CStr,CString}</code> in the <a href="https://crates.io/crates/cstr_core"><code>cstr_core</code></a> crate, and
most of the <code>std::os::raw</code> types in the <a href="https://crates.io/crates/cty"><code>cty</code></a> crate.</p>
<table><thead><tr><th> Rust type  </th><th> Intermediate </th><th> C type       </th></tr></thead><tbody>
<tr><td> String     </td><td> CString      </td><td> char *       </td></tr>
<tr><td> &amp;str       </td><td> CStr         </td><td> const char * </td></tr>
<tr><td> ()         </td><td> c_void       </td><td> void         </td></tr>
<tr><td> u32 or u64 </td><td> c_uint       </td><td> unsigned int </td></tr>
<tr><td> etc        </td><td> ...          </td><td> ...          </td></tr>
</tbody></table>
<p>As mentioned above, primitive types can be converted
by the compiler implicitly.</p>
<pre><code class="language-rust ignore">unsafe fn foo(num: u32) {
    let c_num: c_uint = num;
    let r_num: u32 = c_num;
}
</code></pre>
<a class="header" href="#interoperability-with-other-build-systems" id="interoperability-with-other-build-systems"><h2>Interoperability with other build systems</h2></a>
<p>A common requirement for including Rust in your embedded project is combining
Cargo with your existing build system, such as make or cmake.</p>
<p>We are collecting examples and use cases for this on our issue tracker in
<a href="https://github.com/rust-embedded/book/issues/61">issue #61</a>.</p>
<a class="header" href="#interoperability-with-rtoss" id="interoperability-with-rtoss"><h2>Interoperability with RTOSs</h2></a>
<p>Integrating Rust with an RTOS such as FreeRTOS or ChibiOS is still a work in
progress; especially calling RTOS functions from Rust can be tricky.</p>
<p>We are collecting examples and use cases for this on our issue tracker in
<a href="https://github.com/rust-embedded/book/issues/62">issue #62</a>.</p>
<a class="header" href="#a-little-c-with-your-rust" id="a-little-c-with-your-rust"><h1>A little C with your Rust</h1></a>
<p>Using C or C++ inside of a Rust project consists of two major parts:</p>
<ul>
<li>Wrapping the exposed C API for use with Rust</li>
<li>Building your C or C++ code to be integrated with the Rust code</li>
</ul>
<p>As C++ does not have a stable ABI for the Rust compiler to target, it is recommended to use the <code>C</code> ABI when combining Rust with C or C++.</p>
<a class="header" href="#defining-the-interface" id="defining-the-interface"><h2>Defining the interface</h2></a>
<p>Before consuming C or C++ code from Rust, it is necessary to define (in Rust) what data types and function signatures exist in the linked code. In C or C++, you would include a header (<code>.h</code> or <code>.hpp</code>) file which defines this data. In Rust, it is necessary to either manually translate these definitions to Rust, or use a tool to generate these definitions.</p>
<p>First, we will cover manually translating these definitions from C/C++ to Rust.</p>
<a class="header" href="#wrapping-c-functions-and-datatypes" id="wrapping-c-functions-and-datatypes"><h3>Wrapping C functions and Datatypes</h3></a>
<p>Typically, libraries written in C or C++ will provide a header file defining all types and functions used in public interfaces. An example file may look like this:</p>
<pre><code class="language-C">/* File: cool.h */
typedef struct CoolStruct {
    int x;
    int y;
} CoolStruct;

void cool_function(int i, char c, CoolStruct* cs);
</code></pre>
<p>When translated to Rust, this interface would look as such:</p>
<pre><code class="language-rust ignore">/* File: cool_bindings.rs */
#[repr(C)]
pub struct CoolStruct {
    pub x: cty::c_int,
    pub y: cty::c_int,
}

extern &quot;C&quot; {
    pub fn cool_function(
        i: cty::c_int,
        c: cty::c_char,
        cs: *mut CoolStruct
    );
}
</code></pre>
<p>Let's take a look at this definition one piece at a time, to explain each of the parts.</p>
<pre><code class="language-rust ignore">#[repr(C)]
pub struct CoolStruct { ... }
</code></pre>
<p>By default, Rust does not guarantee order, padding, or the size of data included in a <code>struct</code>. In order to guarantee compatibility with C code, we include the <code>#[repr(C)]</code> attribute, which instructs the Rust compiler to always use the same rules C does for organizing data within a struct.</p>
<pre><code class="language-rust ignore">pub x: cty::c_int,
pub y: cty::c_int,
</code></pre>
<p>Due to the flexibility of how C or C++ defines an <code>int</code> or <code>char</code>, it is recommended to use primitive data types defined in <code>cty</code>, which will map types from C to types in Rust.</p>
<pre><code class="language-rust ignore">extern &quot;C&quot; {
    pub fn cool_function( ... );
}
</code></pre>
<p>This statement, inside an <code>extern &quot;C&quot;</code> block, declares the signature of a function that uses the C ABI, called <code>cool_function</code>. By declaring the signature without defining the body of the function, the definition of this function will need to be provided elsewhere, or linked into the final library or binary from a static library. Note that functions declared inside an <code>extern</code> block are implicitly <code>unsafe</code> to call.</p>
<pre><code class="language-rust ignore">    i: cty::c_int,
    c: cty::c_char,
    cs: *mut CoolStruct
</code></pre>
<p>Similar to our datatype above, we define the datatypes of the function arguments using C-compatible definitions. We also retain the same argument names, for clarity.</p>
<p>We have one new type here, <code>*mut CoolStruct</code>. As C does not have a concept of Rust's references, which would look like this: <code>&amp;mut CoolStruct</code>, we instead have a raw pointer. As dereferencing this pointer is <code>unsafe</code>, and the pointer may in fact be a <code>null</code> pointer, care must be taken to ensure the guarantees typical of Rust when interacting with C or C++ code.</p>
<a class="header" href="#automatically-generating-the-interface" id="automatically-generating-the-interface"><h3>Automatically generating the interface</h3></a>
<p>Rather than manually generating these interfaces, which may be tedious and error prone, there is a tool called <a href="https://github.com/rust-lang-nursery/rust-bindgen">bindgen</a> which will perform these conversions automatically. For instructions of the usage of <a href="https://github.com/rust-lang-nursery/rust-bindgen">bindgen</a>, please refer to the <a href="https://rust-lang.github.io/rust-bindgen/">bindgen user's manual</a>, however the typical process consists of the following:</p>
<ol>
<li>Gather all C or C++ headers defining interfaces or datatypes you would like to use with Rust</li>
<li>Write a <code>bindings.h</code> file, which <code>#include &quot;...&quot;</code>'s each of the files you gathered in step one</li>
<li>Feed this <code>bindings.h</code> file, along with any compilation flags used to compile
your code into <code>bindgen</code>. Tip: use <code>Builder.ctypes_prefix(&quot;cty&quot;)</code> /
<code>--ctypes-prefix=cty</code> to make the generated code <code>#![no_std]</code> compatible.</li>
<li><code>bindgen</code> will produce the generated Rust code to the output of the terminal window. This file may be piped to a file in your project, such as <code>bindings.rs</code>. You may use this file in your Rust project to interact with C/C++ code compiled and linked as an external library</li>
</ol>
<a class="header" href="#building-your-cc-code" id="building-your-cc-code"><h2>Building your C/C++ code</h2></a>
<p>As the Rust compiler does not directly know how to compile C or C++ code (or code from any other language, which presents a C interface), it is necessary to compile your non-Rust code ahead of time.</p>
<p>For embedded projects, this most commonly means compiling the C/C++ code to a static archive (such as <code>cool-library.a</code>), which can then be combined with your Rust code at the final linking step.</p>
<p>If the library you would like to use is already distributed as a static archive, it is not necessary to rebuild your code. Just convert the provided interface header file as described above, and include the static archive at compile/link time.</p>
<p>If your code exists as a source project, it will be necessary to compile your C/C++ code to a static library, either by triggering your existing build system (such as <code>make</code>, <code>CMake</code>, etc.), or by porting the necessary compilation steps to use a tool called the <code>cc</code> crate. For both of these steps, it is necessary to use a <code>build.rs</code> script.</p>
<a class="header" href="#rust-buildrs-build-scripts" id="rust-buildrs-build-scripts"><h3>Rust <code>build.rs</code> build scripts</h3></a>
<p>A <code>build.rs</code> script is a file written in Rust syntax, that is executed on your compilation machine, AFTER dependencies of your project have been built, but BEFORE your project is built.</p>
<p>The full reference may be found <a href="https://doc.rust-lang.org/cargo/reference/build-scripts.html">here</a>. <code>build.rs</code> scripts are useful for generating code (such as via <a href="https://github.com/rust-lang-nursery/rust-bindgen">bindgen</a>), calling out to external build systems such as <code>Make</code>, or directly compiling C/C++ through use of the <code>cc</code> crate</p>
<a class="header" href="#triggering-external-build-systems" id="triggering-external-build-systems"><h3>Triggering external build systems</h3></a>
<p>For projects with complex external projects or build systems, it may be easiest to use <a href="https://doc.rust-lang.org/std/process/struct.Command.html"><code>std::process::Command</code></a> to &quot;shell out&quot; to your other build systems by traversing relative paths, calling a fixed command (such as <code>make library</code>), and then copying the resulting static library to the proper location in the <code>target</code> build directory.</p>
<p>While your crate may be targeting a <code>no_std</code> embedded platform, your <code>build.rs</code> executes only on machines compiling your crate. This means you may use any Rust crates which will run on your compilation host.</p>
<a class="header" href="#building-cc-code-with-the-cc-crate" id="building-cc-code-with-the-cc-crate"><h3>Building C/C++ code with the <code>cc</code> crate</h3></a>
<p>For projects with limited dependencies or complexity, or for projects where it is difficult to modify the build system to produce a static library (rather than a final binary or executable), it may be easier to instead utilize the <a href="https://github.com/alexcrichton/cc-rs"><code>cc</code> crate</a>, which provides an idiomatic Rust interface to the compiler provided by the host.</p>
<p>In the simplest case of compiling a single C file as a dependency to a static library, an example <code>build.rs</code> script using the <a href="https://github.com/alexcrichton/cc-rs"><code>cc</code> crate</a> would look like this:</p>
<pre><code class="language-rust ignore">extern crate cc;

fn main() {
    cc::Build::new()
        .file(&quot;foo.c&quot;)
        .compile(&quot;libfoo.a&quot;);
}
</code></pre>
<a class="header" href="#a-little-rust-with-your-c" id="a-little-rust-with-your-c"><h1>A little Rust with your C</h1></a>
<p>Using Rust code inside a C or C++ project mostly consists of two parts.</p>
<ul>
<li>Creating a C-friendly API in Rust</li>
<li>Embedding your Rust project into an external build system</li>
</ul>
<p>Apart from <code>cargo</code> and <code>meson</code>, most build systems don't have native Rust support.
So you're most likely best off just using <code>cargo</code> for compiling your crate and
any dependencies.</p>
<a class="header" href="#setting-up-a-project" id="setting-up-a-project"><h2>Setting up a project</h2></a>
<p>Create a new <code>cargo</code> project as usual.</p>
<p>There are flags to tell <code>cargo</code> to emit a systems library, instead of
its regular rust target.
This also allows you to set a different output name for your library,
if you want it to differ from the rest of your crate.</p>
<pre><code class="language-toml">[lib]
name = &quot;your_crate&quot;
crate-type = [&quot;cdylib&quot;]      # Creates dynamic lib
# crate-type = [&quot;staticlib&quot;] # Creates static lib
</code></pre>
<a class="header" href="#building-a-c-api" id="building-a-c-api"><h2>Building a <code>C</code> API</h2></a>
<p>Because C++ has no stable ABI for the Rust compiler to target, we use <code>C</code> for
any interoperability between different languages. This is no exception when using Rust
inside of C and C++ code.</p>
<a class="header" href="#no_mangle" id="no_mangle"><h3><code>#[no_mangle]</code></h3></a>
<p>The Rust compiler mangles symbol names differently than native code linkers expect.
As such, any function that Rust exports to be used outside of Rust needs to be told
not to be mangled by the compiler.</p>
<a class="header" href="#extern-c" id="extern-c"><h3><code>extern &quot;C&quot;</code></h3></a>
<p>By default, any function you write in Rust will use the
Rust ABI (which is also not stabilized).
Instead, when building outwards facing FFI APIs we need to
tell the compiler to use the system ABI.</p>
<p>Depending on your platform, you might want to target a specific ABI version, which are
documented <a href="https://doc.rust-lang.org/reference/items/external-blocks.html">here</a>.</p>
<hr />
<p>Putting these parts together, you get a function that looks roughly like this.</p>
<pre><code class="language-rust ignore">#[no_mangle]
pub extern &quot;C&quot; fn rust_function() {

}
</code></pre>
<p>Just as when using <code>C</code> code in your Rust project you now need to transform data
from and to a form that the rest of the application will understand.</p>
<a class="header" href="#linking-and-greater-project-context" id="linking-and-greater-project-context"><h2>Linking and greater project context</h2></a>
<p>So then, that's one half of the problem solved.
How do you use this now?</p>
<p><strong>This very much depends on your project and/or build system</strong></p>
<p><code>cargo</code> will create a <code>my_lib.so</code>/<code>my_lib.dll</code> or <code>my_lib.a</code> file,
depending on your platform and settings. This library can simply be linked
by your build system.</p>
<p>However, calling a Rust function from C requires a header file to declare
the function signatures.</p>
<p>Every function in your Rust FFI API needs a corresponding function declaration in the header file.</p>
<pre><code class="language-rust ignore">#[no_mangle]
pub extern &quot;C&quot; fn rust_function() {}
</code></pre>
<p>would then become</p>
<pre><code class="language-C">void rust_function();
</code></pre>
<p>etc.</p>
<p>There is a tool to automate this process,
called <a href="https://github.com/eqrion/cbindgen">cbindgen</a> which analyses your Rust code
and then generates headers for your C and C++ projects from it.</p>
<p>At this point, using the Rust functions from C
is as simple as including the header and calling them!</p>
<pre><code class="language-C">#include &quot;my-rust-project.h&quot;
rust_function();
</code></pre>
<a class="header" href="#unsorted-topics" id="unsorted-topics"><h1>Unsorted topics</h1></a>
<a class="header" href="#optimizations-the-speed-size-tradeoff" id="optimizations-the-speed-size-tradeoff"><h1>Optimizations: the speed size tradeoff</h1></a>
<p>Everyone wants their program to be super fast and super small but it's usually
not possible to have both characteristics. This section discusses the
different optimization levels that <code>rustc</code> provides and how they affect the
execution time and binary size of a program.</p>
<a class="header" href="#no-optimizations" id="no-optimizations"><h2>No optimizations</h2></a>
<p>This is the default. When you call <code>cargo build</code> you use the development (AKA
<code>dev</code>) profile. This profile is optimized for debugging so it enables debug
information and does <em>not</em> enable any optimizations, i.e. it uses <code>-C opt-level = 0</code>.</p>
<p>At least for bare metal development, debuginfo is zero cost in the sense that it
won't occupy space in Flash / ROM so we actually recommend that you enable
debuginfo in the release profile -- it is disabled by default. That will let you
use breakpoints when debugging release builds.</p>
<pre><code class="language-toml">[profile.release]
# symbols are nice and they don't increase the size on Flash
debug = true
</code></pre>
<p>No optimizations is great for debugging because stepping through the code feels
like you are executing the program statement by statement, plus you can <code>print</code>
stack variables and function arguments in GDB. When the code is optimized, trying
to print variables results in <code>$0 = &lt;value optimized out&gt;</code> being printed.</p>
<p>The biggest downside of the <code>dev</code> profile is that the resulting binary will be
huge and slow. The size is usually more of a problem because unoptimized
binaries can occupy dozens of KiB of Flash, which your target device may not
have -- the result: your unoptimized binary doesn't fit in your device!</p>
<p>Can we have smaller, debugger friendly binaries? Yes, there's a trick.</p>
<a class="header" href="#optimizing-dependencies" id="optimizing-dependencies"><h3>Optimizing dependencies</h3></a>
<blockquote>
<p><strong>WARNING</strong> This section uses an unstable feature and it was last tested on
2018-09-18. Things may have changed since then!</p>
</blockquote>
<p>On nightly, there's a Cargo feature named <a href="https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#profile-overrides"><code>profile-overrides</code></a> that lets you
override the optimization level of dependencies. You can use that feature to
optimize all dependencies for size while keeping the top crate unoptimized and
debugger friendly.</p>
<p>Here's an example:</p>
<pre><code class="language-toml"># Cargo.toml
cargo-features = [&quot;profile-overrides&quot;] # +

[package]
name = &quot;app&quot;
# ..

[profile.dev.overrides.&quot;*&quot;] # +
opt-level = &quot;z&quot; # +
</code></pre>
<p>Without the override:</p>
<pre><code class="language-console">$ cargo size --bin app -- -A
app  :
section               size        addr
.vector_table         1024   0x8000000
.text                 9060   0x8000400
.rodata               1708   0x8002780
.data                    0  0x20000000
.bss                     4  0x20000000
</code></pre>
<p>With the override:</p>
<pre><code class="language-console">$ cargo size --bin app -- -A
app  :
section               size        addr
.vector_table         1024   0x8000000
.text                 3490   0x8000400
.rodata               1100   0x80011c0
.data                    0  0x20000000
.bss                     4  0x20000000
</code></pre>
<p>That's a 6 KiB reduction in Flash usage without any loss in the debuggability of
the top crate. If you step into a dependency then you'll start seeing those
<code>&lt;value optimized out&gt;</code> messages again but it's usually the case that you want
to debug the top crate and not the dependencies. And if you <em>do</em> need to debug a
dependency then you can use the <code>profile-overrides</code> feature to exclude a
particular dependency from being optimized. See example below:</p>
<pre><code class="language-toml"># ..

# don't optimize the `cortex-m-rt` crate
[profile.dev.overrides.cortex-m-rt] # +
opt-level = 0 # +

# but do optimize all the other dependencies
[profile.dev.overrides.&quot;*&quot;]
codegen-units = 1 # better optimizations
opt-level = &quot;z&quot;
</code></pre>
<p>Now the top crate and <code>cortex-m-rt</code> are debugger friendly!</p>
<a class="header" href="#optimize-for-speed" id="optimize-for-speed"><h2>Optimize for speed</h2></a>
<p>As of 2018-09-18 <code>rustc</code> supports three &quot;optimize for speed&quot; levels: <code>opt-level = 1</code>, <code>2</code> and <code>3</code>. When you run <code>cargo build --release</code> you are using the release
profile which defaults to <code>opt-level = 3</code>.</p>
<p>Both <code>opt-level = 2</code> and <code>3</code> optimize for speed at the expense of binary size,
but level <code>3</code> does more vectorization and inlining than level <code>2</code>. In
particular, you'll see that at <code>opt-level</code> equal to or greater than <code>2</code> LLVM will
unroll loops. Loop unrolling has a rather high cost in terms of Flash / ROM
(e.g. from 26 bytes to 194 for a loop that zeroes an array) but can also halve the
execution time given the right conditions (e.g. number of iterations is big
enough).</p>
<p>Currently there's no way to disable loop unrolling in <code>opt-level = 2</code> and <code>3</code> so
if you can't afford its cost you should optimize your program for size.</p>
<a class="header" href="#optimize-for-size" id="optimize-for-size"><h2>Optimize for size</h2></a>
<p>As of 2018-09-18 <code>rustc</code> supports two &quot;optimize for size&quot; levels: <code>opt-level = &quot;s&quot;</code> and <code>&quot;z&quot;</code>. These names were inherited from clang / LLVM and are not too
descriptive but <code>&quot;z&quot;</code> is meant to give the idea that it produces smaller
binaries than <code>&quot;s&quot;</code>.</p>
<p>If you want your release binaries to be optimized for size then change the
<code>profile.release.opt-level</code> setting in <code>Cargo.toml</code> as shown below.</p>
<pre><code class="language-toml">[profile.release]
# or &quot;z&quot;
opt-level = &quot;s&quot;
</code></pre>
<p>These two optimization levels greatly reduce LLVM's inline threshold, a metric
used to decide whether to inline a function or not. One of Rust's principles is
zero cost abstractions; these abstractions tend to use a lot of newtypes and
small functions to hold invariants (e.g. functions that borrow an inner value
like <code>deref</code>, <code>as_ref</code>) so a low inline threshold can make LLVM miss
optimization opportunities (e.g. eliminate dead branches, inline calls to
closures).</p>
<p>When optimizing for size you may want to try increasing the inline threshold to
see if that has any effect on the binary size. The recommended way to change the
inline threshold is to append the <code>-C inline-threshold</code> flag to the other
rustflags in <code>.cargo/config</code>.</p>
<pre><code class="language-toml"># .cargo/config
# this assumes that you are using the cortex-m-quickstart template
[target.'cfg(all(target_arch = &quot;arm&quot;, target_os = &quot;none&quot;))']
rustflags = [
  # ..
  &quot;-C&quot;, &quot;inline-threshold=123&quot;, # +
]
</code></pre>
<p>What value to use? <a href="https://github.com/rust-lang/rust/blob/1.29.0/src/librustc_codegen_llvm/back/write.rs#L2105-L2122">As of 1.29.0 these are the inline thresholds that the
different optimization levels use</a>:</p>
<ul>
<li><code>opt-level = 3</code> uses 275</li>
<li><code>opt-level = 2</code> uses 225</li>
<li><code>opt-level = &quot;s&quot;</code> uses 75</li>
<li><code>opt-level = &quot;z&quot;</code> uses 25</li>
</ul>
<p>You should try <code>225</code> and <code>275</code> when optimizing for size.</p>

                    </main>

                    <nav class="nav-wrapper" aria-label="Page navigation">
                        <!-- Mobile navigation buttons -->
                        

                        

                        <div style="clear: both"></div>
                    </nav>
                </div>
            </div>

            <nav class="nav-wide-wrapper" aria-label="Page navigation">
                

                
            </nav>

        </div>

        

        

        

        
        <script src="elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="mark.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="searcher.js" type="text/javascript" charset="utf-8"></script>
        

        <script src="clipboard.min.js" type="text/javascript" charset="utf-8"></script>
        <script src="highlight.js" type="text/javascript" charset="utf-8"></script>
        <script src="book.js" type="text/javascript" charset="utf-8"></script>

        <!-- Custom JS scripts -->
        

        
        
        <script>
        // This is the single-page "print" rendering of the book: once every
        // chapter has finished loading, wait briefly for layout to settle,
        // then open the browser's native print dialog automatically.
        // (The `type` attribute is omitted: HTML5 defaults to JavaScript.)
        window.addEventListener('load', function() {
            window.setTimeout(window.print, 100);
        });
        </script>
        
        

    </body>
</html>