acid/firmware/acid-firmware/src/ui/dpi.rs

1166 lines
42 KiB
Rust
Raw Normal View History

2026-02-15 02:33:42 +01:00
use core::{
alloc::Layout,
cell::UnsafeCell,
marker::PhantomData,
ops::{Deref, DerefMut},
2026-02-15 02:33:42 +01:00
pin::Pin,
2026-02-15 18:17:16 +01:00
sync::atomic::{self, AtomicBool, AtomicU32, AtomicUsize},
2026-02-15 02:33:42 +01:00
};
2026-02-14 20:03:32 +01:00
use alloc::{
alloc::{Allocator, Global},
boxed::Box,
sync::Arc,
2026-02-14 20:03:32 +01:00
vec,
};
use bytemuck::{AnyBitPattern, NoUninit};
2026-02-15 18:17:16 +01:00
use embassy_sync::{
channel::{Channel, TrySendError},
signal::Signal,
};
2026-02-18 05:03:05 +01:00
use embassy_time::{Duration, Instant, Timer, WithTimeout};
2026-02-14 20:03:32 +01:00
use esp_alloc::MemoryCapability;
use esp_hal::{
Blocking,
dma::{
self, AnyGdmaChannel, BufView, BurstConfig, DmaChannel, DmaChannelConvert, DmaDescriptor,
DmaDescriptorFlags, DmaEligible, DmaRxStreamBuf, DmaTxBuf, DmaTxBuffer, DmaTxInterrupt,
ExternalBurstConfig, InternalBurstConfig, Mem2Mem, SimpleMem2Mem, SimpleMem2MemTransfer,
2026-02-14 20:03:32 +01:00
},
2026-02-15 02:33:42 +01:00
dma_descriptors, handler,
interrupt::{self, Priority},
lcd_cam::lcd::dpi::{Dpi, DpiTransfer},
2026-02-18 05:03:05 +01:00
peripherals::{DMA, DMA_CH0, Interrupt, Peripherals, SPI2},
2026-02-14 20:03:32 +01:00
ram,
spi::master::AnySpi,
};
use esp_sync::RawMutex;
use i_slint_core::software_renderer::{Rgb565Pixel, TargetPixel};
2026-02-14 20:03:32 +01:00
use indoc::{formatdoc, indoc};
2026-02-15 02:33:42 +01:00
use log::{error, info, warn};
use ouroboros::self_referencing;
2026-02-15 18:17:16 +01:00
use rmk::{
futures::{self, FutureExt, pin_mut},
2026-02-15 18:17:16 +01:00
join_all,
};
2026-02-14 20:03:32 +01:00
2026-02-22 00:59:01 +01:00
use crate::{PSRAM_ALLOCATOR, peripherals::st7701s::St7701s, util::DurationExt};
2026-02-14 20:03:32 +01:00
2026-02-15 02:33:42 +01:00
/// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs
/// Write back a specific range of data in the cache.
///
/// # Safety
/// Calls ESP32-S3 ROM cache routines; `addr` and `size` must describe a valid,
/// cache-backed address range (see the esp-hal source this is copied from).
#[doc(hidden)]
#[unsafe(link_section = ".rwtext")]
pub unsafe fn cache_writeback_addr(addr: u32, size: u32) {
    unsafe extern "C" {
        fn rom_Cache_WriteBack_Addr(addr: u32, size: u32);
        fn Cache_Suspend_DCache_Autoload() -> u32;
        fn Cache_Resume_DCache_Autoload(value: u32);
    }
    // suspend autoload, avoid load cachelines being written back
    unsafe {
        let autoload = Cache_Suspend_DCache_Autoload();
        rom_Cache_WriteBack_Addr(addr, size);
        Cache_Resume_DCache_Autoload(autoload);
    }
}
/// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs
/// Invalidate a specific range of addresses in the cache.
///
/// # Safety
/// Calls an ESP32-S3 ROM cache routine; `addr` and `size` must describe a
/// valid, cache-backed address range (see the esp-hal source this is copied from).
#[doc(hidden)]
#[unsafe(link_section = ".rwtext")]
pub unsafe fn cache_invalidate_addr(addr: u32, size: u32) {
    unsafe extern "C" {
        fn Cache_Invalidate_Addr(addr: u32, size: u32);
    }
    unsafe {
        Cache_Invalidate_Addr(addr, size);
    }
}
2026-02-18 05:03:05 +01:00
// const DMA_CHANNEL_INBOUND: usize = 0;
// const INTERRUPT_INBOUND: Interrupt = Interrupt::DMA_IN_CH0;
// Register-bank index and interrupt of the DMA channel that scans the bounce
// buffers out to the DPI peripheral.
// NOTE(review): this is channel 2, while the mem-to-mem refill path below
// uses `DMA_CH0`; confirm against the setup code (outside this view) that the
// DPI driver is indeed configured on channel 2.
const DMA_CHANNEL_OUTBOUND: usize = 2;
const INTERRUPT_OUTBOUND: Interrupt = Interrupt::DMA_OUT_CH2;
/// Self-referential pairing (via `ouroboros`) of a mem-to-mem DMA driver and
/// the in-flight transfer that mutably borrows it, so both can be stored and
/// moved together while a bounce-buffer refill is outstanding.
#[self_referencing]
struct ReceivingTransfer {
    mem2mem: SimpleMem2Mem<'static, Blocking>,
    // `None` once the transfer has been taken out and finalized.
    #[borrows(mut mem2mem)]
    #[covariant]
    transfer: Option<SimpleMem2MemTransfer<'this, 'static, Blocking>>,
}
/// A double-buffered pair of framebuffers; split into a read half and a write
/// half with [`Swapchain::into_reader_writer`].
pub struct Swapchain {
    // The two backing buffers; equal length is asserted when splitting.
    pub framebuffers: [&'static mut [u8]; 2],
}
impl Swapchain {
    /// Splits the swapchain into a read half and a write half that share a
    /// single atomic flag. The reader always dereferences the buffer the flag
    /// selects, while the writer (whose buffer array is stored in the opposite
    /// order) targets the other one; toggling the flag swaps the roles.
    pub fn into_reader_writer(self) -> (SwapchainReader, SwapchainWriter) {
        let [fb_a, fb_b] = self.framebuffers;
        assert_eq!(
            fb_a.len(),
            fb_b.len(),
            "framebuffers in a swapchain must have an equal length"
        );
        // Start with the flag set: the reader begins on buffer index 1 and the
        // writer on buffer index 0.
        let reader_index = Arc::new(AtomicBool::new(true));
        let reader = SwapchainReader {
            framebuffers_rw: [fb_a as *const [u8], fb_b as *const [u8]],
            reader_index: Arc::clone(&reader_index),
        };
        let writer = SwapchainWriter {
            framebuffers_wr: [fb_b as *mut [u8], fb_a as *mut [u8]],
            reader_index,
        };
        (reader, writer)
    }
}
// TODO: Don't need to store the framebuffer length twice. Use `*const u8` instead, and store length separately.
/// Read half of a [`Swapchain`]: exposes the framebuffer most recently
/// released by the writer.
pub struct SwapchainReader {
    /// These are in the opposite order to `SwapchainWriter`'s framebuffers.
    framebuffers_rw: [*const [u8]; 2],
    // `true` selects index 1; toggled by `SwapchainWriteGuard::drop`.
    reader_index: Arc<AtomicBool>,
}
// SAFETY(review): the raw pointers target leaked `'static` buffers, so they
// stay valid; soundness of cross-thread use relies on reader and writer never
// touching the same buffer concurrently (the `reader_index` protocol) -- confirm.
unsafe impl Send for SwapchainReader {}
impl SwapchainReader {
    /// Length in bytes of one framebuffer (both are equal-sized by construction).
    fn len(&self) -> usize {
        let [first, _] = self.framebuffers_rw;
        first.len()
    }
    /// Reads the shared flag and widens it to a framebuffer index (0 or 1).
    fn load_read_index(&self) -> usize {
        usize::from(self.reader_index.load(atomic::Ordering::SeqCst))
    }
    /// Borrows the framebuffer most recently published by the writer.
    fn get_latest_framebuffer(&self) -> &[u8] {
        let index = self.load_read_index();
        // SAFETY(review): the pointee is a leaked 'static buffer, and the
        // writer targets the other buffer while the flag is unchanged -- confirm.
        unsafe { &*self.framebuffers_rw[index] }
    }
}
// TODO: Don't need to store the framebuffer length twice. Use `*mut u8` instead, and store length separately.
/// Write half of a [`Swapchain`]: hands out guards over the back buffer and
/// publishes it when the guard is dropped.
pub struct SwapchainWriter {
    /// These are in the opposite order to `SwapchainReader`'s framebuffers.
    framebuffers_wr: [*mut [u8]; 2],
    // Shared with the reader; `true` selects reader index 1 / writer buffer 0.
    reader_index: Arc<AtomicBool>,
}
// SAFETY(review): see `SwapchainReader` -- pointers are to leaked 'static
// buffers; exclusive access is coordinated via `reader_index` -- confirm.
unsafe impl Send for SwapchainWriter {}
impl SwapchainWriter {
    /// Length in bytes of one framebuffer (both are equal-sized by construction).
    fn len(&self) -> usize {
        let [first, _] = self.framebuffers_wr;
        first.len()
    }
    /// Starts writing into the buffer the reader is NOT currently viewing.
    /// Dropping the returned guard publishes the buffer by toggling the flag.
    pub fn write(&mut self) -> SwapchainWriteGuard<'_> {
        let index = usize::from(self.reader_index.load(atomic::Ordering::SeqCst));
        let framebuffer_ptr = self.framebuffers_wr[index];
        // SAFETY(review): because the writer array is stored in the opposite
        // order, this pointer is to the buffer the reader is not using -- confirm.
        let framebuffer = unsafe { &mut *framebuffer_ptr };
        SwapchainWriteGuard {
            framebuffer,
            reader_index: &self.reader_index,
        }
    }
}
/// RAII guard for writing into the back framebuffer; publishing happens on
/// drop by flipping the shared `reader_index` flag.
pub struct SwapchainWriteGuard<'a> {
    framebuffer: &'a mut [u8],
    reader_index: &'a AtomicBool,
}
impl<'a> SwapchainWriteGuard<'a> {
    /// Reinterprets the framebuffer bytes as a mutable slice of `T`
    /// (e.g. `Rgb565Pixel`); panics inside `bytemuck` if the length or
    /// alignment does not divide evenly.
    pub fn cast<T: NoUninit + AnyBitPattern>(&mut self) -> &mut [T] {
        let bytes: &mut [u8] = &mut *self.framebuffer;
        bytemuck::cast_slice_mut(bytes)
    }
}
impl Drop for SwapchainWriteGuard<'_> {
    // Publish the newly written framebuffer: atomically toggling the flag
    // points the reader at this buffer and the writer at the other one.
    fn drop(&mut self) {
        self.reader_index.fetch_xor(true, atomic::Ordering::SeqCst);
    }
}
impl<'a> Deref for SwapchainWriteGuard<'a> {
    type Target = [u8];
    /// Shared access to the underlying framebuffer bytes.
    fn deref(&self) -> &Self::Target {
        &*self.framebuffer
    }
}
impl<'a> DerefMut for SwapchainWriteGuard<'a> {
    /// Exclusive access to the underlying framebuffer bytes.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.framebuffer
    }
}
2026-02-15 02:33:42 +01:00
/// Streams framebuffer data from the swapchain (typically in external PSRAM)
/// to the DPI peripheral through two small bounce buffers: a mem-to-mem DMA
/// refills one buffer while the outbound DMA scans the other out.
pub struct DmaBounce {
    // TODO: Make these generic.
    // They currently cannot be generic, because they lack a `reborrow` method.
    channel: DMA_CH0<'static>,
    // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`.
    peripheral_src: AnySpi<'static>,
    // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`.
    peripheral_dst: Option<Dpi<'static, Blocking>>,
    // TODO: Combine with peripheral_dst using an enum?
    transfer_dst: Option<DpiTransfer<'static, DmaTxBounceBuf, Blocking>>,
    // TODO: Consider having a separate burst config for the two transfers.
    burst_config: BurstConfig,
    // Whether the outbound descriptor chain is linked back onto itself.
    cyclic: bool,
    /// The size of each window.
    window_size: usize,
    /// The number of windows.
    windows_len: usize,
    // Read half of the swapchain the pixel data is pulled from.
    swapchain_src: SwapchainReader,
    // Two buffers of size `window_size`,
    // one of which is being written to, while the other is being read from.
    bounce_buffer_dst: &'static mut [u8],
    bounce_buffer_src: &'static mut [u8],
    // A descriptor list that spans a buffer of size `window_size`.
    // The buffer pointers need to be updated before each transmission to point to the correct window in the source buffer `src_buffer`.
    src_descs: &'static mut [DmaDescriptor],
    // A descriptor list that spans a buffer of size `window_size`.
    // The buffer pointers need to be updated before each transmission to point to the correct bounce buffer.
    bounce_dst_descs: &'static mut [DmaDescriptor],
    // A cyclic descriptor list that spans the buffers `bounce_buffer_dst` and `bounce_buffer_src`.
    bounce_src_descs: &'static mut [DmaDescriptor],
    // Number of descriptors that make up one window within `bounce_src_descs`.
    descriptors_per_window: usize,
    // The index of the next window about to be received into the destination bounce buffer.
    window_index_next: usize,
    // The index of the frame that the next window belongs to.
    frame_index_next: usize,
    // In-flight mem-to-mem refill started by `receive_window_start`, if any.
    receiving_transfer: Option<ReceivingTransfer>,
}
2026-02-15 02:33:42 +01:00
impl DmaBounce {
    /// Creates the bounce pipeline and builds all DMA descriptor chains up front.
    ///
    /// * `allocator` - The allocator used to allocate the bounce buffers.
    /// * `channel` - The DMA channel used to transfer data from the source buffer to the bounce buffers.
    /// * `peripheral_src` - The peripheral to transfer data from the source buffer to the bounce buffers.
    /// * `peripheral_dst` - The peripheral to transfer data to, from the bounce buffers.
    /// * `swapchain_src` - The source swapchain, typically allocated in external memory.
    /// * `row_front_porch_bytes` - The number of arbitrary-valued bytes to be sent in front of each row to the destination peripheral.
    /// * `row_width_bytes` - The width of a row, in bytes.
    /// * `window_size_rows` - The size of a single bounce buffer, in rows.
    /// * `burst_config` - The burst config to use for memory transfers (both in and out). TODO: This could be split.
    /// * `cyclic` - Experimental! Whether to use a cyclic descriptor list for transfer from the bounce buffers to the destination peripheral.
    pub fn new(
        allocator: impl Allocator + Copy + 'static,
        channel: DMA_CH0<'static>,
        peripheral_src: AnySpi<'static>,
        peripheral_dst: Dpi<'static, Blocking>,
        swapchain_src: SwapchainReader,
        row_front_porch_bytes: usize,
        row_width_bytes: usize,
        window_size_rows: usize,
        burst_config: BurstConfig,
        cyclic: bool,
    ) -> Self {
        // A "window" is the granularity at which data is bounced: one bounce
        // buffer holds exactly one window of `window_size_rows` rows.
        let window_size = row_width_bytes * window_size_rows;
        assert_eq!(
            swapchain_src.len() % window_size,
            0,
            "the size of a source buffer must be a multiple of the window size ({window_size} bytes), but it is {len} bytes large",
            len = swapchain_src.len()
        );
        // Conservative alignment. Maximum of the cartesian product of [tx, rx] × [internal, external].
        let alignment = burst_config.min_compatible_alignment();
        for &swapchain_ptr in &swapchain_src.framebuffers_rw {
            assert_eq!(
                unsafe { &*swapchain_ptr }.as_ptr() as usize % alignment,
                0,
                "the source buffer must be sufficiently aligned to {alignment} bytes for the burst config",
            );
        }
        assert_eq!(
            row_width_bytes % alignment,
            0,
            "the size of a row in bytes must be sufficiently aligned to {alignment} bytes for the burst config",
        );
        assert_eq!(
            row_front_porch_bytes % alignment,
            0,
            "the size of a row's front porch in bytes must be sufficiently aligned to {alignment} bytes for the burst config",
        );
        // We need to make the destination peripheral read the front porch data from somewhere,
        // and that somewhere is currently the bounce buffer.
        // Therefore the front porch must be in bounds.
        assert!(
            row_front_porch_bytes <= window_size,
            "front porch too large"
        );
        let windows_len = swapchain_src.len() / window_size;
        // TODO: Figure out a way to avoid `leak`ing memory.
        // We probably want to store the `Box`es and then unsafely extend the lifetime at sites of usage.
        let bounce_buffer_dst =
            Box::leak(allocate_dma_buffer_in(window_size, burst_config, allocator));
        let bounce_buffer_src =
            Box::leak(allocate_dma_buffer_in(window_size, burst_config, allocator));
        let src_descs = Self::linear_descriptors_for_buffer(window_size, burst_config, |desc| {
            desc.reset_for_tx(desc.next.is_null());
            // Length for TX buffers must be set in software.
            // In RX buffers, it is set by hardware.
            desc.set_length(desc.size());
        });
        let bounce_dst_descs =
            Self::linear_descriptors_for_buffer(window_size, burst_config, |_| {});
        let (bounce_src_descs, descriptors_per_window) = Self::bounce_descriptors_for_buffer(
            windows_len,
            row_front_porch_bytes,
            row_width_bytes,
            window_size_rows,
            // SAFETY(review): this creates aliasing `&'static mut` views of the
            // leaked bounce buffers (the descriptor builder only records raw
            // buffer pointers from them); confirm no simultaneous use of the
            // aliased references.
            unsafe {
                (
                    &mut *(bounce_buffer_dst as *mut _),
                    &mut *(bounce_buffer_src as *mut _),
                )
            },
            burst_config,
            cyclic,
        );
        Self {
            channel,
            peripheral_src,
            peripheral_dst: Some(peripheral_dst),
            transfer_dst: None,
            burst_config,
            cyclic,
            window_size,
            windows_len,
            swapchain_src,
            bounce_buffer_dst,
            bounce_buffer_src,
            src_descs,
            bounce_dst_descs,
            bounce_src_descs,
            descriptors_per_window,
            window_index_next: 0,
            frame_index_next: 0,
            receiving_transfer: None,
        }
    }
    /// Allocates and links a non-cyclic descriptor chain covering `buffer_len`
    /// bytes, chunked by the burst config's maximum chunk size. `setup_desc`
    /// runs once per descriptor for flag/length initialization. The chain is
    /// leaked, so the returned slice is `'static`.
    fn linear_descriptors_for_buffer(
        buffer_len: usize,
        burst_config: BurstConfig,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) -> &'static mut [DmaDescriptor] {
        let max_chunk_size = burst_config.max_compatible_chunk_size();
        let descriptors_len = dma::descriptor_count(buffer_len, max_chunk_size, false);
        // TODO: This leaks memory. Ensure it's only called during setup.
        let descriptors = Box::leak(vec![DmaDescriptor::EMPTY; descriptors_len].into_boxed_slice());
        // Link up the descriptors.
        let mut next = core::ptr::null_mut();
        for desc in descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }
        // Prepare each descriptor's buffer size.
        let mut descriptors_it = descriptors.iter_mut();
        let mut remaining_len = buffer_len;
        while remaining_len > 0 {
            let chunk_size = core::cmp::min(max_chunk_size, remaining_len);
            let desc = descriptors_it.next().unwrap();
            desc.set_size(chunk_size);
            (setup_desc)(desc);
            remaining_len -= chunk_size;
        }
        descriptors
    }
    /// Points one window's worth of outbound descriptors at a bounce buffer.
    ///
    /// Each row is covered by `descriptors_per_row_front_porch` descriptors
    /// that send `row_front_porch_bytes` of arbitrary data (re-reading the
    /// start of the bounce buffer), followed by descriptors covering the row's
    /// stored pixels. The last descriptor of the window gets its EOF flag set
    /// so the interrupt handler learns when the bounce buffer has been read.
    fn prepare_descriptors_window(
        bounce_buffer: &mut [u8],
        descriptors_window: &mut [DmaDescriptor],
        row_front_porch_bytes: usize,
        row_width_bytes: usize,
        window_size_rows: usize,
        max_chunk_size: usize,
        descriptors_per_row: usize,
        descriptors_per_row_front_porch: usize,
    ) {
        for (row_index_in_window, descriptors_row) in descriptors_window
            .chunks_mut(descriptors_per_row)
            .enumerate()
        {
            // let row_index = row_index_in_window + window_index * window_size_rows;
            let (descriptors_row_front_porch, descriptors_row_stored) =
                descriptors_row.split_at_mut(descriptors_per_row_front_porch);
            // Prepare front porch descriptors.
            {
                let mut descriptors_it = descriptors_row_front_porch.iter_mut();
                let mut remaining_front_porch = row_front_porch_bytes;
                while remaining_front_porch > 0 {
                    let desc = descriptors_it.next().unwrap();
                    let chunk_size = core::cmp::min(max_chunk_size, remaining_front_porch);
                    remaining_front_porch -= chunk_size;
                    // Just make it point at a bounce buffer.
                    // It is guaranteed to have enough bytes by `DmaBounce::new`.
                    desc.buffer = bounce_buffer.as_mut_ptr();
                    desc.set_size(chunk_size);
                    desc.set_length(chunk_size);
                    desc.reset_for_tx(false);
                }
                assert!(
                    descriptors_it.next().is_none(),
                    "front porch descriptors must be used up"
                );
                assert_eq!(
                    descriptors_row_front_porch
                        .iter()
                        .map(|desc| desc.size())
                        .sum::<usize>(),
                    row_front_porch_bytes
                );
            }
            // Prepare window descriptors.
            {
                let mut remaining_bounce_buffer =
                    &mut bounce_buffer[row_index_in_window * row_width_bytes..][..row_width_bytes];
                // if remaining_bounce_buffer.len() > row_width_bytes {
                //     remaining_bounce_buffer = &mut remaining_bounce_buffer[..row_width_bytes];
                // }
                for desc in &mut *descriptors_row_stored {
                    let chunk_size = core::cmp::min(max_chunk_size, remaining_bounce_buffer.len());
                    desc.buffer = remaining_bounce_buffer.as_mut_ptr();
                    remaining_bounce_buffer = &mut remaining_bounce_buffer[chunk_size..];
                    desc.set_size(chunk_size);
                    desc.set_length(chunk_size);
                    desc.reset_for_tx(false);
                }
                assert!(
                    remaining_bounce_buffer.is_empty(),
                    "bounce buffer must be used up"
                );
                assert_eq!(
                    descriptors_row_stored
                        .iter()
                        .map(|desc| desc.size())
                        .sum::<usize>(),
                    row_width_bytes
                );
            }
        }
        // Set EOF bit on the last descriptor of the window, to signal
        // that the bounce buffer is done being read from.
        if let Some(last_desc) = descriptors_window.last_mut() {
            last_desc.reset_for_tx(true);
        }
        assert_eq!(
            descriptors_window
                .iter()
                .map(|desc| desc.size())
                .sum::<usize>(),
            window_size_rows * (row_front_porch_bytes + row_width_bytes)
        );
    }
    /// Builds the full outbound descriptor chain for one frame.
    ///
    /// The chain alternates between the two bounce buffers window by window and
    /// is optionally linked back onto itself (`cyclic`). Returns the (leaked)
    /// chain and the number of descriptors that make up a single window.
    fn bounce_descriptors_for_buffer(
        windows_len: usize,
        row_front_porch_bytes: usize,
        row_width_bytes: usize,
        window_size_rows: usize,
        bounce_buffers: (&'static mut [u8], &'static mut [u8]),
        burst_config: BurstConfig,
        cyclic: bool,
    ) -> (&'static mut [DmaDescriptor], usize) {
        assert_eq!(
            bounce_buffers.0.len(),
            bounce_buffers.1.len(),
            "bounce buffers must be equal in size"
        );
        // If an odd number of windows were needed, two descriptor lists would be needed,
        // because the buffer alternation would not line up frame over frame.
        assert_eq!(windows_len % 2, 0, "the number of windows must be even");
        let buffer_len = bounce_buffers.0.len();
        assert_eq!(
            buffer_len,
            row_width_bytes * window_size_rows,
            "the provided bounce buffers have an invalid size"
        );
        // Implementation note:
        // A cyclic descriptor could consist of just a set of descriptors per window,
        // so two sets in total, because there are two bounce buffers.
        // However, we can also access the pointer of the EOF descriptor within the
        // EOF interrupt handler, which lets us compute which descriptor generated that
        // interrupt.
        // This is useful in the case when an interrupt is missed. Then the number of interrupts
        // handled doesn't correspond to the number of windows sent to the destination peripheral.
        // In that case, the number of windows sent can be computed from the address of the descriptor.
        let max_chunk_size = burst_config.max_compatible_chunk_size();
        let descriptors_per_row_front_porch =
            dma::descriptor_count(row_front_porch_bytes, max_chunk_size, false);
        let descriptors_per_row_stored =
            dma::descriptor_count(row_width_bytes, max_chunk_size, false);
        let descriptors_per_row = descriptors_per_row_stored + descriptors_per_row_front_porch;
        let descriptors_per_window = window_size_rows * descriptors_per_row;
        let descriptors_per_frame = descriptors_per_window * windows_len;
        let descriptors_frame =
            Box::leak(vec![DmaDescriptor::EMPTY; descriptors_per_frame].into_boxed_slice());
        // NOTE(review): currently unused in this function.
        let descriptors_frame_ptr = descriptors_frame.as_ptr();
        // Link up the descriptors.
        // For a cyclic chain, the last descriptor points back at the first.
        let mut next = if cyclic {
            descriptors_frame.first_mut().unwrap() as *mut _
        } else {
            core::ptr::null_mut()
        };
        for desc in descriptors_frame.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }
        // Prepare each descriptor's buffer size.
        let bounce_buffers = [bounce_buffers.0, bounce_buffers.1];
        for (window_index, descriptors_window) in descriptors_frame
            .chunks_mut(descriptors_per_window)
            .enumerate()
        {
            // Even windows stream out of the first bounce buffer, odd windows
            // out of the second.
            let bounce_buffer_index = window_index % 2;
            let bounce_buffer = &mut *bounce_buffers[bounce_buffer_index];
            Self::prepare_descriptors_window(
                bounce_buffer,
                descriptors_window,
                row_front_porch_bytes,
                row_width_bytes,
                window_size_rows,
                max_chunk_size,
                descriptors_per_row,
                descriptors_per_row_front_porch,
            );
        }
        assert_eq!(
            descriptors_frame
                .iter()
                .map(|desc| desc.size())
                .sum::<usize>(),
            windows_len * window_size_rows * (row_front_porch_bytes + row_width_bytes)
        );
        (descriptors_frame, descriptors_per_window)
    }
    /// Re-points an existing descriptor chain at successive chunks of `buffer`
    /// (when `Some`), then runs `setup_desc` on each descriptor.
    fn linear_descriptors_prepare(
        descriptors: &mut [DmaDescriptor],
        mut buffer: Option<&[u8]>,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) {
        for descriptor in descriptors.iter_mut() {
            if let Some(inner_buffer) = buffer {
                // The descriptor stores a mutable pointer even for read-only use.
                descriptor.buffer = inner_buffer.as_ptr() as *mut u8;
                buffer = Some(&inner_buffer[descriptor.size()..]);
            }
            (setup_desc)(descriptor);
        }
        if let Some(buffer) = buffer {
            assert!(
                buffer.is_empty(),
                "a buffer of an incompatible length was assigned to a descriptor set"
            );
        }
    }
    /// Mutable-buffer variant of [`Self::linear_descriptors_prepare`],
    /// used for the RX (bounce-destination) chain.
    fn linear_descriptors_prepare_mut(
        descriptors: &mut [DmaDescriptor],
        mut buffer: Option<&mut [u8]>,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) {
        for descriptor in descriptors.iter_mut() {
            if let Some(inner_buffer) = buffer {
                descriptor.buffer = inner_buffer.as_mut_ptr();
                buffer = Some(&mut inner_buffer[descriptor.size()..]);
            }
            (setup_desc)(descriptor);
        }
        if let Some(buffer) = buffer {
            assert!(
                buffer.is_empty(),
                "a buffer of an incompatible length was assigned to a descriptor set"
            );
        }
    }
    /// Enables, binds, and unmasks the outbound (`out_eof`) DMA interrupt.
    fn enable_interrupts() {
        // Enable interrupts for the peripheral
        // interrupt::enable(INTERRUPT_INBOUND, dma_inbound_interrupt_handler.priority()).unwrap();
        interrupt::enable(
            INTERRUPT_OUTBOUND,
            dma_outbound_interrupt_handler.priority(),
        )
        .unwrap();
        // Bind the handler
        unsafe {
            // interrupt::bind_interrupt(INTERRUPT_INBOUND, dma_inbound_interrupt_handler.handler());
            interrupt::bind_interrupt(INTERRUPT_OUTBOUND, dma_outbound_interrupt_handler.handler());
        }
        // Enable interrupts in the peripheral.
        // DMA::regs()
        //     .ch(DMA_CHANNEL_INBOUND)
        //     .in_int()
        //     .ena()
        //     .modify(|_, w| w.in_done().bit(true));
        DMA::regs()
            .ch(DMA_CHANNEL_OUTBOUND)
            .out_int()
            .ena()
            .modify(|_, w| w.out_eof().bit(true));
    }
    /// Receive a window of bytes into the current dst bounce buffer.
    ///
    /// Non-blocking variant used by the interrupt handler: the returned
    /// [`ReceivingTransfer`] owns the in-flight mem-to-mem transfer.
    /// NOTE(review): unlike `receive_window`, this does NOT advance the window
    /// counter or swap the bounce buffers -- the interrupt handler does that.
    ///
    /// # Safety:
    /// TODO
    unsafe fn receive_window_start(&mut self) -> ReceivingTransfer {
        // Descriptors are initialized by `DmaTxBuf::new`.
        let buffer_src_window = &self.swapchain_src.get_latest_framebuffer()
            [self.window_index_next * self.window_size..][..self.window_size];
        Self::linear_descriptors_prepare(self.src_descs, Some(buffer_src_window), |_desc| {
            // No need to call `DmaDescriptor::reset_for_tx`, because
            // 1. we don't rely on the ownership flag;
            // 2. the EOF flag is already set during the construction of this buffer.
        });
        // TODO: Precompute a descriptor list for each buffer, then use `None` instead of `Some(&mut *self.bounce_buffer_dst)`.
        Self::linear_descriptors_prepare_mut(
            self.bounce_dst_descs,
            Some(&mut *self.bounce_buffer_dst),
            |desc| {
                desc.reset_for_rx();
            },
        );
        // Extend the lifetime to 'static because it is required by Mem2Mem.
        //
        // Safety:
        // Pointees are done being used by the driver before this scope ends,
        // this is because we `SimpleMem2MemTransfer::wait()` on the transfer to finish.
        let bounce_dst_descs: &'static mut [DmaDescriptor] =
            unsafe { &mut *(self.bounce_dst_descs as *mut _) };
        let src_descs: &'static mut [DmaDescriptor] = unsafe { &mut *(self.src_descs as *mut _) };
        let mem2mem = unsafe {
            Mem2Mem::new(
                self.channel.clone_unchecked(),
                self.peripheral_src.clone_unchecked(),
            )
        }
        .with_descriptors(bounce_dst_descs, src_descs, self.burst_config)
        .unwrap();
        // Tie the transfer to the driver it borrows inside one self-referential value.
        ReceivingTransferBuilder {
            mem2mem,
            transfer_builder: |mem2mem| {
                Some(
                    mem2mem
                        .start_transfer(&mut self.bounce_buffer_dst, buffer_src_window)
                        .unwrap(),
                )
            },
        }
        .build()
    }
    /// Receive a window of bytes into the current dst bounce buffer, awaiting
    /// completion of the mem-to-mem transfer.
    /// Finally, swaps the bounce buffers (via `increase_window_counter(1)`).
    async fn receive_window(&mut self) {
        // Descriptors are initialized by `DmaTxBuf::new`.
        let buffer_src_window = &self.swapchain_src.get_latest_framebuffer()
            [self.window_index_next * self.window_size..][..self.window_size];
        Self::linear_descriptors_prepare(self.src_descs, Some(buffer_src_window), |_desc| {
            // No need to call `DmaDescriptor::reset_for_tx`, because
            // 1. we don't rely on the ownership flag;
            // 2. the EOF flag is already set during the construction of this buffer.
        });
        // TODO: Precompute a descriptor list for each buffer, then use `None` instead of `Some(&mut *self.bounce_buffer_dst)`.
        Self::linear_descriptors_prepare_mut(
            self.bounce_dst_descs,
            Some(&mut *self.bounce_buffer_dst),
            |desc| {
                desc.reset_for_rx();
            },
        );
        {
            // Extend the lifetime to 'static because it is required by Mem2Mem.
            //
            // Safety:
            // Pointees are done being used by the driver before this scope ends,
            // this is because we `SimpleMem2MemTransfer::wait()` on the transfer to finish.
            let bounce_dst_descs: &'static mut [DmaDescriptor] =
                unsafe { &mut *(self.bounce_dst_descs as *mut _) };
            let src_descs: &'static mut [DmaDescriptor] =
                unsafe { &mut *(self.src_descs as *mut _) };
            let mut mem2mem = Mem2Mem::new(self.channel.reborrow(), self.peripheral_src.reborrow())
                .into_async()
                .with_descriptors(bounce_dst_descs, src_descs, self.burst_config)
                .unwrap();
            let transfer = mem2mem
                .start_transfer(&mut self.bounce_buffer_dst, buffer_src_window)
                .unwrap();
            transfer.wait_async().await.unwrap();
        }
        self.increase_window_counter(1);
    }
    /// Advances (or, for negative `windows`, rewinds) the window/frame
    /// counters by `windows` windows, swapping the bounce-buffer roles
    /// whenever the parity of the position flips.
    fn increase_window_counter(&mut self, windows: isize) {
        // An odd-sized step flips which physical buffer is "dst" vs "src".
        if windows.rem_euclid(2) == 1 {
            core::mem::swap(&mut self.bounce_buffer_dst, &mut self.bounce_buffer_src);
        }
        let window_index_next = self.window_index_next as isize + windows;
        // Carry whole frames into the frame counter.
        // NOTE(review): `/` truncates toward zero while the index below uses
        // `rem_euclid`; for negative intermediate values these disagree by one
        // frame -- confirm callers never cross a frame boundary backwards.
        self.frame_index_next = (self.frame_index_next as isize
            + window_index_next / self.windows_len as isize)
            as usize;
        self.window_index_next = window_index_next.rem_euclid(self.windows_len as isize) as usize;
    }
    /// Starts the outbound DPI transfer and hands `self` over to the
    /// `out_eof` interrupt handler (via `DMA_STATE`), which keeps refilling
    /// the bounce buffers from then on.
    pub async fn launch_interrupt_driven_task(mut self) {
        Self::enable_interrupts();
        // Receive the first window (awaited), so that the outbound transfer can
        // read valid data; a second refill is started -- but not awaited -- below.
        self.receive_window().await;
        let dma_tx_buffer = self.get_dma_tx_buffer();
        let transfer = self
            .peripheral_dst
            .take()
            .unwrap()
            .send(self.cyclic /* Send perpetually */, dma_tx_buffer)
            .unwrap_or_else(|(error, _, _)| {
                panic!("failed to begin the transmission of the first frame: {error:?}");
            });
        self.transfer_dst = Some(transfer);
        self.receiving_transfer = Some(unsafe { self.receive_window_start() });
        // SAFETY(review): publishes the state for the interrupt handler; the
        // handler is the only accessor from here on -- confirm.
        unsafe {
            *DMA_STATE.0.get() = Some(self);
        }
    }
    /// Polling variant of the pipeline: refills the bounce buffers from the
    /// swapchain in a loop, resynchronizing with the `out_eof` interrupt
    /// through `WINDOWS_SENT`, and restarts the DPI transfer each frame when
    /// not in cyclic mode.
    pub async fn send(&mut self) {
        Self::enable_interrupts();
        // Receive the first window, so that the outbound transfer can read valid data.
        self.receive_window().await;
        let mut dma_tx_buffer = self.get_dma_tx_buffer();
        let mut transfer = self
            .peripheral_dst
            .take()
            .unwrap()
            .send(self.cyclic /* Send perpetually */, dma_tx_buffer)
            .unwrap_or_else(|(error, _, _)| {
                panic!("failed to begin the transmission of the first frame: {error:?}");
            });
        let mut windows_skipped_total = 0;
        loop {
            // warn!(
            //     "Receiving window: {} {}",
            //     self.window_index_next, self.frame_index_next
            // );
            self.receive_window().await;
            // warn!(
            //     "Window received: {} {}",
            //     self.window_index_next, self.frame_index_next
            // );
            // NOTE(review): the producer signalling `WINDOWS_SENT` is not
            // visible in this part of the file -- confirm it exists; on timeout
            // this yields 0 and therefore a "skip" of -1 below.
            let windows_sent = WINDOWS_SENT
                .wait()
                .with_timeout(Duration::from_millis(100))
                .await
                .unwrap_or_else(|_| {
                    error!("Timed out when waiting for skipped windows.");
                    0
                });
            // One window per loop iteration is expected; anything else means
            // the outbound DMA ran ahead (or behind) of the refill loop.
            let windows_skipped = windows_sent as isize - 1;
            if windows_skipped != 0 {
                self.increase_window_counter(windows_skipped);
                windows_skipped_total += windows_skipped;
                // error!(
                //     "Skipped {windows_skipped} windows. Windows skipped per frame: {:.2}%",
                //     100.0 * windows_skipped_total as f32
                //         / (self.windows_len * (self.frame_index_next + 1)) as f32
                // );
            }
            // warn!(
            //     "X: {} {} {}",
            //     windows_skipped, self.window_index_next, self.frame_index_next
            // );
            // Non-cyclic mode: the transfer covers one frame, so restart it at
            // each frame boundary (first window already refilled, or hardware done).
            if !self.cyclic && (self.window_index_next == 1 || transfer.is_done()) {
                if self.window_index_next > 1 {
                    // Fast-forward the counters to the start of the next frame.
                    self.increase_window_counter(
                        self.windows_len as isize - self.window_index_next as isize + 1,
                    );
                } else if self.window_index_next == 0 {
                    self.increase_window_counter(1);
                }
                // TODO: Investigate why the DPI transfer isn't done at this point.
                // The `DpiTransfer::wait()` below takes 0.001039 s.
                // Perhaps it's the minimum screen refresh period?
                //
                // assert!(transfer.is_done());
                // if !transfer.is_done() {
                //     error!(
                //         "transfer is not done yet. {} {}",
                //         self.frame_index_next, self.window_index_next
                //     );
                // }
                let result;
                let peripheral_dst;
                // let start = Instant::now();
                (result, peripheral_dst, dma_tx_buffer) = transfer.wait();
                // let duration = Instant::now().duration_since(start);
                // warn!("Waited for {} seconds", duration.display_as_secs());
                if let Err(error) = result {
                    error!("DPI error during sending: {error:?}");
                }
                transfer =
                    peripheral_dst
                        .send(false, dma_tx_buffer)
                        .unwrap_or_else(|(error, _, _)| {
                            panic!("failed to begin the transmission of a frame: {error:?}");
                        });
                // Bump the frame counter the renderer synchronizes with.
                FRAMES_SKIPPED.signal(
                    FRAMES_SKIPPED
                        .try_take()
                        .map(|frames_skipped| frames_skipped + 1)
                        .unwrap_or_default(),
                );
            }
        }
    }
    /// Builds a [`DmaTxBuffer`] whose `Preparation` points at the start of the
    /// precomputed per-frame bounce descriptor chain.
    fn get_dma_tx_buffer(&mut self) -> DmaTxBounceBuf {
        DmaTxBounceBuf {
            preparation: dma::Preparation {
                start: self.bounce_src_descs.first_mut().unwrap(),
                direction: dma::TransferDirection::Out,
                accesses_psram: false,
                burst_transfer: self.burst_config,
                check_owner: Some(false), // Possibly want to set this to false
                auto_write_back: false,   // Possibly true
            },
        }
    }
}
/// A hand-rolled [`DmaTxBuffer`] whose `Preparation` points into the
/// precomputed per-frame bounce descriptor chain owned by [`DmaBounce`].
pub struct DmaTxBounceBuf {
    preparation: dma::Preparation,
}
// SAFETY(review): `DmaTxBuffer` requires the descriptor chain reachable from
// `preparation.start` (and the buffers it points at) to remain valid for the
// duration of the transfer; both are leaked to 'static in `DmaBounce::new` --
// confirm against the trait's documented contract.
unsafe impl DmaTxBuffer for DmaTxBounceBuf {
    type View = Self;
    type Final = Self;
    fn prepare(&mut self) -> dma::Preparation {
        // `Preparation` is rebuilt field by field rather than copied wholesale
        // (presumably it does not implement `Clone` -- confirm).
        dma::Preparation {
            start: self.preparation.start,
            direction: self.preparation.direction,
            accesses_psram: self.preparation.accesses_psram,
            burst_transfer: self.preparation.burst_transfer,
            check_owner: self.preparation.check_owner,
            auto_write_back: self.preparation.auto_write_back,
        }
    }
    // The in-flight "view" and the final value are the buffer itself; there is
    // no extra state to expose or recover.
    fn into_view(self) -> Self::View {
        self
    }
    fn from_view(view: Self::View) -> Self::Final {
        view
    }
}
2026-02-22 00:59:01 +01:00
/// Intended to be listened on by the renderer, to synchronize the refresh frequency with.
pub static FRAMES_SKIPPED: Signal<RawMutex, usize> = Signal::new();
// Number of windows transmitted since the last `wait()`.
// NOTE(review): the producer is not visible in this part of the file -- confirm.
static WINDOWS_SENT: Signal<RawMutex, usize> = Signal::new();
// Pipeline state handed from `launch_interrupt_driven_task` to the `out_eof`
// interrupt handler (the only accessor; see the SAFETY comment there).
static DMA_STATE: SyncUnsafeCell<Option<DmaBounce>> = SyncUnsafeCell(UnsafeCell::new(None));
/// A minimal wrapper that lets an `UnsafeCell` live in a `static` by
/// asserting `Sync` (a stand-in for the unstable `core::cell::SyncUnsafeCell`).
#[repr(transparent)]
pub struct SyncUnsafeCell<T>(UnsafeCell<T>);
// SAFETY(review): this shifts the synchronization burden to every accessor;
// here the only users are the one-time publish in `launch_interrupt_driven_task`
// and the DMA interrupt handler -- confirm no other readers exist.
unsafe impl<T> Sync for SyncUnsafeCell<T> {}
// #[derive(Clone, Copy)]
// struct DmaState {
// descriptors_ptr: *const DmaDescriptor,
// descriptors_per_window: usize,
// windows_per_frame: usize,
// last_window_index: usize,
// }
// unsafe impl Sync for DmaState {}
2026-02-14 20:03:32 +01:00
2026-02-15 02:33:42 +01:00
/// `out_eof` interrupt handler for the outbound (bounce buffer -> DPI) DMA channel.
///
/// Each `out_eof` marks one window fully scanned out. The handler derives the
/// just-sent window's index from the EOF descriptor's address (which stays
/// correct even if interrupts were missed), advances the bookkeeping in
/// `DMA_STATE`, finalizes the previous bounce-buffer refill, and starts the next one.
#[handler(priority = Priority::Priority3)]
#[ram] // Improves performance.
fn dma_outbound_interrupt_handler() {
    let interrupt = DMA::regs().ch(DMA_CHANNEL_OUTBOUND).out_int();
    let bounce_buffer_sent = interrupt.st().read().out_eof().bit_is_set();
    if !bounce_buffer_sent {
        return;
    }
    // Clear the bit by writing 1 to the clear bits.
    interrupt.clr().write(|w| w.out_eof().bit(true));
    // SAFETY: This value is only ever read in our interrupt handler,
    // and interrupts are disabled, and we only use this in one thread.
    let Some(dma_state) = unsafe { &mut *DMA_STATE.0.get() }.as_mut() else {
        error!("no DMA state available when executing DMA interrupt handler");
        return;
    };
    // The descriptor of the buffer with an EOF flag that just finished being sent.
    let descriptor_ptr = DMA::regs()
        .ch(DMA_CHANNEL_OUTBOUND)
        .out_eof_des_addr()
        .read()
        .out_eof_des_addr()
        .bits() as *const DmaDescriptor;
    // This is the index of the window that just finished being transmitted to the destination peripheral.
    let window_sent_index =
        unsafe { descriptor_ptr.offset_from_unsigned(dma_state.bounce_src_descs.as_ptr()) }
            / dma_state.descriptors_per_window;
    // warn!("{window_sent_index}");
    // The next window to be sent is `(window_sent_index + 1) % dma_state.windows_len`.
    // That is not the window we want to buffer, because the transmissions would race.
    // We instead want to buffer the next window:
    let window_index_next = (window_sent_index + 2) % dma_state.windows_len;
    // Swap bounce buffers.
    // (The parity of the jump decides whether the dst/src roles flip; this
    // mirrors `DmaBounce::increase_window_counter`.)
    if (dma_state.windows_len + window_index_next - dma_state.window_index_next) % 2 == 1 {
        core::mem::swap(
            &mut dma_state.bounce_buffer_dst,
            &mut dma_state.bounce_buffer_src,
        );
    }
    dma_state.window_index_next = window_index_next;
    // warn!("{window_sent_index} {window_index_next}");
    let mut receiving_transfer = dma_state
        .receiving_transfer
        .take()
        .expect("no ongoing transfer to a bounce buffer present");
    let receiving_transfer = receiving_transfer
        .with_mut(|x| x.transfer.take())
        .expect("no ongoing inner transfer to a bounce buffer present");
    // if !receiving_transfer.is_done() {
    //     error!("{window_sent_index}");
    //     error!("the transfer to a bounce buffer has not finished yet, aborting");
    // }
    if receiving_transfer.is_done() {
        drop(receiving_transfer);
    } else {
        // Block until the refill finishes; it should be nearly done by the
        // time a whole window has been scanned out.
        receiving_transfer.wait().unwrap();
    }
    // If there is any ongoing transfer, cancel it and start a new one.
    dma_state.receiving_transfer = Some(unsafe { dma_state.receive_window_start() });
}
2026-02-18 05:03:05 +01:00
// #[handler(priority = Priority::Priority3)]
// #[ram] // Improves performance.
// fn dma_inbound_interrupt_handler() {
// warn!("Inbound");
// let interrupt = DMA::regs().ch(DMA_CHANNEL_INBOUND).in_int();
// let bounce_buffer_processed = interrupt.st().read().in_done().bit_is_set();
// if bounce_buffer_processed {
// // Clear the bit by writing 1 to the clear bits.
// interrupt.clr().write(|w| w.in_done().bit(true));
// assert!(
// !INBOUND_TRANSFER_FINISHED.signaled(),
// "inbound transfer already signalled"
// );
// INBOUND_TRANSFER_FINISHED.signal(());
// }
// }
2026-02-22 00:59:01 +01:00
// pub async fn run_lcd(
// mut st7701s: St7701s<'static, Blocking>,
// framebuffer: &'static mut Framebuffer,
// ) {
// loop {
// // Timer::after(Duration::from_millis(100)).await;
// // yield_now().await;
// SIGNAL_LCD_SUBMIT.wait().await;
// // TODO: Use bounce buffers:
// // https://docs.espressif.com/projects/esp-idf/en/v5.0/esp32s3/api-reference/peripherals/lcd.html#bounce-buffer-with-single-psram-frame-buffer
// // This can be implemented as a `DmaTxBuffer`.
// let transfer = match st7701s.dpi.send(false, framebuffer.dma_buf.take().unwrap()) {
// Err((error, result_dpi, result_dma_buf)) => {
// error!(
// "An error occurred while initiating transfer of the framebuffer to the LCD display: {error:?}"
// );
// st7701s.dpi = result_dpi;
// framebuffer.dma_buf = Some(result_dma_buf);
// continue;
// }
// Ok(transfer) => transfer,
// };
// // This could be used to allow other tasks to be executed on the first core, but that causes
// // the flash to be accessed, which interferes with the framebuffer transfer.
// // For that reason, it is disabled, and this task blocks the first core, until the transfer
// // is complete.
// #[cfg(not(feature = "limit-fps"))]
// while !transfer.is_done() {
// // Timer::after_millis(1).await;
// rmk::embassy_futures::yield_now().await;
// }
// let result;
// let dma_buf;
// (result, st7701s.dpi, dma_buf) = transfer.wait();
// framebuffer.dma_buf = Some(dma_buf);
// SIGNAL_UI_RENDER.signal(());
// if let Err(error) = result {
// error!(
// "An error occurred while transferring framebuffer to the LCD display: {error:?}"
// );
// }
// }
// }
2026-02-14 20:03:32 +01:00
/// Allocates a buffer appropriately aligned for use with DMA.
///
/// The buffer is zero-initialised, exactly `len` bytes long, and aligned to
/// the most conservative alignment implied by `burst_config`.
///
/// # Panics
/// Panics if `len` is not a multiple of the required alignment, or if the
/// allocator fails to provide the memory.
pub fn allocate_dma_buffer_in<A: Allocator>(
    len: usize,
    burst_config: BurstConfig,
    alloc: A,
) -> Box<[u8], A> {
    // Conservative alignment. Maximum of the cartesian product of [tx, rx] × [internal, external].
    let alignment = burst_config.min_compatible_alignment();
    assert_eq!(
        len % alignment,
        0,
        "the size of a DMA buffer must be a multiple of {alignment} bytes, but it is {len} bytes large"
    );
    // ⚠️ Note: For chips that support DMA to/from PSRAM (ESP32-S3) DMA transfers to/from PSRAM
    // have extra alignment requirements. The address and size of the buffer pointed to by each
    // descriptor must be a multiple of the cache line (block) size. This is 32 bytes on ESP32-S3.
    // That is ensured by the `assert_eq` preceding this block.
    unsafe {
        // SAFETY: `raw` was freshly allocated (and zeroed) from `alloc` with
        // this exact layout, so transferring ownership to `Box::from_raw_in`
        // with the same allocator is sound; zeroed bytes are a valid `[u8]`.
        let raw = alloc
            .allocate_zeroed(Layout::from_size_align(len, alignment).unwrap())
            .expect("failed to allocate a DMA buffer");
        Box::from_raw_in(raw.as_ptr(), alloc)
    }
}
// TODO: Rename or get rid of.
/// Double-buffered PSRAM framebuffer plus the DMA machinery that streams it
/// to the LCD's DPI peripheral through bounce buffers.
pub struct Framebuffer {
    /// Frame width in pixels.
    pub width: u32,
    /// Frame height in pixels.
    pub height: u32,
    /// Writer half of the swapchain. `Option` so it can be moved out with
    /// `take()` — NOTE(review): confirm who takes ownership and when.
    pub swapchain: Option<SwapchainWriter>,
    /// Bounce-buffer DMA pipeline; `Option` for the same take-out pattern.
    pub bounce_buffers: Option<DmaBounce>,
}
2026-02-14 20:03:32 +01:00
impl Framebuffer {
2026-02-22 00:59:01 +01:00
pub fn new(
channel: DMA_CH0<'static>,
peripheral_src: AnySpi<'static>,
peripheral_dst: Dpi<'static, Blocking>,
burst_config: BurstConfig,
front_porch_pixels: u32,
width_pixels: u32,
height_pixels: u32,
2026-02-22 00:59:01 +01:00
rows_per_window: usize,
cyclic: bool,
) -> Self {
const BYTES_PER_PIXEL: usize = core::mem::size_of::<u16>();
let buffer_size = width_pixels as usize * height_pixels as usize * BYTES_PER_PIXEL;
let framebuffers = [
Box::leak(allocate_dma_buffer_in(
buffer_size,
burst_config,
&PSRAM_ALLOCATOR,
)),
Box::leak(allocate_dma_buffer_in(
buffer_size,
burst_config,
&PSRAM_ALLOCATOR,
)),
];
let (swapchain_reader, swapchain_writer) = Swapchain { framebuffers }.into_reader_writer();
2026-02-22 00:59:01 +01:00
let bounce_buffers = DmaBounce::new(
Global,
channel,
peripheral_src,
peripheral_dst,
swapchain_reader,
front_porch_pixels as usize * BYTES_PER_PIXEL,
width_pixels as usize * BYTES_PER_PIXEL,
rows_per_window,
2026-02-22 00:59:01 +01:00
burst_config,
cyclic,
2026-02-14 20:03:32 +01:00
);
Self {
width: width_pixels,
height: height_pixels,
swapchain: Some(swapchain_writer),
bounce_buffers: Some(bounce_buffers),
2026-02-14 20:03:32 +01:00
}
}
}