// acid/firmware/acid-firmware/src/ui/dpi.rs

use core::{
    alloc::Layout,
    pin::Pin,
    sync::atomic::{self, AtomicBool},
};

use alloc::{
    alloc::{Allocator, Global},
    boxed::Box,
    vec,
};
use embassy_sync::channel::Channel;
use embassy_time::Timer;
use esp_alloc::MemoryCapability;
use esp_hal::{
    Blocking,
    dma::{
        self, AnyGdmaChannel, BufView, BurstConfig, DmaChannel, DmaChannelConvert, DmaDescriptor,
        DmaDescriptorFlags, DmaEligible, DmaRxStreamBuf, DmaTxBuf, DmaTxBuffer, DmaTxInterrupt,
        ExternalBurstConfig, Mem2Mem,
    },
    dma_descriptors, handler,
    interrupt::{self, Priority},
    lcd_cam::lcd::dpi::{Dpi, DpiTransfer},
    peripherals::{DMA, DMA_CH0, Peripherals, SPI2},
    ram,
    spi::master::AnySpi,
};
use esp_sync::RawMutex;
use i_slint_core::software_renderer::Rgb565Pixel;
use indoc::{formatdoc, indoc};
use log::{error, info, warn};

use crate::{PSRAM_ALLOCATOR, SIGNAL_LCD_SUBMIT, SIGNAL_UI_RENDER, peripherals::st7701s::St7701s};

/// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs
/// Write back a specific range of data in the cache.
#[doc(hidden)]
#[unsafe(link_section = ".rwtext")]
pub unsafe fn cache_writeback_addr(addr: u32, size: u32) {
    unsafe extern "C" {
        fn rom_Cache_WriteBack_Addr(addr: u32, size: u32);
        fn Cache_Suspend_DCache_Autoload() -> u32;
        fn Cache_Resume_DCache_Autoload(value: u32);
    }

    // suspend autoload, avoid load cachelines being written back
    unsafe {
        let autoload = Cache_Suspend_DCache_Autoload();
        rom_Cache_WriteBack_Addr(addr, size);
        Cache_Resume_DCache_Autoload(autoload);
    }
}

/// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs
/// Invalidate a specific range of addresses in the cache.
#[doc(hidden)]
#[unsafe(link_section = ".rwtext")]
pub unsafe fn cache_invalidate_addr(addr: u32, size: u32) {
    unsafe extern "C" {
        fn Cache_Invalidate_Addr(addr: u32, size: u32);
    }

    unsafe {
        Cache_Invalidate_Addr(addr, size);
    }
}
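
/// Streams a large (PSRAM-resident) source buffer to the LCD DPI peripheral through a pair of
/// small internal-RAM bounce buffers, one window at a time:
///
/// * `buffer_src` is split into `windows_len` windows of `window_size` bytes each.
/// * A `Mem2Mem` transfer on `channel` copies the current window into a bounce buffer, using
///   `src_descs` (read side) and `bounce_dst_descs` (write side).
/// * The DPI peripheral transmits out of the bounce buffers via `bounce_src_descs`, a cyclic
///   descriptor list that alternates between `bounce_buffer_dst` and `bounce_buffer_src`.
///
/// See `send` for the transfer loop, and the ESP-IDF "bounce buffer" LCD documentation linked in
/// `run_lcd` for the underlying idea.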
pub struct DmaBounce {
    // TODO: Make these generic.
    // They currently cannot be generic, because they lack a `reborrow` method.
    channel: DMA_CH0<'static>,
    // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`.
    peripheral_src: AnySpi<'static>,
    // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`.
    peripheral_dst: Option<Dpi<'static, Blocking>>,
    // TODO: Consider having a separate burst config for the two transfers.
    burst_config: BurstConfig,
    // rx: DmaRxStreamBuf,
    /// The size of each window.
    window_size: usize,
    /// The number of windows.
    windows_len: usize,
    buffer_src: &'static mut [u8],
    // Two buffers of size `window_size`,
    // one of which is being written to, while the other is being read from.
    bounce_buffer_dst: &'static mut [u8],
    bounce_buffer_src: &'static mut [u8],
    // A descriptor list that spans a buffer of size `window_size`.
    // The buffer pointers need to be updated before each transmission to point to the correct window in the source buffer `buffer_src`.
    src_descs: &'static mut [DmaDescriptor],
    // A descriptor list that spans a buffer of size `window_size`.
    // The buffer pointers need to be updated before each transmission to point to the correct bounce buffer.
    bounce_dst_descs: &'static mut [DmaDescriptor],
    // A cyclic descriptor list that spans the buffers `bounce_buffer_dst` and `bounce_buffer_src`.
    bounce_src_descs: &'static mut [DmaDescriptor],
}

impl DmaBounce {
    pub fn new(
        channel: DMA_CH0<'static>,
        peripheral_src: AnySpi<'static>,
        peripheral_dst: Dpi<'static, Blocking>,
        buffer_src: &'static mut [u8],
        window_size: usize,
        burst_config: BurstConfig,
    ) -> Self {
        assert_eq!(
            buffer_src.len() % window_size,
            0,
            "the size of a source buffer must be a multiple of the window size ({window_size} bytes), but it is {len} bytes large",
            len = buffer_src.len()
        );

        let bounce_buffer_dst = Box::leak(allocate_dma_buffer_in(window_size, Global));
        let bounce_buffer_src = Box::leak(allocate_dma_buffer_in(window_size, Global));
        let src_descs = Self::linear_descriptors_for_buffer(window_size, burst_config, |desc| {
            desc.reset_for_tx(desc.next.is_null());
            // Length for TX buffers must be set in software.
            // In RX buffers, it is set by hardware.
            desc.set_length(desc.size());
        });
        let bounce_dst_descs =
            Self::linear_descriptors_for_buffer(window_size, burst_config, |_| {});
        let bounce_src_descs = Self::bounce_descriptors_for_buffer(
            unsafe {
                (
                    &mut *(bounce_buffer_dst as *mut _),
                    &mut *(bounce_buffer_src as *mut _),
                )
            },
            burst_config,
        );

        Self {
            channel,
            peripheral_src,
            peripheral_dst: Some(peripheral_dst),
            burst_config,
            window_size,
            windows_len: buffer_src.len() / window_size,
            buffer_src,
            bounce_buffer_dst,
            bounce_buffer_src,
            src_descs,
            bounce_dst_descs,
            bounce_src_descs,
        }
    }
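
    /// Allocates (and leaks) a linked list of descriptors covering a buffer of `buffer_len`
    /// bytes, split into chunks of at most `burst_config.max_compatible_chunk_size()` bytes.
    /// `setup_desc` is called once per descriptor; buffer pointers are left unset and are filled
    /// in later (see `linear_descriptors_prepare`).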
    fn linear_descriptors_for_buffer(
        buffer_len: usize,
        burst_config: BurstConfig,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) -> &'static mut [DmaDescriptor] {
        let max_chunk_size = burst_config.max_compatible_chunk_size();
        let descriptors_len = dma::descriptor_count(buffer_len, max_chunk_size, false);
        // TODO: This leaks memory. Ensure it's only called during setup.
        let descriptors = Box::leak(vec![DmaDescriptor::EMPTY; descriptors_len].into_boxed_slice());

        // Link up the descriptors.
        let mut next = core::ptr::null_mut();
        for desc in descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }

        // Prepare each descriptor's buffer size.
        let mut descriptors_it = descriptors.iter_mut();
        let mut remaining_len = buffer_len;
        while remaining_len > 0 {
            let chunk_size = core::cmp::min(max_chunk_size, remaining_len);
            let desc = descriptors_it.next().unwrap();
            desc.set_size(chunk_size);
            (setup_desc)(desc);
            remaining_len -= chunk_size;
        }

        descriptors
    }
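
    /// Builds a single cyclic descriptor list over the two (equally sized) bounce buffers: the
    /// descriptors of each buffer are chained together, and the last descriptor of each chain
    /// points at the first descriptor of the other, so a DMA transfer loops over
    /// `bounce_buffer_dst` and `bounce_buffer_src` forever. Buffer pointers, sizes, and lengths
    /// are fully initialized here.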
    fn bounce_descriptors_for_buffer(
        bounce_buffers: (&'static mut [u8], &'static mut [u8]),
        burst_config: BurstConfig,
    ) -> &'static mut [DmaDescriptor] {
        assert_eq!(
            bounce_buffers.0.len(),
            bounce_buffers.1.len(),
            "bounce buffers must be equal in size"
        );
        let buffer_len = bounce_buffers.0.len();
        let max_chunk_size = burst_config.max_compatible_chunk_size();
        let descriptors_len = dma::descriptor_count(buffer_len, max_chunk_size, false);
        // TODO: This leaks memory. Ensure it's only called during setup.
        let descriptors_combined =
            Box::leak(vec![DmaDescriptor::EMPTY; 2 * descriptors_len].into_boxed_slice());
        let descriptors_pair = descriptors_combined.split_at_mut(descriptors_len);

        // Link up the descriptors.
        fn link_up_descriptors(
            descriptors: &mut [DmaDescriptor],
            descriptors_other: &mut [DmaDescriptor],
        ) {
            let mut next = descriptors_other.first_mut().unwrap();
            for desc in descriptors.iter_mut().rev() {
                desc.next = next;
                next = desc;
            }
        }
        link_up_descriptors(descriptors_pair.0, descriptors_pair.1);
        link_up_descriptors(descriptors_pair.1, descriptors_pair.0);

        // Prepare each descriptor's buffer size.
        for (bounce_buffer, descriptors) in [
            (bounce_buffers.0, descriptors_pair.0),
            (bounce_buffers.1, descriptors_pair.1),
        ] {
            let mut descriptors_it = descriptors.iter_mut();
            let mut remaining_bounce_buffer = bounce_buffer;
            while !remaining_bounce_buffer.is_empty() {
                let chunk_size = core::cmp::min(max_chunk_size, remaining_bounce_buffer.len());
                let desc = descriptors_it.next().unwrap();
                desc.buffer = remaining_bounce_buffer.as_mut_ptr();
                remaining_bounce_buffer = &mut remaining_bounce_buffer[chunk_size..];
                let is_last = remaining_bounce_buffer.is_empty();
                desc.set_size(chunk_size);
                desc.set_length(chunk_size);
                desc.reset_for_tx(is_last);
            }
        }

        descriptors_combined
    }
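
    /// Re-points an existing descriptor list at a (new) buffer: walks the descriptors in order,
    /// assigns each one the next `descriptor.size()` bytes of `buffer` (if provided), and calls
    /// `setup_desc` on it. Panics if the buffer length does not match the descriptor list.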
    fn linear_descriptors_prepare(
        descriptors: &mut [DmaDescriptor],
        mut buffer: Option<&mut [u8]>,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) {
        for descriptor in descriptors.iter_mut() {
            if let Some(inner_buffer) = buffer {
                descriptor.buffer = inner_buffer.as_mut_ptr();
                buffer = Some(&mut inner_buffer[descriptor.size()..]);
            }
            (setup_desc)(descriptor);
        }

        if let Some(buffer) = buffer {
            assert!(
                buffer.is_empty(),
                "a buffer of an incompatible length was assigned to a descriptor set"
            );
        }
    }
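
    /// Enables and binds `dma_interrupt_handler` for the DMA_OUT_CH0 and DMA_IN_CH0 interrupts,
    /// and unmasks the relevant OUT/IN interrupt sources in the DMA channel registers.
    /// The handler currently only records diagnostics (see `FLAG`, `INT_CHANNEL`, `print_regs`).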
    fn enable_interrupts() {
        // Enable interrupts for the peripheral.
        interrupt::enable(
            esp_hal::peripherals::Interrupt::DMA_OUT_CH0,
            dma_interrupt_handler.priority(),
        )
        .unwrap();
        interrupt::enable(
            esp_hal::peripherals::Interrupt::DMA_IN_CH0,
            dma_interrupt_handler.priority(),
        )
        .unwrap();
        // interrupt::enable(
        //     esp_hal::peripherals::Interrupt::SPI2_DMA,
        //     dma_interrupt_handler.priority(),
        // )
        // .unwrap();

        // Bind the handler.
        unsafe {
            interrupt::bind_interrupt(
                esp_hal::peripherals::Interrupt::DMA_OUT_CH0,
                dma_interrupt_handler.handler(),
            );
            interrupt::bind_interrupt(
                esp_hal::peripherals::Interrupt::DMA_IN_CH0,
                dma_interrupt_handler.handler(),
            );
            // interrupt::bind_interrupt(
            //     esp_hal::peripherals::Interrupt::SPI2_DMA,
            //     dma_interrupt_handler.handler(),
            // );
        }

        // Enable interrupts in the peripheral.
        let channel_number = 0; // TODO: Get from self.channel
        DMA::regs()
            .ch(channel_number)
            .out_int()
            .ena()
            .modify(|_, w| {
                w.out_total_eof().bit(true);
                w.out_eof().bit(true);
                w.out_done().bit(true);
                w
            });
        DMA::regs()
            .ch(channel_number)
            .in_int()
            .ena()
            .modify(|_, w| {
                w.in_suc_eof().bit(true);
                w.in_done().bit(true);
                w
            });
        // SPI2::regs().dma_int_ena().modify(|_, w| {
        //     w.slv_rd_dma_done().bit(true);
        //     w.slv_wr_dma_done().bit(true);
        //     w.dma_seg_trans_done().bit(true);
        //     w.trans_done().bit(true);
        //     w
        // });
    }
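
    /// Logs the raw DMA channel 0 OUT/IN interrupt status registers together with the
    /// diagnostics recorded by `dma_interrupt_handler` (`FLAG` and `INT_CHANNEL`).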
    fn print_regs() {
        let channel_number = 0; // TODO: Get from self.channel
        let out_int_raw = DMA::regs()
            .ch(channel_number as usize)
            .out_int()
            .st()
            .read();
        let in_int_raw = DMA::regs().ch(channel_number as usize).in_int().st().read();
        log::error!(
            indoc! {"
                int_raw:
                    flag: {flag}
                    out:
                        total_eof: {out_total_eof}
                        eof: {out_eof}
                        done: {out_done}
                    in:
                        suc_eof: {in_suc_eof}
                        done: {in_done}
            "},
            flag = FLAG.load(atomic::Ordering::SeqCst),
            out_total_eof = out_int_raw.out_total_eof().bit_is_set(),
            out_eof = out_int_raw.out_eof().bit_is_set(),
            out_done = out_int_raw.out_done().bit_is_set(),
            in_suc_eof = in_int_raw.in_suc_eof().bit_is_set(),
            in_done = in_int_raw.in_done().bit_is_set(),
        );
        error!("int_raw_msg: 0x{:08x?}", INT_CHANNEL.try_receive());
    }
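
    /// Runs the bounce-buffer pipeline:
    ///
    /// 1. Starts a perpetual DPI transfer that reads from the cyclic bounce-buffer descriptor
    ///    list (see `get_dma_tx_buffer`).
    /// 2. For every window of `buffer_src`: re-points `src_descs`/`bounce_dst_descs` at the
    ///    window and the bounce buffer, runs a blocking `Mem2Mem` copy into the bounce buffer,
    ///    invalidates the cache for the bounce buffer, and asserts that the copy succeeded.
    ///
    /// The trailing loop only logs whether the DPI transfer is still running; returning the
    /// transfer (see the commented-out return type) is still a TODO.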
    pub async fn send(&mut self) // -> DpiTransfer<'static, DmaTxBounceBuf, Blocking>
    {
        Self::enable_interrupts();

        let mut dma_tx_buffer = self.get_dma_tx_buffer();
        let transfer = match self
            .peripheral_dst
            .take()
            .unwrap()
            .send(true /* Send perpetually */, dma_tx_buffer)
        {
            Ok(transfer) => {
                // let result;
                // let peripheral_dst;
                // (result, peripheral_dst, dma_tx_buffer) = transfer.wait();
                // self.peripheral_dst = Some(peripheral_dst);
                // if let Err(error) = result {
                //     error!("DPI error during sending: {error:?}");
                // }
                warn!("Sending data to DPI!");
                transfer
            }
            Err(error_tuple) => {
                let error;
                let peripheral_dst;
                (error, peripheral_dst, dma_tx_buffer) = error_tuple;
                self.peripheral_dst = Some(peripheral_dst);
                panic!("DPI error when starting transfer: {error:?}");
            }
        };

        for window_index in 0..self.windows_len {
            // Descriptors are initialized by `DmaTxBuf::new`.
            let buffer_src_window =
                &mut self.buffer_src[window_index * self.window_size..][..self.window_size];
            Self::linear_descriptors_prepare(self.src_descs, Some(buffer_src_window), |_| {});
            // TODO: Precompute a descriptor list for each buffer, then use `None` instead of `Some(&mut *self.bounce_buffer_dst)`.
            Self::linear_descriptors_prepare(
                self.bounce_dst_descs,
                Some(&mut *self.bounce_buffer_dst),
                |desc| {
                    desc.reset_for_rx();
                },
            );

            {
                // Extend the lifetime to 'static because it is required by `Mem2Mem`.
                //
                // Safety:
                // The pointees are done being used by the driver before this scope ends,
                // because we call `SimpleMem2MemTransfer::wait()` on the transfer before leaving it.
                let bounce_dst_descs = unsafe { &mut *(self.bounce_dst_descs as *mut _) };
                let src_descs = unsafe { &mut *(self.src_descs as *mut _) };
                let mut mem2mem =
                    Mem2Mem::new(self.channel.reborrow(), self.peripheral_src.reborrow())
                        .with_descriptors(bounce_dst_descs, src_descs, self.burst_config)
                        .unwrap();
                let transfer = mem2mem
                    .start_transfer(&mut self.bounce_buffer_dst, buffer_src_window)
                    .unwrap();
                Self::print_regs();
                transfer.wait().unwrap();
            }

            Self::print_regs();

            // TODO: Get rid of this!
            unsafe {
                cache_invalidate_addr(
                    self.bounce_buffer_dst.as_ptr() as u32,
                    self.bounce_buffer_dst.len() as u32,
                );
            }
            assert_eq!(self.bounce_buffer_dst, buffer_src_window);
        }

        loop {
            warn!("Still sending data to DPI? {}", !transfer.is_done());
            Timer::after_secs(10).await;
        }
        // transfer
    }
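
    /// Wraps the cyclic bounce-buffer descriptor list in a `DmaTxBounceBuf` so it can be handed
    /// to `Dpi::send` as a `DmaTxBuffer`. The `Preparation` points at the first descriptor of
    /// `bounce_src_descs`; because the list is cyclic, the transfer never runs out of
    /// descriptors.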
    fn get_dma_tx_buffer(&mut self) -> DmaTxBounceBuf {
        DmaTxBounceBuf {
            preparation: dma::Preparation {
                start: self.bounce_src_descs.first_mut().unwrap(),
                direction: dma::TransferDirection::Out,
                accesses_psram: false,
                burst_transfer: self.burst_config,
                check_owner: Some(true), // Possibly want to set this to false.
                auto_write_back: false,  // Possibly true.
            },
        }
    }
}
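
/// A minimal `DmaTxBuffer` implementation that only carries a prebuilt `dma::Preparation`
/// (pointing at the cyclic bounce-buffer descriptors) and hands it to the driver unchanged.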
pub struct DmaTxBounceBuf {
    preparation: dma::Preparation,
}

unsafe impl DmaTxBuffer for DmaTxBounceBuf {
    type View = Self;
    type Final = Self;

    fn prepare(&mut self) -> dma::Preparation {
        dma::Preparation {
            start: self.preparation.start,
            direction: self.preparation.direction,
            accesses_psram: self.preparation.accesses_psram,
            burst_transfer: self.preparation.burst_transfer,
            check_owner: self.preparation.check_owner,
            auto_write_back: self.preparation.auto_write_back,
        }
    }

    fn into_view(self) -> Self::View {
        self
    }

    fn from_view(view: Self::View) -> Self::Final {
        view
    }
}
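
// Diagnostics shared between `dma_interrupt_handler` and `print_regs`:
// the raw interrupt status words and a flag showing that the handler ran at all.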
static INT_CHANNEL: Channel<RawMutex, u32, 128> = Channel::new();
static FLAG: AtomicBool = AtomicBool::new(false);
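
/// Runs for the DMA channel 0 OUT/IN interrupts: sets `FLAG` and pushes the raw OUT interrupt
/// status into `INT_CHANNEL`. Note that the interrupt status is not cleared here.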
#[handler(priority = Priority::Priority3)]
#[ram] // Improves performance.
fn dma_interrupt_handler() {
    FLAG.store(true, atomic::Ordering::SeqCst);
    let int_raw = DMA::regs().ch(0).out_int().raw().read();
    INT_CHANNEL.try_send(int_raw.bits()).unwrap();
    // let lcd_cam = unsafe { &*esp_hal::peripherals::LCD_CAM::PTR };
    // // Check and clear VSYNC interrupt.
    // if lcd_cam
    //     .lc_dma_int_raw()
    //     .read()
    //     .lcd_vsync_int_raw()
    //     .bit_is_set()
    // {
    //     lcd_cam
    //         .lc_dma_int_clr()
    //         .write(|w| w.lcd_vsync_int_clr().set_bit());
    //     INT_CHANNEL.send();
    //     // VSYNC_SIGNAL.signal(());
    //     // Signal the event
    //     // critical_section::with(|cs| {
    //     //     *VSYNC_FLAG.borrow_ref_mut(cs) = true;
    //     // });
    // }
}
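
/// Task that drives the LCD: waits for `SIGNAL_LCD_SUBMIT`, sends the framebuffer to the display
/// over DPI, waits for the transfer to complete, and then signals `SIGNAL_UI_RENDER` so the next
/// frame can be rendered.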
pub async fn run_lcd(
    mut st7701s: St7701s<'static, Blocking>,
    framebuffer: &'static mut Framebuffer,
) {
    loop {
        // Timer::after(Duration::from_millis(100)).await;
        // yield_now().await;
        SIGNAL_LCD_SUBMIT.wait().await;
        // TODO: Use bounce buffers:
        // https://docs.espressif.com/projects/esp-idf/en/v5.0/esp32s3/api-reference/peripherals/lcd.html#bounce-buffer-with-single-psram-frame-buffer
        // This can be implemented as a `DmaTxBuffer`.
        let transfer = match st7701s.dpi.send(false, framebuffer.dma_buf.take().unwrap()) {
            Err((error, result_dpi, result_dma_buf)) => {
                error!(
                    "An error occurred while initiating transfer of the framebuffer to the LCD display: {error:?}"
                );
                st7701s.dpi = result_dpi;
                framebuffer.dma_buf = Some(result_dma_buf);
                continue;
            }
            Ok(transfer) => transfer,
        };
        // This could be used to allow other tasks to execute on the first core, but that causes
        // the flash to be accessed, which interferes with the framebuffer transfer.
        // For that reason, it is disabled, and this task blocks the first core until the transfer
        // is complete.
        #[cfg(not(feature = "limit-fps"))]
        while !transfer.is_done() {
            // Timer::after_millis(1).await;
            rmk::embassy_futures::yield_now().await;
        }

        let result;
        let dma_buf;
        (result, st7701s.dpi, dma_buf) = transfer.wait();
        framebuffer.dma_buf = Some(dma_buf);
        SIGNAL_UI_RENDER.signal(());
        if let Err(error) = result {
            error!(
                "An error occurred while transferring framebuffer to the LCD display: {error:?}"
            );
        }
    }
}
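
/// A full-screen RGB565 framebuffer allocated in PSRAM, wrapped in a `DmaTxBuf` so it can be
/// sent to the display with `Dpi::send`. `dma_buf` is an `Option` because ownership of the
/// buffer moves into the in-flight transfer and is returned when the transfer completes.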
pub struct Framebuffer {
    pub width: u32,
    pub height: u32,
    pub dma_buf: Option<DmaTxBuf>,
}

/// Allocates a buffer appropriately aligned for use with DMA.
pub fn allocate_dma_buffer_in<A: Allocator>(len: usize, alloc: A) -> Box<[u8], A> {
    const DMA_ALIGNMENT: usize = 32;
    assert_eq!(
        len % DMA_ALIGNMENT,
        0,
        "the size of a DMA buffer must be a multiple of {DMA_ALIGNMENT} bytes, but it is {len} bytes large"
    );
    // ⚠️ Note: For chips that support DMA to/from PSRAM (ESP32-S3), DMA transfers to/from PSRAM
    // have extra alignment requirements. The address and size of the buffer pointed to by each
    // descriptor must be a multiple of the cache line (block) size. This is 32 bytes on ESP32-S3.
    // That is ensured by the `assert_eq` preceding this block.
    unsafe {
        let raw = alloc
            .allocate_zeroed(Layout::from_size_align(len, DMA_ALIGNMENT).unwrap())
            .expect("failed to allocate a DMA buffer");
        Box::from_raw_in(raw.as_ptr(), alloc)
    }
}

impl Framebuffer {
    pub fn new(width: u32, height: u32) -> Self {
        let buffer_len = width as usize * height as usize * core::mem::size_of::<u16>();
        let buffer = allocate_dma_buffer_in(buffer_len, &PSRAM_ALLOCATOR);
        let burst_config: BurstConfig = ExternalBurstConfig::Size16.into();
        info!(
            "PSRAM SPI burst config: max_compatible_chunk_size={}",
            burst_config.max_compatible_chunk_size()
        );
        let dma_buf_descs_len = esp_hal::dma::descriptor_count(
            buffer_len,
            burst_config.max_compatible_chunk_size(),
            false,
        );
        // Descriptors are initialized by `DmaTxBuf::new`.
        let dma_buf_descs = vec![DmaDescriptor::EMPTY; dma_buf_descs_len].into_boxed_slice();
        // We just leak the buffers.
        let dma_buf = DmaTxBuf::new(Box::leak(dma_buf_descs), Box::leak(buffer)).unwrap();
        Self {
            width,
            height,
            dma_buf: Some(dma_buf),
        }
    }

    pub fn as_target_pixels(&mut self) -> &mut [Rgb565Pixel] {
        bytemuck::cast_slice_mut::<_, Rgb565Pixel>(self.dma_buf.as_mut().unwrap().as_mut_slice())
    }
}