acid/firmware/acid-firmware/src/ui/dpi.rs

428 lines
15 KiB
Rust
Raw Normal View History

2026-02-14 20:03:32 +01:00
use core::{alloc::Layout, pin::Pin};
use alloc::{
alloc::{Allocator, Global},
boxed::Box,
vec,
};
use embassy_sync::channel::Channel;
use esp_alloc::MemoryCapability;
use esp_hal::{
Blocking,
dma::{
self, AnyGdmaChannel, BufView, BurstConfig, DmaChannel, DmaChannelConvert, DmaDescriptor,
DmaDescriptorFlags, DmaEligible, DmaRxStreamBuf, DmaTxBuf, DmaTxBuffer, DmaTxInterrupt,
ExternalBurstConfig, Mem2Mem,
},
dma_descriptors, handler, interrupt,
peripherals::{DMA, DMA_CH0, Peripherals, SPI2},
ram,
spi::master::AnySpi,
};
use esp_sync::RawMutex;
use i_slint_core::software_renderer::Rgb565Pixel;
use indoc::{formatdoc, indoc};
use log::{error, info};
use crate::{PSRAM_ALLOCATOR, SIGNAL_LCD_SUBMIT, SIGNAL_UI_RENDER, peripherals::st7701s::St7701s};
/// A bounce-buffered DMA TX pipeline: a large source buffer (typically in
/// PSRAM) is processed window-by-window through small `window_size`-byte
/// bounce buffers allocated from internal RAM (`Global`).
pub struct DmaTxBounceBuf {
    // TODO: Make these generic.
    // They currently cannot be generic, because they lack a `reborrow` method.
    channel: DMA_CH0<'static>,
    // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`.
    peripheral: AnySpi<'static>,
    // Burst configuration used to size and chunk the DMA descriptor chains.
    burst_config: BurstConfig,
    // rx: DmaRxStreamBuf,
    /// The size of each window.
    window_size: usize,
    /// The number of windows.
    windows_len: usize,
    // Full source buffer; `new` asserts its length is a multiple of `window_size`.
    buffer_src: &'static mut [u8],
    // Two buffers of size `window_size`,
    // one of which is being written to, while the other is being read from.
    bounce_buffer_dst: Box<[u8]>,
    // NOTE(review): `bounce_buffer_src` is allocated but never read or written
    // by `send` in this file — TODO confirm it is still needed.
    bounce_buffer_src: Box<[u8]>,
}
impl DmaTxBounceBuf {
    /// Creates a bounce-buffer pipeline over `buffer_src`, splitting it into
    /// `buffer_src.len() / window_size` equally sized windows and allocating
    /// two `window_size`-byte DMA-aligned bounce buffers from `Global`.
    ///
    /// # Panics
    /// Panics if `buffer_src.len()` is not a multiple of `window_size`, or if
    /// a bounce buffer cannot be allocated (see `allocate_dma_buffer_in`).
    pub fn new(
        channel: DMA_CH0<'static>,
        peripheral: AnySpi<'static>,
        buffer_src: &'static mut [u8],
        window_size: usize,
        burst_config: BurstConfig,
    ) -> Self {
        assert_eq!(
            buffer_src.len() % window_size,
            0,
            "the size of a source buffer must be a multiple of the window size ({window_size} bytes), but it is {len} bytes large",
            len = buffer_src.len()
        );
        // let dma_buf_descs_len = esp_hal::dma::descriptor_count(
        //     window_size,
        //     burst_config.max_compatible_chunk_size(),
        //     false,
        // );
        // let dma_buf_descs =
        //     Box::leak(vec![DmaDescriptor::EMPTY; dma_buf_descs_len].into_boxed_slice());
        // let rx = DmaRxStreamBuf::new(dma_buf_descs, buffer_src).unwrap();
        Self {
            channel,
            peripheral,
            burst_config,
            // rx,
            window_size,
            windows_len: buffer_src.len() / window_size,
            buffer_src,
            bounce_buffer_dst: allocate_dma_buffer_in(window_size, Global),
            bounce_buffer_src: allocate_dma_buffer_in(window_size, Global),
        }
    }

    /// Allocates a linked chain of DMA descriptors covering `buffer_len`
    /// bytes, chunked by the burst config's maximum compatible chunk size,
    /// then runs `setup_desc` on each sized descriptor.
    ///
    /// Descriptors are linked front-to-back; the last descriptor's `next` is
    /// null. Buffer pointers are NOT set here — see
    /// [`Self::linear_descriptors_set_buffer`].
    ///
    /// The returned slice is intentionally leaked (see the TODO below), so the
    /// `'static` lifetime is genuine.
    fn linear_descriptors_for_buffer(
        buffer_len: usize,
        burst_config: &BurstConfig,
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) -> &'static mut [DmaDescriptor] {
        let max_chunk_size = burst_config.max_compatible_chunk_size();
        let descriptors_len = esp_hal::dma::descriptor_count(buffer_len, max_chunk_size, false);
        // TODO: This leaks memory. Ensure it's only called during setup.
        // NOTE(review): it is currently called on every `send()`, so each call
        // leaks two descriptor chains.
        let descriptors = Box::leak(vec![DmaDescriptor::EMPTY; descriptors_len].into_boxed_slice());
        // Link up the descriptors.
        // Iterate in reverse so each descriptor can point at its successor;
        // the last one keeps a null `next`.
        let mut next = core::ptr::null_mut();
        for desc in descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }
        // Prepare each descriptor's buffer size.
        // Every descriptor receives `max_chunk_size` bytes except possibly the
        // final one, which takes the remainder.
        let mut descriptors_it = descriptors.iter_mut();
        let mut remaining_len = buffer_len;
        while remaining_len > 0 {
            let chunk_size = core::cmp::min(max_chunk_size, remaining_len);
            let desc = descriptors_it.next().unwrap();
            desc.set_size(chunk_size);
            (setup_desc)(desc);
            remaining_len -= chunk_size;
        }
        descriptors
    }

    /// Points an existing descriptor chain at `buffer`, advancing through the
    /// buffer by each descriptor's previously configured size, and runs
    /// `setup_desc` on each descriptor after its buffer pointer is assigned.
    ///
    /// # Panics
    /// Panics if the descriptor sizes do not sum exactly to `buffer.len()`.
    fn linear_descriptors_set_buffer(
        descriptors: &mut [DmaDescriptor],
        mut buffer: &mut [u8],
        mut setup_desc: impl FnMut(&mut DmaDescriptor),
    ) {
        for descriptor in descriptors.iter_mut() {
            descriptor.buffer = buffer.as_mut_ptr();
            (setup_desc)(descriptor);
            buffer = &mut buffer[descriptor.size()..];
        }
        assert!(
            buffer.is_empty(),
            "a buffer of an incompatible length was asssigned to a descriptor set"
        );
    }

    /// Copies the source buffer window-by-window via `Mem2Mem` DMA transfers,
    /// logging the raw DMA interrupt status around each transfer and verifying
    /// each copied window against its source (diagnostic code path).
    ///
    /// NOTE(review): each call allocates fresh (leaked) descriptor chains —
    /// see the TODO below about moving this into `Self::new()`.
    pub async fn send(&mut self) {
        // TODO: Precompute as much as possible by moving to `Self::new()`.
        let src_descs =
            Self::linear_descriptors_for_buffer(self.window_size, &self.burst_config, |desc| {
                desc.reset_for_tx(desc.next.is_null());
                // Length for TX buffers must be set in software.
                // In RX buffers, it is set by hardware.
                desc.set_length(desc.size());
            });
        let bounce_dst_descs =
            Self::linear_descriptors_for_buffer(self.window_size, &self.burst_config, |_| {});
        // Enable interrupts for the peripheral
        interrupt::enable(
            esp_hal::peripherals::Interrupt::DMA_OUT_CH0,
            dma_interrupt_handler.priority(),
        )
        .unwrap();
        interrupt::enable(
            esp_hal::peripherals::Interrupt::SPI2_DMA,
            dma_interrupt_handler.priority(),
        )
        .unwrap();
        // Bind the handler
        // SAFETY: `dma_interrupt_handler` is a free function, so the handler it
        // yields is valid for the whole program.
        unsafe {
            interrupt::bind_interrupt(
                esp_hal::peripherals::Interrupt::DMA_OUT_CH0,
                dma_interrupt_handler.handler(),
            );
            interrupt::bind_interrupt(
                esp_hal::peripherals::Interrupt::SPI2_DMA,
                dma_interrupt_handler.handler(),
            );
        }
        // Enable interrupts in the peripheral.
        let channel_number = 0; // TODO: Get from self.channel
        DMA::regs()
            .ch(channel_number)
            .out_int()
            .ena()
            .modify(|_, w| {
                w.out_total_eof().bit(true);
                w.out_dscr_err().bit(true);
                w.out_eof().bit(true);
                w.out_done().bit(true);
                w
            });
        SPI2::regs().dma_int_ena().modify(|_, w| {
            w.slv_rd_dma_done().bit(true);
            w.slv_wr_dma_done().bit(true);
            w.dma_seg_trans_done().bit(true);
            w.trans_done().bit(true);
            w
        });
        for window_index in 0..self.windows_len {
            // Descriptors are initialized by `DmaTxBuf::new`.
            let buffer_src_window =
                &mut self.buffer_src[window_index * self.window_size..][..self.window_size];
            // Re-point both descriptor chains at the current window.
            Self::linear_descriptors_set_buffer(src_descs, buffer_src_window, |_| {});
            Self::linear_descriptors_set_buffer(bounce_dst_descs, buffer_src_window, |desc| {
                desc.reset_for_rx();
            });
            // let (channel_rx, channel_tx) = self.channel.split();
            {
                // Extend the lifetime to 'static because it is required by Mem2Mem.
                //
                // Safety:
                // Pointees are done being used by the driver before this scope ends,
                // this is because we `SimpleMem2MemTransfer::wait()` on the transfer to finish.
                let bounce_dst_descs = unsafe { &mut *(bounce_dst_descs as *mut _) };
                let src_descs = unsafe { &mut *(src_descs as *mut _) };
                let mut mem2mem = Mem2Mem::new(self.channel.reborrow(), self.peripheral.reborrow())
                    .with_descriptors(bounce_dst_descs, src_descs, self.burst_config)
                    .unwrap();
                let transfer = mem2mem
                    .start_transfer(&mut self.bounce_buffer_dst, buffer_src_window)
                    .unwrap();
                // Diagnostic: dump the raw DMA interrupt status while the
                // transfer is in flight.
                let int_raw = DMA::regs()
                    .ch(channel_number as usize)
                    .out_int()
                    .raw()
                    .read();
                log::error!(
                    indoc! {"
                        int_raw:
                        total_eof: {total_eof}
                        eof: {eof}
                        done: {done}
                        dscr_err: {dscr_err}
                    "},
                    total_eof = int_raw.out_total_eof().bit_is_set(),
                    eof = int_raw.out_eof().bit_is_set(),
                    done = int_raw.out_done().bit_is_set(),
                    dscr_err = int_raw.out_dscr_err().bit_is_set(),
                );
                error!("int_raw_msg: 0x{:08x?}", INT_CHANNEL.try_receive());
                // Block until the DMA engine reports the transfer complete; the
                // unsafe 'static extension above relies on this wait.
                transfer.wait().unwrap();
            }
            // Diagnostic: dump the interrupt status again after completion.
            let int_raw = DMA::regs()
                .ch(channel_number as usize)
                .out_int()
                .raw()
                .read();
            log::error!(
                indoc! {"
                    int_raw:
                    total_eof: {total_eof}
                    eof: {eof}
                    done: {done}
                    dscr_err: {dscr_err}
                "},
                total_eof = int_raw.out_total_eof().bit_is_set(),
                eof = int_raw.out_eof().bit_is_set(),
                done = int_raw.out_done().bit_is_set(),
                dscr_err = int_raw.out_dscr_err().bit_is_set(),
            );
            error!("int_raw_msg: 0x{:08x?}", INT_CHANNEL.try_receive());
            // TODO: Get rid of this!
            // Verify the copy: after the transfer, the bounce destination must
            // equal the source window it was copied from.
            assert_eq!(&*self.bounce_buffer_dst, buffer_src_window);
        }
    }
}
/// Diagnostic channel carrying raw DMA interrupt-status words from the
/// interrupt handler to `DmaTxBounceBuf::send`, which drains it with
/// `try_receive`.
static INT_CHANNEL: Channel<RawMutex, u32, 128> = Channel::new();

/// Interrupt handler for the DMA/SPI interrupts enabled in
/// `DmaTxBounceBuf::send`: snapshots DMA channel 0's raw TX interrupt status
/// and forwards it to `INT_CHANNEL` for out-of-band logging.
#[handler]
#[ram] // Keep the handler in IRAM to avoid a flash-cache miss in the interrupt path.
fn dma_interrupt_handler() {
    let int_raw = DMA::regs().ch(0).out_int().raw().read();
    // Best-effort send: the consumer only drains the channel occasionally via
    // `try_receive`, so it can legitimately be full. Dropping the event is
    // harmless (it is diagnostic data); panicking inside an interrupt handler
    // is not — which is why the previous `.unwrap()` was removed.
    let _ = INT_CHANNEL.try_send(int_raw.bits());
    // NOTE(review): the interrupt status is never cleared here (no write to
    // `out_int().clr()`), so a still-asserted source may re-enter this handler
    // immediately — TODO confirm this is intended for the diagnostic path.
}
// unsafe impl DmaTxBuffer for DmaTxStreamBuf {
// type View = Self;
// type Final = Self;
// fn prepare(&mut self) -> dma::Preparation {
// dma::Preparation {
// start: (),
// direction: (),
// accesses_psram: false,
// burst_transfer: (),
// check_owner: (),
// auto_write_back: (),
// }
// }
// fn into_view(self) -> Self::View {
// self
// }
// fn from_view(view: Self::View) -> Self::Final {
// view
// }
// }
/// Drives the LCD: waits for the renderer to submit a frame, hands the
/// framebuffer's `DmaTxBuf` to the DPI peripheral, and returns the buffer to
/// `framebuffer` once the transfer finished, signalling the renderer to
/// produce the next frame.
pub async fn run_lcd(
    mut st7701s: St7701s<'static, Blocking>,
    framebuffer: &'static mut Framebuffer,
) {
    loop {
        // Block until the renderer hands us a finished frame.
        SIGNAL_LCD_SUBMIT.wait().await;
        // TODO: Use bounce buffers:
        // https://docs.espressif.com/projects/esp-idf/en/v5.0/esp32s3/api-reference/peripherals/lcd.html#bounce-buffer-with-single-psram-frame-buffer
        // This can be implemented as a `DmaTxBuffer`.
        let submitted = st7701s.dpi.send(false, framebuffer.dma_buf.take().unwrap());
        let transfer = match submitted {
            Ok(transfer) => transfer,
            Err((error, returned_dpi, returned_buf)) => {
                error!(
                    "An error occurred while initiating transfer of the framebuffer to the LCD display: {error:?}"
                );
                // Hand the driver and the buffer back so the next iteration can retry.
                st7701s.dpi = returned_dpi;
                framebuffer.dma_buf = Some(returned_buf);
                continue;
            }
        };
        // This could be used to allow other tasks to be executed on the first core, but that causes
        // the flash to be accessed, which interferes with the framebuffer transfer.
        // For that reason, it is disabled, and this task blocks the first core, until the transfer
        // is complete.
        #[cfg(not(feature = "limit-fps"))]
        while !transfer.is_done() {
            rmk::embassy_futures::yield_now().await;
        }
        let (result, returned_dpi, returned_buf) = transfer.wait();
        st7701s.dpi = returned_dpi;
        framebuffer.dma_buf = Some(returned_buf);
        // Let the renderer start on the next frame while we report any error.
        SIGNAL_UI_RENDER.signal(());
        if let Err(error) = result {
            error!(
                "An error occurred while transferring framebuffer to the LCD display: {error:?}"
            );
        }
    }
}
/// A DMA-transmittable framebuffer of 16-bit (RGB565) pixels.
pub struct Framebuffer {
    /// Width in pixels.
    pub width: u32,
    /// Height in pixels.
    pub height: u32,
    // `None` while a transfer borrows the buffer: `run_lcd` `take`s it for the
    // DMA transfer and puts it back once the transfer completes or fails.
    pub dma_buf: Option<DmaTxBuf>,
}
/// Allocates a zeroed buffer appropriately aligned (32 bytes) for use with DMA.
///
/// # Panics
/// Panics if `len` is not a multiple of 32 bytes, or if `alloc` fails.
pub fn allocate_dma_buffer_in<A: Allocator>(len: usize, alloc: A) -> Box<[u8], A> {
    const DMA_ALIGNMENT: usize = 32;
    assert_eq!(
        len % DMA_ALIGNMENT,
        0,
        "the size of a DMA buffer must be a multiple of {DMA_ALIGNMENT} bytes, but it is {len} bytes large"
    );
    // ⚠️ Note: For chips that support DMA to/from PSRAM (ESP32-S3) DMA transfers to/from PSRAM
    // have extra alignment requirements. The address and size of the buffer pointed to by each
    // descriptor must be a multiple of the cache line (block) size. This is 32 bytes on ESP32-S3.
    // That is ensured by the `assert_eq` preceding this block.
    //
    // NOTE(review): when this `Box<[u8], A>` is dropped, `Box` deallocates with
    // the layout derived from the slice value (alignment 1, and the
    // allocator-returned slice length), whereas the allocation below used
    // alignment 32 — technically a layout mismatch under the `Allocator`
    // contract. TODO confirm the allocators used here (`Global`,
    // `PSRAM_ALLOCATOR`) tolerate this, or ensure these buffers are never
    // dropped (they are currently leaked by callers).
    //
    // SAFETY: `raw` was just obtained from `alloc.allocate_zeroed`, is non-null,
    // and ownership is transferred exactly once into the returned `Box`.
    unsafe {
        let raw = alloc
            .allocate_zeroed(Layout::from_size_align(len, DMA_ALIGNMENT).unwrap())
            .expect("failed to allocate a DMA buffer");
        Box::from_raw_in(raw.as_ptr(), alloc)
    }
}
impl Framebuffer {
    /// Allocates a `width * height` RGB565 framebuffer from PSRAM, builds its
    /// DMA descriptor chain, and wraps both (leaked, program-lifetime) in a
    /// ready-to-transmit `DmaTxBuf`.
    pub fn new(width: u32, height: u32) -> Self {
        let bytes_per_pixel = core::mem::size_of::<u16>();
        let buffer_len = width as usize * height as usize * bytes_per_pixel;
        let pixel_buffer = allocate_dma_buffer_in(buffer_len, &PSRAM_ALLOCATOR);
        let burst_config: BurstConfig = ExternalBurstConfig::Size16.into();
        info!(
            "PSRAM SPI burst config: max_compatible_chunk_size={}",
            burst_config.max_compatible_chunk_size()
        );
        let descriptor_count = esp_hal::dma::descriptor_count(
            buffer_len,
            burst_config.max_compatible_chunk_size(),
            false,
        );
        // Descriptors are initialized by `DmaTxBuf::new`.
        let descriptors = vec![DmaDescriptor::EMPTY; descriptor_count].into_boxed_slice();
        // Both allocations live for the whole program, so leaking them is fine.
        let dma_buf = DmaTxBuf::new(Box::leak(descriptors), Box::leak(pixel_buffer)).unwrap();
        Self {
            width,
            height,
            dma_buf: Some(dma_buf),
        }
    }

    /// Reinterprets the framebuffer bytes as a mutable slice of RGB565 pixels
    /// for the software renderer to draw into.
    pub fn as_target_pixels(&mut self) -> &mut [Rgb565Pixel] {
        let raw_bytes = self.dma_buf.as_mut().unwrap().as_mut_slice();
        bytemuck::cast_slice_mut::<_, Rgb565Pixel>(raw_bytes)
    }
}