use core::{ alloc::Layout, pin::Pin, sync::atomic::{self, AtomicBool, AtomicU32, AtomicUsize}, }; use alloc::{ alloc::{Allocator, Global}, boxed::Box, vec, }; use embassy_sync::{ channel::{Channel, TrySendError}, signal::Signal, }; use embassy_time::{Duration, Instant, Timer, WithTimeout}; use esp_alloc::MemoryCapability; use esp_hal::{ Blocking, dma::{ self, AnyGdmaChannel, BufView, BurstConfig, DmaChannel, DmaChannelConvert, DmaDescriptor, DmaDescriptorFlags, DmaEligible, DmaRxStreamBuf, DmaTxBuf, DmaTxBuffer, DmaTxInterrupt, ExternalBurstConfig, InternalBurstConfig, Mem2Mem, SimpleMem2MemTransfer, }, dma_descriptors, handler, interrupt::{self, Priority}, lcd_cam::lcd::dpi::{Dpi, DpiTransfer}, peripherals::{DMA, DMA_CH0, Interrupt, Peripherals, SPI2}, ram, spi::master::AnySpi, }; use esp_sync::RawMutex; use i_slint_core::software_renderer::{Rgb565Pixel, TargetPixel}; use indoc::{formatdoc, indoc}; use log::{error, info, warn}; use rmk::{ futures::{FutureExt, pin_mut}, join_all, }; use crate::{PSRAM_ALLOCATOR, peripherals::st7701s::St7701s, util::DurationExt}; /// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs /// Write back a specific range of data in the cache. #[doc(hidden)] #[unsafe(link_section = ".rwtext")] pub unsafe fn cache_writeback_addr(addr: u32, size: u32) { unsafe extern "C" { fn rom_Cache_WriteBack_Addr(addr: u32, size: u32); fn Cache_Suspend_DCache_Autoload() -> u32; fn Cache_Resume_DCache_Autoload(value: u32); } // suspend autoload, avoid load cachelines being written back unsafe { let autoload = Cache_Suspend_DCache_Autoload(); rom_Cache_WriteBack_Addr(addr, size); Cache_Resume_DCache_Autoload(autoload); } } /// THIS IS TAKEN FROM https://github.com/esp-rs/esp-hal/blob/main/esp-hal/src/soc/esp32s3/mod.rs /// Invalidate a specific range of addresses in the cache. #[doc(hidden)] #[unsafe(link_section = ".rwtext")] pub unsafe fn cache_invalidate_addr(addr: u32, size: u32) { unsafe extern "C" { fn Cache_Invalidate_Addr(addr: u32, size: u32); } unsafe { Cache_Invalidate_Addr(addr, size); } } // const DMA_CHANNEL_INBOUND: usize = 0; // const INTERRUPT_INBOUND: Interrupt = Interrupt::DMA_IN_CH0; const DMA_CHANNEL_OUTBOUND: usize = 2; const INTERRUPT_OUTBOUND: Interrupt = Interrupt::DMA_OUT_CH2; pub struct DmaBounce { // TODO: Make these generic. // They currently cannot be generic, because they lacks a `reborrow` method. channel: DMA_CH0<'static>, // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`. peripheral_src: AnySpi<'static>, // This can also be more generic, see `DmaEligible` in `Mem2Mem::new`. peripheral_dst: Option>, // TODO: Consider having a separate burst config for the two transfers. burst_config: BurstConfig, cyclic: bool, /// The size of each window. window_size: usize, /// The number of windows. windows_len: usize, buffer_src: &'static mut [u8], // Two buffers of size `window_size`, // one of which is being written to, while the other is being read from. bounce_buffer_dst: &'static mut [u8], bounce_buffer_src: &'static mut [u8], // A descriptor list that spans a buffer of size `window_size`. // The buffer pointers need to be updated before each transmission to point to the correct window in the source buffer `src_buffer`. src_descs: &'static mut [DmaDescriptor], // A descriptor list that spans a buffer of size `window_size`. // The buffer pointers need to be updated before each transmission to point to the correct bounce buffer. bounce_dst_descs: &'static mut [DmaDescriptor], // A cyclic descriptor list that spans the buffers `bounce_buffer_dst` and `bounce_buffer_src`. bounce_src_descs: &'static mut [DmaDescriptor], // The index of the next window about to be received into the destination bounce buffer. window_index_next: usize, frame_index_next: usize, } impl DmaBounce { /// * `allocator` - The allocator used to allocate the bounce buffers. /// * `channel` - The DMA channel used to transfer data from the source buffer to the bounce buffers. /// * `peripheral_src` - The peripheral to transfer data from the source buffer to the bounce buffers. /// * `peripheral_dst` - The peripheral to transfer data to, from the bounce buffers. /// * `buffer_src` - The source buffer, typically allocated in external memory. /// * `row_front_porch_bytes` - The number of arbitrary-valued bytes to be sent in front of each row to the destination peripheral. /// * `row_width_bytes` - The width of a row, in bytes. /// * `window_size_rows` - The size of a single bounce buffer, in rows. /// * `burst_config` - The burst config to use for memory transfers (both in and out). TODO: This could be split. /// * `cyclic` - Experimental! Whether to use a cyclic descriptor list for transfer from the bounce buffers to the destination peripheral. pub fn new( allocator: impl Allocator + Copy + 'static, channel: DMA_CH0<'static>, peripheral_src: AnySpi<'static>, peripheral_dst: Dpi<'static, Blocking>, buffer_src: &'static mut [u8], row_front_porch_bytes: usize, row_width_bytes: usize, window_size_rows: usize, burst_config: BurstConfig, cyclic: bool, ) -> Self { let window_size = row_width_bytes * window_size_rows; assert_eq!( buffer_src.len() % window_size, 0, "the size of a source buffer must be a multiple of the window size ({window_size} bytes), but it is {len} bytes large", len = buffer_src.len() ); // Conservative alignment. Maxiumum of the cartesian product of [tx, rx] × [internal, external]. let alignment = burst_config.min_compatible_alignment(); assert_eq!( buffer_src.as_ptr() as usize % alignment, 0, "the source buffer must be sufficiently aligned to {alignment} bytes for the burst config", ); assert_eq!( row_width_bytes % alignment, 0, "the size of a row in bytes must be sufficiently aligned to {alignment} bytes for the burst config", ); assert_eq!( row_front_porch_bytes % alignment, 0, "the size of a row's front porch in bytes must be sufficiently aligned to {alignment} bytes for the burst config", ); // We need to make the destination peripheral read the front porch data from somewhere, // and that somewhere is currently the bounce buffer. // Therefore the front porch must be in bounds. assert!( row_front_porch_bytes <= window_size, "front porch too large" ); let windows_len = buffer_src.len() / window_size; // TODO: Figure out a way to avoid `leak`ing memory. // We probably want to store the `Box`es and then unsafely extend the lifetime at sites of usage. let bounce_buffer_dst = Box::leak(allocate_dma_buffer_in(window_size, burst_config, allocator)); let bounce_buffer_src = Box::leak(allocate_dma_buffer_in(window_size, burst_config, allocator)); let src_descs = Self::linear_descriptors_for_buffer(window_size, burst_config, |desc| { desc.reset_for_tx(desc.next.is_null()); // Length for TX buffers must be set in software. // In RX buffers, it is set by hardware. desc.set_length(desc.size()); }); let bounce_dst_descs = Self::linear_descriptors_for_buffer(window_size, burst_config, |_| {}); let bounce_src_descs = if cyclic { Self::bounce_descriptors_for_buffer_cyclic( unsafe { ( &mut *(bounce_buffer_dst as *mut _), &mut *(bounce_buffer_src as *mut _), ) }, burst_config, ) } else { Self::bounce_descriptors_for_buffer_single( windows_len, row_front_porch_bytes, row_width_bytes, window_size_rows, unsafe { ( &mut *(bounce_buffer_dst as *mut _), &mut *(bounce_buffer_src as *mut _), ) }, burst_config, ) }; Self { channel, peripheral_src, peripheral_dst: Some(peripheral_dst), burst_config, cyclic, window_size, windows_len, buffer_src, bounce_buffer_dst, bounce_buffer_src, src_descs, bounce_dst_descs, bounce_src_descs, window_index_next: 0, frame_index_next: 0, } } fn linear_descriptors_for_buffer( buffer_len: usize, burst_config: BurstConfig, mut setup_desc: impl FnMut(&mut DmaDescriptor), ) -> &'static mut [DmaDescriptor] { let max_chunk_size = burst_config.max_compatible_chunk_size(); let descriptors_len = dma::descriptor_count(buffer_len, max_chunk_size, false); // TODO: This leaks memory. Ensure it's only called during setup. let descriptors = Box::leak(vec![DmaDescriptor::EMPTY; descriptors_len].into_boxed_slice()); // Link up the descriptors. let mut next = core::ptr::null_mut(); for desc in descriptors.iter_mut().rev() { desc.next = next; next = desc; } // Prepare each descriptor's buffer size. let mut descriptors_it = descriptors.iter_mut(); let mut remaining_len = buffer_len; while remaining_len > 0 { let chunk_size = core::cmp::min(max_chunk_size, remaining_len); let desc = descriptors_it.next().unwrap(); desc.set_size(chunk_size); (setup_desc)(desc); remaining_len -= chunk_size; } descriptors } fn bounce_descriptors_for_buffer_cyclic( bounce_buffers: (&'static mut [u8], &'static mut [u8]), burst_config: BurstConfig, ) -> &'static mut [DmaDescriptor] { assert_eq!( bounce_buffers.0.len(), bounce_buffers.1.len(), "bounce buffers must be equal in size" ); let buffer_len = bounce_buffers.0.len(); let max_chunk_size = burst_config.max_compatible_chunk_size(); let descriptors_len = dma::descriptor_count( buffer_len, max_chunk_size, // TODO: This might need to be set to true? // I don't know why cyclic descriptor lists must be at least 3 descriptors long. false, ); let descriptors_combined = Box::leak(vec![DmaDescriptor::EMPTY; 2 * descriptors_len].into_boxed_slice()); let descriptors_pair = descriptors_combined.split_at_mut(descriptors_len); // Link up the descriptors. fn link_up_descriptors( descriptors: &mut [DmaDescriptor], descriptors_other: &mut [DmaDescriptor], ) { let mut next = descriptors_other.first_mut().unwrap(); for desc in descriptors.iter_mut().rev() { desc.next = next; next = desc; } } link_up_descriptors(descriptors_pair.0, descriptors_pair.1); link_up_descriptors(descriptors_pair.1, descriptors_pair.0); // Prepare each descriptor's buffer size. for (bounce_buffer, descriptors) in [ (bounce_buffers.0, descriptors_pair.0), (bounce_buffers.1, descriptors_pair.1), ] { let mut descriptors_it = descriptors.iter_mut(); let mut remaining_bounce_buffer = bounce_buffer; while !remaining_bounce_buffer.is_empty() { let chunk_size = core::cmp::min(max_chunk_size, remaining_bounce_buffer.len()); let desc = descriptors_it.next().unwrap(); desc.buffer = remaining_bounce_buffer.as_mut_ptr(); remaining_bounce_buffer = &mut remaining_bounce_buffer[chunk_size..]; let is_last = remaining_bounce_buffer.is_empty(); desc.set_size(chunk_size); desc.set_length(chunk_size); desc.reset_for_tx(is_last); } } descriptors_combined } fn bounce_descriptors_for_buffer_single( windows_len: usize, row_front_porch_bytes: usize, row_width_bytes: usize, window_size_rows: usize, bounce_buffers: (&'static mut [u8], &'static mut [u8]), burst_config: BurstConfig, ) -> &'static mut [DmaDescriptor] { assert_eq!( bounce_buffers.0.len(), bounce_buffers.1.len(), "bounce buffers must be equal in size" ); // If an odd number of windows were needed, two descriptor lists would be needed, assert_eq!(windows_len % 2, 0, "the number of windows must be even"); let buffer_len = bounce_buffers.0.len(); assert_eq!( buffer_len, row_width_bytes * window_size_rows, "the provided bounce buffers have an invalid size" ); warn!( "windows_len: {windows_len}\nrow_front_porch_bytes: {row_front_porch_bytes}\nrow_width_bytes: {row_width_bytes}\nwindow_size_rows: {window_size_rows}\nbuffer_len: {buffer_len}", ); let max_chunk_size = burst_config.max_compatible_chunk_size(); let descriptors_per_row_front_porch = dma::descriptor_count(row_front_porch_bytes, max_chunk_size, false); let descriptors_per_row_stored = dma::descriptor_count(row_width_bytes, max_chunk_size, false); let descriptors_per_row = descriptors_per_row_stored + descriptors_per_row_front_porch; let descriptors_per_window = window_size_rows * descriptors_per_row; let descriptors_per_frame = descriptors_per_window * windows_len; let descriptors_frame = Box::leak(vec![DmaDescriptor::EMPTY; descriptors_per_frame].into_boxed_slice()); // Link up the descriptors. let mut next = core::ptr::null_mut(); for desc in descriptors_frame.iter_mut().rev() { desc.next = next; next = desc; } // Prepare each descriptor's buffer size. let bounce_buffers = [bounce_buffers.0, bounce_buffers.1]; for (window_index, descriptors_window) in descriptors_frame .chunks_mut(descriptors_per_window) .enumerate() { let bounce_buffer_index = window_index % 2; let bounce_buffer = &mut *bounce_buffers[bounce_buffer_index]; // let bounce_buffer_ptr = bounce_buffers[bounce_buffer_index].as_mut_ptr(); // let mut remaining_bounce_buffer = &mut *bounce_buffers[bounce_buffer_index]; for (row_index_in_window, descriptors_row) in descriptors_window .chunks_mut(descriptors_per_row) .enumerate() { // let row_index = row_index_in_window + window_index * window_size_rows; let (descriptors_row_front_porch, descriptors_row_stored) = descriptors_row.split_at_mut(descriptors_per_row_front_porch); // Prepare front porch descriptors. { let mut descriptors_it = descriptors_row_front_porch.iter_mut(); let mut remaining_front_porch = row_front_porch_bytes; while remaining_front_porch > 0 { let desc = descriptors_it.next().unwrap(); let chunk_size = core::cmp::min(max_chunk_size, remaining_front_porch); remaining_front_porch -= chunk_size; // Just make it point at a bounce buffer. // It is guaranteed to have enough bytes by `DmaBounce::new`. desc.buffer = bounce_buffer.as_mut_ptr(); desc.set_size(chunk_size); desc.set_length(chunk_size); desc.reset_for_tx(false); } assert!( descriptors_it.next().is_none(), "front porch descriptors must be used up" ); assert_eq!( descriptors_row_front_porch .iter() .map(|desc| desc.size()) .sum::(), row_front_porch_bytes ); } // Prepare window descriptors. { let mut remaining_bounce_buffer = &mut bounce_buffer [row_index_in_window * row_width_bytes..][..row_width_bytes]; // if remaining_bounce_buffer.len() > row_width_bytes { // remaining_bounce_buffer = &mut remaining_bounce_buffer[..row_width_bytes]; // } for desc in &mut *descriptors_row_stored { let chunk_size = core::cmp::min(max_chunk_size, remaining_bounce_buffer.len()); desc.buffer = remaining_bounce_buffer.as_mut_ptr(); remaining_bounce_buffer = &mut remaining_bounce_buffer[chunk_size..]; desc.set_size(chunk_size); desc.set_length(chunk_size); desc.reset_for_tx(false); } assert!( remaining_bounce_buffer.is_empty(), "bounce buffer must be used up" ); assert_eq!( descriptors_row_stored .iter() .map(|desc| desc.size()) .sum::(), row_width_bytes ); } } // Set EOF bit on the last descriptor of the window, to signal // that the bounce buffer is done being read from. if let Some(last_desc) = descriptors_window.last_mut() { last_desc.reset_for_tx(true); } assert_eq!( descriptors_window .iter() .map(|desc| desc.size()) .sum::(), window_size_rows * (row_front_porch_bytes + row_width_bytes) ); } assert_eq!( descriptors_frame .iter() .map(|desc| desc.size()) .sum::(), windows_len * window_size_rows * (row_front_porch_bytes + row_width_bytes) ); descriptors_frame } fn linear_descriptors_prepare( descriptors: &mut [DmaDescriptor], mut buffer: Option<&mut [u8]>, mut setup_desc: impl FnMut(&mut DmaDescriptor), ) { for descriptor in descriptors.iter_mut() { if let Some(inner_buffer) = buffer { descriptor.buffer = inner_buffer.as_mut_ptr(); buffer = Some(&mut inner_buffer[descriptor.size()..]); } (setup_desc)(descriptor); } if let Some(buffer) = buffer { assert!( buffer.is_empty(), "a buffer of an incompatible length was assigned to a descriptor set" ); } } fn enable_interrupts() { // Enable interrupts for the peripheral // interrupt::enable(INTERRUPT_INBOUND, dma_inbound_interrupt_handler.priority()).unwrap(); interrupt::enable( INTERRUPT_OUTBOUND, dma_outbound_interrupt_handler.priority(), ) .unwrap(); // Bind the handler unsafe { // interrupt::bind_interrupt(INTERRUPT_INBOUND, dma_inbound_interrupt_handler.handler()); interrupt::bind_interrupt(INTERRUPT_OUTBOUND, dma_outbound_interrupt_handler.handler()); } // Enable interrupts in the peripheral. // DMA::regs() // .ch(DMA_CHANNEL_INBOUND) // .in_int() // .ena() // .modify(|_, w| w.in_done().bit(true)); DMA::regs() .ch(DMA_CHANNEL_OUTBOUND) .out_int() .ena() .modify(|_, w| w.out_eof().bit(true)); } /// Receive a window of bytes into the current dst bounce buffer. /// Finally, swaps the bounce buffers. async fn receive_window(&mut self) { // Descriptors are initialized by `DmaTxBuf::new`. let buffer_src_window = &mut self.buffer_src[self.window_index_next * self.window_size..][..self.window_size]; Self::linear_descriptors_prepare(self.src_descs, Some(buffer_src_window), |_desc| { // No need to call `DmaDescriptor::reset_for_tx`, because // 1. we don't rely on the ownership flag; // 2. the EOF flag is already set during the construction of this buffer. }); // TODO: Precompute a descriptor list for each buffer, then use `None` instead of `Some(&mut *self.bounce_buffer_dst)`. Self::linear_descriptors_prepare( self.bounce_dst_descs, Some(&mut *self.bounce_buffer_dst), |desc| { desc.reset_for_rx(); }, ); { // Extend the lifetime to 'static because it is required by Mem2Mem. // // Safety: // Pointees are done being used by the driver before this scope ends, // this is because we `SimpleMem2MemTransfer::wait()` on the transfer to finish. let bounce_dst_descs: &'static mut [DmaDescriptor] = unsafe { &mut *(self.bounce_dst_descs as *mut _) }; let src_descs: &'static mut [DmaDescriptor] = unsafe { &mut *(self.src_descs as *mut _) }; let mut mem2mem = Mem2Mem::new(self.channel.reborrow(), self.peripheral_src.reborrow()) .into_async() .with_descriptors(bounce_dst_descs, src_descs, self.burst_config) .unwrap(); let transfer = mem2mem .start_transfer(&mut self.bounce_buffer_dst, buffer_src_window) .unwrap(); transfer.wait_async().await.unwrap(); } self.increase_window_counter(1); } fn increase_window_counter(&mut self, windows: usize) { if windows % 2 == 1 { core::mem::swap(&mut self.bounce_buffer_dst, &mut self.bounce_buffer_src); } self.window_index_next += windows; self.frame_index_next += self.window_index_next / self.windows_len; self.window_index_next = self.window_index_next % self.windows_len; } pub async fn send(&mut self) { Self::enable_interrupts(); // Receive the first window, so that the outbound transfer can read valid data. self.receive_window().await; let mut dma_tx_buffer = self.get_dma_tx_buffer(); let mut transfer = self .peripheral_dst .take() .unwrap() .send(self.cyclic /* Send perpetually */, dma_tx_buffer) .unwrap_or_else(|(error, _, _)| { panic!("failed to begin the transmission of the first frame: {error:?}"); }); let mut windows_skipped_total = 0; loop { // warn!( // "Receiving window: {} {}", // self.window_index_next, self.frame_index_next // ); self.receive_window().await; // warn!( // "Window received: {} {}", // self.window_index_next, self.frame_index_next // ); let windows_skipped = WINDOWS_SKIPPED .wait() .with_timeout(Duration::from_millis(100)) .await .unwrap_or_else(|_| { error!("Timed out when waiting for skipped windows."); 0 // TODO: This should be -1 to repeat the same window. }); // let windows_skipped = match windows_skipped { // Ok(windows_skipped) => windows_skipped, // Err(_) => { // warn!( // "Waiting for skipped windows timed out. Transfer done: {}", // transfer.is_done() // ); // if transfer.is_done() { // let (result, _, _) = transfer.wait(); // panic!("Transfer result: {result:?}"); // } // 0 // } // }; if windows_skipped > 0 { self.increase_window_counter(windows_skipped); windows_skipped_total += windows_skipped; // error!( // "Skipped {windows_skipped} windows. Windows skipped per frame: {:.2}%", // 100.0 * windows_skipped_total as f32 // / (self.windows_len * (self.frame_index_next + 1)) as f32 // ); } // warn!( // "X: {} {} {}", // windows_skipped, self.window_index_next, self.frame_index_next // ); if !self.cyclic && (self.window_index_next == 1 || transfer.is_done()) { if self.window_index_next > 1 { self.increase_window_counter(self.windows_len - self.window_index_next + 1); } else if self.window_index_next == 0 { self.increase_window_counter(1); } // TODO: Investigate why the DPI transfer isn't done at this point. // The `DpiTransfer::wait()` below takes 0.001039 s. // Perhaps it's the minimum screen refresh period? // // assert!(transfer.is_done()); // if !transfer.is_done() { // error!( // "transfer is not done yet. {} {}", // self.frame_index_next, self.window_index_next // ); // } let result; let peripheral_dst; // let start = Instant::now(); (result, peripheral_dst, dma_tx_buffer) = transfer.wait(); // let duration = Instant::now().duration_since(start); // warn!("Waited for {} seconds", duration.display_as_secs()); if let Err(error) = result { error!("DPI error during sending: {error:?}"); } transfer = peripheral_dst .send(false, dma_tx_buffer) .unwrap_or_else(|(error, _, _)| { panic!("failed to begin the transmission of a frame: {error:?}"); }); FRAMES_SKIPPED.signal( FRAMES_SKIPPED .try_take() .map(|frames_skipped| frames_skipped + 1) .unwrap_or_default(), ); } } } fn get_dma_tx_buffer(&mut self) -> DmaTxBounceBuf { DmaTxBounceBuf { preparation: dma::Preparation { start: self.bounce_src_descs.first_mut().unwrap(), direction: dma::TransferDirection::Out, accesses_psram: false, burst_transfer: self.burst_config, check_owner: Some(false), // Possibly want to set this to false auto_write_back: false, // Possibly true }, } } } pub struct DmaTxBounceBuf { preparation: dma::Preparation, } unsafe impl DmaTxBuffer for DmaTxBounceBuf { type View = Self; type Final = Self; fn prepare(&mut self) -> dma::Preparation { dma::Preparation { start: self.preparation.start, direction: self.preparation.direction, accesses_psram: self.preparation.accesses_psram, burst_transfer: self.preparation.burst_transfer, check_owner: self.preparation.check_owner, auto_write_back: self.preparation.auto_write_back, } } fn into_view(self) -> Self::View { self } fn from_view(view: Self::View) -> Self::Final { view } } /// Intended to be listened on by the renderer, to synchronize the refresh frequency with. pub static FRAMES_SKIPPED: Signal = Signal::new(); static WINDOWS_SKIPPED: Signal = Signal::new(); // static INBOUND_TRANSFER_FINISHED: Signal = Signal::new(); #[handler(priority = Priority::Priority3)] #[ram] // Improves performance. fn dma_outbound_interrupt_handler() { let interrupt = DMA::regs().ch(DMA_CHANNEL_OUTBOUND).out_int(); let bounce_buffer_processed = interrupt.st().read().out_eof().bit_is_set(); if bounce_buffer_processed { // Clear the bit by writing 1 to the clear bits. interrupt.clr().write(|w| w.out_eof().bit(true)); WINDOWS_SKIPPED.signal( WINDOWS_SKIPPED .try_take() .map(|windows_skipped| windows_skipped + 1) .unwrap_or_default(), ); } } // #[handler(priority = Priority::Priority3)] // #[ram] // Improves performance. // fn dma_inbound_interrupt_handler() { // warn!("Inbound"); // let interrupt = DMA::regs().ch(DMA_CHANNEL_INBOUND).in_int(); // let bounce_buffer_processed = interrupt.st().read().in_done().bit_is_set(); // if bounce_buffer_processed { // // Clear the bit by writing 1 to the clear bits. // interrupt.clr().write(|w| w.in_done().bit(true)); // assert!( // !INBOUND_TRANSFER_FINISHED.signaled(), // "inbound transfer already signalled" // ); // INBOUND_TRANSFER_FINISHED.signal(()); // } // } // pub async fn run_lcd( // mut st7701s: St7701s<'static, Blocking>, // framebuffer: &'static mut Framebuffer, // ) { // loop { // // Timer::after(Duration::from_millis(100)).await; // // yield_now().await; // SIGNAL_LCD_SUBMIT.wait().await; // // TODO: Use bounce buffers: // // https://docs.espressif.com/projects/esp-idf/en/v5.0/esp32s3/api-reference/peripherals/lcd.html#bounce-buffer-with-single-psram-frame-buffer // // This can be implemented as a `DmaTxBuffer`. // let transfer = match st7701s.dpi.send(false, framebuffer.dma_buf.take().unwrap()) { // Err((error, result_dpi, result_dma_buf)) => { // error!( // "An error occurred while initiating transfer of the framebuffer to the LCD display: {error:?}" // ); // st7701s.dpi = result_dpi; // framebuffer.dma_buf = Some(result_dma_buf); // continue; // } // Ok(transfer) => transfer, // }; // // This could be used to allow other tasks to be executed on the first core, but that causes // // the flash to be accessed, which interferes with the framebuffer transfer. // // For that reason, it is disabled, and this task blocks the first core, until the transfer // // is complete. // #[cfg(not(feature = "limit-fps"))] // while !transfer.is_done() { // // Timer::after_millis(1).await; // rmk::embassy_futures::yield_now().await; // } // let result; // let dma_buf; // (result, st7701s.dpi, dma_buf) = transfer.wait(); // framebuffer.dma_buf = Some(dma_buf); // SIGNAL_UI_RENDER.signal(()); // if let Err(error) = result { // error!( // "An error occurred while transferring framebuffer to the LCD display: {error:?}" // ); // } // } // } /// Allocates a buffer appropriately aligned for use with DMA. pub fn allocate_dma_buffer_in( len: usize, burst_config: BurstConfig, alloc: A, ) -> Box<[u8], A> { // Conservative alignment. Maxiumum of the cartesian product of [tx, rx] × [internal, external]. let alignment = burst_config.min_compatible_alignment(); assert_eq!( len % alignment, 0, "the size of a DMA buffer must be a multiple of {alignment} bytes, but it is {len} bytes large" ); // ⚠️ Note: For chips that support DMA to/from PSRAM (ESP32-S3) DMA transfers to/from PSRAM // have extra alignment requirements. The address and size of the buffer pointed to by each // descriptor must be a multiple of the cache line (block) size. This is 32 bytes on ESP32-S3. // That is ensured by the `assert_eq` preceding this block. unsafe { let raw = alloc .allocate_zeroed(Layout::from_size_align(len, alignment).unwrap()) .expect("failed to allocate a DMA buffer"); Box::from_raw_in(raw.as_ptr(), alloc) } } pub struct Framebuffer { pub width: u32, pub height: u32, pub bounce_buffers: DmaBounce, } impl Framebuffer { pub fn new( channel: DMA_CH0<'static>, peripheral_src: AnySpi<'static>, peripheral_dst: Dpi<'static, Blocking>, front_porch_pixels: u32, width_pixels: u32, height_pixels: u32, rows_per_window: usize, cyclic: bool, ) -> Self { const BYTES_PER_PIXEL: usize = core::mem::size_of::(); let buffer_size = width_pixels as usize * height_pixels as usize * BYTES_PER_PIXEL; let burst_config = BurstConfig { internal_memory: InternalBurstConfig::Enabled, external_memory: ExternalBurstConfig::Size64, }; let psram_buffer = Box::leak(allocate_dma_buffer_in( buffer_size, burst_config, &PSRAM_ALLOCATOR, )); let bounce_buffers = DmaBounce::new( Global, channel, peripheral_src, peripheral_dst, psram_buffer, front_porch_pixels as usize * BYTES_PER_PIXEL, width_pixels as usize * BYTES_PER_PIXEL, rows_per_window, burst_config, cyclic, ); Self { width: width_pixels, height: height_pixels, bounce_buffers, } } pub fn as_target_pixels(&mut self) -> &mut [Rgb565Pixel] { bytemuck::cast_slice_mut::<_, Rgb565Pixel>(self.bounce_buffers.buffer_src) } }