| author | Ivan Molodetskikh <yalterz@gmail.com> | 2025-08-03 13:29:48 +0300 |
|---|---|---|
| committer | Ivan Molodetskikh <yalterz@gmail.com> | 2025-08-04 14:36:34 +0200 |
| commit | f49ecc31c4d2a187c4c479e84fc6e6acaf5231f2 | |
| tree | 9b7763a90f7700ec78a515c41472a25602d76c08 | |
| parent | 15b4acc17ee2c8ac31f7e6093df57aa66e6bc2de | |
pw_utils: Wait for frame completion before queueing
Without explicit sync, we have no way to signal the PipeWire consumer when the
rendering is done. So, wait until it's done before giving it the frame.
This should fix flickering screencasts on NVIDIA.
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | niri-config/src/lib.rs | 3 |
| -rw-r--r-- | src/niri.rs | 31 |
| -rw-r--r-- | src/pw_utils.rs | 126 |
| -rw-r--r-- | wiki/Configuration:-Debug-Options.md | 17 |
| -rw-r--r-- | wiki/Nvidia.md | 4 |

5 files changed, 101 insertions(+), 80 deletions(-)
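The change replaces the old blocking debug flag with an always-on asynchronous wait: rendering into the PipeWire dmabuf returns a `SyncPoint` (a GPU fence), the fence is exported as a pollable sync fd, and the buffer is queued to PipeWire from a calloop callback once that fd signals. Below is a minimal sketch of that pattern, not niri's exact code: `queue_when_rendered` and `queue_to_pipewire` are hypothetical names (the real call is `pw_stream_queue_buffer`), and `fence_fd` is assumed to come from `SyncPoint::export()`.

```rust
use std::os::fd::OwnedFd;

use calloop::generic::Generic;
use calloop::{Interest, LoopHandle, Mode, PostAction};

/// Sketch: park a frame until its GPU fence fd becomes readable, then queue it.
fn queue_when_rendered<Data>(
    handle: &LoopHandle<'static, Data>,
    fence_fd: OwnedFd, // assumed to be the fd from `SyncPoint::export()`
    mut queue_to_pipewire: impl FnMut(&mut Data) + 'static,
) {
    // A sync file fd polls as readable once the fence it wraps is signalled,
    // i.e. once the GPU has finished rendering the frame.
    let source = Generic::new(fence_fd, Interest::READ, Mode::OneShot);
    handle
        .insert_source(source, move |_readiness, _fd, data| {
            // Rendering is done; it is now safe to hand the frame to the
            // PipeWire consumer.
            queue_to_pipewire(data);
            Ok(PostAction::Remove)
        })
        .unwrap();
}
```

`Mode::OneShot` plus `PostAction::Remove` means each fence wakes the event loop at most once and the source cleans up after itself, which matches the registration the patch uses.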
````diff
diff --git a/niri-config/src/lib.rs b/niri-config/src/lib.rs
index 06067871..527e6f2b 100644
--- a/niri-config/src/lib.rs
+++ b/niri-config/src/lib.rs
@@ -2331,8 +2331,6 @@ pub struct DebugConfig {
     #[knuffel(child)]
     pub wait_for_frame_completion_before_queueing: bool,
     #[knuffel(child)]
-    pub wait_for_frame_completion_in_pipewire: bool,
-    #[knuffel(child)]
     pub enable_overlay_planes: bool,
     #[knuffel(child)]
     pub disable_cursor_plane: bool,
@@ -5327,7 +5325,6 @@ mod tests {
             preview_render: None,
             dbus_interfaces_in_non_session_instances: false,
             wait_for_frame_completion_before_queueing: false,
-            wait_for_frame_completion_in_pipewire: false,
             enable_overlay_planes: false,
             disable_cursor_plane: false,
             disable_direct_scanout: false,
````

````diff
diff --git a/src/niri.rs b/src/niri.rs
index 5f93ec21..088742b5 100644
--- a/src/niri.rs
+++ b/src/niri.rs
@@ -1881,12 +1881,8 @@ impl State {
         match &cast.target {
             CastTarget::Nothing => {
-                let config = self.niri.config.borrow();
-                let wait_for_sync = config.debug.wait_for_frame_completion_in_pipewire;
-                drop(config);
-
                 self.backend.with_primary_renderer(|renderer| {
-                    if cast.dequeue_buffer_and_clear(renderer, wait_for_sync) {
+                    if cast.dequeue_buffer_and_clear(renderer) {
                         cast.last_frame_time = get_monotonic_time();
                     }
                 });
@@ -1926,10 +1922,6 @@ impl State {
             }
         }
 
-        let config = self.niri.config.borrow();
-        let wait_for_sync = config.debug.wait_for_frame_completion_in_pipewire;
-        drop(config);
-
         self.backend.with_primary_renderer(|renderer| {
             // FIXME: pointer.
             let elements = mapped
                 .rev()
                 .collect::<Vec<_>>();
 
-            if cast.dequeue_buffer_and_render(
-                renderer,
-                &elements,
-                bbox.size,
-                scale,
-                wait_for_sync,
-            ) {
+            if cast.dequeue_buffer_and_render(renderer, &elements, bbox.size, scale) {
                 cast.last_frame_time = get_monotonic_time();
             }
         });
@@ -4994,10 +4980,6 @@ impl Niri {
         let scale = Scale::from(output.current_scale().fractional_scale());
 
-        let config = self.config.borrow();
-        let wait_for_sync = config.debug.wait_for_frame_completion_in_pipewire;
-        drop(config);
-
         let mut elements = None;
         let mut casts_to_stop = vec![];
 
@@ -5029,7 +5011,7 @@ impl Niri {
             self.render(renderer, output, true, RenderTarget::Screencast)
         });
 
-        if cast.dequeue_buffer_and_render(renderer, elements, size, scale, wait_for_sync) {
+        if cast.dequeue_buffer_and_render(renderer, elements, size, scale) {
             cast.last_frame_time = target_presentation_time;
         }
     }
@@ -5051,10 +5033,6 @@ impl Niri {
         let scale = Scale::from(output.current_scale().fractional_scale());
 
-        let config = self.config.borrow();
-        let wait_for_sync = config.debug.wait_for_frame_completion_in_pipewire;
-        drop(config);
-
         let mut casts_to_stop = vec![];
         let mut casts = mem::take(&mut self.casts);
 
@@ -5093,8 +5071,7 @@ impl Niri {
             // FIXME: pointer.
             let elements: Vec<_> = mapped.render_for_screen_cast(renderer, scale).collect();
 
-            if cast.dequeue_buffer_and_render(renderer, &elements, bbox.size, scale, wait_for_sync)
-            {
+            if cast.dequeue_buffer_and_render(renderer, &elements, bbox.size, scale) {
                 cast.last_frame_time = target_presentation_time;
             }
         }
````
````diff
diff --git a/src/pw_utils.rs b/src/pw_utils.rs
index 3c9a69b3..7abe7e94 100644
--- a/src/pw_utils.rs
+++ b/src/pw_utils.rs
@@ -38,6 +38,7 @@ use smithay::backend::drm::DrmDeviceFd;
 use smithay::backend::renderer::damage::OutputDamageTracker;
 use smithay::backend::renderer::element::RenderElement;
 use smithay::backend::renderer::gles::GlesRenderer;
+use smithay::backend::renderer::sync::SyncPoint;
 use smithay::output::{Output, OutputModeSource};
 use smithay::reexports::calloop::generic::Generic;
 use smithay::reexports::calloop::{Interest, LoopHandle, Mode, PostAction};
@@ -92,6 +93,13 @@ struct CastInner {
     refresh: u32,
     min_time_between_frames: Duration,
     dmabufs: HashMap<i64, Dmabuf>,
+    /// Buffers dequeued from PipeWire in process of rendering.
+    ///
+    /// This is an ordered list of buffers that we started rendering to and waiting for the
+    /// rendering to complete. The completion can be checked from the `SyncPoint`s. The buffers are
+    /// stored in order from oldest to newest, and the same ordering should be preserved when
+    /// submitting completed buffers to PipeWire.
+    rendering_buffers: Vec<(NonNull<pw_buffer>, SyncPoint)>,
 }
 
 #[allow(clippy::large_enum_variant)]
@@ -236,6 +244,7 @@ impl PipeWire {
             refresh,
             min_time_between_frames: Duration::ZERO,
             dmabufs: HashMap::new(),
+            rendering_buffers: Vec::new(),
         }));
 
         let listener = stream
@@ -655,6 +664,10 @@ impl PipeWire {
                 trace!("pw stream: remove_buffer");
                 let mut inner = inner.borrow_mut();
 
+                inner
+                    .rendering_buffers
+                    .retain(|(buf, _)| buf.as_ptr() != buffer);
+
                 unsafe {
                     let spa_buffer = (*buffer).buffer;
                     let spa_data = (*spa_buffer).datas;
````
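The ordering requirement in the `rendering_buffers` doc comment above is the subtle part: even if a newer frame's fence signals first, it must not overtake an older frame that is still rendering. The drain logic can be illustrated standalone; this is a simplified sketch, with a plain `bool` standing in for `SyncPoint::is_reached()` and a generic `Buffer` in place of `NonNull<pw_buffer>`:

```rust
/// Queue every buffer up to (but not including) the first one still rendering,
/// oldest first. Completed buffers *behind* an in-progress one stay put, so
/// frames are never submitted out of order.
fn drain_completed<Buffer>(
    pending: &mut Vec<(Buffer, bool)>, // (buffer, rendering finished?)
    mut queue: impl FnMut(Buffer),
) {
    let first_in_progress = pending
        .iter()
        .position(|(_, is_done)| !*is_done)
        .unwrap_or(pending.len());

    for (buffer, _) in pending.drain(..first_in_progress) {
        queue(buffer);
    }
}
```

With `pending` holding `[(a, true), (b, false), (c, true)]`, only `a` is queued; `c` waits until `b` completes. The real implementation follows, continuing `src/pw_utils.rs`: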
````diff
@@ -856,13 +869,84 @@ impl Cast {
         unsafe { NonNull::new(self.stream.dequeue_raw_buffer()) }
     }
 
+    fn queue_completed_buffers(&mut self) {
+        let mut inner = self.inner.borrow_mut();
+
+        // We want to queue buffers in order, so find the first still-rendering buffer, and queue
+        // everything up to that. Even if there are completed buffers past the first
+        // still-rendering buffer, we do not want to queue them, since that would send frames out
+        // of order.
+        let first_in_progress_idx = inner
+            .rendering_buffers
+            .iter()
+            .position(|(_, sync)| !sync.is_reached())
+            .unwrap_or(inner.rendering_buffers.len());
+
+        for (buffer, _) in inner.rendering_buffers.drain(..first_in_progress_idx) {
+            trace!("queueing completed buffer");
+            unsafe {
+                pw_stream_queue_buffer(self.stream.as_raw_ptr(), buffer.as_ptr());
+            }
+        }
+    }
+
+    unsafe fn queue_after_sync(&mut self, pw_buffer: NonNull<pw_buffer>, sync_point: SyncPoint) {
+        let mut inner = self.inner.borrow_mut();
+
+        let mut sync_point = sync_point;
+        let sync_fd = match sync_point.export() {
+            Some(sync_fd) => Some(sync_fd),
+            None => {
+                // There are two main ways this can happen. First is that the SyncPoint is
+                // pre-signalled, then the buffer is already ready and no waiting is needed. Second
+                // is that the SyncPoint is potentially still not signalled, but exporting a fence
+                // fd had failed. In this case, there's not much we can do (perhaps do a blocking
+                // wait for the SyncPoint, which itself might fail).
+                //
+                // So let's hope for the best and mark the buffer as submittable. We do not reuse
+                // the original SyncPoint because if we do hit the second case (when it's not
+                // signalled), then without a sync fd we cannot schedule a queue upon its
+                // completion, effectively going stuck. It's better to queue an incomplete buffer
+                // than getting stuck.
+                sync_point = SyncPoint::signaled();
+                None
+            }
+        };
+
+        inner.rendering_buffers.push((pw_buffer, sync_point));
+        drop(inner);
+
+        match sync_fd {
+            None => {
+                trace!("sync_fd is None, queueing completed buffers");
+                // In case this is the only buffer in the list, we will queue it right away.
+                self.queue_completed_buffers();
+            }
+            Some(sync_fd) => {
+                trace!("scheduling buffer to queue");
+                let stream_id = self.stream_id;
+                let source = Generic::new(sync_fd, Interest::READ, Mode::OneShot);
+                self.event_loop
+                    .insert_source(source, move |_, _, state| {
+                        for cast in &mut state.niri.casts {
+                            if cast.stream_id == stream_id {
+                                cast.queue_completed_buffers();
+                            }
+                        }
+
+                        Ok(PostAction::Remove)
+                    })
+                    .unwrap();
+            }
+        }
+    }
+
     pub fn dequeue_buffer_and_render(
         &mut self,
         renderer: &mut GlesRenderer,
         elements: &[impl RenderElement<GlesRenderer>],
         size: Size<i32, Physical>,
         scale: Scale<f64>,
-        wait_for_sync: bool,
     ) -> bool {
         let mut inner = self.inner.borrow_mut();
 
@@ -909,33 +993,20 @@ impl Cast {
                 elements.iter().rev(),
             ) {
                 Ok(sync_point) => {
-                    // FIXME: implement PipeWire explicit sync, and at the very least async wait.
-                    if wait_for_sync {
-                        let _span = tracy_client::span!("wait for completion");
-                        if let Err(err) = sync_point.wait() {
-                            warn!("error waiting for pw frame completion: {err:?}");
-                        }
-                    }
+                    mark_buffer_as_good(pw_buffer);
+                    self.queue_after_sync(pw_buffer, sync_point);
+                    true
                 }
                 Err(err) => {
                     warn!("error rendering to dmabuf: {err:?}");
                     return_unused_buffer(&self.stream, pw_buffer);
-                    return false;
+                    false
                 }
             }
-
-            mark_buffer_as_good(pw_buffer);
-            pw_stream_queue_buffer(self.stream.as_raw_ptr(), buffer);
         }
-
-        true
     }
 
-    pub fn dequeue_buffer_and_clear(
-        &mut self,
-        renderer: &mut GlesRenderer,
-        wait_for_sync: bool,
-    ) -> bool {
+    pub fn dequeue_buffer_and_clear(&mut self, renderer: &mut GlesRenderer) -> bool {
         let mut inner = self.inner.borrow_mut();
 
         // Clear out the damage tracker if we're in Ready state.
@@ -958,26 +1029,17 @@ impl Cast {
 
             match clear_dmabuf(renderer, dmabuf) {
                 Ok(sync_point) => {
-                    // FIXME: implement PipeWire explicit sync, and at the very least async wait.
````
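One design choice worth noting in `queue_after_sync` above: when `export()` returns `None`, the code cannot tell "already signalled" apart from "export failed", and without an fd there is no way to schedule a wake-up, so it substitutes `SyncPoint::signaled()` and accepts that a possibly incomplete frame may be queued rather than risk stalling the stream forever. A condensed sketch of that fallback (assuming, as the patch does, that `export()` yields an optional owned fd):

```rust
use std::os::fd::OwnedFd;

use smithay::backend::renderer::sync::SyncPoint;

/// Sketch of the `queue_after_sync` fallback: prefer an exported fence fd we
/// can poll; otherwise mark the buffer submittable immediately.
fn export_or_mark_signalled(sync_point: SyncPoint) -> (SyncPoint, Option<OwnedFd>) {
    match sync_point.export() {
        // Keep the real sync point so `is_reached()` stays meaningful.
        Some(fd) => (sync_point, Some(fd)),
        // Pre-signalled, or export failed: either way there is nothing we can
        // wait on, so treat the frame as ready.
        None => (SyncPoint::signaled(), None),
    }
}
```

The `remove_buffer` hook shown earlier completes the picture by purging pending entries when PipeWire revokes a buffer, so a stale pointer can never be queued. The rest of the `dequeue_buffer_and_clear` change mirrors the render path, and the remaining hunks update the documentation: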
````diff
-                    if wait_for_sync {
-                        let _span = tracy_client::span!("wait for completion");
-                        if let Err(err) = sync_point.wait() {
-                            warn!("error waiting for pw frame completion: {err:?}");
-                        }
-                    }
+                    mark_buffer_as_good(pw_buffer);
+                    self.queue_after_sync(pw_buffer, sync_point);
+                    true
                 }
                 Err(err) => {
                     warn!("error clearing dmabuf: {err:?}");
                     return_unused_buffer(&self.stream, pw_buffer);
-                    return false;
+                    false
                 }
             }
-
-            mark_buffer_as_good(pw_buffer);
-            pw_stream_queue_buffer(self.stream.as_raw_ptr(), buffer);
         }
-
-        true
     }
 }
````

````diff
diff --git a/wiki/Configuration:-Debug-Options.md b/wiki/Configuration:-Debug-Options.md
index 299437ca..ad74dba7 100644
--- a/wiki/Configuration:-Debug-Options.md
+++ b/wiki/Configuration:-Debug-Options.md
@@ -21,7 +21,6 @@ debug {
     force-pipewire-invalid-modifier
     dbus-interfaces-in-non-session-instances
     wait-for-frame-completion-before-queueing
-    wait-for-frame-completion-in-pipewire
     emulate-zero-presentation-time
     disable-resize-throttling
     disable-transactions
@@ -155,22 +154,6 @@ debug {
 }
 ```
 
-### `wait-for-frame-completion-in-pipewire`
-
-<sup>Since: 25.05</sup>
-
-Wait until every screencast frame is done rendering before handing it over to PipeWire.
-
-Sometimes helps on NVIDIA to prevent glitched frames when screencasting.
-
-This debug flag will eventually be removed once we handle this properly (via explicit sync in PipeWire).
-
-```kdl
-debug {
-    wait-for-frame-completion-in-pipewire
-}
-```
-
 ### `emulate-zero-presentation-time`
 
 Emulate zero (unknown) presentation time returned from DRM.
````

````diff
diff --git a/wiki/Nvidia.md b/wiki/Nvidia.md
index 13b551d8..dc94c4b7 100644
--- a/wiki/Nvidia.md
+++ b/wiki/Nvidia.md
@@ -42,9 +42,11 @@ The fix shipped in the driver at the time of writing uses a value of 0, while th
 
 ### Screencast flickering fix
 
+<sup>Until: next release</sup>
+
 If you have screencast glitches or flickering on NVIDIA, set this in the niri config:
 
-```kdl
+```kdl,must-fail
 debug {
     wait-for-frame-completion-in-pipewire
 }
````
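Note for users on the unstable branch: the `wait-for-frame-completion-in-pipewire` debug flag is gone, and the `must-fail` marker now attached to the wiki snippet suggests a config that still sets it will be rejected rather than ignored, so the line should simply be removed after updating.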
