From 9386dd4c964f7efd3e4872a4ec7705f7327222c7 Mon Sep 17 00:00:00 2001
From: dec05eba
Date: Wed, 10 Sep 2025 21:24:34 +0200
Subject: [PATCH] Revert "Use pipewire audio routing to merge audio when
 possible (this fixes out of sync audio when using multiple audio inputs for
 some users)"

This reverts commit 59d16899ab4b4bee6e6a5cfc37f8a721648f892e.
---
 TODO         |  3 ---
 src/main.cpp | 40 ++++++++--------------------------------
 2 files changed, 8 insertions(+), 35 deletions(-)

diff --git a/TODO b/TODO
index c18f6aa..938f914 100644
--- a/TODO
+++ b/TODO
@@ -94,9 +94,6 @@ Enable b-frames.
 Support vfr matching games exact fps all the time.
 
 On x11 use damage tracking, on wayland? maybe there is drm plane damage tracking. But that may not be accurate as the compositor may update it every monitor hz anyways. On wayland maybe only support it for desktop portal + pipewire capture.
   Another method to track damage that works regardless of the display server would be to do a diff between frames with a shader.
-  A 1x1 texture could be created and then write to the texture with imageStore in glsl.
-  Multiple textures aren't needed for diff, the diff between the color conversion output can be done by using it as an input
-  as well, which would diff it against the previous frame.
 
 Support selecting which gpu to use. This can be done in egl with eglQueryDevicesEXT and then eglGetPlatformDisplayEXT. This will automatically work on AMD and Intel as vaapi uses the same device. On nvidia we need to use eglQueryDeviceAttribEXT with EGL_CUDA_DEVICE_NV. Maybe on glx (nvidia x11 nvfbc) we need to use __NV_PRIME_RENDER_OFFLOAD, __NV_PRIME_RENDER_OFFLOAD_PROVIDER, __GLX_VENDOR_LIBRARY_NAME, __VK_LAYER_NV_optimus, VK_ICD_FILENAMES instead. Just look at prime-run /usr/bin/prime-run.
diff --git a/src/main.cpp b/src/main.cpp
index ef66f0d..a97b399 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -76,12 +76,6 @@ static const int VIDEO_STREAM_INDEX = 0;
 
 static thread_local char av_error_buffer[AV_ERROR_MAX_STRING_SIZE];
 
-enum class AudioMergeType {
-    NONE,
-    AMIX,
-    PIPEWIRE
-};
-
 typedef struct {
     const gsr_window *window;
 } MonitorOutputCallbackUserdata;
@@ -3125,24 +3119,12 @@ int main(int argc, char **argv) {
 
     std::vector requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg);
     const bool uses_app_audio = merged_audio_inputs_has_app_audio(requested_audio_inputs);
-    AudioMergeType audio_merge_type = AudioMergeType::NONE;
     std::vector app_audio_names;
 #ifdef GSR_APP_AUDIO
-    const bool audio_server_is_pipewire = audio_input_arg->num_values > 0 && pulseaudio_server_is_pipewire();
-    if(merged_audio_inputs_should_use_amix(requested_audio_inputs)) {
-        if(audio_server_is_pipewire || uses_app_audio)
-            audio_merge_type = AudioMergeType::PIPEWIRE;
-        else
-            audio_merge_type = AudioMergeType::AMIX;
-    }
-
     gsr_pipewire_audio pipewire_audio;
     memset(&pipewire_audio, 0, sizeof(pipewire_audio));
-    // TODO: When recording multiple audio devices and merging them (for example desktop audio and microphone) then one (or more) of the audio sources
-    //       can get desynced. I'm unable to reproduce this but some others are. Instead of merging audio with ffmpeg amix, merge audio with pipewire (if available).
-    //       This fixes the issue for people that had the issue.
-    if(audio_merge_type == AudioMergeType::PIPEWIRE || uses_app_audio) {
-        if(!audio_server_is_pipewire) {
+    if(uses_app_audio) {
+        if(!pulseaudio_server_is_pipewire()) {
             fprintf(stderr, "gsr error: your sound server is not PipeWire. Application audio is only available when running PipeWire audio server\n");
             _exit(2);
         }
@@ -3158,14 +3140,6 @@ int main(int argc, char **argv) {
             return true;
         }, &app_audio_names);
     }
-#else
-    if(merged_audio_inputs_should_use_amix(requested_audio_inputs))
-        audio_merge_type = AudioMergeType::AMIX;
-
-    if(uses_app_audio) {
-        fprintf(stderr, "gsr error: application audio can't be recorded because GPU Screen Recorder is built without application audio support (-Dapp_audio option)\n");
-        _exit(2);
-    }
 #endif
 
     validate_merged_audio_inputs_app_audio(requested_audio_inputs, app_audio_names);
@@ -3271,7 +3245,8 @@ int main(int argc, char **argv) {
     const bool force_no_audio_offset = arg_parser.is_livestream || arg_parser.is_output_piped || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
     const double target_fps = 1.0 / (double)arg_parser.fps;
 
-    arg_parser.audio_codec = select_audio_codec_with_fallback(arg_parser.audio_codec, file_extension, audio_merge_type == AudioMergeType::AMIX);
+    const bool uses_amix = merged_audio_inputs_should_use_amix(requested_audio_inputs);
+    arg_parser.audio_codec = select_audio_codec_with_fallback(arg_parser.audio_codec, file_extension, uses_amix);
 
     gsr_capture *capture = create_capture_impl(arg_parser, &egl, false);
 
@@ -3428,7 +3403,7 @@ int main(int argc, char **argv) {
         std::vector src_filter_ctx;
         AVFilterGraph *graph = nullptr;
         AVFilterContext *sink = nullptr;
-        if(use_amix && audio_merge_type == AudioMergeType::AMIX) {
+        if(use_amix) {
             int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
             if(err < 0) {
                 fprintf(stderr, "gsr error: failed to create audio filter\n");
@@ -3445,7 +3420,8 @@ int main(int argc, char **argv) {
         const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
 
         std::vector audio_track_audio_devices;
-        if((use_amix && audio_merge_type == AudioMergeType::PIPEWIRE) || audio_inputs_has_app_audio(merged_audio_inputs.audio_inputs)) {
+        if(audio_inputs_has_app_audio(merged_audio_inputs.audio_inputs)) {
+            assert(!use_amix);
 #ifdef GSR_APP_AUDIO
             audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
 #endif
@@ -3660,7 +3636,7 @@ int main(int argc, char **argv) {
     }
 
     std::thread amix_thread;
-    if(audio_merge_type == AudioMergeType::AMIX) {
+    if(uses_amix) {
         amix_thread = std::thread([&]() {
             AVFrame *aframe = av_frame_alloc();
             while(running) {
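
Below is a minimal, self-contained C++ sketch of the behavior this revert restores: merged_audio_inputs_should_use_amix() alone decides whether the ffmpeg amix path (codec fallback, filter graph, amix thread) is used, while application audio capture still requires a PipeWire sound server. The types and function bodies here (AudioInput, MergedAudioInputs, has_app_audio, and the stand-in implementations of merged_audio_inputs_should_use_amix and pulseaudio_server_is_pipewire) are simplified assumptions for illustration, not the real gpu-screen-recorder definitions.

// Sketch only: simplified stand-ins, not the real gpu-screen-recorder code.
#include <cstdio>
#include <string>
#include <vector>

struct AudioInput {
    std::string name;
    bool is_app_audio; // stand-in flag for application audio inputs
};

struct MergedAudioInputs {
    std::vector<AudioInput> audio_inputs; // inputs merged into one audio track
};

// Stand-in: amix is only needed when a track merges more than one input.
static bool merged_audio_inputs_should_use_amix(const std::vector<MergedAudioInputs> &tracks) {
    for(const MergedAudioInputs &track : tracks) {
        if(track.audio_inputs.size() > 1)
            return true;
    }
    return false;
}

// Stand-in for the real PulseAudio/PipeWire server detection.
static bool pulseaudio_server_is_pipewire() {
    return true; // assume a PipeWire server for this example
}

static bool has_app_audio(const std::vector<MergedAudioInputs> &tracks) {
    for(const MergedAudioInputs &track : tracks) {
        for(const AudioInput &input : track.audio_inputs) {
            if(input.is_app_audio)
                return true;
        }
    }
    return false;
}

int main() {
    // One output track that merges desktop audio and a microphone.
    const std::vector<MergedAudioInputs> requested_audio_inputs = {
        { { {"desktop", false}, {"microphone", false} } }
    };

    // After the revert this flag alone drives the amix path
    // (audio codec fallback, filter graph and the amix thread).
    const bool uses_amix = merged_audio_inputs_should_use_amix(requested_audio_inputs);

    // Application audio capture still requires PipeWire.
    if(has_app_audio(requested_audio_inputs) && !pulseaudio_server_is_pipewire()) {
        fprintf(stderr, "gsr error: application audio requires a PipeWire sound server\n");
        return 2;
    }

    printf("merge with ffmpeg amix: %s\n", uses_amix ? "yes" : "no");
    return 0;
}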