Compare commits

..

4 Commits
5.6.7 ... 5.6.8

5 changed files with 26 additions and 31 deletions

14
TODO
View File

@@ -322,4 +322,16 @@ KDE Plasma Wayland seems to use overlay planes now in non-fullscreen mode(limite
If it is, then support it in kms capture.
Check if pipewire audio link-factory is available before attempting to use app audio or merging audio with pipewire.
Also do the same in supports_app_audio check in gpu-screen-recorder --info output.
Also do the same in supports_app_audio check in gpu-screen-recorder --info output.
Move region capture to an option in the color conversion instead of having the region code in kms capture code. This would make it cleaner and make it work with all capture methods.
-w region would just be an option for selecting the monitor and -region would work with all capture methods (-w).
Set top level window argument for portal capture. Same for gpu-screen-recorder-gtk global shortcuts.
Remove unix domain socket code from kms-client/server and use socketpair directly. To make this possible, always execute the kms server permission setup in flatpak before starting recording (in gpu-screen-recorder-gtk).
Application audio capture isn't good enough. It creates a sink that for some reason automatically gets selected as the default output device and it's visible as an output device.
When shutting down GPU Screen Recorder it will also cause audio applications to pause.
Fix some of these issues by setting gsr-app-sink media class to "Stream/Input/Audio" and node.virtual=true.
However that causes pulseaudio to be unable to record from gsr-app-sink, and it ends up being stuck in pa_sound_device_handle_reconnect in the loop with pa_mainloop_iterate.

View File

@@ -1,4 +1,4 @@
project('gpu-screen-recorder', ['c', 'cpp'], version : '5.6.7', default_options : ['warning_level=2'])
project('gpu-screen-recorder', ['c', 'cpp'], version : '5.6.8', default_options : ['warning_level=2'])
add_project_arguments('-Wshadow', language : ['c', 'cpp'])
if get_option('buildtype') == 'debug'

View File

@@ -7,7 +7,9 @@ extern "C" {
#define GSR_PLUGIN_INTERFACE_MAJOR_VERSION 0
#define GSR_PLUGIN_INTERFACE_MINOR_VERSION 1
#define GSR_PLUGIN_INTERFACE_VERSION ((GSR_PLUGIN_INTERFACE_MAJOR_VERSION << 16) | GSR_PLUGIN_INTERFACE_MINOR_VERSION)
#define GSR_PLUGIN_INTERFACE_MAKE_VERSION(major, minor) (((major) << 16) | (minor))
#define GSR_PLUGIN_INTERFACE_VERSION GSR_PLUGIN_INTERFACE_MAKE_VERSION(GSR_PLUGIN_INTERFACE_MAJOR_VERSION, GSR_PLUGIN_INTERFACE_MINOR_VERSION)
#include <stdbool.h>

View File

@@ -1,7 +1,7 @@
[package]
name = "gpu-screen-recorder"
type = "executable"
version = "5.6.7"
version = "5.6.8"
platforms = ["posix"]
[config]

View File

@@ -76,12 +76,6 @@ static const int VIDEO_STREAM_INDEX = 0;
static thread_local char av_error_buffer[AV_ERROR_MAX_STRING_SIZE];
enum class AudioMergeType {
NONE,
AMIX,
PIPEWIRE
};
typedef struct {
const gsr_window *window;
} MonitorOutputCallbackUserdata;
@@ -3125,24 +3119,12 @@ int main(int argc, char **argv) {
std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg);
const bool uses_app_audio = merged_audio_inputs_has_app_audio(requested_audio_inputs);
AudioMergeType audio_merge_type = AudioMergeType::NONE;
std::vector<std::string> app_audio_names;
#ifdef GSR_APP_AUDIO
const bool audio_server_is_pipewire = audio_input_arg->num_values > 0 && pulseaudio_server_is_pipewire();
if(merged_audio_inputs_should_use_amix(requested_audio_inputs)) {
if(audio_server_is_pipewire || uses_app_audio)
audio_merge_type = AudioMergeType::PIPEWIRE;
else
audio_merge_type = AudioMergeType::AMIX;
}
gsr_pipewire_audio pipewire_audio;
memset(&pipewire_audio, 0, sizeof(pipewire_audio));
// TODO: When recording multiple audio devices and merging them (for example desktop audio and microphone) then one (or more) of the audio sources
// can get desynced. I'm unable to reproduce this but some others are. Instead of merging audio with ffmpeg amix, merge audio with pipewire (if available).
// This fixes the issue for people that had the issue.
if(audio_merge_type == AudioMergeType::PIPEWIRE || uses_app_audio) {
if(!audio_server_is_pipewire) {
if(uses_app_audio) {
if(!pulseaudio_server_is_pipewire()) {
fprintf(stderr, "gsr error: your sound server is not PipeWire. Application audio is only available when running PipeWire audio server\n");
_exit(2);
}
@@ -3159,9 +3141,6 @@ int main(int argc, char **argv) {
}, &app_audio_names);
}
#else
if(merged_audio_inputs_should_use_amix(requested_audio_inputs))
audio_merge_type = AudioMergeType::AMIX;
if(uses_app_audio) {
fprintf(stderr, "gsr error: application audio can't be recorded because GPU Screen Recorder is built without application audio support (-Dapp_audio option)\n");
_exit(2);
@@ -3271,7 +3250,8 @@ int main(int argc, char **argv) {
const bool force_no_audio_offset = arg_parser.is_livestream || arg_parser.is_output_piped || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
const double target_fps = 1.0 / (double)arg_parser.fps;
arg_parser.audio_codec = select_audio_codec_with_fallback(arg_parser.audio_codec, file_extension, audio_merge_type == AudioMergeType::AMIX);
const bool uses_amix = merged_audio_inputs_should_use_amix(requested_audio_inputs);
arg_parser.audio_codec = select_audio_codec_with_fallback(arg_parser.audio_codec, file_extension, uses_amix);
gsr_capture *capture = create_capture_impl(arg_parser, &egl, false);
@@ -3428,7 +3408,7 @@ int main(int argc, char **argv) {
std::vector<AVFilterContext*> src_filter_ctx;
AVFilterGraph *graph = nullptr;
AVFilterContext *sink = nullptr;
if(use_amix && audio_merge_type == AudioMergeType::AMIX) {
if(use_amix) {
int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
if(err < 0) {
fprintf(stderr, "gsr error: failed to create audio filter\n");
@@ -3445,7 +3425,8 @@ int main(int argc, char **argv) {
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
std::vector<AudioDeviceData> audio_track_audio_devices;
if((use_amix && audio_merge_type == AudioMergeType::PIPEWIRE) || audio_inputs_has_app_audio(merged_audio_inputs.audio_inputs)) {
if(audio_inputs_has_app_audio(merged_audio_inputs.audio_inputs)) {
assert(!use_amix);
#ifdef GSR_APP_AUDIO
audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
#endif
@@ -3660,7 +3641,7 @@ int main(int argc, char **argv) {
}
std::thread amix_thread;
if(audio_merge_type == AudioMergeType::AMIX) {
if(uses_amix) {
amix_thread = std::thread([&]() {
AVFrame *aframe = av_frame_alloc();
while(running) {