Compare commits

...

14 Commits

Author SHA1 Message Date
dec05eba
6cbf660afa 5.11.5 2026-01-08 01:23:55 +01:00
dec05eba
827751cc55 Correctly reconnect default audio device when changing it on the system 2026-01-08 01:23:55 +01:00
dec05eba
a4b3be3786 Fix application audio node getting recreated by pipewire after suspended (idle) for 30 seconds 2026-01-08 00:53:30 +01:00
dec05eba
240ccf569c debug output 2026-01-07 19:37:37 +01:00
dec05eba
88d356386b Test audio 2026-01-07 19:35:03 +01:00
dec05eba
c4104e18cc Test audio node leak fix 2026-01-07 01:35:30 +01:00
dec05eba
640e377c90 5.11.4 2026-01-06 19:53:09 +01:00
dec05eba
2545db7e50 Fix incorrect padding when amd gpu doesn't support padding fix for hevc 2026-01-06 19:52:56 +01:00
dec05eba
997d4ae922 m 2026-01-06 19:39:55 +01:00
dec05eba
57e9b994a7 5.11.3 2026-01-06 19:39:33 +01:00
dec05eba
3117f30143 Fix cursor flicker on nvidia when capturing monitor
Thanks to David Kleuker for bug testing
2026-01-06 19:39:00 +01:00
dec05eba
f1acb95cf3 Add -ffmpeg-video-opts and -ffmpeg-audio-opts 2026-01-05 20:24:41 +01:00
dec05eba
cb9cb6c567 Add -ffmpeg-opts argument to pass additional options to ffmpeg 2025-12-30 02:33:12 +01:00
dec05eba
5857cfa1b4 Correct deactivation of mjpeg if libturbo not available 2025-12-28 00:35:40 +01:00
12 changed files with 155 additions and 32 deletions

6
TODO
View File

@@ -387,3 +387,9 @@ Support camera controls, such as white balance. Otherwise tell user to use camer
Camera capture doesn't work perfectly. The image gets glitched, need to properly wait for image to be done.
Use one pipewire connection (pipewire video) instead of multiple ones when recording with portal multiple times (multiple sources).
Close pipewire links or maybe there are file descriptor leaks?
Make multiple capture sources work properly in regards to size. The size of the video should be the region size of each capture source.
--

View File

@@ -301,6 +301,27 @@ Script to run after saving video. Receives filepath and type ("regular", "replay
.BI \-portal\-session\-token\-filepath " path"
Portal session token file (default: ~/.config/gpu-screen-recorder/restore_token).
.TP
.BI \-ffmpeg-opts " options"
Additional arguments to pass to FFmpeg for the file in a list of key-value pairs in the format "key=value;key=value",
.br
for example: -ffmpeg-opts "hls_list_size=3;hls_time=1;hls_flags=delete_segments".
.br
Note: this overwrites options set by GPU Screen Recorder with the same name.
.TP
.BI \-ffmpeg-video-opts " options"
Additional arguments to pass to FFmpeg for the video in a list of key-value pairs in the format "key=value;key=value",
.br
for example: -ffmpeg-video-opts "codec=cabac;rc_mode=CQP;qp=16".
.br
Note: this overwrites options set by GPU Screen Recorder with the same name.
.TP
.BI \-ffmpeg-audio-opts " options"
Additional arguments to pass to FFmpeg for the audio in a list of key-value pairs in the format "key=value;key=value",
.br
for example: -ffmpeg-audio-opts "aac_coder=fast;aac_pce=true".
.br
Note: this overwrites options set by GPU Screen Recorder with the same name.
.TP
.BI \-gl\-debug " yes|no"
OpenGL debug output (default: no).
.TP

View File

@@ -8,7 +8,7 @@
typedef struct gsr_egl gsr_egl;
#define NUM_ARGS 32
#define NUM_ARGS 35
typedef enum {
GSR_CAPTURE_SOURCE_TYPE_WINDOW,
@@ -85,6 +85,9 @@ typedef struct {
const char *replay_recording_directory;
const char *portal_session_token_filepath;
const char *recording_saved_script;
const char *ffmpeg_opts;
const char *ffmpeg_video_opts;
const char *ffmpeg_audio_opts;
bool verbose;
bool gl_debug;
bool fallback_cpu_encoding;

View File

@@ -1,4 +1,4 @@
project('gpu-screen-recorder', ['c', 'cpp'], version : '5.11.2', default_options : ['warning_level=2'])
project('gpu-screen-recorder', ['c', 'cpp'], version : '5.11.5', default_options : ['warning_level=2'])
add_project_arguments('-Wshadow', language : ['c', 'cpp'])
if get_option('buildtype') == 'debug'

View File

@@ -1,7 +1,7 @@
[package]
name = "gpu-screen-recorder"
type = "executable"
version = "5.11.2"
version = "5.11.5"
platforms = ["posix"]
[config]

View File

@@ -196,8 +196,8 @@ static void usage_header(void) {
"[-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] "
"[-bm auto|qp|vbr|cbr] [-cr limited|full] [-tune performance|quality] [-df yes|no] [-sc <script_path>] [-p <plugin_path>] "
"[-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] "
"[-fallback-cpu-encoding yes|no] [-o <output_file>] [-ro <output_directory>] [--list-capture-options [card_path]] [--list-audio-devices] "
"[--list-application-audio] [--list-v4l2-devices] [-v yes|no] [-gl-debug yes|no] [--version] [-h|--help]\n", program_name);
"[-fallback-cpu-encoding yes|no] [-o <output_file>] [-ro <output_directory>] [-ffmpeg-opts <options>] [--list-capture-options [card_path]] "
"[--list-audio-devices] [--list-application-audio] [--list-v4l2-devices] [-v yes|no] [-gl-debug yes|no] [--version] [-h|--help]\n", program_name);
fflush(stdout);
}
@@ -440,6 +440,10 @@ static bool args_parser_set_values(args_parser *self) {
self->recording_saved_script = NULL;
}
self->ffmpeg_opts = args_get_value_by_key(self->args, NUM_ARGS, "-ffmpeg-opts");
self->ffmpeg_video_opts = args_get_value_by_key(self->args, NUM_ARGS, "-ffmpeg-video-opts");
self->ffmpeg_audio_opts = args_get_value_by_key(self->args, NUM_ARGS, "-ffmpeg-audio-opts");
return true;
}
@@ -529,6 +533,9 @@ bool args_parser_parse(args_parser *self, int argc, char **argv, const args_hand
self->args[arg_index++] = (Arg){ .key = "-fallback-cpu-encoding", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
self->args[arg_index++] = (Arg){ .key = "-replay-storage", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = replay_storage_enums, .num_enum_values = sizeof(replay_storage_enums)/sizeof(ArgEnum) };
self->args[arg_index++] = (Arg){ .key = "-p", .optional = true, .list = true, .type = ARG_TYPE_STRING };
self->args[arg_index++] = (Arg){ .key = "-ffmpeg-opts", .optional = true, .list = false, .type = ARG_TYPE_STRING };
self->args[arg_index++] = (Arg){ .key = "-ffmpeg-video-opts", .optional = true, .list = false, .type = ARG_TYPE_STRING };
self->args[arg_index++] = (Arg){ .key = "-ffmpeg-audio-opts", .optional = true, .list = false, .type = ARG_TYPE_STRING };
assert(arg_index == NUM_ARGS);
for(int i = 1; i < argc; i += 2) {

View File

@@ -553,6 +553,9 @@ static int gsr_capture_v4l2_capture(gsr_capture *cap, gsr_capture_metadata *capt
const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);
self->params.egl->glFlush();
// TODO: Use the minimal barrier required
self->params.egl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
// TODO: Remove this?
if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA)
self->params.egl->glFinish();
@@ -671,8 +674,11 @@ void gsr_capture_v4l2_list_devices(v4l2_devices_query_callback callback, void *u
if(xioctl(fd, VIDIOC_G_FMT, &fmt) == -1)
goto next;
const gsr_capture_v4l2_supported_pixfmts supported_pixfmts = gsr_capture_v4l2_get_supported_pixfmts(fd);
if(supported_pixfmts.yuyv || (supported_pixfmts.mjpeg && has_libturbojpeg_lib))
gsr_capture_v4l2_supported_pixfmts supported_pixfmts = gsr_capture_v4l2_get_supported_pixfmts(fd);
if(!has_libturbojpeg_lib)
supported_pixfmts.mjpeg = false;
if(supported_pixfmts.yuyv || supported_pixfmts.mjpeg)
callback(v4l2_device_path, supported_pixfmts, (vec2i){ fmt.fmt.pix.width, fmt.fmt.pix.height }, userdata);
next:

View File

@@ -781,7 +781,6 @@ static void gsr_color_conversion_draw_graphics(gsr_color_conversion *self, unsig
self->params.egl->glBindBuffer(GL_ARRAY_BUFFER, self->vertex_buffer_object_id);
self->params.egl->glBufferSubData(GL_ARRAY_BUFFER, 0, 24 * sizeof(float), vertices);
// TODO:
switch(source_color) {
case GSR_SOURCE_COLOR_RGB:
case GSR_SOURCE_COLOR_BGR: {

View File

@@ -523,4 +523,7 @@ void gsr_egl_swap_buffers(gsr_egl *self) {
self->glFlush();
// TODO: Use the minimal barrier required
self->glMemoryBarrier(GL_ALL_BARRIER_BITS); // GL_SHADER_IMAGE_ACCESS_BARRIER_BIT
// TODO: This is needed on nvidia because the cursor can flicker otherwise. Find a better solution
if(self->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA)
self->glFinish();
}

View File

@@ -245,8 +245,8 @@ static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecCon
video_codec_context->width = FFALIGN(video_codec_context->width, 2);
video_codec_context->height = FFALIGN(video_codec_context->height, 2);
} else {
video_codec_context->width = FFALIGN(video_codec_context->width, 256);
video_codec_context->height = FFALIGN(video_codec_context->height, 256);
video_codec_context->width = FFALIGN(video_codec_context->width, 64);
video_codec_context->height = FFALIGN(video_codec_context->height, 16);
}
} else if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
// TODO: Dont do this for VCN 5 and forward which should fix this hardware bug

View File

@@ -528,10 +528,13 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt, const A
return codec_context;
}
static void open_audio(AVCodecContext *audio_codec_context) {
static void open_audio(AVCodecContext *audio_codec_context, const char *ffmpeg_audio_opts) {
AVDictionary *options = nullptr;
av_dict_set(&options, "strict", "experimental", 0);
if(ffmpeg_audio_opts)
av_dict_parse_string(&options, ffmpeg_audio_opts, "=", ";", 0);
int ret;
ret = avcodec_open2(audio_codec_context, audio_codec_context->codec, &options);
if(ret < 0) {
@@ -678,6 +681,9 @@ static void open_video_software(AVCodecContext *codec_context, const args_parser
av_dict_set(&options, "strict", "experimental", 0);
if(arg_parser.ffmpeg_video_opts)
av_dict_parse_string(&options, arg_parser.ffmpeg_video_opts, "=", ";", 0);
int ret = avcodec_open2(codec_context, codec_context->codec, &options);
if (ret < 0) {
fprintf(stderr, "gsr error: Could not open video codec: %s\n", av_error_to_string(ret));
@@ -926,6 +932,9 @@ static void open_video_hardware(AVCodecContext *codec_context, bool low_power, c
av_dict_set(&options, "strict", "experimental", 0);
if(arg_parser.ffmpeg_video_opts)
av_dict_parse_string(&options, arg_parser.ffmpeg_video_opts, "=", ";", 0);
int ret = avcodec_open2(codec_context, codec_context->codec, &options);
if (ret < 0) {
fprintf(stderr, "gsr error: Could not open video codec: %s\n", av_error_to_string(ret));
@@ -1193,9 +1202,9 @@ struct VideoSource {
CaptureSource *capture_source;
};
static RecordingStartResult start_recording_create_streams(const char *filename, const char *container_format, AVCodecContext *video_codec_context, const std::vector<AudioTrack> &audio_tracks, bool hdr, std::vector<VideoSource> &video_sources) {
static RecordingStartResult start_recording_create_streams(const char *filename, const args_parser &args_parser, AVCodecContext *video_codec_context, const std::vector<AudioTrack> &audio_tracks, bool hdr, std::vector<VideoSource> &video_sources) {
AVFormatContext *av_format_context;
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, filename);
avformat_alloc_output_context2(&av_format_context, nullptr, args_parser.container_format, filename);
AVStream *video_stream = create_stream(av_format_context, video_codec_context);
avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
@@ -1220,6 +1229,9 @@ static RecordingStartResult start_recording_create_streams(const char *filename,
AVDictionary *options = nullptr;
av_dict_set(&options, "strict", "experimental", 0);
if(args_parser.ffmpeg_opts)
av_dict_parse_string(&options, args_parser.ffmpeg_opts, "=", ";", 0);
const int header_write_ret = avformat_write_header(av_format_context, &options);
av_dict_free(&options);
if(header_write_ret < 0) {
@@ -1282,7 +1294,7 @@ struct AudioPtsOffset {
int stream_index = 0;
};
static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, const std::vector<AudioTrack> &audio_tracks, gsr_replay_buffer *replay_buffer, std::string output_dir, const char *container_format, const std::string &file_extension, bool date_folders, bool hdr, std::vector<VideoSource> &video_sources, int current_save_replay_seconds) {
static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, const std::vector<AudioTrack> &audio_tracks, gsr_replay_buffer *replay_buffer, const args_parser &arg_parser, const std::string &file_extension, bool date_folders, bool hdr, std::vector<VideoSource> &video_sources, int current_save_replay_seconds) {
if(save_replay_thread.valid())
return;
@@ -1310,8 +1322,8 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
return;
}
std::string output_filepath = create_new_recording_filepath_from_timestamp(output_dir, "Replay", file_extension, date_folders);
RecordingStartResult recording_start_result = start_recording_create_streams(output_filepath.c_str(), container_format, video_codec_context, audio_tracks, hdr, video_sources);
std::string output_filepath = create_new_recording_filepath_from_timestamp(arg_parser.filename, "Replay", file_extension, date_folders);
RecordingStartResult recording_start_result = start_recording_create_streams(output_filepath.c_str(), arg_parser, video_codec_context, audio_tracks, hdr, video_sources);
if(!recording_start_result.av_format_context)
return;
@@ -2386,6 +2398,7 @@ static std::vector<VideoSource> create_video_sources(const args_parser &arg_pars
}
}
// TODO: Video size should be end pos - start pos, where start pos = pos and end pos = pos + size
video_size = {0, 0};
for(const VideoSource &video_source : video_sources) {
video_size.x = std::max(video_size.x, video_source.metadata.video_size.x);
@@ -3430,7 +3443,7 @@ static bool get_image_format_from_filename(const char *filename, gsr_image_forma
}
// TODO: replace this with start_recording_create_streams
static bool av_open_file_write_header(AVFormatContext *av_format_context, const char *filename) {
static bool av_open_file_write_header(AVFormatContext *av_format_context, const char *filename, const char *ffmpeg_opts) {
int ret = avio_open(&av_format_context->pb, filename, AVIO_FLAG_WRITE);
if(ret < 0) {
fprintf(stderr, "gsr error: Could not open '%s': %s\n", filename, av_error_to_string(ret));
@@ -3439,7 +3452,9 @@ static bool av_open_file_write_header(AVFormatContext *av_format_context, const
AVDictionary *options = nullptr;
av_dict_set(&options, "strict", "experimental", 0);
//av_dict_set_int(&av_format_context->metadata, "video_full_range_flag", 1, 0);
if(ffmpeg_opts)
av_dict_parse_string(&options, ffmpeg_opts, "=", ";", 0);
ret = avformat_write_header(av_format_context, &options);
if(ret < 0)
@@ -3931,7 +3946,7 @@ int main(int argc, char **argv) {
if(audio_stream && !merged_audio_inputs.track_name.empty())
av_dict_set(&audio_stream->metadata, "title", merged_audio_inputs.track_name.c_str(), 0);
open_audio(audio_codec_context);
open_audio(audio_codec_context, arg_parser.ffmpeg_audio_opts);
if(audio_stream)
avcodec_parameters_from_context(audio_stream->codecpar, audio_codec_context);
@@ -3989,7 +4004,7 @@ int main(int argc, char **argv) {
//av_dump_format(av_format_context, 0, filename, 1);
if(!is_replaying) {
if(!av_open_file_write_header(av_format_context, arg_parser.filename))
if(!av_open_file_write_header(av_format_context, arg_parser.filename, arg_parser.ffmpeg_opts))
_exit(1);
}
@@ -4448,7 +4463,7 @@ int main(int argc, char **argv) {
std::lock_guard<std::mutex> lock(audio_filter_mutex);
replay_recording_items.clear();
replay_recording_filepath = create_new_recording_filepath_from_timestamp(arg_parser.replay_recording_directory, "Video", file_extension, arg_parser.date_folders);
replay_recording_start_result = start_recording_create_streams(replay_recording_filepath.c_str(), arg_parser.container_format, video_codec_context, audio_tracks, hdr, video_sources);
replay_recording_start_result = start_recording_create_streams(replay_recording_filepath.c_str(), arg_parser, video_codec_context, audio_tracks, hdr, video_sources);
if(replay_recording_start_result.av_format_context) {
const size_t video_recording_destination_id = gsr_encoder_add_recording_destination(&encoder, video_codec_context, replay_recording_start_result.av_format_context, replay_recording_start_result.video_stream, video_frame->pts);
if(video_recording_destination_id != (size_t)-1)
@@ -4510,7 +4525,7 @@ int main(int argc, char **argv) {
save_replay_seconds = 0;
save_replay_output_filepath.clear();
save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, encoder.replay_buffer, arg_parser.filename, arg_parser.container_format, file_extension, arg_parser.date_folders, hdr, video_sources, current_save_replay_seconds);
save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, encoder.replay_buffer, arg_parser, file_extension, arg_parser.date_folders, hdr, video_sources, current_save_replay_seconds);
if(arg_parser.restart_replay_on_save && current_save_replay_seconds == save_replay_seconds_full)
gsr_replay_buffer_clear(encoder.replay_buffer);

View File

@@ -59,17 +59,26 @@ struct pa_handle {
std::mutex reconnect_mutex;
DeviceType device_type;
char stream_name[256];
char node_name[256];
bool reconnect;
double reconnect_last_tried_seconds;
char device_name[DEVICE_NAME_MAX_SIZE];
char default_output_device_name[DEVICE_NAME_MAX_SIZE];
char default_input_device_name[DEVICE_NAME_MAX_SIZE];
pa_proplist *proplist;
bool connected;
};
static void pa_sound_device_free(pa_handle *p) {
assert(p);
if(p->proplist) {
pa_proplist_free(p->proplist);
p->proplist = NULL;
}
if (p->stream) {
pa_stream_unref(p->stream);
p->stream = NULL;
@@ -186,6 +195,7 @@ static pa_handle* pa_sound_device_new(const char *server,
p = pa_xnew0(pa_handle, 1);
p->attr = *attr;
p->ss = *ss;
snprintf(p->node_name, sizeof(p->node_name), "%s", name);
snprintf(p->stream_name, sizeof(p->stream_name), "%s", stream_name);
p->reconnect = true;
@@ -206,17 +216,17 @@ static pa_handle* pa_sound_device_new(const char *server,
p->output_length = buffer_size;
p->output_index = 0;
pa_proplist *proplist = pa_proplist_new();
pa_proplist_sets(proplist, PA_PROP_MEDIA_ROLE, "production");
p->proplist = pa_proplist_new();
pa_proplist_sets(p->proplist, PA_PROP_MEDIA_ROLE, "production");
if(strcmp(device_name, "") == 0) {
pa_proplist_sets(proplist, "node.autoconnect", "false");
pa_proplist_sets(proplist, "node.dont-reconnect", "true");
pa_proplist_sets(p->proplist, "node.autoconnect", "false");
pa_proplist_sets(p->proplist, "node.dont-reconnect", "true");
}
if (!(p->mainloop = pa_mainloop_new()))
goto fail;
if (!(p->context = pa_context_new_with_proplist(pa_mainloop_get_api(p->mainloop), name, proplist)))
if (!(p->context = pa_context_new_with_proplist(pa_mainloop_get_api(p->mainloop), p->node_name, p->proplist)))
goto fail;
if (pa_context_connect(p->context, server, PA_CONTEXT_NOFLAGS, NULL) < 0) {
@@ -246,17 +256,58 @@ static pa_handle* pa_sound_device_new(const char *server,
if(pa)
pa_operation_unref(pa);
pa_proplist_free(proplist);
p->connected = true;
return p;
fail:
if (rerror)
*rerror = error;
pa_sound_device_free(p);
pa_proplist_free(proplist);
return NULL;
}
// Finalize context setup once the PulseAudio context reaches the READY
// state: mark the handle as connected and subscribe to server events.
// No-op if already connected, if the context is gone, or if the context
// has not finished connecting yet. Safe to call repeatedly (polled from
// the read path).
static void pa_sound_device_update_context_status(pa_handle *p) {
if(p->connected || !p->context || pa_context_get_state(p->context) != PA_CONTEXT_READY)
return;
p->connected = true;
// subscribe_cb is defined elsewhere in this file; presumably it reacts to
// default sink/source changes reported by the server -- TODO confirm.
pa_context_set_subscribe_callback(p->context, subscribe_cb, p);
// PA_SUBSCRIPTION_MASK_SERVER limits notifications to server-level events.
pa_operation *pa = pa_context_subscribe(p->context, PA_SUBSCRIPTION_MASK_SERVER, NULL, NULL);
if(pa)
pa_operation_unref(pa);
}
// Recreate the PulseAudio context from scratch after the previous one was
// lost (e.g. server restart / dropped connection). Disconnects and releases
// any existing context, then builds a new one from the handle's saved node
// name and proplist and starts connecting it.
//
// Returns true when the new context was created and pa_context_connect
// succeeded; the connection itself may still be completing asynchronously
// (pa_sound_device_update_context_status is called to pick that up).
// Returns false with p->context reset to NULL on failure.
static bool pa_sound_device_handle_context_recreate(pa_handle *p) {
// Tear down the stale context, if any, before creating a replacement.
if(p->context) {
pa_context_disconnect(p->context);
pa_context_unref(p->context);
p->context = NULL;
p->connected = false;
}
if (!(p->context = pa_context_new_with_proplist(pa_mainloop_get_api(p->mainloop), p->node_name, p->proplist))) {
fprintf(stderr, "gsr error: pa_context_new_with_proplist failed\n");
goto fail;
}
// nullptr server means "connect to the default PulseAudio server".
if(pa_context_connect(p->context, nullptr, PA_CONTEXT_NOFLAGS, NULL) < 0) {
fprintf(stderr, "gsr error: pa_context_connect failed\n");
goto fail;
}
// Pump the mainloop once so the connection can make progress, then mark
// the handle connected/subscribed if the context is already READY.
pa_mainloop_iterate(p->mainloop, 0, NULL);
pa_sound_device_update_context_status(p);
return true;
fail:
if(p->context) {
pa_context_disconnect(p->context);
pa_context_unref(p->context);
p->context = NULL;
}
return false;
}
static bool pa_sound_device_should_reconnect(pa_handle *p, double now, char *device_name, size_t device_name_size) {
std::lock_guard<std::mutex> lock(p->reconnect_mutex);
@@ -276,7 +327,6 @@ static bool pa_sound_device_should_reconnect(pa_handle *p, double now, char *dev
}
static bool pa_sound_device_handle_reconnect(pa_handle *p, char *device_name, size_t device_name_size, double now) {
int r;
if(!pa_sound_device_should_reconnect(p, now, device_name, device_name_size))
return true;
@@ -284,6 +334,10 @@ static bool pa_sound_device_handle_reconnect(pa_handle *p, char *device_name, si
pa_stream_disconnect(p->stream);
pa_stream_unref(p->stream);
p->stream = NULL;
pa_sound_device_handle_context_recreate(p);
if(!p->connected)
return false;
}
if(!(p->stream = pa_stream_new(p->context, p->stream_name, &p->ss, NULL))) {
@@ -291,8 +345,8 @@ static bool pa_sound_device_handle_reconnect(pa_handle *p, char *device_name, si
return false;
}
r = pa_stream_connect_record(p->stream, device_name, &p->attr,
(pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_ADJUST_LATENCY|PA_STREAM_AUTO_TIMING_UPDATE));
const int r = pa_stream_connect_record(p->stream, device_name, &p->attr,
(pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_ADJUST_LATENCY|PA_STREAM_AUTO_TIMING_UPDATE|PA_STREAM_DONT_MOVE));
if(r < 0) {
//pa_context_errno(p->context);
@@ -320,6 +374,15 @@ static int pa_sound_device_read(pa_handle *p, double timeout_seconds) {
pa_mainloop_iterate(p->mainloop, 0, NULL);
if(!p->context) {
if(!pa_sound_device_handle_context_recreate(p))
goto fail;
}
pa_sound_device_update_context_status(p);
if(!p->connected)
goto fail;
if(!pa_sound_device_handle_reconnect(p, device_name, sizeof(device_name), start_time) || !p->stream)
goto fail;