mirror of
https://repo.dec05eba.com/gpu-screen-recorder
synced 2026-03-31 09:07:13 +09:00
Add option to change output resolution (-s)
This commit is contained in:
18
README.md
18
README.md
@@ -101,7 +101,8 @@ When compiling GPU Screen Recorder with portal support (`-Dportal=true`, which i
|
|||||||
* libpipewire (and libspa which is usually part of libpipewire)
|
* libpipewire (and libspa which is usually part of libpipewire)
|
||||||
|
|
||||||
# How to use
|
# How to use
|
||||||
Run `gpu-screen-recorder --help` to see all options and also examples.
|
Run `gpu-screen-recorder --help` to see all options and also examples.\
|
||||||
|
There is also a gui for the gpu screen recorder called [gpu-screen-recorder-gtk](https://git.dec05eba.com/gpu-screen-recorder-gtk/).
|
||||||
## Recording
|
## Recording
|
||||||
Here is an example of how to record your monitor and the default audio output: `gpu-screen-recorder -w screen -f 60 -a default_output -o ~/Videos/test_video.mp4`.
|
Here is an example of how to record your monitor and the default audio output: `gpu-screen-recorder -w screen -f 60 -a default_output -o ~/Videos/test_video.mp4`.
|
||||||
You can stop and save the recording with `Ctrl+C` or by running `killall -SIGINT gpu-screen-recorder`.
|
You can stop and save the recording with `Ctrl+C` or by running `killall -SIGINT gpu-screen-recorder`.
|
||||||
@@ -119,15 +120,14 @@ The replay buffer is stored in ram (as encoded video), so don't use a too large
|
|||||||
To save a video in replay mode, you need to send signal SIGUSR1 to gpu screen recorder. You can do this by running `killall -SIGUSR1 gpu-screen-recorder`.\
|
To save a video in replay mode, you need to send signal SIGUSR1 to gpu screen recorder. You can do this by running `killall -SIGUSR1 gpu-screen-recorder`.\
|
||||||
To stop recording send SIGINT to gpu screen recorder. You can do this by running `killall -SIGINT gpu-screen-recorder` or pressing `Ctrl-C` in the terminal that runs gpu screen recorder. When recording a regular non-replay video this will also save the video.\
|
To stop recording send SIGINT to gpu screen recorder. You can do this by running `killall -SIGINT gpu-screen-recorder` or pressing `Ctrl-C` in the terminal that runs gpu screen recorder. When recording a regular non-replay video this will also save the video.\
|
||||||
To pause/unpause recording send SIGUSR2 to gpu screen recorder. You can do this by running `killall -SIGUSR2 gpu-screen-recorder`. This is only applicable and useful when recording (not streaming nor replay).\
|
To pause/unpause recording send SIGUSR2 to gpu screen recorder. You can do this by running `killall -SIGUSR2 gpu-screen-recorder`. This is only applicable and useful when recording (not streaming nor replay).\
|
||||||
## Finding audio device name
|
## Audio device name
|
||||||
You can find the default output audio device (headset, speakers (in other words, desktop audio)) with the command `pactl get-default-sink`. Add `monitor` to the end of that to use that as an audio input in gpu screen recorder.\
|
To record the default output device (desktop audio) you can use the `default_output` option, for example `-a default_output`.\
|
||||||
You can find the default input audio device (microphone) with the command `pactl get-default-source`. This input should not have `monitor` added to the end when used in gpu screen recorder.\
|
To record the default input device (microphone) you can use the `default_input` option, for example `-a default_input`.\
|
||||||
Example of recording both desktop audio and microphone: `gpu-screen-recorder -w screen -f 60 -a "$(pactl get-default-sink).monitor" -a "$(pactl get-default-source)" -o ~/Videos/test_video.mp4`.\
|
To list all available audio devices run `gpu-screen-recorder --list-audio-devices`. The name to use with GPU Screen Recorder will be on the left side and the human readable name is on the right side.\
|
||||||
A name (that is visible to pipewire) can be given to an audio input device by prefixing the audio input with `<name>/`, for example `dummy/$(pactl get-default-sink).monitor`.\
|
To record multiple audio devices to multiple audio tracks specify the `-a` option multiple times, for example `-a default_output -a default_input`.\
|
||||||
Note that if you use multiple audio inputs then they are each recorded into separate audio tracks in the video file. If you want to merge multiple audio inputs into one audio track then separate the audio inputs by "|" in one -a argument,
|
To record multiple audio devices into one audio track (merged) specify the `-a` option once split with `|` for each audio device, for example `-a "default_output|default_input"`.\
|
||||||
for example `-a "$(pactl get-default-sink).monitor|$(pactl get-default-source)"`.
|
In wireplumber the name of the audio will be in the format `gsr-<audio_device>`, but you can change that name by prefixing the audio device with a name and then a forward slash, for example: `-a "name/default_output"`.
|
||||||
|
|
||||||
There is also a gui for the gpu screen recorder called [gpu-screen-recorder-gtk](https://git.dec05eba.com/gpu-screen-recorder-gtk/).
|
|
||||||
## Simple way to run replay without gui
|
## Simple way to run replay without gui
|
||||||
Run the script `scripts/start-replay.sh` to start replay and then `scripts/save-replay.sh` to save a replay and `scripts/stop-replay.sh` to stop the replay. The videos are saved to `$HOME/Videos`.
|
Run the script `scripts/start-replay.sh` to start replay and then `scripts/save-replay.sh` to save a replay and `scripts/stop-replay.sh` to stop the replay. The videos are saved to `$HOME/Videos`.
|
||||||
You can use these scripts to start replay at system startup if you add `scripts/start-replay.sh` to startup (this can be done differently depending on your desktop environment / window manager) and then go into
|
You can use these scripts to start replay at system startup if you add `scripts/start-replay.sh` to startup (this can be done differently depending on your desktop environment / window manager) and then go into
|
||||||
|
|||||||
4
TODO
4
TODO
@@ -74,8 +74,6 @@ Make it possible to select which /dev/dri/card* to use, but that requires opengl
|
|||||||
Test if p2 state can be worked around by using pure nvenc api and overwriting cuInit/cuCtxCreate* to not do anything. Cuda might be loaded when using nvenc but it might not be used, with certain record options? (such as h264 p5).
|
Test if p2 state can be worked around by using pure nvenc api and overwriting cuInit/cuCtxCreate* to not do anything. Cuda might be loaded when using nvenc but it might not be used, with certain record options? (such as h264 p5).
|
||||||
nvenc uses cuda when using b frames and rgb->yuv conversion, so convert the image ourselves instead.-
|
nvenc uses cuda when using b frames and rgb->yuv conversion, so convert the image ourselves instead.-
|
||||||
|
|
||||||
Mesa doesn't support global headers (AV_CODEC_FLAG_GLOBAL_HEADER) with h264... which also breaks mkv since mkv requires global header. Right now gpu screen recorder will forcefully set video codec to hevc when h264 is requested for mkv files.
|
|
||||||
|
|
||||||
Drop frames if live streaming cant keep up with target fps, or dynamically change resolution/quality.
|
Drop frames if live streaming cant keep up with target fps, or dynamically change resolution/quality.
|
||||||
|
|
||||||
Support low power option.
|
Support low power option.
|
||||||
@@ -103,7 +101,7 @@ Investigate if there is a way to do gpu->gpu copy directly without touching syst
|
|||||||
|
|
||||||
Go back to using pure vaapi without opengl for video encoding? rotation (transpose) can be done if its done after (rgb to yuv) color conversion.
|
Go back to using pure vaapi without opengl for video encoding? rotation (transpose) can be done if its done after (rgb to yuv) color conversion.
|
||||||
|
|
||||||
Implement scaling and use lanczos resampling for better quality. Lanczos resampling can also be used for YUV chroma for better color quality on small text.
|
Use lanczos resampling for better scaling quality. Lanczos resampling can also be used for YUV chroma for better color quality on small text.
|
||||||
|
|
||||||
Flac is disabled because the frame sizes are too large which causes big audio/video desync.
|
Flac is disabled because the frame sizes are too large which causes big audio/video desync.
|
||||||
|
|
||||||
|
|||||||
@@ -19,8 +19,9 @@ Environment=COLOR_RANGE=limited
|
|||||||
Environment=KEYINT=2
|
Environment=KEYINT=2
|
||||||
Environment=ENCODER=gpu
|
Environment=ENCODER=gpu
|
||||||
Environment=RESTORE_PORTAL_SESSION=yes
|
Environment=RESTORE_PORTAL_SESSION=yes
|
||||||
|
Environment=OUTPUT_RESOLUTION=0x0
|
||||||
Environment=ADDITIONAL_ARGS=
|
Environment=ADDITIONAL_ARGS=
|
||||||
ExecStart=gpu-screen-recorder -v no -w "${WINDOW}" -c "${CONTAINER}" -q "${QUALITY}" -k "${CODEC}" -ac "${AUDIO_CODEC}" -a "${AUDIO_DEVICE}" -a "${SECONDARY_AUDIO_DEVICE}" -f "${FRAMERATE}" -r "${REPLAYDURATION}" -o "${OUTPUTDIR}" -df "${MAKEFOLDERS}" $ADDITIONAL_ARGS -cr "${COLOR_RANGE}" -keyint "${KEYINT}" -restore-portal-session "${RESTORE_PORTAL_SESSION}" -encoder "${ENCODER}" -bm "${BITRATE_MODE}"
|
ExecStart=gpu-screen-recorder -v no -w "${WINDOW}" -s "${OUTPUT_RESOLUTION}" -c "${CONTAINER}" -q "${QUALITY}" -k "${CODEC}" -ac "${AUDIO_CODEC}" -a "${AUDIO_DEVICE}" -a "${SECONDARY_AUDIO_DEVICE}" -f "${FRAMERATE}" -r "${REPLAYDURATION}" -o "${OUTPUTDIR}" -df "${MAKEFOLDERS}" $ADDITIONAL_ARGS -cr "${COLOR_RANGE}" -keyint "${KEYINT}" -restore-portal-session "${RESTORE_PORTAL_SESSION}" -encoder "${ENCODER}" -bm "${BITRATE_MODE}"
|
||||||
KillSignal=SIGINT
|
KillSignal=SIGINT
|
||||||
Restart=on-failure
|
Restart=on-failure
|
||||||
RestartSec=5s
|
RestartSec=5s
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ typedef struct {
|
|||||||
bool hdr;
|
bool hdr;
|
||||||
bool record_cursor;
|
bool record_cursor;
|
||||||
int fps;
|
int fps;
|
||||||
|
vec2i output_resolution;
|
||||||
} gsr_capture_kms_params;
|
} gsr_capture_kms_params;
|
||||||
|
|
||||||
gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params);
|
gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params);
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ typedef struct {
|
|||||||
gsr_color_range color_range;
|
gsr_color_range color_range;
|
||||||
bool record_cursor;
|
bool record_cursor;
|
||||||
bool use_software_video_encoder;
|
bool use_software_video_encoder;
|
||||||
|
vec2i output_resolution;
|
||||||
} gsr_capture_nvfbc_params;
|
} gsr_capture_nvfbc_params;
|
||||||
|
|
||||||
gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params);
|
gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params);
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ typedef struct {
|
|||||||
bool restore_portal_session;
|
bool restore_portal_session;
|
||||||
/* If this is set to NULL then this defaults to $XDG_CONFIG_HOME/gpu-screen-recorder/restore_token ($XDG_CONFIG_HOME defaults to $HOME/.config) */
|
/* If this is set to NULL then this defaults to $XDG_CONFIG_HOME/gpu-screen-recorder/restore_token ($XDG_CONFIG_HOME defaults to $HOME/.config) */
|
||||||
const char *portal_session_token_filepath;
|
const char *portal_session_token_filepath;
|
||||||
|
vec2i output_resolution;
|
||||||
} gsr_capture_portal_params;
|
} gsr_capture_portal_params;
|
||||||
|
|
||||||
gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params);
|
gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params);
|
||||||
|
|||||||
@@ -8,10 +8,10 @@ typedef struct {
|
|||||||
gsr_egl *egl;
|
gsr_egl *egl;
|
||||||
unsigned long window;
|
unsigned long window;
|
||||||
bool follow_focused; /* If this is set then |window| is ignored */
|
bool follow_focused; /* If this is set then |window| is ignored */
|
||||||
vec2i region_size; /* This is currently only used with |follow_focused| */
|
|
||||||
gsr_color_range color_range;
|
gsr_color_range color_range;
|
||||||
bool record_cursor;
|
bool record_cursor;
|
||||||
gsr_color_depth color_depth;
|
gsr_color_depth color_depth;
|
||||||
|
vec2i output_resolution;
|
||||||
} gsr_capture_xcomposite_params;
|
} gsr_capture_xcomposite_params;
|
||||||
|
|
||||||
gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params);
|
gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params);
|
||||||
|
|||||||
@@ -51,4 +51,6 @@ bool video_codec_context_is_vaapi(AVCodecContext *video_codec_context);
|
|||||||
bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, AVFrame *video_frame, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, uint32_t format, vec2i size, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes);
|
bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, AVFrame *video_frame, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, uint32_t format, vec2i size, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes);
|
||||||
bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, AVCodecContext *video_codec_context, AVFrame *video_frame);
|
bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, AVCodecContext *video_codec_context, AVFrame *video_frame);
|
||||||
|
|
||||||
|
vec2i scale_keep_aspect_ratio(vec2i from, vec2i to);
|
||||||
|
|
||||||
#endif /* GSR_UTILS_H */
|
#endif /* GSR_UTILS_H */
|
||||||
|
|||||||
@@ -9,4 +9,8 @@ typedef struct {
|
|||||||
float x, y;
|
float x, y;
|
||||||
} vec2f;
|
} vec2f;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
double x, y;
|
||||||
|
} vec2d;
|
||||||
|
|
||||||
#endif /* VEC2_H */
|
#endif /* VEC2_H */
|
||||||
|
|||||||
@@ -214,8 +214,15 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
|
|||||||
/* Disable vsync */
|
/* Disable vsync */
|
||||||
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
|
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
|
||||||
|
|
||||||
|
if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
|
||||||
|
self->params.output_resolution = self->capture_size;
|
||||||
video_codec_context->width = FFALIGN(self->capture_size.x, 2);
|
video_codec_context->width = FFALIGN(self->capture_size.x, 2);
|
||||||
video_codec_context->height = FFALIGN(self->capture_size.y, 2);
|
video_codec_context->height = FFALIGN(self->capture_size.y, 2);
|
||||||
|
} else {
|
||||||
|
self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
|
||||||
|
video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2);
|
||||||
|
video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2);
|
||||||
|
}
|
||||||
|
|
||||||
frame->width = video_codec_context->width;
|
frame->width = video_codec_context->width;
|
||||||
frame->height = video_codec_context->height;
|
frame->height = video_codec_context->height;
|
||||||
@@ -429,7 +436,12 @@ static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *sel
|
|||||||
return cursor_drm_fd;
|
return cursor_drm_fd;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation) {
|
static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation, vec2i output_size) {
|
||||||
|
const vec2d scale = {
|
||||||
|
self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
|
||||||
|
self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
|
||||||
|
};
|
||||||
|
|
||||||
const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
|
const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
|
||||||
const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
|
const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
|
||||||
|
|
||||||
@@ -458,6 +470,9 @@ static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cursor_pos.x *= scale.x;
|
||||||
|
cursor_pos.y *= scale.y;
|
||||||
|
|
||||||
cursor_pos.x += target_pos.x;
|
cursor_pos.x += target_pos.x;
|
||||||
cursor_pos.y += target_pos.y;
|
cursor_pos.y += target_pos.y;
|
||||||
|
|
||||||
@@ -487,32 +502,37 @@ static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color
|
|||||||
self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
|
self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
|
||||||
|
|
||||||
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
||||||
self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
|
self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
|
||||||
|
|
||||||
gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
|
gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
|
||||||
cursor_pos, cursor_size,
|
cursor_pos, (vec2i){cursor_size.x * scale.x, cursor_size.y * scale.y},
|
||||||
(vec2i){0, 0}, cursor_size,
|
(vec2i){0, 0}, cursor_size,
|
||||||
texture_rotation, cursor_texture_id_is_external);
|
texture_rotation, cursor_texture_id_is_external);
|
||||||
|
|
||||||
self->params.egl->glDisable(GL_SCISSOR_TEST);
|
self->params.egl->glDisable(GL_SCISSOR_TEST);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos) {
|
static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos, vec2i output_size) {
|
||||||
if(!self->x11_cursor.visible)
|
if(!self->x11_cursor.visible)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
const vec2d scale = {
|
||||||
|
self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
|
||||||
|
self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
|
||||||
|
};
|
||||||
|
|
||||||
gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy));
|
gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy));
|
||||||
|
|
||||||
const vec2i cursor_pos = {
|
const vec2i cursor_pos = {
|
||||||
target_pos.x + self->x11_cursor.position.x - self->x11_cursor.hotspot.x - capture_pos.x,
|
target_pos.x + (self->x11_cursor.position.x - self->x11_cursor.hotspot.x) * scale.x - capture_pos.x,
|
||||||
target_pos.y + self->x11_cursor.position.y - self->x11_cursor.hotspot.y - capture_pos.y
|
target_pos.y + (self->x11_cursor.position.y - self->x11_cursor.hotspot.y) * scale.y - capture_pos.y
|
||||||
};
|
};
|
||||||
|
|
||||||
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
||||||
self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
|
self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
|
||||||
|
|
||||||
gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
|
gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
|
||||||
cursor_pos, self->x11_cursor.size,
|
cursor_pos, (vec2i){self->x11_cursor.size.x * scale.x, self->x11_cursor.size.y * scale.y},
|
||||||
(vec2i){0, 0}, self->x11_cursor.size,
|
(vec2i){0, 0}, self->x11_cursor.size,
|
||||||
0.0f, false);
|
0.0f, false);
|
||||||
|
|
||||||
@@ -562,8 +582,12 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
|
|||||||
" If you are experience performance problems in the video then record a single window on X11 or use portal capture option instead\n");
|
" If you are experience performance problems in the video then record a single window on X11 or use portal capture option instead\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0;
|
||||||
|
vec2i output_size = is_scaled ? self->params.output_resolution : self->capture_size;
|
||||||
|
output_size = scale_keep_aspect_ratio(self->capture_size, output_size);
|
||||||
|
|
||||||
const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
|
const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
|
||||||
const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
|
const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) };
|
||||||
self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h });
|
self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h });
|
||||||
gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd);
|
gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd);
|
||||||
|
|
||||||
@@ -586,7 +610,7 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
|
|||||||
pitches[i] = drm_fd->dma_buf[i].pitch;
|
pitches[i] = drm_fd->dma_buf[i].pitch;
|
||||||
modifiers[i] = drm_fd->modifier;
|
modifiers[i] = drm_fd->modifier;
|
||||||
}
|
}
|
||||||
if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, self->capture_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) {
|
if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, output_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) {
|
||||||
fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
||||||
self->fast_path_failed = true;
|
self->fast_path_failed = true;
|
||||||
}
|
}
|
||||||
@@ -602,7 +626,7 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
|
|||||||
}
|
}
|
||||||
|
|
||||||
gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
|
gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
|
||||||
target_pos, self->capture_size,
|
target_pos, output_size,
|
||||||
capture_pos, self->capture_size,
|
capture_pos, self->capture_size,
|
||||||
texture_rotation, self->external_texture_fallback);
|
texture_rotation, self->external_texture_fallback);
|
||||||
}
|
}
|
||||||
@@ -613,9 +637,9 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
|
|||||||
// the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device.
|
// the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device.
|
||||||
if(self->is_x11) {
|
if(self->is_x11) {
|
||||||
const vec2i cursor_monitor_offset = self->capture_pos;
|
const vec2i cursor_monitor_offset = self->capture_pos;
|
||||||
render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos);
|
render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos, output_size);
|
||||||
} else if(cursor_drm_fd) {
|
} else if(cursor_drm_fd) {
|
||||||
render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation);
|
render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation, output_size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -240,6 +240,11 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if(!self->capture_region) {
|
||||||
|
self->width = self->tracking_width;
|
||||||
|
self->height = self->tracking_height;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
error_cleanup:
|
error_cleanup:
|
||||||
@@ -351,6 +356,14 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
|
|||||||
video_codec_context->height = FFALIGN(self->tracking_height, 2);
|
video_codec_context->height = FFALIGN(self->tracking_height, 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
|
||||||
|
self->params.output_resolution = (vec2i){video_codec_context->width, video_codec_context->height};
|
||||||
|
} else {
|
||||||
|
self->params.output_resolution = scale_keep_aspect_ratio((vec2i){video_codec_context->width, video_codec_context->height}, self->params.output_resolution);
|
||||||
|
video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2);
|
||||||
|
video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2);
|
||||||
|
}
|
||||||
|
|
||||||
frame->width = video_codec_context->width;
|
frame->width = video_codec_context->width;
|
||||||
frame->height = video_codec_context->height;
|
frame->height = video_codec_context->height;
|
||||||
|
|
||||||
@@ -390,6 +403,13 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const vec2i frame_size = (vec2i){self->width, self->height};
|
||||||
|
const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0;
|
||||||
|
vec2i output_size = is_scaled ? self->params.output_resolution : frame_size;
|
||||||
|
output_size = scale_keep_aspect_ratio(frame_size, output_size);
|
||||||
|
|
||||||
|
const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) };
|
||||||
|
|
||||||
NVFBC_FRAME_GRAB_INFO frame_info;
|
NVFBC_FRAME_GRAB_INFO frame_info;
|
||||||
memset(&frame_info, 0, sizeof(frame_info));
|
memset(&frame_info, 0, sizeof(frame_info));
|
||||||
|
|
||||||
@@ -412,8 +432,8 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
|
|||||||
self->params.egl->glFinish();
|
self->params.egl->glFinish();
|
||||||
|
|
||||||
gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
|
gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
|
||||||
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
|
target_pos, (vec2i){output_size.x, output_size.y},
|
||||||
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
|
(vec2i){0, 0}, frame_size,
|
||||||
0.0f, false);
|
0.0f, false);
|
||||||
|
|
||||||
self->params.egl->glFlush();
|
self->params.egl->glFlush();
|
||||||
|
|||||||
@@ -300,8 +300,15 @@ static int gsr_capture_portal_start(gsr_capture *cap, AVCodecContext *video_code
|
|||||||
/* Disable vsync */
|
/* Disable vsync */
|
||||||
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
|
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
|
||||||
|
|
||||||
|
if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
|
||||||
|
self->params.output_resolution = self->capture_size;
|
||||||
video_codec_context->width = FFALIGN(self->capture_size.x, 2);
|
video_codec_context->width = FFALIGN(self->capture_size.x, 2);
|
||||||
video_codec_context->height = FFALIGN(self->capture_size.y, 2);
|
video_codec_context->height = FFALIGN(self->capture_size.y, 2);
|
||||||
|
} else {
|
||||||
|
self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
|
||||||
|
video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2);
|
||||||
|
video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2);
|
||||||
|
}
|
||||||
|
|
||||||
frame->width = video_codec_context->width;
|
frame->width = video_codec_context->width;
|
||||||
frame->height = video_codec_context->height;
|
frame->height = video_codec_context->height;
|
||||||
@@ -335,7 +342,11 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
|
const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0;
|
||||||
|
vec2i output_size = is_scaled ? self->params.output_resolution : self->capture_size;
|
||||||
|
output_size = scale_keep_aspect_ratio(self->capture_size, output_size);
|
||||||
|
|
||||||
|
const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) };
|
||||||
|
|
||||||
self->params.egl->glFlush();
|
self->params.egl->glFlush();
|
||||||
self->params.egl->glFinish();
|
self->params.egl->glFinish();
|
||||||
@@ -354,7 +365,7 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
|
|||||||
pitches[i] = self->dmabuf_data[i].stride;
|
pitches[i] = self->dmabuf_data[i].stride;
|
||||||
modifiers[i] = pipewire_modifiers;
|
modifiers[i] = pipewire_modifiers;
|
||||||
}
|
}
|
||||||
if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, self->capture_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) {
|
if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, output_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) {
|
||||||
fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
||||||
self->fast_path_failed = true;
|
self->fast_path_failed = true;
|
||||||
}
|
}
|
||||||
@@ -364,21 +375,26 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
|
|||||||
|
|
||||||
if(self->fast_path_failed) {
|
if(self->fast_path_failed) {
|
||||||
gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
|
gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
|
||||||
target_pos, self->capture_size,
|
target_pos, output_size,
|
||||||
(vec2i){region.x, region.y}, self->capture_size,
|
(vec2i){region.x, region.y}, self->capture_size,
|
||||||
0.0f, using_external_image);
|
0.0f, using_external_image);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(self->params.record_cursor) {
|
if(self->params.record_cursor && self->texture_map.cursor_texture_id > 0 && cursor_region.width > 0) {
|
||||||
|
const vec2d scale = {
|
||||||
|
self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
|
||||||
|
self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
|
||||||
|
};
|
||||||
|
|
||||||
const vec2i cursor_pos = {
|
const vec2i cursor_pos = {
|
||||||
target_pos.x + cursor_region.x,
|
target_pos.x + (cursor_region.x * scale.x),
|
||||||
target_pos.y + cursor_region.y
|
target_pos.y + (cursor_region.y * scale.y)
|
||||||
};
|
};
|
||||||
|
|
||||||
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
||||||
self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
|
self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
|
||||||
gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
|
gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
|
||||||
(vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width, cursor_region.height},
|
(vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width * scale.x, cursor_region.height * scale.y},
|
||||||
(vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
|
(vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
|
||||||
0.0f, false);
|
0.0f, false);
|
||||||
self->params.egl->glDisable(GL_SCISSOR_TEST);
|
self->params.egl->glDisable(GL_SCISSOR_TEST);
|
||||||
|
|||||||
@@ -113,13 +113,14 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
|
|||||||
self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
|
self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
|
||||||
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||||
|
|
||||||
vec2i video_size = self->texture_size;
|
if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
|
||||||
|
self->params.output_resolution = self->texture_size;
|
||||||
if(self->params.region_size.x > 0 && self->params.region_size.y > 0)
|
video_codec_context->width = FFALIGN(self->texture_size.x, 2);
|
||||||
video_size = self->params.region_size;
|
video_codec_context->height = FFALIGN(self->texture_size.y, 2);
|
||||||
|
} else {
|
||||||
video_codec_context->width = FFALIGN(video_size.x, 2);
|
video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2);
|
||||||
video_codec_context->height = FFALIGN(video_size.y, 2);
|
video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2);
|
||||||
|
}
|
||||||
|
|
||||||
frame->width = video_codec_context->width;
|
frame->width = video_codec_context->width;
|
||||||
frame->height = video_codec_context->height;
|
frame->height = video_codec_context->height;
|
||||||
@@ -257,14 +258,18 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
|
|||||||
gsr_color_conversion_clear(color_conversion);
|
gsr_color_conversion_clear(color_conversion);
|
||||||
}
|
}
|
||||||
|
|
||||||
const vec2i target_pos = { max_int(0, frame->width / 2 - self->texture_size.x / 2), max_int(0, frame->height / 2 - self->texture_size.y / 2) };
|
const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0;
|
||||||
|
vec2i output_size = is_scaled ? self->params.output_resolution : self->texture_size;
|
||||||
|
output_size = scale_keep_aspect_ratio(self->texture_size, output_size);
|
||||||
|
|
||||||
|
const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) };
|
||||||
|
|
||||||
self->params.egl->glFlush();
|
self->params.egl->glFlush();
|
||||||
self->params.egl->glFinish();
|
self->params.egl->glFinish();
|
||||||
|
|
||||||
/* Fast opengl free path */
|
/* Fast opengl free path */
|
||||||
if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
|
if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
|
||||||
if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, self->texture_size, self->video_codec_context, frame)) {
|
if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, output_size, self->video_codec_context, frame)) {
|
||||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
|
||||||
self->fast_path_failed = true;
|
self->fast_path_failed = true;
|
||||||
}
|
}
|
||||||
@@ -274,24 +279,29 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
|
|||||||
|
|
||||||
if(self->fast_path_failed) {
|
if(self->fast_path_failed) {
|
||||||
gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
|
gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
|
||||||
target_pos, self->texture_size,
|
target_pos, output_size,
|
||||||
(vec2i){0, 0}, self->texture_size,
|
(vec2i){0, 0}, self->texture_size,
|
||||||
0.0f, false);
|
0.0f, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(self->params.record_cursor && self->cursor.visible) {
|
if(self->params.record_cursor && self->cursor.visible) {
|
||||||
|
const vec2d scale = {
|
||||||
|
self->texture_size.x == 0 ? 0 : (double)output_size.x / (double)self->texture_size.x,
|
||||||
|
self->texture_size.y == 0 ? 0 : (double)output_size.y / (double)self->texture_size.y
|
||||||
|
};
|
||||||
|
|
||||||
gsr_cursor_tick(&self->cursor, self->window);
|
gsr_cursor_tick(&self->cursor, self->window);
|
||||||
|
|
||||||
const vec2i cursor_pos = {
|
const vec2i cursor_pos = {
|
||||||
target_pos.x + self->cursor.position.x - self->cursor.hotspot.x,
|
target_pos.x + (self->cursor.position.x - self->cursor.hotspot.x) * scale.x,
|
||||||
target_pos.y + self->cursor.position.y - self->cursor.hotspot.y
|
target_pos.y + (self->cursor.position.y - self->cursor.hotspot.y) * scale.y
|
||||||
};
|
};
|
||||||
|
|
||||||
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
self->params.egl->glEnable(GL_SCISSOR_TEST);
|
||||||
self->params.egl->glScissor(target_pos.x, target_pos.y, self->texture_size.x, self->texture_size.y);
|
self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
|
||||||
|
|
||||||
gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
|
gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
|
||||||
cursor_pos, self->cursor.size,
|
cursor_pos, (vec2i){self->cursor.size.x * scale.x, self->cursor.size.y * scale.y},
|
||||||
(vec2i){0, 0}, self->cursor.size,
|
(vec2i){0, 0}, self->cursor.size,
|
||||||
0.0f, false);
|
0.0f, false);
|
||||||
|
|
||||||
|
|||||||
51
src/main.cpp
51
src/main.cpp
@@ -1082,7 +1082,9 @@ static void usage_full() {
|
|||||||
fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
|
fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
|
||||||
fprintf(stderr, " Only containers that support h264, hevc, av1, vp8 or vp9 are supported, which means that only mp4, mkv, flv, webm (and some others) are supported.\n");
|
fprintf(stderr, " Only containers that support h264, hevc, av1, vp8 or vp9 are supported, which means that only mp4, mkv, flv, webm (and some others) are supported.\n");
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. This option is only supported (and required) when -w is \"focused\".\n");
|
fprintf(stderr, " -s The output resolution of the video in the format WxH, for example 1920x1080. If this is 0x0 then the original resolution is used. Optional, except when -w is \"focused\".\n");
|
||||||
|
fprintf(stderr, " Note: the captured content is scaled to this size. The output resolution might not be exactly as specified by this option. The original aspect ratio is respected so the resolution will match that.\n");
|
||||||
|
fprintf(stderr, " The video encoder might also need to add padding, which will result in black bars on the sides of the video. This is especially an issue on AMD.\n");
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, " -f Frame rate to record at. Recording will only capture frames at this target frame rate.\n");
|
fprintf(stderr, " -f Frame rate to record at. Recording will only capture frames at this target frame rate.\n");
|
||||||
fprintf(stderr, " For constant frame rate mode this option is the frame rate every frame will be captured at and if the capture frame rate is below this target frame rate then the frames will be duplicated.\n");
|
fprintf(stderr, " For constant frame rate mode this option is the frame rate every frame will be captured at and if the capture frame rate is below this target frame rate then the frames will be duplicated.\n");
|
||||||
@@ -1198,6 +1200,7 @@ static void usage_full() {
|
|||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "EXAMPLES:\n");
|
fprintf(stderr, "EXAMPLES:\n");
|
||||||
fprintf(stderr, " %s -w screen -f 60 -a default_output -o \"$HOME/Videos/video.mp4\"\n", program_name);
|
fprintf(stderr, " %s -w screen -f 60 -a default_output -o \"$HOME/Videos/video.mp4\"\n", program_name);
|
||||||
|
fprintf(stderr, " %s -w screen -f 60 -a default_output -a default_input -o \"$HOME/Videos/video.mp4\"\n", program_name);
|
||||||
fprintf(stderr, " %s -w screen -f 60 -a \"default_output|default_input\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
|
fprintf(stderr, " %s -w screen -f 60 -a \"default_output|default_input\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
|
||||||
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
|
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
|
||||||
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
|
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
|
||||||
@@ -2075,11 +2078,10 @@ static void list_audio_devices_command() {
|
|||||||
_exit(0);
|
_exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static gsr_capture* create_capture_impl(std::string &window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
|
static gsr_capture* create_capture_impl(std::string &window_str, vec2i output_resolution, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
|
||||||
bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
|
bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
|
||||||
gsr_color_depth color_depth)
|
gsr_color_depth color_depth)
|
||||||
{
|
{
|
||||||
vec2i region_size = { 0, 0 };
|
|
||||||
Window src_window_id = None;
|
Window src_window_id = None;
|
||||||
bool follow_focused = false;
|
bool follow_focused = false;
|
||||||
|
|
||||||
@@ -2090,18 +2092,8 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr
|
|||||||
_exit(2);
|
_exit(2);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(!screen_region) {
|
if(output_resolution.x <= 0 || output_resolution.y <= 0) {
|
||||||
fprintf(stderr, "Error: option -s is required when using -w focused\n");
|
fprintf(stderr, "Error: invalid value for option -s '%dx%d' when using -w focused option. expected width and height to be greater than 0\n", output_resolution.x, output_resolution.y);
|
||||||
usage();
|
|
||||||
}
|
|
||||||
|
|
||||||
if(sscanf(screen_region, "%dx%d", ®ion_size.x, ®ion_size.y) != 2) {
|
|
||||||
fprintf(stderr, "Error: invalid value for option -s '%s', expected a value in format WxH\n", screen_region);
|
|
||||||
usage();
|
|
||||||
}
|
|
||||||
|
|
||||||
if(region_size.x <= 0 || region_size.y <= 0) {
|
|
||||||
fprintf(stderr, "Error: invalud value for option -s '%s', expected width and height to be greater than 0\n", screen_region);
|
|
||||||
usage();
|
usage();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2121,6 +2113,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr
|
|||||||
portal_params.record_cursor = record_cursor;
|
portal_params.record_cursor = record_cursor;
|
||||||
portal_params.restore_portal_session = restore_portal_session;
|
portal_params.restore_portal_session = restore_portal_session;
|
||||||
portal_params.portal_session_token_filepath = portal_session_token_filepath;
|
portal_params.portal_session_token_filepath = portal_session_token_filepath;
|
||||||
|
portal_params.output_resolution = output_resolution;
|
||||||
capture = gsr_capture_portal_create(&portal_params);
|
capture = gsr_capture_portal_create(&portal_params);
|
||||||
if(!capture)
|
if(!capture)
|
||||||
_exit(1);
|
_exit(1);
|
||||||
@@ -2195,6 +2188,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr
|
|||||||
nvfbc_params.color_range = color_range;
|
nvfbc_params.color_range = color_range;
|
||||||
nvfbc_params.record_cursor = record_cursor;
|
nvfbc_params.record_cursor = record_cursor;
|
||||||
nvfbc_params.use_software_video_encoder = use_software_video_encoder;
|
nvfbc_params.use_software_video_encoder = use_software_video_encoder;
|
||||||
|
nvfbc_params.output_resolution = output_resolution;
|
||||||
capture = gsr_capture_nvfbc_create(&nvfbc_params);
|
capture = gsr_capture_nvfbc_create(&nvfbc_params);
|
||||||
if(!capture)
|
if(!capture)
|
||||||
_exit(1);
|
_exit(1);
|
||||||
@@ -2207,6 +2201,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr
|
|||||||
kms_params.record_cursor = record_cursor;
|
kms_params.record_cursor = record_cursor;
|
||||||
kms_params.hdr = video_codec_is_hdr(video_codec);
|
kms_params.hdr = video_codec_is_hdr(video_codec);
|
||||||
kms_params.fps = fps;
|
kms_params.fps = fps;
|
||||||
|
kms_params.output_resolution = output_resolution;
|
||||||
capture = gsr_capture_kms_create(&kms_params);
|
capture = gsr_capture_kms_create(&kms_params);
|
||||||
if(!capture)
|
if(!capture)
|
||||||
_exit(1);
|
_exit(1);
|
||||||
@@ -2230,10 +2225,10 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr
|
|||||||
xcomposite_params.egl = egl;
|
xcomposite_params.egl = egl;
|
||||||
xcomposite_params.window = src_window_id;
|
xcomposite_params.window = src_window_id;
|
||||||
xcomposite_params.follow_focused = follow_focused;
|
xcomposite_params.follow_focused = follow_focused;
|
||||||
xcomposite_params.region_size = region_size;
|
|
||||||
xcomposite_params.color_range = color_range;
|
xcomposite_params.color_range = color_range;
|
||||||
xcomposite_params.record_cursor = record_cursor;
|
xcomposite_params.record_cursor = record_cursor;
|
||||||
xcomposite_params.color_depth = color_depth;
|
xcomposite_params.color_depth = color_depth;
|
||||||
|
xcomposite_params.output_resolution = output_resolution;
|
||||||
capture = gsr_capture_xcomposite_create(&xcomposite_params);
|
capture = gsr_capture_xcomposite_create(&xcomposite_params);
|
||||||
if(!capture)
|
if(!capture)
|
||||||
_exit(1);
|
_exit(1);
|
||||||
@@ -2607,6 +2602,8 @@ static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec,
|
|||||||
}
|
}
|
||||||
|
|
||||||
int main(int argc, char **argv) {
|
int main(int argc, char **argv) {
|
||||||
|
setlocale(LC_ALL, "C"); // Sigh... stupid C
|
||||||
|
|
||||||
signal(SIGINT, stop_handler);
|
signal(SIGINT, stop_handler);
|
||||||
signal(SIGUSR1, save_replay_handler);
|
signal(SIGUSR1, save_replay_handler);
|
||||||
signal(SIGUSR2, toggle_pause_handler);
|
signal(SIGUSR2, toggle_pause_handler);
|
||||||
@@ -3144,13 +3141,25 @@ int main(int argc, char **argv) {
|
|||||||
usage();
|
usage();
|
||||||
}
|
}
|
||||||
|
|
||||||
const char *screen_region = args["-s"].value();
|
const char *output_resolution_str = args["-s"].value();
|
||||||
|
if(!output_resolution_str && strcmp(window_str.c_str(), "focused") == 0) {
|
||||||
if(screen_region && strcmp(window_str.c_str(), "focused") != 0) {
|
fprintf(stderr, "Error: option -s is required when using -w focused option\n");
|
||||||
fprintf(stderr, "Error: option -s is only available when using -w focused\n");
|
|
||||||
usage();
|
usage();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vec2i output_resolution = {0, 0};
|
||||||
|
if(output_resolution_str) {
|
||||||
|
if(sscanf(output_resolution_str, "%dx%d", &output_resolution.x, &output_resolution.y) != 2) {
|
||||||
|
fprintf(stderr, "Error: invalid value for option -s '%s', expected a value in format WxH\n", output_resolution_str);
|
||||||
|
usage();
|
||||||
|
}
|
||||||
|
|
||||||
|
if(output_resolution.x < 0 || output_resolution.y < 0) {
|
||||||
|
fprintf(stderr, "Error: invalud value for option -s '%s', expected width and height to be greater or equal to 0\n", output_resolution_str);
|
||||||
|
usage();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
bool is_livestream = false;
|
bool is_livestream = false;
|
||||||
const char *filename = args["-o"].value();
|
const char *filename = args["-o"].value();
|
||||||
if(filename) {
|
if(filename) {
|
||||||
@@ -3235,7 +3244,7 @@ int main(int argc, char **argv) {
|
|||||||
const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power);
|
const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power);
|
||||||
|
|
||||||
const gsr_color_depth color_depth = video_codec_to_bit_depth(video_codec);
|
const gsr_color_depth color_depth = video_codec_to_bit_depth(video_codec);
|
||||||
gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth);
|
gsr_capture *capture = create_capture_impl(window_str, output_resolution, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth);
|
||||||
|
|
||||||
// (Some?) livestreaming services require at least one audio track to work.
|
// (Some?) livestreaming services require at least one audio track to work.
|
||||||
// If not audio is provided then create one silent audio track.
|
// If not audio is provided then create one silent audio track.
|
||||||
|
|||||||
21
src/utils.c
21
src/utils.c
@@ -738,6 +738,8 @@ bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context,
|
|||||||
.height = dest_size.y
|
.height = dest_size.y
|
||||||
};
|
};
|
||||||
|
|
||||||
|
const bool scaled = dest_size.x != source_size.x || dest_size.y != source_size.y;
|
||||||
|
|
||||||
// Copying a surface to another surface will automatically perform the color conversion. Thanks vaapi!
|
// Copying a surface to another surface will automatically perform the color conversion. Thanks vaapi!
|
||||||
VAProcPipelineParameterBuffer params = {0};
|
VAProcPipelineParameterBuffer params = {0};
|
||||||
params.surface = input_surface_id;
|
params.surface = input_surface_id;
|
||||||
@@ -745,7 +747,7 @@ bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context,
|
|||||||
params.surface_region = &source_region;
|
params.surface_region = &source_region;
|
||||||
params.output_region = &output_region;
|
params.output_region = &output_region;
|
||||||
params.output_background_color = 0;
|
params.output_background_color = 0;
|
||||||
params.filter_flags = VA_FRAME_PICTURE;
|
params.filter_flags = scaled ? (VA_FILTER_SCALING_HQ | VA_FILTER_INTERPOLATION_BILINEAR) : 0;
|
||||||
params.pipeline_flags = VA_PROC_PIPELINE_FAST;
|
params.pipeline_flags = VA_PROC_PIPELINE_FAST;
|
||||||
|
|
||||||
params.input_color_properties.colour_primaries = 1;
|
params.input_color_properties.colour_primaries = 1;
|
||||||
@@ -877,3 +879,20 @@ bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i s
|
|||||||
|
|
||||||
return success;
|
return success;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vec2i scale_keep_aspect_ratio(vec2i from, vec2i to) {
|
||||||
|
if(from.x == 0 || from.y == 0)
|
||||||
|
return (vec2i){0, 0};
|
||||||
|
|
||||||
|
const double height_to_width_ratio = (double)from.y / (double)from.x;
|
||||||
|
from.x = to.x;
|
||||||
|
from.y = from.x * height_to_width_ratio;
|
||||||
|
|
||||||
|
if(from.y > to.y) {
|
||||||
|
const double width_height_ratio = (double)from.x / (double)from.y;
|
||||||
|
from.y = to.y;
|
||||||
|
from.x = from.y * width_height_ratio;
|
||||||
|
}
|
||||||
|
|
||||||
|
return from;
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user