mirror of
https://repo.dec05eba.com/gpu-screen-recorder
synced 2026-03-31 09:07:13 +09:00
Refactor kms_vaapi and kms_cuda
Also fixes color metadata, color range and hdr on nvidia wayland.
This commit is contained in:
3
build.sh
3
build.sh
@@ -29,6 +29,7 @@ build_gsr() {
|
||||
$CC -c src/capture/xcomposite_vaapi.c $opts $includes
|
||||
$CC -c src/capture/kms_vaapi.c $opts $includes
|
||||
$CC -c src/capture/kms_cuda.c $opts $includes
|
||||
$CC -c src/capture/kms.c $opts $includes
|
||||
$CC -c kms/client/kms_client.c $opts $includes
|
||||
$CC -c src/egl.c $opts $includes
|
||||
$CC -c src/cuda.c $opts $includes
|
||||
@@ -42,7 +43,7 @@ build_gsr() {
|
||||
$CXX -c src/sound.cpp $opts $includes
|
||||
$CXX -c src/main.cpp $opts $includes
|
||||
$CXX -o gpu-screen-recorder capture.o nvfbc.o kms_client.o egl.o cuda.o xnvctrl.o overclock.o window_texture.o shader.o \
|
||||
color_conversion.o utils.o library_loader.o xcomposite_cuda.o xcomposite_vaapi.o kms_vaapi.o kms_cuda.o sound.o main.o $libs $opts
|
||||
color_conversion.o utils.o library_loader.o xcomposite_cuda.o xcomposite_vaapi.o kms_vaapi.o kms_cuda.o kms.o sound.o main.o $libs $opts
|
||||
}
|
||||
|
||||
build_gsr_kms_server
|
||||
|
||||
@@ -1,17 +1,24 @@
|
||||
#ifndef GSR_CAPTURE_CAPTURE_H
|
||||
#define GSR_CAPTURE_CAPTURE_H
|
||||
|
||||
#include "../color_conversion.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
typedef struct AVCodecContext AVCodecContext;
|
||||
typedef struct AVFrame AVFrame;
|
||||
typedef void* VADisplay;
|
||||
typedef struct _VADRMPRIMESurfaceDescriptor VADRMPRIMESurfaceDescriptor;
|
||||
typedef struct gsr_cuda gsr_cuda;
|
||||
typedef struct AVFrame AVFrame;
|
||||
typedef struct CUgraphicsResource_st *CUgraphicsResource;
|
||||
typedef struct CUarray_st *CUarray;
|
||||
|
||||
typedef struct gsr_capture gsr_capture;
|
||||
|
||||
struct gsr_capture {
|
||||
/* These methods should not be called manually. Call gsr_capture_* instead */
|
||||
int (*start)(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
void (*tick)(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame); /* can be NULL */
|
||||
int (*start)(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame);
|
||||
void (*tick)(gsr_capture *cap, AVCodecContext *video_codec_context); /* can be NULL */
|
||||
bool (*should_stop)(gsr_capture *cap, bool *err); /* can be NULL */
|
||||
int (*capture)(gsr_capture *cap, AVFrame *frame);
|
||||
void (*capture_end)(gsr_capture *cap, AVFrame *frame); /* can be NULL */
|
||||
@@ -21,12 +28,34 @@ struct gsr_capture {
|
||||
bool started;
|
||||
};
|
||||
|
||||
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame);
|
||||
typedef struct gsr_capture_base gsr_capture_base;
|
||||
|
||||
struct gsr_capture_base {
|
||||
unsigned int input_texture;
|
||||
unsigned int target_textures[2];
|
||||
unsigned int cursor_texture;
|
||||
|
||||
gsr_color_conversion color_conversion;
|
||||
|
||||
AVCodecContext *video_codec_context;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
gsr_cuda *cuda;
|
||||
CUgraphicsResource *cuda_graphics_resources;
|
||||
CUarray *mapped_arrays;
|
||||
} gsr_cuda_context;
|
||||
|
||||
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame);
|
||||
void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
bool gsr_capture_should_stop(gsr_capture *cap, bool *err);
|
||||
int gsr_capture_capture(gsr_capture *cap, AVFrame *frame);
|
||||
void gsr_capture_end(gsr_capture *cap, AVFrame *frame);
|
||||
/* Calls |gsr_capture_stop| as well */
|
||||
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
|
||||
bool gsr_capture_base_setup_vaapi_textures(gsr_capture_base *self, AVFrame *frame, gsr_egl *egl, VADisplay va_dpy, VADRMPRIMESurfaceDescriptor *prime, gsr_color_range color_range);
|
||||
bool gsr_capture_base_setup_cuda_textures(gsr_capture_base *base, AVFrame *frame, gsr_cuda_context *cuda_context, gsr_egl *egl, gsr_color_range color_range, bool hdr);
|
||||
void gsr_capture_base_stop(gsr_capture_base *self, gsr_egl *egl);
|
||||
|
||||
#endif /* GSR_CAPTURE_CAPTURE_H */
|
||||
|
||||
48
include/capture/kms.h
Normal file
48
include/capture/kms.h
Normal file
@@ -0,0 +1,48 @@
|
||||
#ifndef GSR_CAPTURE_KMS_H
|
||||
#define GSR_CAPTURE_KMS_H
|
||||
|
||||
#include "../../kms/client/kms_client.h"
|
||||
#include "../color_conversion.h"
|
||||
#include "../vec2.h"
|
||||
#include "../defs.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
typedef struct gsr_capture_base gsr_capture_base;
|
||||
typedef struct AVCodecContext AVCodecContext;
|
||||
typedef struct AVMasteringDisplayMetadata AVMasteringDisplayMetadata;
|
||||
typedef struct AVContentLightMetadata AVContentLightMetadata;
|
||||
typedef struct gsr_capture_kms gsr_capture_kms;
|
||||
typedef struct gsr_egl gsr_egl;
|
||||
typedef struct AVFrame AVFrame;
|
||||
|
||||
#define MAX_CONNECTOR_IDS 32
|
||||
|
||||
typedef struct {
|
||||
uint32_t connector_ids[MAX_CONNECTOR_IDS];
|
||||
int num_connector_ids;
|
||||
} MonitorId;
|
||||
|
||||
struct gsr_capture_kms {
|
||||
bool should_stop;
|
||||
bool stop_is_error;
|
||||
|
||||
gsr_kms_client kms_client;
|
||||
gsr_kms_response kms_response;
|
||||
|
||||
vec2i capture_pos;
|
||||
vec2i capture_size;
|
||||
MonitorId monitor_id;
|
||||
|
||||
AVMasteringDisplayMetadata *mastering_display_metadata;
|
||||
AVContentLightMetadata *light_metadata;
|
||||
|
||||
gsr_monitor_rotation monitor_rotation;
|
||||
};
|
||||
|
||||
/* Returns 0 on success */
|
||||
int gsr_capture_kms_start(gsr_capture_kms *self, gsr_capture_base *base, const char *display_to_capture, gsr_egl *egl, AVCodecContext *video_codec_context);
|
||||
void gsr_capture_kms_stop(gsr_capture_kms *self);
|
||||
bool gsr_capture_kms_capture(gsr_capture_kms *self, gsr_capture_base *base, AVFrame *frame, gsr_egl *egl, bool hdr, bool screen_plane_use_modifiers, bool cursor_texture_is_external);
|
||||
void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self);
|
||||
|
||||
#endif /* GSR_CAPTURE_KMS_H */
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include "../vec2.h"
|
||||
#include "../utils.h"
|
||||
#include "../color_conversion.h"
|
||||
#include "capture.h"
|
||||
|
||||
typedef struct _XDisplay Display;
|
||||
@@ -12,6 +13,7 @@ typedef struct {
|
||||
const char *display_to_capture; /* if this is "screen", then the first monitor is captured. A copy is made of this */
|
||||
gsr_gpu_info gpu_inf;
|
||||
bool hdr;
|
||||
gsr_color_range color_range;
|
||||
} gsr_capture_kms_cuda_params;
|
||||
|
||||
gsr_capture* gsr_capture_kms_cuda_create(const gsr_capture_kms_cuda_params *params);
|
||||
|
||||
@@ -12,7 +12,6 @@ typedef struct {
|
||||
gsr_egl *egl;
|
||||
const char *display_to_capture; /* if this is "screen", then the first monitor is captured. A copy is made of this */
|
||||
gsr_gpu_info gpu_inf;
|
||||
bool wayland;
|
||||
bool hdr;
|
||||
gsr_color_range color_range;
|
||||
} gsr_capture_kms_vaapi_params;
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include "shader.h"
|
||||
#include "vec2.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
typedef enum {
|
||||
GSR_COLOR_RANGE_LIMITED,
|
||||
|
||||
@@ -73,7 +73,8 @@ typedef CUDA_MEMCPY2D_v2 CUDA_MEMCPY2D;
|
||||
|
||||
typedef struct CUgraphicsResource_st *CUgraphicsResource;
|
||||
|
||||
typedef struct {
|
||||
typedef struct gsr_cuda gsr_cuda;
|
||||
struct gsr_cuda {
|
||||
gsr_overclock overclock;
|
||||
bool do_overclock;
|
||||
|
||||
@@ -88,8 +89,9 @@ typedef struct {
|
||||
CUresult (*cuCtxPushCurrent_v2)(CUcontext ctx);
|
||||
CUresult (*cuCtxPopCurrent_v2)(CUcontext *pctx);
|
||||
CUresult (*cuGetErrorString)(CUresult error, const char **pStr);
|
||||
CUresult (*cuMemsetD8_v2)(CUdeviceptr dstDevice, unsigned char uc, size_t N);
|
||||
CUresult (*cuMemcpy2D_v2)(const CUDA_MEMCPY2D *pCopy);
|
||||
CUresult (*cuMemcpy2DAsync_v2)(const CUDA_MEMCPY2D *pcopy, CUstream hStream);
|
||||
CUresult (*cuStreamSynchronize)(CUstream hStream);
|
||||
|
||||
CUresult (*cuGraphicsGLRegisterImage)(CUgraphicsResource *pCudaResource, unsigned int image, unsigned int target, unsigned int flags);
|
||||
CUresult (*cuGraphicsEGLRegisterImage)(CUgraphicsResource *pCudaResource, void *image, unsigned int flags);
|
||||
@@ -98,7 +100,7 @@ typedef struct {
|
||||
CUresult (*cuGraphicsUnmapResources)(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
|
||||
CUresult (*cuGraphicsUnregisterResource)(CUgraphicsResource resource);
|
||||
CUresult (*cuGraphicsSubResourceGetMappedArray)(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel);
|
||||
} gsr_cuda;
|
||||
};
|
||||
|
||||
bool gsr_cuda_load(gsr_cuda *self, Display *display, bool overclock);
|
||||
void gsr_cuda_unload(gsr_cuda *self);
|
||||
|
||||
28
include/defs.h
Normal file
28
include/defs.h
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef GSR_DEFS_H
|
||||
#define GSR_DEFS_H
|
||||
|
||||
typedef enum {
|
||||
GSR_GPU_VENDOR_AMD,
|
||||
GSR_GPU_VENDOR_INTEL,
|
||||
GSR_GPU_VENDOR_NVIDIA
|
||||
} gsr_gpu_vendor;
|
||||
|
||||
typedef struct {
|
||||
gsr_gpu_vendor vendor;
|
||||
int gpu_version; /* 0 if unknown */
|
||||
} gsr_gpu_info;
|
||||
|
||||
typedef enum {
|
||||
GSR_MONITOR_ROT_0,
|
||||
GSR_MONITOR_ROT_90,
|
||||
GSR_MONITOR_ROT_180,
|
||||
GSR_MONITOR_ROT_270
|
||||
} gsr_monitor_rotation;
|
||||
|
||||
typedef enum {
|
||||
GSR_CONNECTION_X11,
|
||||
GSR_CONNECTION_WAYLAND,
|
||||
GSR_CONNECTION_DRM
|
||||
} gsr_connection_type;
|
||||
|
||||
#endif /* GSR_DEFS_H */
|
||||
@@ -69,10 +69,16 @@ typedef void (*__eglMustCastToProperFunctionPointerType)(void);
|
||||
#define GL_TRUE 1
|
||||
#define GL_TRIANGLES 0x0004
|
||||
#define GL_TEXTURE_2D 0x0DE1
|
||||
#define GL_TEXTURE_EXTERNAL_OES 0x8D65 // TODO: Use this where applicable
|
||||
#define GL_TEXTURE_EXTERNAL_OES 0x8D65
|
||||
#define GL_RED 0x1903
|
||||
#define GL_RG 0x8227
|
||||
#define GL_RGB 0x1907
|
||||
#define GL_RGBA 0x1908
|
||||
#define GL_RGBA8 0x8058
|
||||
#define GL_R8 0x8229
|
||||
#define GL_RG8 0x822B
|
||||
#define GL_R16 0x822A
|
||||
#define GL_RG16 0x822C
|
||||
#define GL_UNSIGNED_BYTE 0x1401
|
||||
#define GL_COLOR_BUFFER_BIT 0x00004000
|
||||
#define GL_TEXTURE_WRAP_S 0x2802
|
||||
@@ -133,7 +139,8 @@ typedef struct {
|
||||
int num_outputs;
|
||||
} gsr_wayland;
|
||||
|
||||
typedef struct {
|
||||
typedef struct gsr_egl gsr_egl;
|
||||
struct gsr_egl {
|
||||
void *egl_library;
|
||||
void *gl_library;
|
||||
|
||||
@@ -217,7 +224,7 @@ typedef struct {
|
||||
int (*glGetUniformLocation)(unsigned int program, const char *name);
|
||||
void (*glUniform1f)(int location, float v0);
|
||||
void (*glUniform2f)(int location, float v0, float v1);
|
||||
} gsr_egl;
|
||||
};
|
||||
|
||||
bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland);
|
||||
void gsr_egl_unload(gsr_egl *self);
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef GSR_SHADER_H
|
||||
#define GSR_SHADER_H
|
||||
|
||||
#include "egl.h"
|
||||
typedef struct gsr_egl gsr_egl;
|
||||
|
||||
typedef struct {
|
||||
gsr_egl *egl;
|
||||
|
||||
@@ -3,28 +3,11 @@
|
||||
|
||||
#include "vec2.h"
|
||||
#include "../include/egl.h"
|
||||
#include "../include/defs.h"
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <X11/extensions/Xrandr.h>
|
||||
|
||||
typedef enum {
|
||||
GSR_GPU_VENDOR_AMD,
|
||||
GSR_GPU_VENDOR_INTEL,
|
||||
GSR_GPU_VENDOR_NVIDIA
|
||||
} gsr_gpu_vendor;
|
||||
|
||||
typedef struct {
|
||||
gsr_gpu_vendor vendor;
|
||||
int gpu_version; /* 0 if unknown */
|
||||
} gsr_gpu_info;
|
||||
|
||||
typedef enum {
|
||||
GSR_MONITOR_ROT_0,
|
||||
GSR_MONITOR_ROT_90,
|
||||
GSR_MONITOR_ROT_180,
|
||||
GSR_MONITOR_ROT_270
|
||||
} gsr_monitor_rotation;
|
||||
|
||||
typedef struct {
|
||||
const char *name;
|
||||
int name_len;
|
||||
@@ -36,12 +19,6 @@ typedef struct {
|
||||
uint32_t monitor_identifier; /* Only on drm and wayland */
|
||||
} gsr_monitor;
|
||||
|
||||
typedef enum {
|
||||
GSR_CONNECTION_X11,
|
||||
GSR_CONNECTION_WAYLAND,
|
||||
GSR_CONNECTION_DRM
|
||||
} gsr_connection_type;
|
||||
|
||||
typedef struct {
|
||||
const char *name;
|
||||
int name_len;
|
||||
|
||||
@@ -5,13 +5,15 @@
|
||||
#include <sys/types.h>
|
||||
#include <limits.h>
|
||||
|
||||
typedef struct {
|
||||
typedef struct gsr_kms_client gsr_kms_client;
|
||||
|
||||
struct gsr_kms_client {
|
||||
pid_t kms_server_pid;
|
||||
int initial_socket_fd;
|
||||
int initial_client_fd;
|
||||
char initial_socket_path[PATH_MAX];
|
||||
int socket_pair[2];
|
||||
} gsr_kms_client;
|
||||
};
|
||||
|
||||
/* |card_path| should be a path to card, for example /dev/dri/card0 */
|
||||
int gsr_kms_client_init(gsr_kms_client *self, const char *card_path);
|
||||
|
||||
@@ -8,6 +8,9 @@
|
||||
#define GSR_KMS_PROTOCOL_VERSION 2
|
||||
#define GSR_KMS_MAX_PLANES 10
|
||||
|
||||
typedef struct gsr_kms_response_fd gsr_kms_response_fd;
|
||||
typedef struct gsr_kms_response gsr_kms_response;
|
||||
|
||||
typedef enum {
|
||||
KMS_REQUEST_TYPE_REPLACE_CONNECTION,
|
||||
KMS_REQUEST_TYPE_GET_KMS
|
||||
@@ -27,7 +30,7 @@ typedef struct {
|
||||
int new_connection_fd;
|
||||
} gsr_kms_request;
|
||||
|
||||
typedef struct {
|
||||
struct gsr_kms_response_fd {
|
||||
int fd;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
@@ -44,14 +47,14 @@ typedef struct {
|
||||
int src_w;
|
||||
int src_h;
|
||||
struct hdr_output_metadata hdr_metadata;
|
||||
} gsr_kms_response_fd;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
struct gsr_kms_response {
|
||||
uint32_t version; /* GSR_KMS_PROTOCOL_VERSION */
|
||||
int result; /* gsr_kms_result */
|
||||
char err_msg[128];
|
||||
gsr_kms_response_fd fds[GSR_KMS_MAX_PLANES];
|
||||
int num_fds;
|
||||
} gsr_kms_response;
|
||||
};
|
||||
|
||||
#endif /* #define GSR_KMS_SHARED_H */
|
||||
|
||||
@@ -1,25 +1,35 @@
|
||||
#include "../../include/capture/capture.h"
|
||||
#include "../../include/egl.h"
|
||||
#include "../../include/cuda.h"
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <va/va.h>
|
||||
#include <va/va_drmcommon.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
|
||||
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
#define FOURCC_NV12 842094158
|
||||
#define FOURCC_P010 808530000
|
||||
|
||||
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
if(cap->started)
|
||||
return -1;
|
||||
|
||||
int res = cap->start(cap, video_codec_context);
|
||||
int res = cap->start(cap, video_codec_context, frame);
|
||||
if(res == 0)
|
||||
cap->started = true;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
if(!cap->started) {
|
||||
fprintf(stderr, "gsr error: gsp_capture_tick failed: the gsr capture has not been started\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if(cap->tick)
|
||||
cap->tick(cap, video_codec_context, frame);
|
||||
cap->tick(cap, video_codec_context);
|
||||
}
|
||||
|
||||
bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
|
||||
@@ -57,3 +67,237 @@ void gsr_capture_end(gsr_capture *cap, AVFrame *frame) {
|
||||
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
cap->destroy(cap, video_codec_context);
|
||||
}
|
||||
|
||||
static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
|
||||
return (d << 24) | (c << 16) | (b << 8) | a;
|
||||
}
|
||||
|
||||
bool gsr_capture_base_setup_vaapi_textures(gsr_capture_base *self, AVFrame *frame, gsr_egl *egl, VADisplay va_dpy, VADRMPRIMESurfaceDescriptor *prime, gsr_color_range color_range) {
|
||||
const int res = av_hwframe_get_buffer(self->video_codec_context->hw_frames_ctx, frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: av_hwframe_get_buffer failed: %d\n", res);
|
||||
return false;
|
||||
}
|
||||
|
||||
VASurfaceID target_surface_id = (uintptr_t)frame->data[3];
|
||||
|
||||
VAStatus va_status = vaExportSurfaceHandle(va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, prime);
|
||||
if(va_status != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: vaExportSurfaceHandle failed, error: %d\n", va_status);
|
||||
return false;
|
||||
}
|
||||
vaSyncSurface(va_dpy, target_surface_id);
|
||||
|
||||
egl->glGenTextures(1, &self->input_texture);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
egl->glGenTextures(1, &self->cursor_texture);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, self->cursor_texture);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
const uint32_t formats_nv12[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
|
||||
const uint32_t formats_p010[2] = { fourcc('R', '1', '6', ' '), fourcc('G', 'R', '3', '2') };
|
||||
|
||||
if(prime->fourcc == FOURCC_NV12 || prime->fourcc == FOURCC_P010) {
|
||||
const uint32_t *formats = prime->fourcc == FOURCC_NV12 ? formats_nv12 : formats_p010;
|
||||
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
|
||||
|
||||
egl->glGenTextures(2, self->target_textures);
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
const int layer = i;
|
||||
const int plane = 0;
|
||||
|
||||
//const uint64_t modifier = prime->objects[prime->layers[layer].object_index[plane]].drm_format_modifier;
|
||||
|
||||
const intptr_t img_attr[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, formats[i],
|
||||
EGL_WIDTH, prime->width / div[i],
|
||||
EGL_HEIGHT, prime->height / div[i],
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, prime->objects[prime->layers[layer].object_index[plane]].fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, prime->layers[layer].offset[plane],
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, prime->layers[layer].pitch[plane],
|
||||
// TODO:
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
while(egl->eglGetError() != EGL_SUCCESS){}
|
||||
EGLImage image = egl->eglCreateImage(egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
if(!image) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create egl image from drm fd for output drm fd, error: %d\n", egl->eglGetError());
|
||||
return false;
|
||||
}
|
||||
|
||||
egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
|
||||
while(egl->glGetError()) {}
|
||||
while(egl->eglGetError() != EGL_SUCCESS){}
|
||||
egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
if(egl->glGetError() != 0 || egl->eglGetError() != EGL_SUCCESS) {
|
||||
// TODO: Get the error properly
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to bind egl image to gl texture, error: %d\n", egl->eglGetError());
|
||||
egl->eglDestroyImage(egl->egl_display, image);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return false;
|
||||
}
|
||||
|
||||
egl->eglDestroyImage(egl->egl_display, image);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
}
|
||||
|
||||
gsr_color_conversion_params color_conversion_params = {0};
|
||||
color_conversion_params.color_range = color_range;
|
||||
color_conversion_params.egl = egl;
|
||||
color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
|
||||
if(prime->fourcc == FOURCC_NV12)
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
|
||||
else
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;
|
||||
|
||||
color_conversion_params.destination_textures[0] = self->target_textures[0];
|
||||
color_conversion_params.destination_textures[1] = self->target_textures[1];
|
||||
color_conversion_params.num_destination_textures = 2;
|
||||
|
||||
if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create color conversion\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
} else {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: unexpected fourcc %u for output drm fd, expected nv12 or p010\n", prime->fourcc);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format) {
|
||||
unsigned int texture_id = 0;
|
||||
egl->glGenTextures(1, &texture_id);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||
egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
|
||||
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
|
||||
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return texture_id;
|
||||
}
|
||||
|
||||
static bool cuda_register_opengl_texture(gsr_cuda *cuda, CUgraphicsResource *cuda_graphics_resource, CUarray *mapped_array, unsigned int texture_id) {
|
||||
CUresult res;
|
||||
CUcontext old_ctx;
|
||||
res = cuda->cuCtxPushCurrent_v2(cuda->cu_ctx);
|
||||
res = cuda->cuGraphicsGLRegisterImage(cuda_graphics_resource, texture_id, GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_NONE);
|
||||
if (res != CUDA_SUCCESS) {
|
||||
const char *err_str = "unknown";
|
||||
cuda->cuGetErrorString(res, &err_str);
|
||||
fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsGLRegisterImage failed, error: %s, texture " "id: %u\n", err_str, texture_id);
|
||||
res = cuda->cuCtxPopCurrent_v2(&old_ctx);
|
||||
return false;
|
||||
}
|
||||
|
||||
res = cuda->cuGraphicsResourceSetMapFlags(*cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
|
||||
res = cuda->cuGraphicsMapResources(1, cuda_graphics_resource, 0);
|
||||
|
||||
res = cuda->cuGraphicsSubResourceGetMappedArray(mapped_array, *cuda_graphics_resource, 0, 0);
|
||||
res = cuda->cuCtxPopCurrent_v2(&old_ctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool gsr_capture_base_setup_cuda_textures(gsr_capture_base *base, AVFrame *frame, gsr_cuda_context *cuda_context, gsr_egl *egl, gsr_color_range color_range, bool hdr) {
|
||||
// TODO:
|
||||
const int res = av_hwframe_get_buffer(base->video_codec_context->hw_frames_ctx, frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: av_hwframe_get_buffer failed: %d\n", res);
|
||||
return false;
|
||||
}
|
||||
|
||||
egl->glGenTextures(1, &base->input_texture);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, base->input_texture);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
egl->glGenTextures(1, &base->cursor_texture);
|
||||
egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, base->cursor_texture);
|
||||
egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
|
||||
|
||||
const unsigned int internal_formats_nv12[2] = { GL_R8, GL_RG8 };
|
||||
const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
|
||||
const unsigned int formats[2] = { GL_RED, GL_RG };
|
||||
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
|
||||
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
base->target_textures[i] = gl_create_texture(egl, base->video_codec_context->width / div[i], base->video_codec_context->height / div[i], !hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
|
||||
if(base->target_textures[i] == 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create opengl texture\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(!cuda_register_opengl_texture(cuda_context->cuda, &cuda_context->cuda_graphics_resources[i], &cuda_context->mapped_arrays[i], base->target_textures[i])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
gsr_color_conversion_params color_conversion_params = {0};
|
||||
color_conversion_params.color_range = color_range;
|
||||
color_conversion_params.egl = egl;
|
||||
color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
|
||||
if(!hdr)
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
|
||||
else
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;
|
||||
|
||||
color_conversion_params.destination_textures[0] = base->target_textures[0];
|
||||
color_conversion_params.destination_textures[1] = base->target_textures[1];
|
||||
color_conversion_params.num_destination_textures = 2;
|
||||
|
||||
if(gsr_color_conversion_init(&base->color_conversion, &color_conversion_params) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create color conversion\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void gsr_capture_base_stop(gsr_capture_base *self, gsr_egl *egl) {
|
||||
gsr_color_conversion_deinit(&self->color_conversion);
|
||||
|
||||
if(egl->egl_context) {
|
||||
if(self->input_texture) {
|
||||
egl->glDeleteTextures(1, &self->input_texture);
|
||||
self->input_texture = 0;
|
||||
}
|
||||
|
||||
if(self->cursor_texture) {
|
||||
egl->glDeleteTextures(1, &self->cursor_texture);
|
||||
self->cursor_texture = 0;
|
||||
}
|
||||
|
||||
egl->glDeleteTextures(2, self->target_textures);
|
||||
self->target_textures[0] = 0;
|
||||
self->target_textures[1] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
363
src/capture/kms.c
Normal file
363
src/capture/kms.c
Normal file
@@ -0,0 +1,363 @@
|
||||
#include "../../include/capture/kms.h"
|
||||
#include "../../include/capture/capture.h"
|
||||
#include "../../include/utils.h"
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/mastering_display_metadata.h>
|
||||
|
||||
#define HDMI_STATIC_METADATA_TYPE1 0
|
||||
#define HDMI_EOTF_SMPTE_ST2084 2
|
||||
|
||||
/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
|
||||
|
||||
typedef struct {
|
||||
MonitorId *monitor_id;
|
||||
const char *monitor_to_capture;
|
||||
int monitor_to_capture_len;
|
||||
int num_monitors;
|
||||
} MonitorCallbackUserdata;
|
||||
|
||||
static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
|
||||
MonitorCallbackUserdata *monitor_callback_userdata = userdata;
|
||||
++monitor_callback_userdata->num_monitors;
|
||||
|
||||
if(monitor_callback_userdata->monitor_to_capture_len != monitor->name_len || memcmp(monitor_callback_userdata->monitor_to_capture, monitor->name, monitor->name_len) != 0)
|
||||
return;
|
||||
|
||||
if(monitor_callback_userdata->monitor_id->num_connector_ids < MAX_CONNECTOR_IDS) {
|
||||
monitor_callback_userdata->monitor_id->connector_ids[monitor_callback_userdata->monitor_id->num_connector_ids] = monitor->connector_id;
|
||||
++monitor_callback_userdata->monitor_id->num_connector_ids;
|
||||
}
|
||||
|
||||
if(monitor_callback_userdata->monitor_id->num_connector_ids == MAX_CONNECTOR_IDS)
|
||||
fprintf(stderr, "gsr warning: reached max connector ids\n");
|
||||
}
|
||||
|
||||
static int max_int(int a, int b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
int gsr_capture_kms_start(gsr_capture_kms *self, gsr_capture_base *base, const char *display_to_capture, gsr_egl *egl, AVCodecContext *video_codec_context) {
|
||||
base->video_codec_context = video_codec_context;
|
||||
|
||||
gsr_monitor monitor;
|
||||
self->monitor_id.num_connector_ids = 0;
|
||||
|
||||
int kms_init_res = gsr_kms_client_init(&self->kms_client, egl->card_path);
|
||||
if(kms_init_res != 0)
|
||||
return kms_init_res;
|
||||
|
||||
MonitorCallbackUserdata monitor_callback_userdata = {
|
||||
&self->monitor_id,
|
||||
display_to_capture, strlen(display_to_capture),
|
||||
0,
|
||||
};
|
||||
for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
|
||||
|
||||
if(!get_monitor_by_name(egl, GSR_CONNECTION_DRM, display_to_capture, &monitor)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", display_to_capture);
|
||||
return -1;
|
||||
}
|
||||
|
||||
monitor.name = display_to_capture;
|
||||
self->monitor_rotation = drm_monitor_get_display_server_rotation(egl, &monitor);
|
||||
|
||||
self->capture_pos = monitor.pos;
|
||||
if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
|
||||
self->capture_size.x = monitor.size.y;
|
||||
self->capture_size.y = monitor.size.x;
|
||||
} else {
|
||||
self->capture_size = monitor.size;
|
||||
}
|
||||
|
||||
/* Disable vsync */
|
||||
egl->eglSwapInterval(egl->egl_display, 0);
|
||||
|
||||
base->video_codec_context->width = max_int(2, even_number_ceil(self->capture_size.x));
|
||||
base->video_codec_context->height = max_int(2, even_number_ceil(self->capture_size.y));
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gsr_capture_kms_stop(gsr_capture_kms *self) {
|
||||
gsr_capture_kms_cleanup_kms_fds(self);
|
||||
gsr_kms_client_deinit(&self->kms_client);
|
||||
}
|
||||
|
||||
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
|
||||
switch(rot) {
|
||||
case GSR_MONITOR_ROT_0: return 0.0f;
|
||||
case GSR_MONITOR_ROT_90: return M_PI_2;
|
||||
case GSR_MONITOR_ROT_180: return M_PI;
|
||||
case GSR_MONITOR_ROT_270: return M_PI + M_PI_2;
|
||||
}
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
/* Prefer non combined planes */
|
||||
static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
|
||||
int index_combined = -1;
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
|
||||
if(kms_response->fds[i].is_combined_plane)
|
||||
index_combined = i;
|
||||
else
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
}
|
||||
|
||||
if(index_combined != -1)
|
||||
return &kms_response->fds[index_combined];
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
|
||||
if(kms_response->num_fds == 0)
|
||||
return NULL;
|
||||
|
||||
int64_t largest_size = 0;
|
||||
gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
|
||||
if(size > largest_size && !kms_response->fds[i].is_cursor) {
|
||||
largest_size = size;
|
||||
largest_drm = &kms_response->fds[i];
|
||||
}
|
||||
}
|
||||
return largest_drm;
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].is_cursor)
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
|
||||
return hdr_metadata->metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
|
||||
}
|
||||
|
||||
static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
|
||||
if(!self->mastering_display_metadata)
|
||||
self->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);
|
||||
|
||||
if(!self->light_metadata)
|
||||
self->light_metadata = av_content_light_metadata_create_side_data(frame);
|
||||
|
||||
if(self->mastering_display_metadata) {
|
||||
for(int i = 0; i < 3; ++i) {
|
||||
self->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
|
||||
self->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
|
||||
}
|
||||
|
||||
self->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
|
||||
self->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
|
||||
|
||||
self->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
|
||||
self->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
|
||||
|
||||
self->mastering_display_metadata->has_primaries = self->mastering_display_metadata->display_primaries[0][0].num > 0;
|
||||
self->mastering_display_metadata->has_luminance = self->mastering_display_metadata->max_luminance.num > 0;
|
||||
}
|
||||
|
||||
if(self->light_metadata) {
|
||||
self->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
|
||||
self->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
|
||||
}
|
||||
}
|
||||
|
||||
static vec2i swap_vec2i(vec2i value) {
|
||||
int tmp = value.x;
|
||||
value.x = value.y;
|
||||
value.y = tmp;
|
||||
return value;
|
||||
}
|
||||
|
||||
bool gsr_capture_kms_capture(gsr_capture_kms *self, gsr_capture_base *base, AVFrame *frame, gsr_egl *egl, bool hdr, bool screen_plane_use_modifiers, bool cursor_texture_is_external) {
|
||||
egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
egl->glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
gsr_capture_kms_cleanup_kms_fds(self);
|
||||
|
||||
gsr_kms_response_fd *drm_fd = NULL;
|
||||
gsr_kms_response_fd *cursor_drm_fd = NULL;
|
||||
bool capture_is_combined_plane = false;
|
||||
|
||||
if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
|
||||
return false;
|
||||
}
|
||||
|
||||
if(self->kms_response.num_fds == 0) {
|
||||
static bool error_shown = false;
|
||||
if(!error_shown) {
|
||||
error_shown = true;
|
||||
fprintf(stderr, "gsr error: no drm found, capture will fail\n");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
|
||||
drm_fd = find_drm_by_connector_id(&self->kms_response, self->monitor_id.connector_ids[i]);
|
||||
if(drm_fd)
|
||||
break;
|
||||
}
|
||||
|
||||
// Will never happen on wayland unless the target monitor has been disconnected
|
||||
if(!drm_fd) {
|
||||
drm_fd = find_first_combined_drm(&self->kms_response);
|
||||
if(!drm_fd)
|
||||
drm_fd = find_largest_drm(&self->kms_response);
|
||||
capture_is_combined_plane = true;
|
||||
}
|
||||
|
||||
cursor_drm_fd = find_cursor_drm(&self->kms_response);
|
||||
|
||||
if(!drm_fd)
|
||||
return false;
|
||||
|
||||
if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
|
||||
cursor_drm_fd = NULL;
|
||||
|
||||
if(drm_fd->has_hdr_metadata && hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
|
||||
gsr_kms_set_hdr_metadata(self, frame, drm_fd);
|
||||
|
||||
// TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
|
||||
// Even ffmpeg kmsgrab causes this crash. The error is:
|
||||
// amdgpu: Failed to allocate a buffer:
|
||||
// amdgpu: size : 28508160 bytes
|
||||
// amdgpu: alignment : 2097152 bytes
|
||||
// amdgpu: domains : 4
|
||||
// amdgpu: flags : 4
|
||||
// amdgpu: Failed to allocate a buffer:
|
||||
// amdgpu: size : 28508160 bytes
|
||||
// amdgpu: alignment : 2097152 bytes
|
||||
// amdgpu: domains : 4
|
||||
// amdgpu: flags : 4
|
||||
// EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
|
||||
// [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
|
||||
// [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
|
||||
// Error: avcodec_send_frame failed, error: Input/output error
|
||||
// Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
|
||||
// kms server info: kms client shutdown, shutting down the server
|
||||
intptr_t img_attr[18] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
|
||||
EGL_WIDTH, drm_fd->width,
|
||||
EGL_HEIGHT, drm_fd->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
|
||||
};
|
||||
|
||||
if(screen_plane_use_modifiers) {
|
||||
img_attr[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
|
||||
img_attr[13] = drm_fd->modifier & 0xFFFFFFFFULL;
|
||||
|
||||
img_attr[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
|
||||
img_attr[15] = drm_fd->modifier >> 32ULL;
|
||||
|
||||
img_attr[16] = EGL_NONE;
|
||||
img_attr[17] = EGL_NONE;
|
||||
} else {
|
||||
img_attr[12] = EGL_NONE;
|
||||
img_attr[13] = EGL_NONE;
|
||||
}
|
||||
|
||||
EGLImage image = egl->eglCreateImage(egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, base->input_texture);
|
||||
egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
egl->eglDestroyImage(egl->egl_display, image);
|
||||
egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
vec2i capture_pos = self->capture_pos;
|
||||
if(!capture_is_combined_plane)
|
||||
capture_pos = (vec2i){drm_fd->x, drm_fd->y};
|
||||
|
||||
const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
|
||||
|
||||
gsr_color_conversion_draw(&base->color_conversion, base->input_texture,
|
||||
(vec2i){0, 0}, self->capture_size,
|
||||
capture_pos, self->capture_size,
|
||||
texture_rotation, false);
|
||||
|
||||
if(cursor_drm_fd) {
|
||||
const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
|
||||
vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
|
||||
switch(self->monitor_rotation) {
|
||||
case GSR_MONITOR_ROT_0:
|
||||
break;
|
||||
case GSR_MONITOR_ROT_90:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.x = self->capture_size.x - cursor_pos.x;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_180:
|
||||
cursor_pos.x = self->capture_size.x - cursor_pos.x;
|
||||
cursor_pos.y = self->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_270:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.y = self->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
}
|
||||
|
||||
const intptr_t img_attr_cursor[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
|
||||
EGL_WIDTH, cursor_drm_fd->width,
|
||||
EGL_HEIGHT, cursor_drm_fd->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
EGLImage cursor_image = egl->eglCreateImage(egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
|
||||
const int target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
|
||||
egl->glBindTexture(target, base->cursor_texture);
|
||||
egl->glEGLImageTargetTexture2DOES(target, cursor_image);
|
||||
egl->eglDestroyImage(egl->egl_display, cursor_image);
|
||||
egl->glBindTexture(target, 0);
|
||||
|
||||
gsr_color_conversion_draw(&base->color_conversion, base->cursor_texture,
|
||||
cursor_pos, cursor_size,
|
||||
(vec2i){0, 0}, cursor_size,
|
||||
texture_rotation, false);
|
||||
}
|
||||
|
||||
egl->eglSwapBuffers(egl->egl_display, egl->egl_surface);
|
||||
//egl->glFlush();
|
||||
//egl->glFinish();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
|
||||
for(int i = 0; i < self->kms_response.num_fds; ++i) {
|
||||
if(self->kms_response.fds[i].fd > 0)
|
||||
close(self->kms_response.fds[i].fd);
|
||||
self->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
self->kms_response.num_fds = 0;
|
||||
}
|
||||
@@ -1,7 +1,5 @@
|
||||
#include "../../include/capture/kms_cuda.h"
|
||||
#include "../../kms/client/kms_client.h"
|
||||
#include "../../include/utils.h"
|
||||
#include "../../include/color_conversion.h"
|
||||
#include "../../include/capture/kms.h"
|
||||
#include "../../include/cuda.h"
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
@@ -9,60 +7,20 @@
|
||||
#include <assert.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_cuda.h>
|
||||
#include <libavutil/mastering_display_metadata.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
|
||||
/*
|
||||
TODO: Use dummy pool for cuda buffer so we can create our own cuda buffers from pixel buffer objects
|
||||
and copy the input textures to the pixel buffer objects. Use sw_format NV12 as well. Then this is
|
||||
similar to kms_vaapi. This allows us to remove one extra texture and texture copy.
|
||||
*/
|
||||
// TODO: Wayland capture
|
||||
|
||||
#define MAX_CONNECTOR_IDS 32
|
||||
|
||||
typedef struct {
|
||||
uint32_t connector_ids[MAX_CONNECTOR_IDS];
|
||||
int num_connector_ids;
|
||||
} MonitorId;
|
||||
gsr_capture_base base;
|
||||
gsr_capture_kms kms;
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_kms_cuda_params params;
|
||||
|
||||
bool should_stop;
|
||||
bool stop_is_error;
|
||||
bool created_hw_frame;
|
||||
|
||||
gsr_cuda cuda;
|
||||
|
||||
gsr_kms_client kms_client;
|
||||
gsr_kms_response kms_response;
|
||||
|
||||
vec2i capture_pos;
|
||||
vec2i capture_size;
|
||||
MonitorId monitor_id;
|
||||
|
||||
CUgraphicsResource cuda_graphics_resource;
|
||||
CUarray mapped_array;
|
||||
|
||||
unsigned int input_texture;
|
||||
unsigned int cursor_texture;
|
||||
unsigned int target_texture;
|
||||
|
||||
gsr_color_conversion color_conversion;
|
||||
|
||||
AVCodecContext *video_codec_context;
|
||||
AVMasteringDisplayMetadata *mastering_display_metadata;
|
||||
AVContentLightMetadata *light_metadata;
|
||||
|
||||
gsr_monitor_rotation monitor_rotation;
|
||||
CUgraphicsResource cuda_graphics_resources[2];
|
||||
CUarray mapped_arrays[2];
|
||||
CUstream cuda_stream;
|
||||
} gsr_capture_kms_cuda;
|
||||
|
||||
static int max_int(int a, int b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
|
||||
static bool cuda_create_codec_context(gsr_capture_kms_cuda *cap_kms, AVCodecContext *video_codec_context) {
|
||||
@@ -98,7 +56,7 @@ static bool cuda_create_codec_context(gsr_capture_kms_cuda *cap_kms, AVCodecCont
|
||||
(AVHWFramesContext *)frame_context->data;
|
||||
hw_frame_context->width = video_codec_context->width;
|
||||
hw_frame_context->height = video_codec_context->height;
|
||||
hw_frame_context->sw_format = AV_PIX_FMT_BGR0;
|
||||
hw_frame_context->sw_format = cap_kms->params.hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
|
||||
hw_frame_context->format = video_codec_context->pix_fmt;
|
||||
hw_frame_context->device_ref = device_ctx;
|
||||
hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
|
||||
@@ -112,80 +70,21 @@ static bool cuda_create_codec_context(gsr_capture_kms_cuda *cap_kms, AVCodecCont
|
||||
return false;
|
||||
}
|
||||
|
||||
cap_kms->cuda_stream = cuda_device_context->stream;
|
||||
video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
|
||||
video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc.
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_kms_cuda *cap_kms;
|
||||
const char *monitor_to_capture;
|
||||
int monitor_to_capture_len;
|
||||
int num_monitors;
|
||||
} MonitorCallbackUserdata;
|
||||
|
||||
static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
|
||||
MonitorCallbackUserdata *monitor_callback_userdata = userdata;
|
||||
++monitor_callback_userdata->num_monitors;
|
||||
|
||||
if(monitor_callback_userdata->monitor_to_capture_len != monitor->name_len || memcmp(monitor_callback_userdata->monitor_to_capture, monitor->name, monitor->name_len) != 0)
|
||||
return;
|
||||
|
||||
if(monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids < MAX_CONNECTOR_IDS) {
|
||||
monitor_callback_userdata->cap_kms->monitor_id.connector_ids[monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids] = monitor->connector_id;
|
||||
++monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids;
|
||||
}
|
||||
|
||||
if(monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids == MAX_CONNECTOR_IDS)
|
||||
fprintf(stderr, "gsr warning: reached max connector ids\n");
|
||||
}
|
||||
|
||||
static int gsr_capture_kms_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
static int gsr_capture_kms_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
|
||||
cap_kms->video_codec_context = video_codec_context;
|
||||
|
||||
gsr_monitor monitor;
|
||||
cap_kms->monitor_id.num_connector_ids = 0;
|
||||
|
||||
int kms_init_res = gsr_kms_client_init(&cap_kms->kms_client, cap_kms->params.egl->card_path);
|
||||
if(kms_init_res != 0) {
|
||||
const int res = gsr_capture_kms_start(&cap_kms->kms, &cap_kms->base, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context);
|
||||
if(res != 0) {
|
||||
gsr_capture_kms_cuda_stop(cap, video_codec_context);
|
||||
return kms_init_res;
|
||||
return res;
|
||||
}
|
||||
|
||||
MonitorCallbackUserdata monitor_callback_userdata = {
|
||||
cap_kms,
|
||||
cap_kms->params.display_to_capture, strlen(cap_kms->params.display_to_capture),
|
||||
0
|
||||
};
|
||||
for_each_active_monitor_output(cap_kms->params.egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
|
||||
|
||||
if(!get_monitor_by_name(cap_kms->params.egl, GSR_CONNECTION_DRM, cap_kms->params.display_to_capture, &monitor)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_start: failed to find monitor by name \"%s\"\n", cap_kms->params.display_to_capture);
|
||||
gsr_capture_kms_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
monitor.name = cap_kms->params.display_to_capture;
|
||||
cap_kms->monitor_rotation = drm_monitor_get_display_server_rotation(cap_kms->params.egl, &monitor);
|
||||
|
||||
cap_kms->capture_pos = monitor.pos;
|
||||
if(cap_kms->monitor_rotation == GSR_MONITOR_ROT_90 || cap_kms->monitor_rotation == GSR_MONITOR_ROT_270) {
|
||||
cap_kms->capture_size.x = monitor.size.y;
|
||||
cap_kms->capture_size.y = monitor.size.x;
|
||||
} else {
|
||||
cap_kms->capture_size = monitor.size;
|
||||
}
|
||||
|
||||
video_codec_context->width = max_int(2, cap_kms->capture_size.x & ~1);
|
||||
video_codec_context->height = max_int(2, cap_kms->capture_size.y & ~1);
|
||||
|
||||
/* Disable vsync */
|
||||
cap_kms->params.egl->eglSwapInterval(cap_kms->params.egl->egl_display, 0);
|
||||
|
||||
// TODO: overclocking is not supported on wayland...
|
||||
if(!gsr_cuda_load(&cap_kms->cuda, NULL, false)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_start: failed to load cuda\n");
|
||||
@@ -198,131 +97,24 @@ static int gsr_capture_kms_cuda_start(gsr_capture *cap, AVCodecContext *video_co
|
||||
return -1;
|
||||
}
|
||||
|
||||
gsr_cuda_context cuda_context = {
|
||||
.cuda = &cap_kms->cuda,
|
||||
.cuda_graphics_resources = cap_kms->cuda_graphics_resources,
|
||||
.mapped_arrays = cap_kms->mapped_arrays
|
||||
};
|
||||
if(!gsr_capture_base_setup_cuda_textures(&cap_kms->base, frame, &cuda_context, cap_kms->params.egl, cap_kms->params.color_range, cap_kms->params.hdr)) {
|
||||
gsr_capture_kms_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int gl_create_texture(gsr_capture_kms_cuda *cap_kms, int width, int height) {
|
||||
unsigned int texture_id = 0;
|
||||
cap_kms->params.egl->glGenTextures(1, &texture_id);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, texture_id);
|
||||
cap_kms->params.egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
|
||||
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return texture_id;
|
||||
}
|
||||
|
||||
static bool cuda_register_opengl_texture(gsr_capture_kms_cuda *cap_kms) {
|
||||
CUresult res;
|
||||
CUcontext old_ctx;
|
||||
res = cap_kms->cuda.cuCtxPushCurrent_v2(cap_kms->cuda.cu_ctx);
|
||||
// TODO: Use cuGraphicsEGLRegisterImage instead with the window egl image (dont use window_texture).
|
||||
// That removes the need for an extra texture and texture copy
|
||||
res = cap_kms->cuda.cuGraphicsGLRegisterImage(
|
||||
&cap_kms->cuda_graphics_resource, cap_kms->target_texture, GL_TEXTURE_2D,
|
||||
CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY);
|
||||
if (res != CUDA_SUCCESS) {
|
||||
const char *err_str = "unknown";
|
||||
cap_kms->cuda.cuGetErrorString(res, &err_str);
|
||||
fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsGLRegisterImage failed, error: %s, texture " "id: %u\n", err_str, cap_kms->target_texture);
|
||||
res = cap_kms->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
return false;
|
||||
}
|
||||
|
||||
res = cap_kms->cuda.cuGraphicsResourceSetMapFlags(cap_kms->cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
|
||||
res = cap_kms->cuda.cuGraphicsMapResources(1, &cap_kms->cuda_graphics_resource, 0);
|
||||
|
||||
res = cap_kms->cuda.cuGraphicsSubResourceGetMappedArray(&cap_kms->mapped_array, cap_kms->cuda_graphics_resource, 0, 0);
|
||||
res = cap_kms->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
|
||||
if(!cap_kms->created_hw_frame) {
|
||||
cap_kms->created_hw_frame = true;
|
||||
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_tick: failed to allocate frame\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = video_codec_context->color_range;
|
||||
(*frame)->color_primaries = video_codec_context->color_primaries;
|
||||
(*frame)->color_trc = video_codec_context->color_trc;
|
||||
(*frame)->colorspace = video_codec_context->colorspace;
|
||||
(*frame)->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_tick: av_hwframe_get_buffer failed\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
cap_kms->params.egl->glGenTextures(1, &cap_kms->input_texture);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->input_texture);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
cap_kms->params.egl->glGenTextures(1, &cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
|
||||
|
||||
cap_kms->target_texture = gl_create_texture(cap_kms, video_codec_context->width, video_codec_context->height);
|
||||
if(cap_kms->target_texture == 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_tick: failed to create opengl texture\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if(!cuda_register_opengl_texture(cap_kms)) {
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
gsr_color_conversion_params color_conversion_params = {0};
|
||||
color_conversion_params.egl = cap_kms->params.egl;
|
||||
color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_BGR;
|
||||
|
||||
color_conversion_params.destination_textures[0] = cap_kms->target_texture;
|
||||
color_conversion_params.num_destination_textures = 1;
|
||||
|
||||
if(gsr_color_conversion_init(&cap_kms->color_conversion, &color_conversion_params) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_cuda_tick: failed to create color conversion\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool gsr_capture_kms_cuda_should_stop(gsr_capture *cap, bool *err) {
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
if(cap_kms->should_stop) {
|
||||
if(cap_kms->kms.should_stop) {
|
||||
if(err)
|
||||
*err = cap_kms->stop_is_error;
|
||||
*err = cap_kms->kms.stop_is_error;
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -331,268 +123,30 @@ static bool gsr_capture_kms_cuda_should_stop(gsr_capture *cap, bool *err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
|
||||
switch(rot) {
|
||||
case GSR_MONITOR_ROT_0: return 0.0f;
|
||||
case GSR_MONITOR_ROT_90: return M_PI_2;
|
||||
case GSR_MONITOR_ROT_180: return M_PI;
|
||||
case GSR_MONITOR_ROT_270: return M_PI + M_PI_2;
|
||||
}
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
/* Prefer non combined planes */
|
||||
static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
|
||||
int index_combined = -1;
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
|
||||
if(kms_response->fds[i].is_combined_plane)
|
||||
index_combined = i;
|
||||
else
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
}
|
||||
|
||||
if(index_combined != -1)
|
||||
return &kms_response->fds[index_combined];
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
|
||||
if(kms_response->num_fds == 0)
|
||||
return NULL;
|
||||
|
||||
int64_t largest_size = 0;
|
||||
gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
|
||||
if(size > largest_size && !kms_response->fds[i].is_cursor) {
|
||||
largest_size = size;
|
||||
largest_drm = &kms_response->fds[i];
|
||||
}
|
||||
}
|
||||
return largest_drm;
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_unload_cuda_graphics(gsr_capture_kms_cuda *cap_kms) {
|
||||
if(cap_kms->cuda.cu_ctx) {
|
||||
CUcontext old_ctx;
|
||||
cap_kms->cuda.cuCtxPushCurrent_v2(cap_kms->cuda.cu_ctx);
|
||||
|
||||
if(cap_kms->cuda_graphics_resource) {
|
||||
cap_kms->cuda.cuGraphicsUnmapResources(1, &cap_kms->cuda_graphics_resource, 0);
|
||||
cap_kms->cuda.cuGraphicsUnregisterResource(cap_kms->cuda_graphics_resource);
|
||||
cap_kms->cuda_graphics_resource = 0;
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
if(cap_kms->cuda_graphics_resources[i]) {
|
||||
cap_kms->cuda.cuGraphicsUnmapResources(1, &cap_kms->cuda_graphics_resources[i], 0);
|
||||
cap_kms->cuda.cuGraphicsUnregisterResource(cap_kms->cuda_graphics_resources[i]);
|
||||
cap_kms->cuda_graphics_resources[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
cap_kms->cuda.cuCtxPopCurrent_v2(&old_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
|
||||
for(int i = 0; i < kms_response->num_fds; ++i) {
|
||||
if(kms_response->fds[i].is_cursor)
|
||||
return &kms_response->fds[i];
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define HDMI_STATIC_METADATA_TYPE1 0
|
||||
#define HDMI_EOTF_SMPTE_ST2084 2
|
||||
|
||||
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
|
||||
return hdr_metadata->metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_vaapi_set_hdr_metadata(gsr_capture_kms_cuda *cap_kms, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
|
||||
if(!cap_kms->mastering_display_metadata)
|
||||
cap_kms->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);
|
||||
|
||||
if(!cap_kms->light_metadata)
|
||||
cap_kms->light_metadata = av_content_light_metadata_create_side_data(frame);
|
||||
|
||||
if(cap_kms->mastering_display_metadata) {
|
||||
for(int i = 0; i < 3; ++i) {
|
||||
cap_kms->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
|
||||
cap_kms->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
|
||||
}
|
||||
|
||||
cap_kms->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
|
||||
cap_kms->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
|
||||
|
||||
cap_kms->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
|
||||
cap_kms->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
|
||||
|
||||
cap_kms->mastering_display_metadata->has_primaries = cap_kms->mastering_display_metadata->display_primaries[0][0].num > 0;
|
||||
cap_kms->mastering_display_metadata->has_luminance = cap_kms->mastering_display_metadata->max_luminance.num > 0;
|
||||
}
|
||||
|
||||
if(cap_kms->light_metadata) {
|
||||
cap_kms->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
|
||||
cap_kms->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
|
||||
}
|
||||
}
|
||||
|
||||
static vec2i swap_vec2i(vec2i value) {
|
||||
int tmp = value.x;
|
||||
value.x = value.y;
|
||||
value.y = tmp;
|
||||
return value;
|
||||
}
|
||||
|
||||
static int gsr_capture_kms_cuda_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
(void)frame;
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
|
||||
cap_kms->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
cap_kms->params.egl->glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
|
||||
gsr_kms_response_fd *drm_fd = NULL;
|
||||
gsr_kms_response_fd *cursor_drm_fd = NULL;
|
||||
bool capture_is_combined_plane = false;
|
||||
|
||||
if(gsr_kms_client_get_kms(&cap_kms->kms_client, &cap_kms->kms_response) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_capture: failed to get kms, error: %d (%s)\n", cap_kms->kms_response.result, cap_kms->kms_response.err_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(cap_kms->kms_response.num_fds == 0) {
|
||||
static bool error_shown = false;
|
||||
if(!error_shown) {
|
||||
error_shown = true;
|
||||
fprintf(stderr, "gsr error: no drm found, capture will fail\n");
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
for(int i = 0; i < cap_kms->monitor_id.num_connector_ids; ++i) {
|
||||
drm_fd = find_drm_by_connector_id(&cap_kms->kms_response, cap_kms->monitor_id.connector_ids[i]);
|
||||
if(drm_fd)
|
||||
break;
|
||||
}
|
||||
|
||||
// Will never happen on wayland unless the target monitor has been disconnected
|
||||
if(!drm_fd) {
|
||||
drm_fd = find_first_combined_drm(&cap_kms->kms_response);
|
||||
if(!drm_fd)
|
||||
drm_fd = find_largest_drm(&cap_kms->kms_response);
|
||||
capture_is_combined_plane = true;
|
||||
}
|
||||
|
||||
cursor_drm_fd = find_cursor_drm(&cap_kms->kms_response);
|
||||
|
||||
if(!drm_fd)
|
||||
return -1;
|
||||
|
||||
if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
|
||||
cursor_drm_fd = NULL;
|
||||
|
||||
if(drm_fd->has_hdr_metadata && cap_kms->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
|
||||
gsr_capture_kms_vaapi_set_hdr_metadata(cap_kms, frame, drm_fd);
|
||||
|
||||
const intptr_t img_attr[] = {
|
||||
//EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
|
||||
EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,//cap_kms->params.egl->pixel_format, ARGB8888
|
||||
EGL_WIDTH, drm_fd->width,//cap_kms->params.egl->width,
|
||||
EGL_HEIGHT, drm_fd->height,//cap_kms->params.egl->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,//cap_kms->params.egl->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,//cap_kms->params.egl->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,//cap_kms->params.egl->pitch,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, drm_fd->modifier & 0xFFFFFFFFULL,//cap_kms->params.egl->modifier & 0xFFFFFFFFULL,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, drm_fd->modifier >> 32ULL,//cap_kms->params.egl->modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
EGLImage image = cap_kms->params.egl->eglCreateImage(cap_kms->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->input_texture);
|
||||
cap_kms->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
vec2i capture_pos = cap_kms->capture_pos;
|
||||
if(!capture_is_combined_plane)
|
||||
capture_pos = (vec2i){drm_fd->x, drm_fd->y};
|
||||
|
||||
const float texture_rotation = monitor_rotation_to_radians(cap_kms->monitor_rotation);
|
||||
|
||||
gsr_color_conversion_draw(&cap_kms->color_conversion, cap_kms->input_texture,
|
||||
(vec2i){0, 0}, cap_kms->capture_size,
|
||||
capture_pos, cap_kms->capture_size,
|
||||
texture_rotation, false);
|
||||
|
||||
if(cursor_drm_fd) {
|
||||
const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
|
||||
vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
|
||||
switch(cap_kms->monitor_rotation) {
|
||||
case GSR_MONITOR_ROT_0:
|
||||
break;
|
||||
case GSR_MONITOR_ROT_90:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.x = cap_kms->capture_size.x - cursor_pos.x;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_180:
|
||||
cursor_pos.x = cap_kms->capture_size.x - cursor_pos.x;
|
||||
cursor_pos.y = cap_kms->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_270:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.y = cap_kms->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
}
|
||||
|
||||
const intptr_t img_attr_cursor[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
|
||||
EGL_WIDTH, cursor_drm_fd->width,
|
||||
EGL_HEIGHT, cursor_drm_fd->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
EGLImage cursor_image = cap_kms->params.egl->eglCreateImage(cap_kms->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, cursor_image);
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, cursor_image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
|
||||
|
||||
gsr_color_conversion_draw(&cap_kms->color_conversion, cap_kms->cursor_texture,
|
||||
cursor_pos, cursor_size,
|
||||
(vec2i){0, 0}, cursor_size,
|
||||
texture_rotation, true);
|
||||
}
|
||||
|
||||
cap_kms->params.egl->eglSwapBuffers(cap_kms->params.egl->egl_display, cap_kms->params.egl->egl_surface);
|
||||
|
||||
frame->linesize[0] = frame->width * 4;
|
||||
gsr_capture_kms_capture(&cap_kms->kms, &cap_kms->base, frame, cap_kms->params.egl, cap_kms->params.hdr, true, true);
|
||||
|
||||
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
CUDA_MEMCPY2D memcpy_struct;
|
||||
memcpy_struct.srcXInBytes = 0;
|
||||
memcpy_struct.srcY = 0;
|
||||
@@ -602,13 +156,18 @@ static int gsr_capture_kms_cuda_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
memcpy_struct.dstY = 0;
|
||||
memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
|
||||
|
||||
memcpy_struct.srcArray = cap_kms->mapped_array;
|
||||
memcpy_struct.srcPitch = frame->linesize[0];
|
||||
memcpy_struct.dstDevice = (CUdeviceptr)frame->data[0];
|
||||
memcpy_struct.dstPitch = frame->linesize[0];
|
||||
memcpy_struct.WidthInBytes = frame->width * 4;
|
||||
memcpy_struct.Height = frame->height;
|
||||
cap_kms->cuda.cuMemcpy2D_v2(&memcpy_struct);
|
||||
memcpy_struct.srcArray = cap_kms->mapped_arrays[i];
|
||||
memcpy_struct.srcPitch = frame->width / div[i];
|
||||
memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
|
||||
memcpy_struct.dstPitch = frame->linesize[i];
|
||||
memcpy_struct.WidthInBytes = frame->width * (cap_kms->params.hdr ? 2 : 1);
|
||||
memcpy_struct.Height = frame->height / div[i];
|
||||
// TODO: Remove this copy if possible
|
||||
cap_kms->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_kms->cuda_stream);
|
||||
}
|
||||
|
||||
// TODO: needed?
|
||||
cap_kms->cuda.cuStreamSynchronize(cap_kms->cuda_stream);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -616,53 +175,22 @@ static int gsr_capture_kms_cuda_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
static void gsr_capture_kms_cuda_capture_end(gsr_capture *cap, AVFrame *frame) {
|
||||
(void)frame;
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_kms_cuda *cap_kms = cap->priv;
|
||||
|
||||
gsr_color_conversion_deinit(&cap_kms->color_conversion);
|
||||
|
||||
gsr_capture_kms_unload_cuda_graphics(cap_kms);
|
||||
|
||||
if(cap_kms->params.egl->egl_context) {
|
||||
if(cap_kms->input_texture) {
|
||||
cap_kms->params.egl->glDeleteTextures(1, &cap_kms->input_texture);
|
||||
cap_kms->input_texture = 0;
|
||||
}
|
||||
|
||||
if(cap_kms->cursor_texture) {
|
||||
cap_kms->params.egl->glDeleteTextures(1, &cap_kms->cursor_texture);
|
||||
cap_kms->cursor_texture = 0;
|
||||
}
|
||||
|
||||
if(cap_kms->target_texture) {
|
||||
cap_kms->params.egl->glDeleteTextures(1, &cap_kms->target_texture);
|
||||
cap_kms->target_texture = 0;
|
||||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
|
||||
if(video_codec_context->hw_device_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_device_ctx);
|
||||
if(video_codec_context->hw_frames_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_frames_ctx);
|
||||
|
||||
gsr_cuda_unload(&cap_kms->cuda);
|
||||
gsr_kms_client_deinit(&cap_kms->kms_client);
|
||||
gsr_capture_kms_stop(&cap_kms->kms);
|
||||
gsr_capture_base_stop(&cap_kms->base, cap_kms->params.egl);
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
@@ -706,7 +234,7 @@ gsr_capture* gsr_capture_kms_cuda_create(const gsr_capture_kms_cuda_params *para
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_kms_cuda_start,
|
||||
.tick = gsr_capture_kms_cuda_tick,
|
||||
.tick = NULL,
|
||||
.should_stop = gsr_capture_kms_cuda_should_stop,
|
||||
.capture = gsr_capture_kms_cuda_capture,
|
||||
.capture_end = gsr_capture_kms_cuda_capture_end,
|
||||
|
||||
@@ -1,59 +1,24 @@
|
||||
#include "../../include/capture/kms_vaapi.h"
|
||||
#include "../../kms/client/kms_client.h"
|
||||
#include "../../include/utils.h"
|
||||
#include "../../include/capture/kms.h"
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <assert.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_vaapi.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavutil/mastering_display_metadata.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <va/va.h>
|
||||
#include <va/va_drmcommon.h>
|
||||
|
||||
#define MAX_CONNECTOR_IDS 32
|
||||
|
||||
typedef struct {
|
||||
uint32_t connector_ids[MAX_CONNECTOR_IDS];
|
||||
int num_connector_ids;
|
||||
} MonitorId;
|
||||
gsr_capture_base base;
|
||||
gsr_capture_kms kms;
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_kms_vaapi_params params;
|
||||
|
||||
bool should_stop;
|
||||
bool stop_is_error;
|
||||
bool created_hw_frame;
|
||||
|
||||
gsr_kms_client kms_client;
|
||||
gsr_kms_response kms_response;
|
||||
|
||||
vec2i capture_pos;
|
||||
vec2i capture_size;
|
||||
MonitorId monitor_id;
|
||||
|
||||
VADisplay va_dpy;
|
||||
VADRMPRIMESurfaceDescriptor prime;
|
||||
|
||||
unsigned int input_texture;
|
||||
unsigned int target_textures[2];
|
||||
unsigned int cursor_texture;
|
||||
|
||||
gsr_color_conversion color_conversion;
|
||||
|
||||
AVCodecContext *video_codec_context;
|
||||
AVMasteringDisplayMetadata *mastering_display_metadata;
|
||||
AVContentLightMetadata *light_metadata;
|
||||
|
||||
gsr_monitor_rotation monitor_rotation;
|
||||
} gsr_capture_kms_vaapi;
|
||||
|
||||
/* Return the larger of the two ints. */
static int max_int(int a, int b) {
    if(a < b)
        return b;
    return a;
}
|
||||
|
||||
static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
|
||||
|
||||
static bool drm_create_codec_context(gsr_capture_kms_vaapi *cap_kms, AVCodecContext *video_codec_context) {
|
||||
@@ -103,79 +68,21 @@ static bool drm_create_codec_context(gsr_capture_kms_vaapi *cap_kms, AVCodecCont
|
||||
return true;
|
||||
}
|
||||
|
||||
#define DRM_FORMAT_MOD_INVALID 0xffffffffffffffULL
|
||||
|
||||
// TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc.
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_kms_vaapi *cap_kms;
|
||||
const char *monitor_to_capture;
|
||||
int monitor_to_capture_len;
|
||||
int num_monitors;
|
||||
} MonitorCallbackUserdata;
|
||||
|
||||
static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
|
||||
(void)monitor;
|
||||
MonitorCallbackUserdata *monitor_callback_userdata = userdata;
|
||||
++monitor_callback_userdata->num_monitors;
|
||||
|
||||
if(monitor_callback_userdata->monitor_to_capture_len != monitor->name_len || memcmp(monitor_callback_userdata->monitor_to_capture, monitor->name, monitor->name_len) != 0)
|
||||
return;
|
||||
|
||||
if(monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids < MAX_CONNECTOR_IDS) {
|
||||
monitor_callback_userdata->cap_kms->monitor_id.connector_ids[monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids] = monitor->connector_id;
|
||||
++monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids;
|
||||
}
|
||||
|
||||
if(monitor_callback_userdata->cap_kms->monitor_id.num_connector_ids == MAX_CONNECTOR_IDS)
|
||||
fprintf(stderr, "gsr warning: reached max connector ids\n");
|
||||
}
|
||||
|
||||
static int gsr_capture_kms_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
static int gsr_capture_kms_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
|
||||
cap_kms->video_codec_context = video_codec_context;
|
||||
|
||||
gsr_monitor monitor;
|
||||
cap_kms->monitor_id.num_connector_ids = 0;
|
||||
|
||||
int kms_init_res = gsr_kms_client_init(&cap_kms->kms_client, cap_kms->params.egl->card_path);
|
||||
if(kms_init_res != 0) {
|
||||
int res = gsr_capture_kms_start(&cap_kms->kms, &cap_kms->base, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context);
|
||||
if(res != 0) {
|
||||
gsr_capture_kms_vaapi_stop(cap, video_codec_context);
|
||||
return kms_init_res;
|
||||
return res;
|
||||
}
|
||||
|
||||
MonitorCallbackUserdata monitor_callback_userdata = {
|
||||
cap_kms,
|
||||
cap_kms->params.display_to_capture, strlen(cap_kms->params.display_to_capture),
|
||||
0,
|
||||
};
|
||||
for_each_active_monitor_output(cap_kms->params.egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
|
||||
|
||||
if(!get_monitor_by_name(cap_kms->params.egl, GSR_CONNECTION_DRM, cap_kms->params.display_to_capture, &monitor)) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_start: failed to find monitor by name \"%s\"\n", cap_kms->params.display_to_capture);
|
||||
if(!drm_create_codec_context(cap_kms, video_codec_context)) {
|
||||
gsr_capture_kms_vaapi_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
monitor.name = cap_kms->params.display_to_capture;
|
||||
cap_kms->monitor_rotation = drm_monitor_get_display_server_rotation(cap_kms->params.egl, &monitor);
|
||||
|
||||
cap_kms->capture_pos = monitor.pos;
|
||||
if(cap_kms->monitor_rotation == GSR_MONITOR_ROT_90 || cap_kms->monitor_rotation == GSR_MONITOR_ROT_270) {
|
||||
cap_kms->capture_size.x = monitor.size.y;
|
||||
cap_kms->capture_size.y = monitor.size.x;
|
||||
} else {
|
||||
cap_kms->capture_size = monitor.size;
|
||||
}
|
||||
|
||||
/* Disable vsync */
|
||||
cap_kms->params.egl->eglSwapInterval(cap_kms->params.egl->egl_display, 0);
|
||||
|
||||
video_codec_context->width = max_int(2, even_number_ceil(cap_kms->capture_size.x));
|
||||
video_codec_context->height = max_int(2, even_number_ceil(cap_kms->capture_size.y));
|
||||
|
||||
if(!drm_create_codec_context(cap_kms, video_codec_context)) {
|
||||
if(!gsr_capture_base_setup_vaapi_textures(&cap_kms->base, frame, cap_kms->params.egl, cap_kms->va_dpy, &cap_kms->prime, cap_kms->params.color_range)) {
|
||||
gsr_capture_kms_vaapi_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
@@ -183,163 +90,11 @@ static int gsr_capture_kms_vaapi_start(gsr_capture *cap, AVCodecContext *video_c
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Pack four character codes into a little-endian fourcc value
 * ('a' ends up in the least significant byte). */
static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
    uint32_t code = a;
    code |= b << 8;
    code |= c << 16;
    code |= d << 24;
    return code;
}
|
||||
|
||||
#define FOURCC_NV12 842094158
|
||||
#define FOURCC_P010 808530000
|
||||
|
||||
static void gsr_capture_kms_vaapi_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
|
||||
if(!cap_kms->created_hw_frame) {
|
||||
cap_kms->created_hw_frame = true;
|
||||
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: failed to allocate frame\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = video_codec_context->color_range;
|
||||
(*frame)->color_primaries = video_codec_context->color_primaries;
|
||||
(*frame)->color_trc = video_codec_context->color_trc;
|
||||
(*frame)->colorspace = video_codec_context->colorspace;
|
||||
(*frame)->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: av_hwframe_get_buffer failed: %d\n", res);
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
VASurfaceID target_surface_id = (uintptr_t)(*frame)->data[3];
|
||||
|
||||
VAStatus va_status = vaExportSurfaceHandle(cap_kms->va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &cap_kms->prime);
|
||||
if(va_status != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: vaExportSurfaceHandle failed, error: %d\n", va_status);
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
vaSyncSurface(cap_kms->va_dpy, target_surface_id);
|
||||
|
||||
cap_kms->params.egl->glGenTextures(1, &cap_kms->input_texture);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->input_texture);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
cap_kms->params.egl->glGenTextures(1, &cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
const uint32_t formats_nv12[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
|
||||
const uint32_t formats_p010[2] = { fourcc('R', '1', '6', ' '), fourcc('G', 'R', '3', '2') };
|
||||
|
||||
if(cap_kms->prime.fourcc == FOURCC_NV12 || cap_kms->prime.fourcc == FOURCC_P010) {
|
||||
const uint32_t *formats = cap_kms->prime.fourcc == FOURCC_NV12 ? formats_nv12 : formats_p010;
|
||||
|
||||
cap_kms->params.egl->glGenTextures(2, cap_kms->target_textures);
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
const int layer = i;
|
||||
const int plane = 0;
|
||||
|
||||
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
|
||||
//const uint64_t modifier = cap_kms->prime.objects[cap_kms->prime.layers[layer].object_index[plane]].drm_format_modifier;
|
||||
|
||||
const intptr_t img_attr[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, formats[i],
|
||||
EGL_WIDTH, cap_kms->prime.width / div[i],
|
||||
EGL_HEIGHT, cap_kms->prime.height / div[i],
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, cap_kms->prime.objects[cap_kms->prime.layers[layer].object_index[plane]].fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, cap_kms->prime.layers[layer].offset[plane],
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, cap_kms->prime.layers[layer].pitch[plane],
|
||||
// TODO:
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
while(cap_kms->params.egl->eglGetError() != EGL_SUCCESS){}
|
||||
EGLImage image = cap_kms->params.egl->eglCreateImage(cap_kms->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
if(!image) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: failed to create egl image from drm fd for output drm fd, error: %d\n", cap_kms->params.egl->eglGetError());
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->target_textures[i]);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_kms->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
|
||||
while(cap_kms->params.egl->glGetError()) {}
|
||||
while(cap_kms->params.egl->eglGetError() != EGL_SUCCESS){}
|
||||
cap_kms->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
if(cap_kms->params.egl->glGetError() != 0 || cap_kms->params.egl->eglGetError() != EGL_SUCCESS) {
|
||||
// TODO: Get the error properly
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: failed to bind egl image to gl texture, error: %d\n", cap_kms->params.egl->eglGetError());
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
}
|
||||
|
||||
gsr_color_conversion_params color_conversion_params = {0};
|
||||
color_conversion_params.color_range = cap_kms->params.color_range;
|
||||
color_conversion_params.egl = cap_kms->params.egl;
|
||||
color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
|
||||
if(cap_kms->prime.fourcc == FOURCC_NV12)
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
|
||||
else
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;
|
||||
|
||||
color_conversion_params.destination_textures[0] = cap_kms->target_textures[0];
|
||||
color_conversion_params.destination_textures[1] = cap_kms->target_textures[1];
|
||||
color_conversion_params.num_destination_textures = 2;
|
||||
|
||||
if(gsr_color_conversion_init(&cap_kms->color_conversion, &color_conversion_params) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: failed to create color conversion\n");
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_tick: unexpected fourcc %u for output drm fd, expected nv12 or p010\n", cap_kms->prime.fourcc);
|
||||
cap_kms->should_stop = true;
|
||||
cap_kms->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool gsr_capture_kms_vaapi_should_stop(gsr_capture *cap, bool *err) {
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
if(cap_kms->should_stop) {
|
||||
if(cap_kms->kms.should_stop) {
|
||||
if(err)
|
||||
*err = cap_kms->stop_is_error;
|
||||
*err = cap_kms->kms.stop_is_error;
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -348,294 +103,21 @@ static bool gsr_capture_kms_vaapi_should_stop(gsr_capture *cap, bool *err) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Convert a monitor rotation step (0/90/180/270 degrees) to radians.
 * Unknown values map to 0. */
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
    float radians = 0.0f;
    switch(rot) {
        case GSR_MONITOR_ROT_0:
            radians = 0.0f;
            break;
        case GSR_MONITOR_ROT_90:
            radians = M_PI_2;
            break;
        case GSR_MONITOR_ROT_180:
            radians = M_PI;
            break;
        case GSR_MONITOR_ROT_270:
            radians = M_PI + M_PI_2;
            break;
    }
    return radians;
}
|
||||
|
||||
/* Prefer non combined planes */
|
||||
/* Find the non-cursor plane that belongs to the given connector.
 * A dedicated (non-combined) plane is preferred and returned immediately;
 * otherwise the last matching combined plane is returned, or NULL if the
 * connector has no plane in the response. */
static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
    gsr_kms_response_fd *combined_match = NULL;
    for(int i = 0; i < kms_response->num_fds; ++i) {
        gsr_kms_response_fd *drm = &kms_response->fds[i];
        if(drm->connector_id != connector_id || drm->is_cursor)
            continue;
        if(!drm->is_combined_plane)
            return drm; /* dedicated plane wins over a combined one */
        combined_match = drm;
    }
    return combined_match;
}
|
||||
|
||||
/* Return the first non-cursor plane flagged as a combined plane, or NULL. */
static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
    for(int i = 0; i < kms_response->num_fds; ++i) {
        gsr_kms_response_fd *drm = &kms_response->fds[i];
        if(!drm->is_cursor && drm->is_combined_plane)
            return drm;
    }
    return NULL;
}
|
||||
|
||||
/* Return the non-cursor plane with the largest pixel area, or NULL if the
 * response contains no usable (non-cursor) plane.
 * Fix: the previous version seeded the result with fds[0] regardless of its
 * type, so a response containing only cursor planes returned the cursor
 * plane as the "largest display". Now cursor planes are never selected and
 * NULL is returned instead; all callers already NULL-check the result. */
static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
    gsr_kms_response_fd *largest_drm = NULL;
    int64_t largest_size = -1; /* -1 so even a 0-area non-cursor plane is accepted */
    for(int i = 0; i < kms_response->num_fds; ++i) {
        gsr_kms_response_fd *drm = &kms_response->fds[i];
        if(drm->is_cursor)
            continue; /* never pick the cursor plane as the screen */
        /* 64-bit product to avoid int overflow on very large framebuffers */
        const int64_t size = (int64_t)drm->width * (int64_t)drm->height;
        if(size > largest_size) {
            largest_size = size;
            largest_drm = drm;
        }
    }
    return largest_drm;
}
|
||||
|
||||
/* Return the first plane flagged as the cursor plane, or NULL if none. */
static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
    gsr_kms_response_fd *cursor = NULL;
    for(int i = 0; i < kms_response->num_fds; ++i) {
        if(kms_response->fds[i].is_cursor) {
            cursor = &kms_response->fds[i];
            break;
        }
    }
    return cursor;
}
|
||||
|
||||
#define HDMI_STATIC_METADATA_TYPE1 0
|
||||
#define HDMI_EOTF_SMPTE_ST2084 2
|
||||
|
||||
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
|
||||
return hdr_metadata->metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
|
||||
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
|
||||
}
|
||||
|
||||
/* Copy the HDR metadata reported by KMS for this plane into the frame's
 * AVMasteringDisplayMetadata and AVContentLightMetadata side data.
 * The side-data blobs are created once and cached on cap_kms; creation
 * failure is tolerated (the corresponding block is simply skipped). */
static void gsr_capture_kms_vaapi_set_hdr_metadata(gsr_capture_kms_vaapi *cap_kms, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
    if(!cap_kms->mastering_display_metadata)
        cap_kms->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);

    if(!cap_kms->light_metadata)
        cap_kms->light_metadata = av_content_light_metadata_create_side_data(frame);

    AVMasteringDisplayMetadata *display_metadata = cap_kms->mastering_display_metadata;
    if(display_metadata) {
        /* Chromaticity coordinates are in CTA-861 units of 0.00002 (1/50000) */
        for(int i = 0; i < 3; ++i) {
            display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
            display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
        }

        display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
        display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);

        /* Min luminance is in units of 0.0001 cd/m2, max in units of 1 cd/m2 */
        display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
        display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);

        /* Flag the sections as valid only when the driver actually filled them in */
        display_metadata->has_primaries = display_metadata->display_primaries[0][0].num > 0;
        display_metadata->has_luminance = display_metadata->max_luminance.num > 0;
    }

    if(cap_kms->light_metadata) {
        cap_kms->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
        cap_kms->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
    }
}
|
||||
|
||||
/* Return a copy of the vector with its x and y components exchanged. */
static vec2i swap_vec2i(vec2i value) {
    vec2i swapped;
    swapped.x = value.y;
    swapped.y = value.x;
    return swapped;
}
|
||||
|
||||
static int gsr_capture_kms_vaapi_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
|
||||
cap_kms->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
cap_kms->params.egl->glClear(GL_COLOR_BUFFER_BIT);
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
|
||||
gsr_kms_response_fd *drm_fd = NULL;
|
||||
gsr_kms_response_fd *cursor_drm_fd = NULL;
|
||||
bool capture_is_combined_plane = false;
|
||||
|
||||
if(gsr_kms_client_get_kms(&cap_kms->kms_client, &cap_kms->kms_response) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_capture: failed to get kms, error: %d (%s)\n", cap_kms->kms_response.result, cap_kms->kms_response.err_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(cap_kms->kms_response.num_fds == 0) {
|
||||
static bool error_shown = false;
|
||||
if(!error_shown) {
|
||||
error_shown = true;
|
||||
fprintf(stderr, "gsr error: no drm found, capture will fail\n");
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
for(int i = 0; i < cap_kms->monitor_id.num_connector_ids; ++i) {
|
||||
drm_fd = find_drm_by_connector_id(&cap_kms->kms_response, cap_kms->monitor_id.connector_ids[i]);
|
||||
if(drm_fd)
|
||||
break;
|
||||
}
|
||||
|
||||
// Will never happen on wayland unless the target monitor has been disconnected
|
||||
if(!drm_fd) {
|
||||
drm_fd = find_first_combined_drm(&cap_kms->kms_response);
|
||||
if(!drm_fd)
|
||||
drm_fd = find_largest_drm(&cap_kms->kms_response);
|
||||
capture_is_combined_plane = true;
|
||||
}
|
||||
|
||||
cursor_drm_fd = find_cursor_drm(&cap_kms->kms_response);
|
||||
|
||||
if(!drm_fd)
|
||||
return -1;
|
||||
|
||||
if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
|
||||
cursor_drm_fd = NULL;
|
||||
|
||||
if(drm_fd->has_hdr_metadata && cap_kms->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
|
||||
gsr_capture_kms_vaapi_set_hdr_metadata(cap_kms, frame, drm_fd);
|
||||
|
||||
// TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
|
||||
// Even ffmpeg kmsgrab causes this crash. The error is:
|
||||
// amdgpu: Failed to allocate a buffer:
|
||||
// amdgpu: size : 28508160 bytes
|
||||
// amdgpu: alignment : 2097152 bytes
|
||||
// amdgpu: domains : 4
|
||||
// amdgpu: flags : 4
|
||||
// amdgpu: Failed to allocate a buffer:
|
||||
// amdgpu: size : 28508160 bytes
|
||||
// amdgpu: alignment : 2097152 bytes
|
||||
// amdgpu: domains : 4
|
||||
// amdgpu: flags : 4
|
||||
// EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
|
||||
// [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
|
||||
// [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
|
||||
// Error: avcodec_send_frame failed, error: Input/output error
|
||||
// Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
|
||||
// kms server info: kms client shutdown, shutting down the server
|
||||
const intptr_t img_attr[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
|
||||
EGL_WIDTH, drm_fd->width,
|
||||
EGL_HEIGHT, drm_fd->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
|
||||
// TODO:
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, drm_fd->modifier & 0xFFFFFFFFULL,
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, drm_fd->modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
EGLImage image = cap_kms->params.egl->eglCreateImage(cap_kms->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->input_texture);
|
||||
cap_kms->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
// TODO: Test rotation with multiple monitors, different rotation setups
|
||||
// TODO: Make rotation work on wayland
|
||||
// TODO: Apply these changes to kms cuda too, and test that
|
||||
|
||||
vec2i capture_pos = cap_kms->capture_pos;
|
||||
if(!capture_is_combined_plane)
|
||||
capture_pos = (vec2i){drm_fd->x, drm_fd->y};
|
||||
|
||||
const float texture_rotation = monitor_rotation_to_radians(cap_kms->monitor_rotation);
|
||||
|
||||
gsr_color_conversion_draw(&cap_kms->color_conversion, cap_kms->input_texture,
|
||||
(vec2i){0, 0}, cap_kms->capture_size,
|
||||
capture_pos, cap_kms->capture_size,
|
||||
texture_rotation, false);
|
||||
|
||||
if(cursor_drm_fd) {
|
||||
const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
|
||||
vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
|
||||
switch(cap_kms->monitor_rotation) {
|
||||
case GSR_MONITOR_ROT_0:
|
||||
break;
|
||||
case GSR_MONITOR_ROT_90:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.x = cap_kms->capture_size.x - cursor_pos.x;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_180:
|
||||
cursor_pos.x = cap_kms->capture_size.x - cursor_pos.x;
|
||||
cursor_pos.y = cap_kms->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.x -= cursor_size.x;
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
case GSR_MONITOR_ROT_270:
|
||||
cursor_pos = swap_vec2i(cursor_pos);
|
||||
cursor_pos.y = cap_kms->capture_size.y - cursor_pos.y;
|
||||
// TODO: Remove this horrible hack
|
||||
cursor_pos.y -= cursor_size.y;
|
||||
break;
|
||||
}
|
||||
|
||||
const intptr_t img_attr_cursor[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
|
||||
EGL_WIDTH, cursor_drm_fd->width,
|
||||
EGL_HEIGHT, cursor_drm_fd->height,
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
|
||||
EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
EGLImage cursor_image = cap_kms->params.egl->eglCreateImage(cap_kms->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, cap_kms->cursor_texture);
|
||||
cap_kms->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, cursor_image);
|
||||
cap_kms->params.egl->eglDestroyImage(cap_kms->params.egl->egl_display, cursor_image);
|
||||
cap_kms->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
gsr_color_conversion_draw(&cap_kms->color_conversion, cap_kms->cursor_texture,
|
||||
cursor_pos, cursor_size,
|
||||
(vec2i){0, 0}, cursor_size,
|
||||
texture_rotation, false);
|
||||
}
|
||||
|
||||
cap_kms->params.egl->eglSwapBuffers(cap_kms->params.egl->egl_display, cap_kms->params.egl->egl_surface);
|
||||
//cap_kms->params.egl->glFlush();
|
||||
//cap_kms->params.egl->glFinish();
|
||||
|
||||
gsr_capture_kms_capture(&cap_kms->kms, &cap_kms->base, frame, cap_kms->params.egl, cap_kms->params.hdr, false, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_vaapi_capture_end(gsr_capture *cap, AVFrame *frame) {
|
||||
(void)frame;
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_kms_vaapi *cap_kms = cap->priv;
|
||||
|
||||
gsr_color_conversion_deinit(&cap_kms->color_conversion);
|
||||
|
||||
for(uint32_t i = 0; i < cap_kms->prime.num_objects; ++i) {
|
||||
if(cap_kms->prime.objects[i].fd > 0) {
|
||||
close(cap_kms->prime.objects[i].fd);
|
||||
@@ -643,35 +125,13 @@ static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_c
|
||||
}
|
||||
}
|
||||
|
||||
if(cap_kms->params.egl->egl_context) {
|
||||
if(cap_kms->input_texture) {
|
||||
cap_kms->params.egl->glDeleteTextures(1, &cap_kms->input_texture);
|
||||
cap_kms->input_texture = 0;
|
||||
}
|
||||
|
||||
if(cap_kms->cursor_texture) {
|
||||
cap_kms->params.egl->glDeleteTextures(1, &cap_kms->cursor_texture);
|
||||
cap_kms->cursor_texture = 0;
|
||||
}
|
||||
|
||||
cap_kms->params.egl->glDeleteTextures(2, cap_kms->target_textures);
|
||||
cap_kms->target_textures[0] = 0;
|
||||
cap_kms->target_textures[1] = 0;
|
||||
}
|
||||
|
||||
for(int i = 0; i < cap_kms->kms_response.num_fds; ++i) {
|
||||
if(cap_kms->kms_response.fds[i].fd > 0)
|
||||
close(cap_kms->kms_response.fds[i].fd);
|
||||
cap_kms->kms_response.fds[i].fd = 0;
|
||||
}
|
||||
cap_kms->kms_response.num_fds = 0;
|
||||
|
||||
if(video_codec_context->hw_device_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_device_ctx);
|
||||
if(video_codec_context->hw_frames_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_frames_ctx);
|
||||
|
||||
gsr_kms_client_deinit(&cap_kms->kms_client);
|
||||
gsr_capture_kms_stop(&cap_kms->kms);
|
||||
gsr_capture_base_stop(&cap_kms->base, cap_kms->params.egl);
|
||||
}
|
||||
|
||||
static void gsr_capture_kms_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
@@ -716,7 +176,7 @@ gsr_capture* gsr_capture_kms_vaapi_create(const gsr_capture_kms_vaapi_params *pa
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_kms_vaapi_start,
|
||||
.tick = gsr_capture_kms_vaapi_tick,
|
||||
.tick = NULL,
|
||||
.should_stop = gsr_capture_kms_vaapi_should_stop,
|
||||
.capture = gsr_capture_kms_vaapi_capture,
|
||||
.capture_end = gsr_capture_kms_vaapi_capture_end,
|
||||
|
||||
@@ -24,7 +24,6 @@ typedef struct {
|
||||
bool capture_session_created;
|
||||
|
||||
gsr_cuda cuda;
|
||||
bool frame_initialized;
|
||||
} gsr_capture_nvfbc;
|
||||
|
||||
#if defined(_WIN64) || defined(__LP64__)
|
||||
@@ -174,7 +173,7 @@ static bool ffmpeg_create_cuda_contexts(gsr_capture_nvfbc *cap_nvfbc, AVCodecCon
|
||||
return true;
|
||||
}
|
||||
|
||||
static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
gsr_capture_nvfbc *cap_nvfbc = cap->priv;
|
||||
if(!gsr_cuda_load(&cap_nvfbc->cuda, cap_nvfbc->params.dpy, cap_nvfbc->params.overclock))
|
||||
return -1;
|
||||
@@ -321,6 +320,13 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
|
||||
if(!ffmpeg_create_cuda_contexts(cap_nvfbc, video_codec_context))
|
||||
goto error_cleanup;
|
||||
|
||||
// TODO: Remove
|
||||
const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_nvfbc_start: av_hwframe_get_buffer failed: %d\n", res);
|
||||
goto error_cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_cleanup:
|
||||
@@ -371,21 +377,6 @@ static void gsr_capture_nvfbc_destroy_session(gsr_capture *cap) {
|
||||
cap_nvfbc->nv_fbc_handle = 0;
|
||||
}
|
||||
|
||||
static void gsr_capture_nvfbc_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
gsr_capture_nvfbc *cap_nvfbc = cap->priv;
|
||||
if(!cap_nvfbc->frame_initialized && video_codec_context->hw_frames_ctx) {
|
||||
cap_nvfbc->frame_initialized = true;
|
||||
(*frame)->hw_frames_ctx = video_codec_context->hw_frames_ctx;
|
||||
(*frame)->buf[0] = av_buffer_pool_get(((AVHWFramesContext*)video_codec_context->hw_frames_ctx->data)->pool);
|
||||
(*frame)->extended_data = (*frame)->data;
|
||||
(*frame)->color_range = video_codec_context->color_range;
|
||||
(*frame)->color_primaries = video_codec_context->color_primaries;
|
||||
(*frame)->color_trc = video_codec_context->color_trc;
|
||||
(*frame)->colorspace = video_codec_context->colorspace;
|
||||
(*frame)->chroma_location = video_codec_context->chroma_sample_location;
|
||||
}
|
||||
}
|
||||
|
||||
static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
|
||||
gsr_capture_nvfbc *cap_nvfbc = cap->priv;
|
||||
|
||||
@@ -477,7 +468,7 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
|
||||
|
||||
*cap = (gsr_capture) {
|
||||
.start = gsr_capture_nvfbc_start,
|
||||
.tick = gsr_capture_nvfbc_tick,
|
||||
.tick = NULL,
|
||||
.should_stop = NULL,
|
||||
.capture = gsr_capture_nvfbc_capture,
|
||||
.capture_end = NULL,
|
||||
|
||||
@@ -14,7 +14,6 @@ typedef struct {
|
||||
bool should_stop;
|
||||
bool stop_is_error;
|
||||
bool window_resized;
|
||||
bool created_hw_frame;
|
||||
bool follow_focused_initialized;
|
||||
double window_resize_timer;
|
||||
|
||||
@@ -148,7 +147,7 @@ static unsigned int gl_create_texture(gsr_capture_xcomposite_cuda *cap_xcomp, in
|
||||
return texture_id;
|
||||
}
|
||||
|
||||
static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
if(cap_xcomp->params.follow_focused) {
|
||||
@@ -227,6 +226,12 @@ static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *v
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start: av_hwframe_get_buffer failed\n");
|
||||
gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->window_resize_timer = clock_get_monotonic_seconds();
|
||||
return 0;
|
||||
}
|
||||
@@ -267,7 +272,7 @@ static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *v
|
||||
}
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
|
||||
|
||||
bool init_new_window = false;
|
||||
@@ -350,7 +355,7 @@ static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *v
|
||||
}
|
||||
|
||||
const double window_resize_timeout = 1.0; // 1 second
|
||||
if(!cap_xcomp->created_hw_frame || (cap_xcomp->window_resized && clock_get_monotonic_seconds() - cap_xcomp->window_resize_timer >= window_resize_timeout)) {
|
||||
if(cap_xcomp->window_resized && clock_get_monotonic_seconds() - cap_xcomp->window_resize_timer >= window_resize_timeout) {
|
||||
cap_xcomp->window_resized = false;
|
||||
if(window_texture_on_resize(&cap_xcomp->window_texture) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: window_texture_on_resize failed\n");
|
||||
@@ -370,33 +375,6 @@ static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *v
|
||||
cap_xcomp->texture_size.x = min_int(video_codec_context->width, max_int(2, cap_xcomp->texture_size.x & ~1));
|
||||
cap_xcomp->texture_size.y = min_int(video_codec_context->height, max_int(2, cap_xcomp->texture_size.y & ~1));
|
||||
|
||||
if(!cap_xcomp->created_hw_frame) {
|
||||
cap_xcomp->created_hw_frame = true;
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: failed to allocate frame\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = video_codec_context->color_range;
|
||||
(*frame)->color_primaries = video_codec_context->color_primaries;
|
||||
(*frame)->color_trc = video_codec_context->color_trc;
|
||||
(*frame)->colorspace = video_codec_context->colorspace;
|
||||
(*frame)->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
if(av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0) < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_tick: av_hwframe_get_buffer failed\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Clear texture with black background because the source texture (window_texture_get_opengl_texture_id(&cap_xcomp->window_texture))
|
||||
// might be smaller than cap_xcomp->target_texture_id
|
||||
cap_xcomp->params.egl->glClearTexImage(cap_xcomp->target_texture_id, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
|
||||
|
||||
@@ -14,13 +14,13 @@
|
||||
#include <va/va_drmcommon.h>
|
||||
|
||||
typedef struct {
|
||||
gsr_capture_base base;
|
||||
gsr_capture_xcomposite_vaapi_params params;
|
||||
XEvent xev;
|
||||
|
||||
bool should_stop;
|
||||
bool stop_is_error;
|
||||
bool window_resized;
|
||||
bool created_hw_frame;
|
||||
bool follow_focused_initialized;
|
||||
|
||||
Window window;
|
||||
@@ -33,10 +33,6 @@ typedef struct {
|
||||
VADisplay va_dpy;
|
||||
VADRMPRIMESurfaceDescriptor prime;
|
||||
|
||||
unsigned int target_textures[2];
|
||||
|
||||
gsr_color_conversion color_conversion;
|
||||
|
||||
Atom net_active_window_atom;
|
||||
} gsr_capture_xcomposite_vaapi;
|
||||
|
||||
@@ -113,9 +109,11 @@ static bool drm_create_codec_context(gsr_capture_xcomposite_vaapi *cap_xcomp, AV
|
||||
|
||||
#define DRM_FORMAT_MOD_INVALID 0xffffffffffffffULL
|
||||
|
||||
static int gsr_capture_xcomposite_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
static int gsr_capture_xcomposite_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
|
||||
gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
|
||||
|
||||
cap_xcomp->base.video_codec_context = video_codec_context;
|
||||
|
||||
if(cap_xcomp->params.follow_focused) {
|
||||
cap_xcomp->net_active_window_atom = XInternAtom(cap_xcomp->params.egl->x11.dpy, "_NET_ACTIVE_WINDOW", False);
|
||||
if(!cap_xcomp->net_active_window_atom) {
|
||||
@@ -185,17 +183,16 @@ static int gsr_capture_xcomposite_vaapi_start(gsr_capture *cap, AVCodecContext *
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(!gsr_capture_base_setup_vaapi_textures(&cap_xcomp->base, frame, cap_xcomp->params.egl, cap_xcomp->va_dpy, &cap_xcomp->prime, cap_xcomp->params.color_range)) {
|
||||
gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
|
||||
return -1;
|
||||
}
|
||||
|
||||
cap_xcomp->window_resize_timer = clock_get_monotonic_seconds();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
|
||||
return (d << 24) | (c << 16) | (b << 8) | a;
|
||||
}
|
||||
|
||||
#define FOURCC_NV12 842094158
|
||||
|
||||
static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame **frame) {
|
||||
static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
|
||||
|
||||
cap_xcomp->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
|
||||
@@ -281,7 +278,7 @@ static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *
|
||||
}
|
||||
|
||||
const double window_resize_timeout = 1.0; // 1 second
|
||||
if(!cap_xcomp->created_hw_frame || (cap_xcomp->window_resized && clock_get_monotonic_seconds() - cap_xcomp->window_resize_timer >= window_resize_timeout)) {
|
||||
if(cap_xcomp->window_resized && clock_get_monotonic_seconds() - cap_xcomp->window_resize_timer >= window_resize_timeout) {
|
||||
cap_xcomp->window_resized = false;
|
||||
|
||||
if(window_texture_on_resize(&cap_xcomp->window_texture) != 0) {
|
||||
@@ -302,124 +299,7 @@ static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *
|
||||
cap_xcomp->texture_size.x = min_int(video_codec_context->width, max_int(2, even_number_ceil(cap_xcomp->texture_size.x)));
|
||||
cap_xcomp->texture_size.y = min_int(video_codec_context->height, max_int(2, even_number_ceil(cap_xcomp->texture_size.y)));
|
||||
|
||||
if(!cap_xcomp->created_hw_frame) {
|
||||
cap_xcomp->created_hw_frame = true;
|
||||
av_frame_free(frame);
|
||||
*frame = av_frame_alloc();
|
||||
if(!frame) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: failed to allocate frame\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
(*frame)->format = video_codec_context->pix_fmt;
|
||||
(*frame)->width = video_codec_context->width;
|
||||
(*frame)->height = video_codec_context->height;
|
||||
(*frame)->color_range = video_codec_context->color_range;
|
||||
(*frame)->color_primaries = video_codec_context->color_primaries;
|
||||
(*frame)->color_trc = video_codec_context->color_trc;
|
||||
(*frame)->colorspace = video_codec_context->colorspace;
|
||||
(*frame)->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, *frame, 0);
|
||||
if(res < 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: av_hwframe_get_buffer failed: %d\n", res);
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
VASurfaceID target_surface_id = (uintptr_t)(*frame)->data[3];
|
||||
|
||||
VAStatus va_status = vaExportSurfaceHandle(cap_xcomp->va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &cap_xcomp->prime);
|
||||
if(va_status != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: vaExportSurfaceHandle failed, error: %d\n", va_status);
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
vaSyncSurface(cap_xcomp->va_dpy, target_surface_id);
|
||||
|
||||
if(cap_xcomp->prime.fourcc == FOURCC_NV12) {
|
||||
cap_xcomp->params.egl->glGenTextures(2, cap_xcomp->target_textures);
|
||||
for(int i = 0; i < 2; ++i) {
|
||||
const uint32_t formats[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
|
||||
const int layer = i;
|
||||
const int plane = 0;
|
||||
|
||||
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
|
||||
//const uint64_t modifier = cap_kms->prime.objects[cap_kms->prime.layers[layer].object_index[plane]].drm_format_modifier;
|
||||
|
||||
const intptr_t img_attr[] = {
|
||||
EGL_LINUX_DRM_FOURCC_EXT, formats[i],
|
||||
EGL_WIDTH, cap_xcomp->prime.width / div[i],
|
||||
EGL_HEIGHT, cap_xcomp->prime.height / div[i],
|
||||
EGL_DMA_BUF_PLANE0_FD_EXT, cap_xcomp->prime.objects[cap_xcomp->prime.layers[layer].object_index[plane]].fd,
|
||||
EGL_DMA_BUF_PLANE0_OFFSET_EXT, cap_xcomp->prime.layers[layer].offset[plane],
|
||||
EGL_DMA_BUF_PLANE0_PITCH_EXT, cap_xcomp->prime.layers[layer].pitch[plane],
|
||||
// TODO:
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
|
||||
//EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
|
||||
EGL_NONE
|
||||
};
|
||||
|
||||
while(cap_xcomp->params.egl->eglGetError() != EGL_SUCCESS){}
|
||||
EGLImage image = cap_xcomp->params.egl->eglCreateImage(cap_xcomp->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
|
||||
if(!image) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: failed to create egl image from drm fd for output drm fd, error: %d\n", cap_xcomp->params.egl->eglGetError());
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
|
||||
cap_xcomp->params.egl->glBindTexture(GL_TEXTURE_2D, cap_xcomp->target_textures[i]);
|
||||
cap_xcomp->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
cap_xcomp->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
cap_xcomp->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
|
||||
|
||||
while(cap_xcomp->params.egl->glGetError()) {}
|
||||
while(cap_xcomp->params.egl->eglGetError() != EGL_SUCCESS){}
|
||||
cap_xcomp->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
if(cap_xcomp->params.egl->glGetError() != 0 || cap_xcomp->params.egl->eglGetError() != EGL_SUCCESS) {
|
||||
// TODO: Get the error properly
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: failed to bind egl image to gl texture, error: %d\n", cap_xcomp->params.egl->eglGetError());
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
cap_xcomp->params.egl->eglDestroyImage(cap_xcomp->params.egl->egl_display, image);
|
||||
cap_xcomp->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
cap_xcomp->params.egl->eglDestroyImage(cap_xcomp->params.egl->egl_display, image);
|
||||
cap_xcomp->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
|
||||
}
|
||||
|
||||
gsr_color_conversion_params color_conversion_params = {0};
|
||||
color_conversion_params.color_range = cap_xcomp->params.color_range;
|
||||
color_conversion_params.egl = cap_xcomp->params.egl;
|
||||
color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
|
||||
color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
|
||||
|
||||
color_conversion_params.destination_textures[0] = cap_xcomp->target_textures[0];
|
||||
color_conversion_params.destination_textures[1] = cap_xcomp->target_textures[1];
|
||||
color_conversion_params.num_destination_textures = 2;
|
||||
|
||||
if(gsr_color_conversion_init(&cap_xcomp->color_conversion, &color_conversion_params) != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: failed to create color conversion\n");
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: unexpected fourcc %u for output drm fd, expected nv12\n", cap_xcomp->prime.fourcc);
|
||||
cap_xcomp->should_stop = true;
|
||||
cap_xcomp->stop_is_error = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
gsr_color_conversion_clear(&cap_xcomp->color_conversion);
|
||||
gsr_color_conversion_clear(&cap_xcomp->base.color_conversion);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -443,7 +323,7 @@ static int gsr_capture_xcomposite_vaapi_capture(gsr_capture *cap, AVFrame *frame
|
||||
const int target_x = max_int(0, frame->width / 2 - cap_xcomp->texture_size.x / 2);
|
||||
const int target_y = max_int(0, frame->height / 2 - cap_xcomp->texture_size.y / 2);
|
||||
|
||||
gsr_color_conversion_draw(&cap_xcomp->color_conversion, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture),
|
||||
gsr_color_conversion_draw(&cap_xcomp->base.color_conversion, window_texture_get_opengl_texture_id(&cap_xcomp->window_texture),
|
||||
(vec2i){target_x, target_y}, cap_xcomp->texture_size,
|
||||
(vec2i){0, 0}, cap_xcomp->texture_size,
|
||||
0.0f, false);
|
||||
@@ -458,8 +338,6 @@ static int gsr_capture_xcomposite_vaapi_capture(gsr_capture *cap, AVFrame *frame
|
||||
static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
|
||||
|
||||
gsr_color_conversion_deinit(&cap_xcomp->color_conversion);
|
||||
|
||||
for(uint32_t i = 0; i < cap_xcomp->prime.num_objects; ++i) {
|
||||
if(cap_xcomp->prime.objects[i].fd > 0) {
|
||||
close(cap_xcomp->prime.objects[i].fd);
|
||||
@@ -467,18 +345,14 @@ static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *
|
||||
}
|
||||
}
|
||||
|
||||
if(cap_xcomp->params.egl->egl_context) {
|
||||
cap_xcomp->params.egl->glDeleteTextures(2, cap_xcomp->target_textures);
|
||||
cap_xcomp->target_textures[0] = 0;
|
||||
cap_xcomp->target_textures[1] = 0;
|
||||
}
|
||||
|
||||
window_texture_deinit(&cap_xcomp->window_texture);
|
||||
|
||||
if(video_codec_context->hw_device_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_device_ctx);
|
||||
if(video_codec_context->hw_frames_ctx)
|
||||
av_buffer_unref(&video_codec_context->hw_frames_ctx);
|
||||
|
||||
gsr_capture_base_stop(&cap_xcomp->base, cap_xcomp->params.egl);
|
||||
}
|
||||
|
||||
static void gsr_capture_xcomposite_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#include "../include/color_conversion.h"
|
||||
#include "../include/egl.h"
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
@@ -28,8 +28,9 @@ bool gsr_cuda_load(gsr_cuda *self, Display *display, bool do_overclock) {
|
||||
{ (void**)&self->cuCtxPushCurrent_v2, "cuCtxPushCurrent_v2" },
|
||||
{ (void**)&self->cuCtxPopCurrent_v2, "cuCtxPopCurrent_v2" },
|
||||
{ (void**)&self->cuGetErrorString, "cuGetErrorString" },
|
||||
{ (void**)&self->cuMemsetD8_v2, "cuMemsetD8_v2" },
|
||||
{ (void**)&self->cuMemcpy2D_v2, "cuMemcpy2D_v2" },
|
||||
{ (void**)&self->cuMemcpy2DAsync_v2, "cuMemcpy2DAsync_v2" },
|
||||
{ (void**)&self->cuStreamSynchronize, "cuStreamSynchronize" },
|
||||
|
||||
{ (void**)&self->cuGraphicsGLRegisterImage, "cuGraphicsGLRegisterImage" },
|
||||
{ (void**)&self->cuGraphicsEGLRegisterImage, "cuGraphicsEGLRegisterImage" },
|
||||
|
||||
64
src/main.cpp
64
src/main.cpp
@@ -34,7 +34,6 @@ extern "C" {
|
||||
#include <libswresample/swresample.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/time.h>
|
||||
#include <libavutil/mastering_display_metadata.h>
|
||||
#include <libavfilter/avfilter.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
@@ -362,9 +361,9 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
|
||||
codec_context->color_trc = AVCOL_TRC_SMPTE2084;
|
||||
codec_context->colorspace = AVCOL_SPC_BT2020_NCL;
|
||||
} else {
|
||||
//codec_context->color_primaries = AVCOL_PRI_BT709;
|
||||
//codec_context->color_trc = AVCOL_TRC_BT709;
|
||||
//codec_context->colorspace = AVCOL_SPC_BT709;
|
||||
codec_context->color_primaries = AVCOL_PRI_BT709;
|
||||
codec_context->color_trc = AVCOL_TRC_BT709;
|
||||
codec_context->colorspace = AVCOL_SPC_BT709;
|
||||
}
|
||||
//codec_context->chroma_sample_location = AVCHROMA_LOC_CENTER;
|
||||
if(codec->id == AV_CODEC_ID_HEVC)
|
||||
@@ -727,6 +726,11 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
|
||||
} else {
|
||||
//av_dict_set(&options, "profile", "main10", 0);
|
||||
//av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
|
||||
if(hdr) {
|
||||
av_dict_set(&options, "profile", "main10", 0);
|
||||
} else {
|
||||
av_dict_set(&options, "profile", "main", 0);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
|
||||
@@ -1476,6 +1480,7 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
|
||||
kms_params.display_to_capture = window_str;
|
||||
kms_params.gpu_inf = gpu_inf;
|
||||
kms_params.hdr = video_codec_is_hdr(video_codec);
|
||||
kms_params.color_range = color_range;
|
||||
capture = gsr_capture_kms_cuda_create(&kms_params);
|
||||
if(!capture)
|
||||
_exit(1);
|
||||
@@ -1512,7 +1517,6 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
|
||||
kms_params.egl = &egl;
|
||||
kms_params.display_to_capture = window_str;
|
||||
kms_params.gpu_inf = gpu_inf;
|
||||
kms_params.wayland = wayland;
|
||||
kms_params.hdr = video_codec_is_hdr(video_codec);
|
||||
kms_params.color_range = color_range;
|
||||
capture = gsr_capture_kms_vaapi_create(&kms_params);
|
||||
@@ -2159,7 +2163,21 @@ int main(int argc, char **argv) {
|
||||
if(replay_buffer_size_secs == -1)
|
||||
video_stream = create_stream(av_format_context, video_codec_context);
|
||||
|
||||
int capture_result = gsr_capture_start(capture, video_codec_context);
|
||||
AVFrame *video_frame = av_frame_alloc();
|
||||
if(!video_frame) {
|
||||
fprintf(stderr, "Error: Failed to allocate video frame\n");
|
||||
_exit(1);
|
||||
}
|
||||
video_frame->format = video_codec_context->pix_fmt;
|
||||
video_frame->width = video_codec_context->width;
|
||||
video_frame->height = video_codec_context->height;
|
||||
video_frame->color_range = video_codec_context->color_range;
|
||||
video_frame->color_primaries = video_codec_context->color_primaries;
|
||||
video_frame->color_trc = video_codec_context->color_trc;
|
||||
video_frame->colorspace = video_codec_context->colorspace;
|
||||
video_frame->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
int capture_result = gsr_capture_start(capture, video_codec_context, video_frame);
|
||||
if(capture_result != 0) {
|
||||
fprintf(stderr, "gsr error: gsr_capture_start failed\n");
|
||||
_exit(capture_result);
|
||||
@@ -2275,21 +2293,6 @@ int main(int argc, char **argv) {
|
||||
double paused_time_offset = 0.0;
|
||||
double paused_time_start = 0.0;
|
||||
|
||||
// TODO: Remove?
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error: Failed to allocate frame\n");
|
||||
_exit(1);
|
||||
}
|
||||
frame->format = video_codec_context->pix_fmt;
|
||||
frame->width = video_codec_context->width;
|
||||
frame->height = video_codec_context->height;
|
||||
frame->color_range = video_codec_context->color_range;
|
||||
frame->color_primaries = video_codec_context->color_primaries;
|
||||
frame->color_trc = video_codec_context->color_trc;
|
||||
frame->colorspace = video_codec_context->colorspace;
|
||||
frame->chroma_location = video_codec_context->chroma_sample_location;
|
||||
|
||||
std::mutex write_output_mutex;
|
||||
std::mutex audio_filter_mutex;
|
||||
|
||||
@@ -2452,7 +2455,7 @@ int main(int argc, char **argv) {
|
||||
while(running) {
|
||||
double frame_start = clock_get_monotonic_seconds();
|
||||
|
||||
gsr_capture_tick(capture, video_codec_context, &frame);
|
||||
gsr_capture_tick(capture, video_codec_context);
|
||||
should_stop_error = false;
|
||||
if(gsr_capture_should_stop(capture, &should_stop_error)) {
|
||||
running = 0;
|
||||
@@ -2503,31 +2506,31 @@ int main(int argc, char **argv) {
|
||||
const int num_frames = framerate_mode == FramerateMode::CONSTANT ? std::max((int64_t)0LL, expected_frames - video_pts_counter) : 1;
|
||||
|
||||
if(num_frames > 0 && !paused) {
|
||||
gsr_capture_capture(capture, frame);
|
||||
gsr_capture_capture(capture, video_frame);
|
||||
|
||||
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
|
||||
for(int i = 0; i < num_frames; ++i) {
|
||||
if(framerate_mode == FramerateMode::CONSTANT) {
|
||||
frame->pts = video_pts_counter + i;
|
||||
video_frame->pts = video_pts_counter + i;
|
||||
} else {
|
||||
frame->pts = (this_video_frame_time - record_start_time) * (double)AV_TIME_BASE;
|
||||
const bool same_pts = frame->pts == video_prev_pts;
|
||||
video_prev_pts = frame->pts;
|
||||
video_frame->pts = (this_video_frame_time - record_start_time) * (double)AV_TIME_BASE;
|
||||
const bool same_pts = video_frame->pts == video_prev_pts;
|
||||
video_prev_pts = video_frame->pts;
|
||||
if(same_pts)
|
||||
continue;
|
||||
}
|
||||
|
||||
int ret = avcodec_send_frame(video_codec_context, frame);
|
||||
int ret = avcodec_send_frame(video_codec_context, video_frame);
|
||||
if(ret == 0) {
|
||||
// TODO: Move to separate thread because this could write to network (for example when livestreaming)
|
||||
receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, frame->pts, av_format_context,
|
||||
receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, video_frame->pts, av_format_context,
|
||||
record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
|
||||
} else {
|
||||
fprintf(stderr, "Error: avcodec_send_frame failed, error: %s\n", av_error_to_string(ret));
|
||||
}
|
||||
}
|
||||
|
||||
gsr_capture_end(capture, frame);
|
||||
gsr_capture_end(capture, video_frame);
|
||||
video_pts_counter += num_frames;
|
||||
}
|
||||
}
|
||||
@@ -2606,6 +2609,7 @@ int main(int argc, char **argv) {
|
||||
//XCloseDisplay(dpy);
|
||||
}
|
||||
|
||||
//av_frame_free(&video_frame);
|
||||
free((void*)window_str);
|
||||
free(empty_audio);
|
||||
// We do an _exit here because cuda uses at_exit to do _something_ that causes the program to freeze,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#include "../include/shader.h"
|
||||
#include "../include/egl.h"
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
|
||||
|
||||
Reference in New Issue
Block a user