#include "../util/bmem.h"
#include "video-scaler.h"
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
/* Internal state for a libswscale-backed video scaler/format converter. */
struct video_scaler {
	struct SwsContext *swscale; /* libswscale conversion context */
	int src_height;             /* source frame height in pixels */
	int dst_heights[4];         /* per-plane destination heights (chroma
	                             * planes may be vertically subsampled) */
	uint8_t *dst_pointers[4];   /* destination plane buffers, allocated as
	                             * one region by av_image_alloc() */
	int dst_linesizes[4];       /* destination plane strides in bytes */
};
/*
 * Maps an OBS video_format to the equivalent FFmpeg AVPixelFormat.
 *
 * Returns AV_PIX_FMT_NONE for formats swscale cannot handle (e.g. AYUV);
 * the caller treats that as a bad-conversion error.  Newer pixel formats
 * are gated on LIBAVUTIL_BUILD so the file still compiles against older
 * libavutil versions that lack them.
 */
static inline enum AVPixelFormat
get_ffmpeg_video_format(enum video_format format)
{
	switch (format) {
	case VIDEO_FORMAT_I420:
		return AV_PIX_FMT_YUV420P;
	case VIDEO_FORMAT_NV12:
		return AV_PIX_FMT_NV12;
	case VIDEO_FORMAT_YUY2:
		return AV_PIX_FMT_YUYV422;
	case VIDEO_FORMAT_UYVY:
		return AV_PIX_FMT_UYVY422;
	case VIDEO_FORMAT_YVYU:
		return AV_PIX_FMT_YVYU422;
	case VIDEO_FORMAT_RGBA:
		return AV_PIX_FMT_RGBA;
	case VIDEO_FORMAT_BGRA:
		return AV_PIX_FMT_BGRA;
	case VIDEO_FORMAT_BGRX:
		/* FFmpeg's BGRA layout is reused for BGRX; the alpha byte is
		 * simply ignored on the X channel. */
		return AV_PIX_FMT_BGRA;
	case VIDEO_FORMAT_Y800:
		return AV_PIX_FMT_GRAY8;
	case VIDEO_FORMAT_I444:
		return AV_PIX_FMT_YUV444P;
	case VIDEO_FORMAT_I412:
		return AV_PIX_FMT_YUV444P12LE;
	case VIDEO_FORMAT_BGR3:
		return AV_PIX_FMT_BGR24;
	case VIDEO_FORMAT_I422:
		return AV_PIX_FMT_YUV422P;
	case VIDEO_FORMAT_I210:
		return AV_PIX_FMT_YUV422P10LE;
	case VIDEO_FORMAT_I40A:
		return AV_PIX_FMT_YUVA420P;
	case VIDEO_FORMAT_I42A:
		return AV_PIX_FMT_YUVA422P;
	case VIDEO_FORMAT_YUVA:
		return AV_PIX_FMT_YUVA444P;
#if LIBAVUTIL_BUILD >= AV_VERSION_INT(56, 31, 100)
	case VIDEO_FORMAT_YA2L:
		return AV_PIX_FMT_YUVA444P12LE;
#endif
	case VIDEO_FORMAT_I010:
		return AV_PIX_FMT_YUV420P10LE;
	case VIDEO_FORMAT_P010:
		return AV_PIX_FMT_P010LE;
#if LIBAVUTIL_BUILD >= AV_VERSION_INT(57, 17, 100)
	case VIDEO_FORMAT_P216:
		return AV_PIX_FMT_P216LE;
	case VIDEO_FORMAT_P416:
		return AV_PIX_FMT_P416LE;
#endif
	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_AYUV:
	default:
		/* No swscale equivalent available */
		return AV_PIX_FMT_NONE;
	}
}
/*
 * Maps an OBS scale type to the corresponding libswscale algorithm flag.
 *
 * The SWS_* algorithm flags (SWS_POINT, SWS_BILINEAR, SWS_AREA, ...) are
 * mutually exclusive selectors, not combinable bit options; exactly one
 * must be set.  Falls back to SWS_POINT for unknown values.
 */
static inline int get_ffmpeg_scale_type(enum video_scale_type type)
{
	switch (type) {
	case VIDEO_SCALE_DEFAULT:
		return SWS_FAST_BILINEAR;
	case VIDEO_SCALE_POINT:
		return SWS_POINT;
	case VIDEO_SCALE_FAST_BILINEAR:
		return SWS_FAST_BILINEAR;
	case VIDEO_SCALE_BILINEAR:
		/* Fix: previously returned SWS_BILINEAR | SWS_AREA, which
		 * requests two different algorithms at once — swscale then
		 * picks one of them by internal priority, so the selected
		 * scaler was effectively unspecified. */
		return SWS_BILINEAR;
	case VIDEO_SCALE_BICUBIC:
		return SWS_BICUBIC;
	}
	return SWS_POINT;
}
/*
 * Returns the libswscale YUV<->RGB coefficient table for an OBS colorspace.
 * Unknown/default colorspaces resolve to BT.709, matching the original
 * fall-through behavior.
 */
static inline const int *get_ffmpeg_coeffs(enum video_colorspace cs)
{
	switch (cs) {
	case VIDEO_CS_601:
		return sws_getCoefficients(SWS_CS_ITU601);
	case VIDEO_CS_2100_PQ:
	case VIDEO_CS_2100_HLG:
		return sws_getCoefficients(SWS_CS_BT2020);
	case VIDEO_CS_DEFAULT:
	case VIDEO_CS_709:
	case VIDEO_CS_SRGB:
	default:
		return sws_getCoefficients(SWS_CS_ITU709);
	}
}
/*
 * Converts an OBS range type to the swscale range flag:
 * 1 for full range, 0 for partial/default (and any unknown value).
 */
static inline int get_ffmpeg_range_type(enum video_range_type type)
{
	return (type == VIDEO_RANGE_FULL) ? 1 : 0;
}
#define FIXED_1_0 (1 << 16)
/*
 * Creates a scaler/converter that rescales from *src to *dst geometry,
 * pixel format, colorspace and range.
 *
 * On success stores the new scaler in *scaler_out and returns
 * VIDEO_SCALER_SUCCESS.  Returns VIDEO_SCALER_BAD_CONVERSION when either
 * pixel format has no FFmpeg equivalent, VIDEO_SCALER_FAILED otherwise.
 * src and dst are assumed non-NULL (dereferenced before any checks) —
 * NOTE(review): confirm callers guarantee this.
 */
int video_scaler_create(video_scaler_t **scaler_out,
			const struct video_scale_info *dst,
			const struct video_scale_info *src,
			enum video_scale_type type)
{
	enum AVPixelFormat format_src = get_ffmpeg_video_format(src->format);
	enum AVPixelFormat format_dst = get_ffmpeg_video_format(dst->format);
	int scale_type = get_ffmpeg_scale_type(type);
	const int *coeff_src = get_ffmpeg_coeffs(src->colorspace);
	const int *coeff_dst = get_ffmpeg_coeffs(dst->colorspace);
	int range_src = get_ffmpeg_range_type(src->range);
	int range_dst = get_ffmpeg_range_type(dst->range);
	struct video_scaler *scaler;
	int ret;

	if (!scaler_out)
		return VIDEO_SCALER_FAILED;

	/* Either side unsupported by swscale -> distinct error code */
	if (format_src == AV_PIX_FMT_NONE || format_dst == AV_PIX_FMT_NONE)
		return VIDEO_SCALER_BAD_CONVERSION;

	/* bzalloc zero-fills, so dst_pointers[0] stays NULL until
	 * av_image_alloc succeeds — video_scaler_destroy relies on that
	 * on the failure paths below. */
	scaler = bzalloc(sizeof(struct video_scaler));
	scaler->src_height = src->height;

	/* Work out which planes the destination format actually uses, then
	 * record each plane's height (chroma planes are shifted down by
	 * log2_chroma_h for subsampled formats).  comp[] entries beyond
	 * nb_components are zeroed in FFmpeg's descriptor tables, so the
	 * unconditional 4-iteration loop only re-marks plane 0 — presumed
	 * intentional; verify against av_pix_fmt_desc_get docs. */
	const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format_dst);
	bool has_plane[4] = {0};
	for (size_t i = 0; i < 4; i++)
		has_plane[desc->comp[i].plane] = 1;

	scaler->dst_heights[0] = dst->height;
	for (size_t i = 1; i < 4; ++i) {
		if (has_plane[i]) {
			/* Only planes 1 and 2 (chroma) are subsampled */
			const int s = (i == 1 || i == 2) ? desc->log2_chroma_h
							 : 0;
			scaler->dst_heights[i] = dst->height >> s;
		}
	}

	/* Allocate all destination planes as one 32-byte-aligned region */
	ret = av_image_alloc(scaler->dst_pointers, scaler->dst_linesizes,
			     dst->width, dst->height, format_dst, 32);
	if (ret < 0) {
		blog(LOG_WARNING,
		     "video_scaler_create: av_image_alloc failed: %d", ret);
		goto fail;
	}

	/* Configure the context via av_opt rather than sws_getContext so
	 * that src/dst range can be set before initialization. */
	scaler->swscale = sws_alloc_context();
	if (!scaler->swscale) {
		blog(LOG_ERROR,
		     "video_scaler_create: Could not create swscale");
		goto fail;
	}

	av_opt_set_int(scaler->swscale, "sws_flags", scale_type, 0);
	av_opt_set_int(scaler->swscale, "srcw", src->width, 0);
	av_opt_set_int(scaler->swscale, "srch", src->height, 0);
	av_opt_set_int(scaler->swscale, "dstw", dst->width, 0);
	av_opt_set_int(scaler->swscale, "dsth", dst->height, 0);
	av_opt_set_int(scaler->swscale, "src_format", format_src, 0);
	av_opt_set_int(scaler->swscale, "dst_format", format_dst, 0);
	av_opt_set_int(scaler->swscale, "src_range", range_src, 0);
	av_opt_set_int(scaler->swscale, "dst_range", range_dst, 0);

	if (sws_init_context(scaler->swscale, NULL, NULL) < 0) {
		blog(LOG_ERROR, "video_scaler_create: sws_init_context failed");
		goto fail;
	}

	/* Apply colorspace coefficients; neutral brightness (0) and unit
	 * contrast/saturation in 16.16 fixed point.  Failure here (e.g. for
	 * conversions swscale handles without colorspace details) is
	 * deliberately non-fatal. */
	ret = sws_setColorspaceDetails(scaler->swscale, coeff_src, range_src,
				       coeff_dst, range_dst, 0, FIXED_1_0,
				       FIXED_1_0);
	if (ret < 0) {
		blog(LOG_DEBUG,
		     "video_scaler_create: sws_setColorspaceDetails failed, ignoring");
	}

	*scaler_out = scaler;
	return VIDEO_SCALER_SUCCESS;

fail:
	/* Releases whatever was acquired so far (context and/or planes) */
	video_scaler_destroy(scaler);
	return VIDEO_SCALER_FAILED;
}
/*
 * Frees a scaler created by video_scaler_create().
 * Safe to call with NULL.
 */
void video_scaler_destroy(video_scaler_t *scaler)
{
	if (!scaler)
		return;

	sws_freeContext(scaler->swscale);
	if (scaler->dst_pointers[0])
		av_freep(scaler->dst_pointers);
	bfree(scaler);
}
/*
 * Scales a tightly-packed single-plane 8-bit image (one byte per pixel,
 * stride == width) using nearest-neighbor sampling.
 *
 * src_width/src_height/src_data describe the source image;
 * dst_width/dst_height/dst_data the destination buffer, which must hold
 * at least dst_width * dst_height bytes.
 *
 * Returns 0 on success, -1 on invalid arguments (NULL buffer or a
 * non-positive dimension).  Previously a zero destination dimension
 * caused a float division by zero before the loops were entered.
 */
int video_scaler_scale(int src_width, int src_height, const uint8_t *src_data,
		       int dst_width, int dst_height, uint8_t *dst_data,
		       int scaling_algo)
{
	/* Only nearest-neighbor is implemented; the selector is currently
	 * ignored.  NOTE(review): confirm whether callers expect other
	 * scaling_algo values to be honored. */
	(void)scaling_algo;

	if (!src_data || !dst_data || src_width <= 0 || src_height <= 0 ||
	    dst_width <= 0 || dst_height <= 0)
		return -1;

	const float scale_x = (float)src_width / dst_width;
	const float scale_y = (float)src_height / dst_height;

	for (int y = 0; y < dst_height; ++y) {
		/* The source row depends only on y — hoisted out of the
		 * inner loop.  Rounding can land on src_height, so clamp;
		 * inputs are validated positive, so no low clamp is needed. */
		int ny = (int)(y * scale_y + 0.5f);
		if (ny >= src_height)
			ny = src_height - 1;

		const uint8_t *src_row = src_data + (size_t)ny * src_width;
		uint8_t *dst_row = dst_data + (size_t)y * dst_width;

		for (int x = 0; x < dst_width; ++x) {
			int nx = (int)(x * scale_x + 0.5f);
			if (nx >= src_width)
				nx = src_width - 1;
			dst_row[x] = src_row[nx];
		}
	}

	return 0;
}