通过在通道画面上拾取鼠标按下的坐标,然后鼠标移动,直到松开,根据松开的坐标和按下的坐标,绘制一个矩形区域,作为热点或者需要电子放大的区域。拿到这个坐标区域,用途非常多:可以直接将区域中的画面放大,也可以将圈起来的区域位置发给设备,由设备设定对应的热点区域作为集中观察点,用于人工智能分析。比如出现在该区域的人脸,可以判定为入侵;该区域内的画面被改动过,判定为物体非法挪动等。各种各样的分析算法应用上来,就可以做出非常多的检测效果。这些都有个前提,那就是用户能够在视频画面中自由地选择自己需要的区域,这就是要实现的功能。
采集到的视频数据,在UI界面上,可能是拉伸填充显示的,也可能是等比例缩放显示的。最重要的是,显示的窗体几乎不可能刚好和视频分辨率大小一样,所以这就涉及到一个转换关系:根据窗体的尺寸和视频的尺寸,把当前鼠标按下的坐标换算成视频对应的坐标,换算公式是:视频X = 坐标X / 窗体宽度 * 视频宽度,视频Y = 坐标Y / 窗体高度 * 视频高度。所以在视频窗体控件上识别鼠标按下/鼠标移动/鼠标松开事件进行处理即可,最后发送信号出去,带上类型(鼠标按下/鼠标移动/鼠标松开)和QPoint坐标。为什么要带上类型呢?方便用户处理,比如识别到用户按下就记住坐标,移动的时候绘制方框,结束的时候发送滤镜执行裁剪也就是电子放大操作。
void VideoWidget::btnClicked(const QString &btnName)
{
QString flag = widgetPara.videoFlag;
QString name = STRDATETIMEMS;
if (!flag.isEmpty()) {
name = QString("%1_%2").arg(flag).arg(name);
}
if (btnName.endsWith("btnRecord")) {
QString fileName = QString("%1/%2.mp4").arg(recordPath).arg(name);
this->recordStart(fileName);
} else if (btnName.endsWith("btnStop")) {
this->recordStop();
} else if (btnName.endsWith("btnSound")) {
this->setMuted(true);
} else if (btnName.endsWith("btnMuted")) {
this->setMuted(false);
} else if (btnName.endsWith("btnSnap")) {
QString snapName = QString("%1/%2.jpg").arg(snapPath).arg(name);
this->snap(snapName, false);
} else if (btnName.endsWith("btnCrop")) {
if (videoThread) {
if (videoPara.videoCore == VideoCore_FFmpeg) {
QMetaObject::invokeMethod(videoThread, "setCrop", Q_ARG(bool, true));
}
}
} else if (btnName.endsWith("btnReset")) {
if (videoThread) {
this->removeGraph("crop");
if (videoPara.videoCore == VideoCore_FFmpeg) {
QMetaObject::invokeMethod(videoThread, "setCrop", Q_ARG(bool, false));
}
}
} else if (btnName.endsWith("btnAlarm")) {
} else if (btnName.endsWith("btnClose")) {
this->stop();
}
}
void AbstractVideoWidget::appendGraph(const GraphInfo &graph)
{
QMutexLocker locker(&mutex);
listGraph << graph;
this->update();
emit sig_graphChanged();
}
void AbstractVideoWidget::removeGraph(const QString &name)
{
QMutexLocker locker(&mutex);
int count = listGraph.count();
for (int i = 0; i < count; ++i) {
if (listGraph.at(i).name == name) {
listGraph.removeAt(i);
break;
}
}
this->update();
emit sig_graphChanged();
}
void AbstractVideoWidget::clearGraph()
{
QMutexLocker locker(&mutex);
listGraph.clear();
this->update();
emit sig_graphChanged();
}
//Build the FFmpeg filter string for one graph item.
//Example output: drawbox=x=10:y=10:w=100:h=100:c=#ffffff@1:t=2
//Returns an empty string when no filter should be applied.
QString FilterHelper::getFilter(const GraphInfo &graph, bool hardware)
{
    //Known issue: graphic filters under hardware decoding distort the frame
    //colors, so in that mode no filter is produced at all.
    if (hardware) {
        return QString();
    }

    //Only rectangular regions are implemented for now.
    const QRect area = graph.rect;
    if (area.isEmpty()) {
        return QString();
    }

    //The reserved name "crop" means electronic zoom: emit a crop filter.
    if (graph.name == "crop") {
        return QString("crop=%1:%2:%3:%4").arg(area.width()).arg(area.height()).arg(area.x()).arg(area.y());
    }

    //Anything else becomes a drawbox rectangle.
    const QColor pen = graph.borderColor;
    QStringList args;
    args << QString("x=%1").arg(area.x())
         << QString("y=%1").arg(area.y())
         << QString("w=%1").arg(area.width())
         << QString("h=%1").arg(area.height())
         << QString("c=%1@%2").arg(pen.name()).arg(pen.alphaF());
    //Transparent background: draw only the border at the given thickness;
    //otherwise fill the whole rectangle with the color.
    if (graph.bgColor == Qt::transparent) {
        args << QString("t=%1").arg(graph.borderWidth);
    } else {
        args << QString("t=%1").arg("fill");
    }
    return QString("drawbox=%1").arg(args.join(":"));
}
//Combine individual filter strings into one FFmpeg filtergraph string.
//Image (movie/overlay) filters need pad labels rewritten so several images
//can be chained; all other filters are simply joined with ','.
QString FilterHelper::getFilters(const QStringList &listFilter)
{
    //Split the input into movie sources, their overlay parts, and the rest.
    //The placeholder "[wm]"/"[in]"/"[out]" labels are stripped here and
    //replaced with indexed labels below.
    QStringList movies, overlays, others;
    foreach (const QString &item, listFilter) {
        if (!item.startsWith("movie=")) {
            others << item;
            continue;
        }
        const QStringList parts = item.split(";");
        QString movie = parts.first();
        QString overlay = parts.last();
        movie.replace("[wm]", "");
        overlay.replace("[wm]", "");
        overlay.replace("[in]", "");
        overlay.replace("[out]", "");
        movies << movie;
        overlays << overlay;
    }

    const QString filterOther = others.join(",");

    //No image watermark: the joined plain filters are the whole result.
    const int imageCount = movies.count();
    if (imageCount == 0) {
        return filterOther;
    }

    //Image watermarks require a relabelled chain, e.g.:
    //1 image: movie=./osd.png[wm0];[in][wm0]overlay=0:0[out]
    //2 images: movie=./osd.png[wm0];movie=./osd.png[wm1];[in][wm0]overlay=0:0[a];[a][wm1]overlay=0:0[out]
    //3 images: movie=./osd.png[wm0];movie=./osd.png[wm1];movie=./osd.png[wm2];[in][wm0]overlay=0:0[a0];[a0][wm1]overlay=0:0[a1];[a1][wm2]overlay=0:0[out]
    QStringList chain;
    //All movie sources first, each tagged with its [wmN] label.
    for (int i = 0; i < imageCount; ++i) {
        chain << movies.at(i) + QString("[wm%1]").arg(i);
    }
    //Then the overlays: each consumes the previous stage ([in] or [aN-1])
    //plus its [wmN] pad and produces the next stage ([aN] or [out]).
    for (int i = 0; i < imageCount; ++i) {
        const QString input = (i == 0) ? QString("[in]") : QString("[a%1]").arg(i - 1);
        const QString output = (i == imageCount - 1) ? QString("[out]") : QString("[a%1]").arg(i);
        chain << input + QString("[wm%1]").arg(i) + overlays.at(i) + output;
    }
    QString filterImage = chain.join(";");

    //Other filters run first and feed the image chain through [other].
    if (others.count() > 0) {
        filterImage.replace("[in]", "[other]");
        return "[in]" + filterOther + "[other];" + filterImage;
    }
    return filterImage;
}
//Collect the filter string for every osd label, every graph shape and any
//extra global filter, skipping empty results.
//noimage/hardware are forwarded to the per-item getFilter overloads.
QStringList FilterHelper::getFilters(const QList<OsdInfo> &listOsd, const QList<GraphInfo> &listGraph, bool noimage, bool hardware)
{
    QStringList listFilter;
    //Osd labels. Iterate by const reference: the original copied each
    //OsdInfo/GraphInfo struct per iteration for no benefit.
    foreach (const OsdInfo &osd, listOsd) {
        const QString filter = FilterHelper::getFilter(osd, noimage);
        if (!filter.isEmpty()) {
            listFilter << filter;
        }
    }
    //Graph shapes (rectangles for hotspots / electronic zoom).
    foreach (const GraphInfo &graph, listGraph) {
        const QString filter = FilterHelper::getFilter(graph, hardware);
        if (!filter.isEmpty()) {
            listFilter << filter;
        }
    }
    //Any additional global filter.
    const QString filter = FilterHelper::getFilter();
    if (!filter.isEmpty()) {
        listFilter << filter;
    }
    return listFilter;
}