Merge branch 'master' of https://gitee.com/290198252/multimedia

# Conflicts:
#	client/webrtc_capture/src/camera_video_sink.cpp
#	client/webrtc_capture/src/mainwindow.cpp

commit 8ba473edb2
@@ -40,3 +40,4 @@ client/janus_gateway_win/x64/
client/janus_gateway_win/janus_win/x64/
client/janus_gateway_win/.vs/
client/janus_gateway_win/usocket_test/x64/
client/build-webrtc_capture-Desktop_Qt_5_15_2_MSVC2019_64bit-Debug/
@@ -76,12 +76,13 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
        std::chrono::system_clock::now().time_since_epoch()).count();
    static size_t cnt = 0;

    qDebug()<<int(frame.video_frame_buffer()->type());
    // qDebug()<<int(frame.video_frame_buffer()->type());
    rtc::scoped_refptr<webrtc::I420BufferInterface> frameBuffer =
        frame.video_frame_buffer()->ToI420();
    cnt++;
    int width = this->m_capability.width;
    int height = this->m_capability.height;
<<<<<<< HEAD
    webrtc::VideoCaptureCapability cap;
    int ret = this->Capability(cap);
    if(ret < 0)
@@ -94,6 +95,11 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
    uint8_t *data =
        new uint8_t[width * height*4];
    qDebug()<<width*height<<int(frameBuffer->GetI420()->type());
=======

    uint8_t *data = new uint8_t[width * height*4];
    // qDebug()<<width*height<<int(frameBuffer->GetI420()->type());
>>>>>>> ceaccb7db090d07a75d14c43e07a96773ac1e9a6
    memcpy(data,frameBuffer->GetI420()->DataY(),width*height);
    memcpy(data + width*height ,frameBuffer->GetI420()->DataU(),
        width*height/4);
@@ -102,9 +108,11 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
        width*height/4);

    m_buf.PushFirst(data);

    auto timestamp_curr = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch()).count();
    if(timestamp_curr - timestamp > 1000) {
        qDebug()<<timestamp;
        RTC_LOG(LS_INFO) << "FPS: " << cnt<<m_buf.Size();
        cnt = 0;
        timestamp = timestamp_curr;
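The plane copies above pack each frame into one buffer in I420 order: the full-resolution Y plane first, then the quarter-size U and V planes (note the buffer is allocated as width*height*4 bytes although only width*height*3/2 are written). A minimal sketch of that layout arithmetic, for reference:

    // Sketch: byte offsets inside the packed I420 buffer that OnFrame fills.
    // Y is full resolution; U and V are subsampled 2x in both directions.
    #include <cstddef>

    struct I420Layout { size_t yOff, uOff, vOff, total; };

    I420Layout i420Layout(size_t width, size_t height) {
        I420Layout l;
        l.yOff  = 0;                                   // Y plane: width*height bytes
        l.uOff  = width * height;                      // U plane: width*height/4 bytes
        l.vOff  = width * height + width * height / 4; // V plane: width*height/4 bytes
        l.total = width * height * 3 / 2;              // 1.5 bytes per pixel overall
        return l;
    }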
@@ -2,8 +2,6 @@
#define CAMERA_VIDEO_SINK_H

// vcm_capturer_test.h

#include <memory>
#include <QObject>
#include "modules/video_capture/video_capture.h"
@@ -1,5 +1,3 @@
#include "CPlayWidget.h"

#include <QOpenGLTexture>
#include <QOpenGLBuffer>
#include <QMouseEvent>
@@ -61,10 +59,6 @@ void main(void) \
    gl_FragColor = vec4(rgb, 1); \
}";

// RGB fragment shader.
// Note: MEDIASUBTYPE_RGB32 frames actually arrive in BGR order, so the channels may need swapping.

const char *fsrcrgb = "varying vec2 textureOut; \
    uniform sampler2D rgbdata; \
    void main() \
@@ -82,7 +76,6 @@ void CPlayWidget::OnPaintData(const rtc::scoped_refptr<webrtc::I420BufferInterfa
    qDebug("CPlayWidget::PlayOneFrame new data memory. Len=%d width=%d height=%d\n",
           len, m_nVideoW, m_nVideoH);
    memcpy(m_pBufYuv420p, data,len);
    // refresh the widget, which triggers a paintGL() call
    update();
}
*/
@@ -90,6 +83,28 @@ void CPlayWidget::OnPaintData(const rtc::scoped_refptr<webrtc::I420BufferInterfa
    update();
}

CPlayWidget::CPlayWidget(QWidget *parent, IMG_TYPE type):QOpenGLWidget(parent)
{
    textureUniformY = 0;
    textureUniformU = 0;
    textureUniformV = 0;
    id_y = 0;
    id_u = 0;
    id_v = 0;
    m_pTextureRGB = nullptr;
    m_pBufYuv420p = nullptr;
    m_pVSHader = NULL;
    m_pFSHader = NULL;
    m_pShaderProgram = NULL;
    m_pTextureY = NULL;
    m_pTextureU = NULL;
    m_pTextureV = NULL;
    m_nVideoH = 0;
    m_nVideoW = 0;
    mType = type;
    m_start_render = false;
}

CPlayWidget::CPlayWidget(QWidget *parent):QOpenGLWidget(parent) {
    textureUniformY = 0;
    textureUniformU = 0;
@@ -107,11 +122,12 @@ CPlayWidget::CPlayWidget(QWidget *parent):QOpenGLWidget(parent) {
    m_pTextureV = NULL;
    m_nVideoH = 0;
    m_nVideoW = 0;
    mType = TYPE_I420;
    mType = TYPE_YUV420P;
    m_start_render = false;
}

CPlayWidget::~CPlayWidget() {

}

@@ -144,6 +160,8 @@ void CPlayWidget::OnCameraData( rtc::scoped_refptr<webrtc::I420BufferInterface>
    update();
}


int CPlayWidget::OnCameraData(uint8_t *p)
{
    memcpy(m_pBufYuv420p,p,m_nVideoH*m_nVideoW/2*3);
@@ -167,6 +185,16 @@ int CPlayWidget::SetImgSize(uint32_t width, uint32_t height)
    return 0;
}

int CPlayWidget::RenderWidth()
{
    return this->m_nVideoW;
}

int CPlayWidget::RenderHeight()
{
    return this->m_nVideoH;
}


/*
@@ -176,7 +204,7 @@ U = - 0.1687 R - 0.3313 G + 0.5 B + 128

V = 0.5 R - 0.4187 G - 0.0813 B + 128

Likewise, the formulas converting YUV (256 levels) back to RGB are:
RGB YUV (256)

R = Y + 1.402 (Cr-128)
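A worked sketch of these conversion formulas for a single pixel, with the standard BT.601 luma row added and clamping omitted for brevity:

    // One-pixel RGB <-> YCrCb conversion using the formulas quoted above.
    struct YUV { double y, u, v; };

    YUV rgb2yuv(double r, double g, double b) {
        YUV out;
        out.y =  0.299  * r + 0.587  * g + 0.114  * b;        // standard luma row
        out.u = -0.1687 * r - 0.3313 * g + 0.5    * b + 128;  // Cb
        out.v =  0.5    * r - 0.4187 * g - 0.0813 * b + 128;  // Cr
        return out;
    }

    // Inverse, per the quoted YUV (256-level) -> RGB formula:
    double yuv2r(double y, double cr) { return y + 1.402 * (cr - 128); }
    // e.g. pure red (255,0,0): Y = 76.245, Cr = 255.5
    // -> R = 76.245 + 1.402 * 127.5 = 255.0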
@@ -190,7 +218,6 @@ void CPlayWidget::initializeGL()
    glEnable(GL_DEPTH_TEST);

    m_pVSHader = new QOpenGLShader(QOpenGLShader::Vertex, this);

    bool bCompile = m_pVSHader->compileSourceCode(vsrcyuv);
    if(!bCompile)
    {
@@ -372,7 +399,6 @@ int CPlayWidget::loadYuvTexture()
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, id_u);
    // glPixelStorei(GL_UNPACK_ROW_LENGTH, m_buffer->StrideU());
    glTexImage2D(GL_TEXTURE_2D,
                 0, GL_RED,
                 m_nVideoW/2,
@@ -385,10 +411,9 @@ int CPlayWidget::loadYuvTexture()
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // V component
    glActiveTexture(GL_TEXTURE2);// activate texture unit GL_TEXTURE2
    glActiveTexture(GL_TEXTURE2);// GL_TEXTURE2
    glBindTexture(GL_TEXTURE_2D, id_v);
    // glPixelStorei(GL_UNPACK_ROW_LENGTH, m_buffer->StrideV());

    glTexImage2D(GL_TEXTURE_2D,
                 0, GL_RED,
                 m_nVideoW/2,
@@ -412,7 +437,7 @@ int CPlayWidget::loadRtcI420Texture()
    if(nullptr == m_buffer)
        return 0;
    glActiveTexture(GL_TEXTURE0);
    // activate texture unit 0 and upload the Y plane
    glBindTexture(GL_TEXTURE_2D, id_y);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, m_buffer->StrideY());
    glTexImage2D(GL_TEXTURE_2D,
@@ -430,7 +455,7 @@ int CPlayWidget::loadRtcI420Texture()
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // upload the U plane
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, id_u);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, m_buffer->StrideU());
@@ -446,8 +471,8 @@ int CPlayWidget::loadRtcI420Texture()
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // V component
    glActiveTexture(GL_TEXTURE2);// activate texture unit GL_TEXTURE2

    glActiveTexture(GL_TEXTURE2);//GL_TEXTURE2
    glBindTexture(GL_TEXTURE_2D, id_v);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, m_buffer->StrideV());
    glTexImage2D(GL_TEXTURE_2D,
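Both loaders upload each plane as a single-channel GL_RED texture, with U and V at half resolution; the fragment shader then recombines the three samplers into RGB per fragment. A hypothetical sketch of that recombination in the same shader-in-a-C-string style this file uses (the uniform names are illustrative, not the ones in the elided shader source):

    // Hypothetical YUV -> RGB fragment shader (BT.601). Bind texY/texU/texV to
    // texture units 0/1/2 to match the glActiveTexture calls above.
    const char *fsrc_yuv_sketch = "varying vec2 textureOut; \
        uniform sampler2D texY; \
        uniform sampler2D texU; \
        uniform sampler2D texV; \
        void main(void) { \
            float y = texture2D(texY, textureOut).r; \
            float u = texture2D(texU, textureOut).r - 0.5; \
            float v = texture2D(texV, textureOut).r - 0.5; \
            vec3 rgb = vec3(y + 1.402 * v, \
                            y - 0.344 * u - 0.714 * v, \
                            y + 1.772 * u); \
            gl_FragColor = vec4(rgb, 1.0); \
        }";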
@@ -36,6 +36,8 @@ public:
        TYPE_I420,
        TYPE_UNSET,
    }IMG_TYPE;
    CPlayWidget(QWidget* parent,IMG_TYPE type);

    CPlayWidget(QWidget* parent);
    ~CPlayWidget();
    int SetDataType(IMG_TYPE);
@@ -43,6 +45,8 @@ public:
    int StopRender();
    int OnCameraData(uint8_t *);
    int SetImgSize(uint32_t width,uint32_t );
    int RenderWidth();
    int RenderHeight();
protected:
    QTimer tm;
    void initializeGL() override;
@@ -0,0 +1,85 @@
#include "cv_ssd.h"
#include <QDebug>
#include <iostream>

using namespace std;

String labelFile = "ssd/labelmap_det.txt";
String modelFile = "debug/ssd/MobileNetSSD_deploy.caffemodel";
String model_text_file = "debug/ssd/MobileNetSSD_deploy.prototxt";

String objNames[] = {
    "background",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor"
};

cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file) {
    if (inframe->empty() || (modelFile == "") || (model_text_file == "")) {
        printf("could not load image...\n");
        return inframe;
    }
    cv::Mat resize;
    cv::resize(*inframe, resize,
               Size(480,560),
               0, 0, INTER_LINEAR);// resize to a fixed 480x560
    cv::Mat *image2 = new cv::Mat(480,640,CV_8UC3);
    cv::cvtColor(resize,*image2,COLOR_RGBA2RGB);
    Mat blobImage1 = blobFromImage(*image2, 0.007843,
                                   Size(300, 300),
                                   Scalar(127.5, 127.5, 127.5), true, false);

    // image: the picture to be fed to the network for detection or classification.
    // mean: the mean value subtracted from the whole image; for an RGB image three
    //   values can subtract a different mean per channel, while a single value
    //   subtracts the same mean from all three. Subtracting the mean removes the
    //   effect of different lighting on the same scene, leaving relative pixel
    //   values, so lighting no longer disturbs the classifier or network.
    // scalefactor: after mean subtraction the remaining pixel values can be scaled;
    //   the default is 1, and e.g. 1/2 halves every value.
    // size: the input size the network was trained with.
    // swapRB: OpenCV assumes BGR channel order while the mean values above assume
    //   RGB, so set swapRB=true to swap the R and B channels.
    Net net = readNetFromCaffe(model_text_file, modelFile);
    net.setInput(blobImage1, "data");
    Mat detection = net.forward("detection_out");
    Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
    float confidence_threshold = 0.2;
    for (int i = 0; i < detectionMat.rows; i++) {
        float confidence = detectionMat.at<float>(i, 2);
        if (confidence > confidence_threshold) {
            size_t objIndex = (size_t)(detectionMat.at<float>(i, 1));
            float tl_x = detectionMat.at<float>(i, 3) * image2->cols;
            float tl_y = detectionMat.at<float>(i, 4) * image2->rows;
            float br_x = detectionMat.at<float>(i, 5) * image2->cols;
            float br_y = detectionMat.at<float>(i, 6) * image2->rows;

            Rect object_box((int)tl_x, (int)tl_y, (int)(br_x - tl_x), (int)(br_y - tl_y));
            rectangle(*image2, object_box, Scalar(0, 0, 255), 2, 8, 0);
            putText(*image2, format("%s", objNames[objIndex].c_str()),
                    Point(tl_x, tl_y), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2);
        }
    }
    return image2;
}
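Each detectionMat row follows the standard SSD output layout [image_id, label, confidence, tl_x, tl_y, br_x, br_y], which is what the loop above indexes at columns 1 through 6. A minimal caller sketch for ssd_detect ("test.jpg" is a placeholder path; the input must be 4-channel because of the COLOR_RGBA2RGB conversion above):

    // Hypothetical usage of ssd_detect.
    #include <opencv2/opencv.hpp>
    #include "cv_ssd.h"

    void ssd_demo() {
        cv::Mat img = cv::imread("test.jpg");        // placeholder input image
        if (img.empty()) return;
        cv::cvtColor(img, img, cv::COLOR_BGR2BGRA);  // give it the 4 channels RGBA2RGB expects
        cv::Mat *boxed = ssd_detect(&img,
                                    "debug/ssd/MobileNetSSD_deploy.caffemodel",
                                    "debug/ssd/MobileNetSSD_deploy.prototxt");
        cv::imshow("ssd", *boxed);
        cv::waitKey(0);
        if (boxed != &img) delete boxed;             // ssd_detect news its result
    }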
@@ -0,0 +1,16 @@
#ifndef CV_SSD_H
#define CV_SSD_H

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file);


#endif // CV_SSD_H
@@ -0,0 +1,168 @@
#include "cv_yolo.h"

float confidenceThreshold = 0.25;

void test_yolo()
{
    image_detection();
}

void video_detection() {
    String modelConfiguration = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.cfg";
    String modelBinary = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.weights";
    dnn::Net net = readNetFromDarknet(modelConfiguration, modelBinary);
    if (net.empty())
    {
        printf("Could not load net...\n");
        return;
    }

    vector<string> classNamesVec;
    ifstream classNamesFile("D:/vcprojects/images/dnn/yolov2-tiny-voc/voc.names");
    if (classNamesFile.is_open())
    {
        string className = "";
        while (std::getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }

    // VideoCapture capture(0);
    VideoCapture capture;
    capture.open("D:/vcprojects/images/fbb.avi");
    if (!capture.isOpened()) {
        printf("could not open the camera...\n");
        return;
    }

    Mat frame;
    while (capture.read(frame))
    {
        if (frame.empty())
            break;
        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);
        Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
        net.setInput(inputBlob, "data");
        Mat detectionMat = net.forward("detection_out");
        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;
        double time = net.getPerfProfile(layersTimings) / freq;
        ostringstream ss;
        ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
        putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(0, 0, 255));

        for (int i = 0; i < detectionMat.rows; i++)
        {
            const int probability_index = 5;
            const int probability_size = detectionMat.cols - probability_index;
            float *prob_array_ptr = &detectionMat.at<float>(i, probability_index);
            size_t objectClass = max_element(prob_array_ptr, prob_array_ptr + probability_size) - prob_array_ptr;
            float confidence = detectionMat.at<float>(i, (int)objectClass + probability_index);
            if (confidence > confidenceThreshold)
            {
                float x = detectionMat.at<float>(i, 0);
                float y = detectionMat.at<float>(i, 1);
                float width = detectionMat.at<float>(i, 2);
                float height = detectionMat.at<float>(i, 3);
                int xLeftBottom = static_cast<int>((x - width / 2) * frame.cols);
                int yLeftBottom = static_cast<int>((y - height / 2) * frame.rows);
                int xRightTop = static_cast<int>((x + width / 2) * frame.cols);
                int yRightTop = static_cast<int>((y + height / 2) * frame.rows);
                Rect object(xLeftBottom, yLeftBottom,
                            xRightTop - xLeftBottom,
                            yRightTop - yLeftBottom);
                rectangle(frame, object, Scalar(0, 255, 0));
                if (objectClass < classNamesVec.size())
                {
                    ss.str("");
                    ss << confidence;
                    String conf(ss.str());
                    String label = String(classNamesVec[objectClass]) + ": " + conf;
                    int baseLine = 0;
                    Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                    rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom),
                                          Size(labelSize.width, labelSize.height + baseLine)),
                              Scalar(255, 255, 255), FILLED);
                    putText(frame, label, Point(xLeftBottom, yLeftBottom + labelSize.height),
                            FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
                }
            }
        }
        imshow("YOLOv3: Detections", frame);
        if (waitKey(1) >= 0) break;
    }
}

void image_detection() {
    String modelConfiguration = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.cfg";
    String modelBinary = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.weights";
    dnn::Net net = readNetFromDarknet(modelConfiguration, modelBinary);
    if (net.empty())
    {
        printf("Could not load net...\n");
        return;
    }
    vector<string> classNamesVec;
    ifstream classNamesFile("D:/vcprojects/images/dnn/yolov2-tiny-voc/voc.names");
    if (classNamesFile.is_open())
    {
        string className = "";
        while (std::getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }

    // load the test image
    Mat frame = imread("D:/vcprojects/images/fastrcnn.jpg");
    Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
    net.setInput(inputBlob, "data");

    // forward pass
    Mat detectionMat = net.forward("detection_out");
    vector<double> layersTimings;
    double freq = getTickFrequency() / 1000;
    double time = net.getPerfProfile(layersTimings) / freq;
    ostringstream ss;
    ss << "detection time: " << time << " ms";
    putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(0, 0, 255));

    // decode detections
    for (int i = 0; i < detectionMat.rows; i++)
    {
        const int probability_index = 5;
        const int probability_size = detectionMat.cols - probability_index;
        float *prob_array_ptr = &detectionMat.at<float>(i, probability_index);
        size_t objectClass = max_element(prob_array_ptr, prob_array_ptr + probability_size) - prob_array_ptr;
        float confidence = detectionMat.at<float>(i, (int)objectClass + probability_index);
        if (confidence > confidenceThreshold)
        {
            float x = detectionMat.at<float>(i, 0);
            float y = detectionMat.at<float>(i, 1);
            float width = detectionMat.at<float>(i, 2);
            float height = detectionMat.at<float>(i, 3);
            int xLeftBottom = static_cast<int>((x - width / 2) * frame.cols);
            int yLeftBottom = static_cast<int>((y - height / 2) * frame.rows);
            int xRightTop = static_cast<int>((x + width / 2) * frame.cols);
            int yRightTop = static_cast<int>((y + height / 2) * frame.rows);
            Rect object(xLeftBottom, yLeftBottom,
                        xRightTop - xLeftBottom,
                        yRightTop - yLeftBottom);
            rectangle(frame, object, Scalar(0, 0, 255), 2, 8);
            if (objectClass < classNamesVec.size())
            {
                ss.str("");
                ss << confidence;
                String conf(ss.str());
                String label = String(classNamesVec[objectClass]) + ": " + conf;
                int baseLine = 0;
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom),
                                      Size(labelSize.width, labelSize.height + baseLine)),
                          Scalar(255, 255, 255), FILLED);
                putText(frame, label, Point(xLeftBottom, yLeftBottom + labelSize.height),
                        FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
            }
        }
    }
    imshow("YOLO-Detections", frame);
    waitKey(0);
    return;
}
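Both detection loops decode each row the same way: columns 0-3 hold the box center and size as fractions of the image, which are converted to pixel corners. A worked sketch of that decode:

    // YOLO box decode as used above: (x, y, w, h) are image-relative fractions
    // centered on the object.
    #include <opencv2/core.hpp>

    cv::Rect decodeYoloBox(float x, float y, float w, float h,
                           int imgCols, int imgRows) {
        int left   = static_cast<int>((x - w / 2) * imgCols);
        int top    = static_cast<int>((y - h / 2) * imgRows);
        int right  = static_cast<int>((x + w / 2) * imgCols);
        int bottom = static_cast<int>((y + h / 2) * imgRows);
        return cv::Rect(left, top, right - left, bottom - top);
    }
    // e.g. x=0.5, y=0.5, w=0.2, h=0.4 on a 640x480 frame
    // -> Rect(256, 144, 128, 192), a box centered in the image.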
@@ -0,0 +1,19 @@
#ifndef CV_YOLO_H
#define CV_YOLO_H

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

#include <fstream>
#include <iostream>
#include <algorithm>
#include <cstdlib>
using namespace std;
using namespace cv;
using namespace cv::dnn;


void video_detection();
void image_detection();
void test_yolo();
#endif // CV_YOLO_H
@@ -0,0 +1,162 @@
#include <windows.h>
#include "cvhelper.h"

#include <iostream>
#include <fstream>

/*
 * Convert an RGB image to YCrCb and obtain the three channels separately.
 */
void TransferRGB2YCrCb(cv::Mat &input, std::vector<cv::Mat> &chls)
{
    // In the YCrCb color space, Y is the luma component while Cb and Cr are the
    // blue and red chroma offsets.
    // An image of the Y component alone is essentially the grayscale version of the color image.
    cv::cvtColor(input, input, cv::COLOR_BGR2YCrCb);
    cv::split(input, chls);
}

/*
 * Merge the separate Y, Cr and Cb channels into a YCrCb image, then convert back to RGB.
 */
void MergeYCrCb2RGB(cv::Mat &output, std::vector<cv::Mat> &chls)
{
    cv::merge(chls, output);
    cv::cvtColor(output, output, cv::COLOR_YCrCb2BGR);
}


/*
 * CLAHE.
 * @ClipLimit: histogram bars taller than the computed ClipLimit are clipped and the
 *   excess is redistributed evenly across the whole histogram, enhancing the image
 *   overall. (Arguably this is better read as a slope threshold on the cumulative
 *   distribution function: the part above ClipLimit is clipped and redistributed.)
 * @TilesGridSize: the image is divided into TilesGridSize*TilesGridSize blocks
 *   (the sliding window is TilesGridSize*TilesGridSize).
 */
cv::Mat clahe_deal(cv::Mat &src, double ClipLimit = 40.0, int TilesGridSize = 8)
{
    cv::Mat ycrcb = src.clone();
    std::vector<cv::Mat> channels;

    // TransferRGB2YCrCb(ycrcb, channels);

    // cv::Mat clahe_img;
    // // prototype in the OpenCV source: createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
    // cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();

    // clahe->setClipLimit(ClipLimit);
    // clahe->setTilesGridSize(Size(TilesGridSize, TilesGridSize));
    // clahe->apply(channels[0], clahe_img);
    // channels[0].release();
    // clahe_img.copyTo(channels[0]);
    // MergeYCrCb2RGB(ycrcb, channels);
    // return ycrcb;
    return ycrcb;
}


// Convert the Mat to a bit array: 255 is stored as 1, 0 as 0.
int Mat2Binary(const cv::Mat img, int line_byte, char* data)
{
    int width = img.cols;
    int height = img.rows;
    size_t line_size = line_byte * 8;
    size_t bit_size = line_size * height;


    char* p = data; int offset, v; unsigned char temp;
    for (int row = height - 1; row >= 0; row--)
    {
        for (int col = 0; col < width; col++)
        {
            offset = col % 8;
            //v = img.data[row * width + col];
            v = img.at<uchar>(row, col);
            temp = 1;
            temp = temp << (8 - offset - 1);
            if (v == 255)
            {
                *(p + col / 8) |= temp;
            }
            else
            {
                temp = ~temp;
                *(p + col / 8) &= temp;
            }
        }
        // The code below stays commented out; with it enabled, images whose width
        // is not divisible by 4 get a black band in the last columns.
        /* for (int j = width / 8; j < line_byte + 1; j++) {
            p[j] = 0;
        } */
        p = p + line_byte;
    }
    return 0;
}

// Save a Mat image as a 1-bit BMP.
int Save1BitImage(const cv::Mat img, std::string dst)
{
    int width = img.cols;
    int height = img.rows;
    const int biBitCount = 1;

    // Each stored row must occupy a multiple of 4 bytes; compute the bytes per bitmap row.
    //int line_byte = (width * biBitCount >> 3 + 3) / 4 * 4;
    int line_byte = (width * biBitCount / 8 + 5) / 4 * 4;// +3 changed to +5, otherwise the row fails to round up and the write crashes
    char* p_data = (char*)malloc(line_byte * height + 1);// +1 at the end, otherwise a heap error is reported
    //memset(p_data, 0x01, line_byte * height+1);

    // Convert the Mat to a bit array: 255 is stored as 1, 0 as 0.
    Mat2Binary(img, line_byte, p_data);

    // Size of the BMP color table in bytes: 256*4 for a grayscale image, 0 for true color, 2*4 for a binary image.
    int color_type_num = 2;
    int colorTablesize = color_type_num * sizeof(RGBQUAD);
    RGBQUAD* pColorTable = new RGBQUAD[color_type_num];
    for (int i = 0; i < color_type_num; i++) {
        pColorTable[i].rgbBlue = i * 255;
        pColorTable[i].rgbRed = i * 255;
        pColorTable[i].rgbGreen = i * 255;
        pColorTable[i].rgbReserved = 0;
    }
    // Fill in the bitmap file header.
    BITMAPFILEHEADER fileHead;
    fileHead.bfType = 0x4D42; // BMP magic
    fileHead.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + colorTablesize + line_byte * height; // bfSize is the file size in bytes: the sum of the four parts of the image file
    fileHead.bfReserved1 = 0;// reserved, must be 0
    fileHead.bfReserved2 = 0;
    fileHead.bfOffBits = 54 + colorTablesize; // bfOffBits is the space taken by the first three parts: the offset where the pixel data starts

    // Fill in the bitmap info header.
    BITMAPINFOHEADER head;
    head.biBitCount = biBitCount;// bits per pixel: must be 1 (two colors), 4 (16 colors), 8 (256 colors), 16 (high color) or 24 (true color)
    head.biCompression = BI_RGB; // compression type; must be BI_RGB or the image opens with errors
    head.biHeight = height;// height in pixels
    head.biWidth = width;
    //head.biSize = 40;
    head.biSize = sizeof(BITMAPINFOHEADER);// size of this structure in bytes
    head.biSizeImage = line_byte * height;// image size, including the padding bytes that round each row up to a multiple of 4
    head.biPlanes = 1;// target device planes; must be 1
    head.biXPelsPerMeter = 23622;// horizontal resolution in pixels per meter (about 600 dpi)
    head.biYPelsPerMeter = 23622;// vertical resolution in pixels per meter
    head.biClrImportant = 0;// number of colors important for display
    head.biClrUsed = 0;// number of colors actually used from the color table

    std::ofstream fp(dst.c_str(), std::ios::binary | std::ios::out);
    if (!fp.is_open()) {
        return -1;
    }
    fp.write((char*)&fileHead, sizeof(BITMAPFILEHEADER)); // write the file header
    // write the bitmap info header
    fp.write((char*)&head, sizeof(BITMAPINFOHEADER));
    // write the color table
    fp.write((char*)pColorTable, sizeof(RGBQUAD) * color_type_num);
    // write the pixel data
    fp.write((char*)p_data, height * line_byte);
    fp.close();

    delete[]pColorTable;
    free(p_data);// p_data came from malloc, so free it rather than delete[]
    return 0;
}

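The stride formula in Save1BitImage rounds each 1-bit row up to a 4-byte boundary, deliberately over-padding per the crash note above. A small worked check of that arithmetic:

    // Worked check of the 1-bit BMP row stride used in Save1BitImage.
    #include <cassert>

    int bmp1BitStride(int width) {
        const int biBitCount = 1;
        return (width * biBitCount / 8 + 5) / 4 * 4;   // same formula as above
    }

    void strideCheck() {
        assert(bmp1BitStride(10)  == 4);   // 10 px -> 2 data bytes, padded to 4
        assert(bmp1BitStride(640) == 84);  // 80 data bytes; the +5 over-pads past the exact 80
    }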
@@ -0,0 +1,17 @@
#ifndef CVHELPER_H
#define CVHELPER_H

// A thin wrapper around common OpenCV image operations.

#include "opencv2/opencv.hpp"
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

void TransferRGB2YCrCb(cv::Mat &input, std::vector<cv::Mat> &chls);
void MergeYCrCb2RGB(cv::Mat &output, std::vector<cv::Mat> &chls);
cv::Mat clahe_deal(cv::Mat &src, double ClipLimit , int TilesGridSize );
int Save1BitImage(const cv::Mat img, std::string dst);
int Mat2Binary(const cv::Mat img, int line_byte, char* data);

#endif // CVHELPER_H
@@ -1,7 +1,7 @@
#include "mainwindow.h"
#include "rtc.h"
#include <QApplication>

#include "cv_yolo.h"
#include "modules/video_capture/video_capture.h"
#include "video_source_impl.h"
#include <QString>
@@ -11,6 +11,7 @@
#include "camera_video_sink.h"
#include "video_source_impl.h"
#include <QMetaType>
#include "cv_ssd.h"


# pragma comment(lib, "secur32.lib")
@@ -21,6 +22,21 @@
# pragma comment(lib, "Strmiids.lib")
# pragma comment(lib, "User32.lib")


int BubbleSort(int *p,int len){
    if(nullptr == p)
        return -1;
    for(int i = 0;i < len - 1;i++){
        for(int j = 0;j < len - 1;j++){
            if(p[j] < p[j + 1]){
                int tmp = p[j];
                p[j] = p[j+1];
                p[j+1] = tmp;
            }
        }
    }
    return 0;
}

void EnumCapture()
{
    rtc::WinsockInitializer winsock_init;
@@ -29,7 +45,6 @@ void EnumCapture()
    rtc::ThreadManager::Instance()->SetCurrentThread(&w32_thread);
    rtc::InitializeSSL();


    std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
        webrtc::VideoCaptureFactory::CreateDeviceInfo());
@@ -56,6 +71,11 @@ void EnumCapture()

int main(int argc, char *argv[])
{
    int p[5] = {3,1,49,23,23};
    BubbleSort(p,5);
    for(int i = 0;i< 5;i++){
        qDebug()<<p[i]<<" ";
    }
    qRegisterMetaType<rtc::scoped_refptr<webrtc::I420BufferInterface>>("rtc::scoped_refptr<webrtc::I420BufferInterface>");
    qRegisterMetaType<rtc::scoped_refptr<webrtc::I420BufferInterface>>("rtc::scoped_refptr<webrtc::I420BufferInterface>&");
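The two qRegisterMetaType calls make the scoped_refptr argument (and its reference form) queueable, so the capture thread can emit frames to the GUI thread. A minimal sketch of the same pattern with a hypothetical payload type:

    // Sketch (hypothetical FrameHandle type): a custom type must be registered
    // before it can cross threads through a queued signal/slot connection,
    // otherwise Qt warns "Cannot queue arguments of type ...".
    #include <QMetaType>

    struct FrameHandle { int id; };
    Q_DECLARE_METATYPE(FrameHandle)

    void registerFrameTypes() {
        qRegisterMetaType<FrameHandle>("FrameHandle");
        qRegisterMetaType<FrameHandle>("FrameHandle&");  // reference form, as above
    }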
@@ -73,3 +93,7 @@ int main(int argc, char *argv[])
    w.show();
    return a.exec();
}

@@ -1,6 +1,99 @@
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "camera_video_sink.h"
#include <QLabel>

#include "opencv2/opencv.hpp"
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include "cvhelper.h"
#include "cv_ssd.h"
#include <list>
#include <mutex>
#include <QFileDialog>
#include "Qss.h"

class ASyncDetectAndRenderThread :public QSSASyncProcess{
public:
    typedef enum {
        STATUS_STOP = 1,
        STATUS_RUNNING = 2
    }Status;
    ASyncDetectAndRenderThread(QWidget * parent,CPlayWidget *render_ui,
                               int width,int height,
                               std::string model_path,
                               std::string model_txt_path){
        m_render = render_ui;
        m_parent = parent;
        m_status = STATUS_RUNNING;
        m_width = width;
        m_height = height;
        this->m_model_path = model_path;
        this->m_model_txt_path = model_txt_path;
    }
    bool Detecting(){
        if(m_mat.size() > 0)
            return true;
        else
            return false;
    }
    Status DetectStatus(){
        return this->m_status;
    }
    void SetCvImage(uint8_t *frame){
        cv::Mat yuv420p;
        yuv420p.create(m_height*3/2,
                       m_width, CV_8UC1);
        memcpy(yuv420p.data, frame, m_height*m_width
               *sizeof(unsigned char)*3/2);
        cv::Mat *rgbImg = new cv::Mat;
        cv::cvtColor(yuv420p, *rgbImg, cv::COLOR_YUV2BGR_I420);
        std::lock_guard<std::mutex> guard(this->m_mutex);
        m_mat.push_back(rgbImg);
    }

    void Run(void *) override{
        while(m_status == STATUS_RUNNING){
            cv::Mat *c = takeLast();
            if(nullptr != c){
                cv::Mat *result = ssd_detect(c,
                                             this->m_model_path,
                                             this->m_model_txt_path);

                qDebug()<<result->cols<<result->rows<<result->type();
                cv::Mat yuvData;

                cv::cvtColor(*result, yuvData, cv::COLOR_BGR2YUV_I420);
                this->m_render->OnCameraData(yuvData.data);
                if(result != c)
                    delete result;// ssd_detect allocates its result

            }
            delete c;
        }
    }

private:
    cv::Mat *takeLast(){
        std::lock_guard<std::mutex> guard(this->m_mutex);
        if(m_mat.size() != 0){
            auto ret = *m_mat.begin();
            m_mat.pop_front();
            return ret;
        }
        return nullptr;
    }
    std::list<cv::Mat*> m_mat;
    std::mutex m_mutex;
    CPlayWidget *m_render;
    QWidget *m_parent;
    Status m_status;
    int m_width;
    int m_height;
    std::string m_model_path; // model path
    std::string m_model_txt_path; // model prototxt path
};

ASyncDetectAndRenderThread *gdetect = nullptr;

class AsyncRennder :public QSSASyncProcess{
public:
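A note on the handoff design above: the render thread only calls SetCvImage when Detecting() is false, i.e. when the detector's queue is empty, so at most one frame waits for detection and stale frames are simply dropped. A generic sketch of that guarded handoff reduced to a single slot (illustrative names, not this file's API):

    // Single-slot, mutex-guarded handoff in the spirit of SetCvImage/takeLast.
    #include <mutex>

    template <typename T>
    class SingleSlot {
    public:
        bool Busy() {                        // ~ Detecting()
            std::lock_guard<std::mutex> g(m);
            return item != nullptr;
        }
        void Put(T *t) {                     // ~ SetCvImage()
            std::lock_guard<std::mutex> g(m);
            item = t;
        }
        T *Take() {                          // ~ takeLast(); caller owns the result
            std::lock_guard<std::mutex> g(m);
            T *r = item; item = nullptr; return r;
        }
    private:
        std::mutex m;
        T *item = nullptr;
    };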
@@ -9,6 +102,7 @@ public:
        this->mfbs = p;
        mUI = render_ui;
        this->state = true;

    }
    // stop rendering
    void StopRender(){
@@ -20,16 +114,20 @@ public:
        qtimer->setSingleShot(false);

        QObject::connect(qtimer, SIGNAL(timeout()), &eventLoop, SLOT(quit()));
        qtimer->start(10);
        qtimer->start(22);

        while(state){
            if(mfbs ->Size() > 0){
                uint8_t *frame = this->mfbs->TakeLast();
                uint8_t *frame = mfbs->TakeLast();
                mUI->OnCameraData(frame);
                if(gdetect != nullptr){
                    if(!gdetect->Detecting()){
                        gdetect->SetCvImage(frame);
                    }
                }
                eventLoop.exec(); // render one frame
                delete[] frame;// the frame buffer was allocated with new[]
            }

        }
    }
private:
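The timer change from 10 ms to 22 ms paces the render loop at roughly 45 fps: each eventLoop.exec() blocks until the next timeout, so the loop body runs at most once per tick. A stripped-down sketch of the pattern (names are illustrative):

    // Timer-paced loop in the spirit of AsyncRennder::Run, as a free function.
    #include <QEventLoop>
    #include <QObject>
    #include <QTimer>

    void pacedLoop(bool &running) {
        QEventLoop eventLoop;
        QTimer timer;
        timer.setSingleShot(false);
        QObject::connect(&timer, &QTimer::timeout, &eventLoop, &QEventLoop::quit);
        timer.start(22);              // wake roughly 45 times per second
        while (running) {
            // ... take one frame and hand it to the widget ...
            eventLoop.exec();         // sleep until the next timer tick
        }
    }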
@@ -38,7 +136,9 @@ private:
    CPlayWidget *mUI;
};

AsyncRennder *gRender;

AsyncRennder *gRender = nullptr;


MainWindow::MainWindow(QWidget *parent)
    : QssMainWindow(parent)
@@ -58,6 +158,9 @@ MainWindow::MainWindow(QWidget *parent)
        info->GetDeviceName(i,name,100,nullptr,0,nullptr,0);
        ui->comboBox->addItem(QString::asprintf("%s",name),i);
    }
    mDetectResut = new CPlayWidget(this);
    ui->gridLayout->addWidget(mDetectResut,0,1);

}

MainWindow::~MainWindow()
@@ -72,6 +175,7 @@ void MainWindow::OnUpdateFrame( rtc::scoped_refptr<webrtc::I420BufferInterface>&

void MainWindow::on_pushButton_clicked()
{
    if(ui->pushButton->text() == QString("采集")) {
        int id = ui->comboBox->currentData().toInt();
        webrtc::VideoCaptureCapability p;

@@ -83,6 +187,12 @@ void MainWindow::on_pushButton_clicked()
        info->GetCapability(ids,id,p);
        qDebug()<<QString::asprintf("GetCapability: %d %d %d %d",id,p.width,p.height,p.maxFPS);

<<<<<<< HEAD
        char ids[128];
        info->GetDeviceName(id,nullptr,0,ids,128,nullptr,0);
        info->GetCapability(ids,id,p);
        qDebug()<<QString::asprintf("GetCapability: %d %d %d %d",id,p.width,p.height,p.maxFPS);

        m_capturer.reset(CameraVideoSink::Create(p.width, p.height, 25, id));
        if (!m_capturer) {
            qDebug()<<"error";
@@ -100,6 +210,28 @@ void MainWindow::on_pushButton_clicked()
        gRender = new AsyncRennder(this,m_capturer->VideoBuffer(),ui->openGLWidget);
        gRender->Start(this);
        connect(gRender,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
=======
        m_capturer.reset(CameraVideoSink::Create(p.width, p.height, 25, id));
        if (!m_capturer) {
            qDebug()<<"error";
        }
        ui->openGLWidget->SetDataType(CPlayWidget::TYPE_YUV420P);
        ui->openGLWidget->SetImgSize(m_capturer->Capability().width,
                                     m_capturer->Capability().height);
        ui->openGLWidget->StartRender();
        // ui->openGLWidget->moveToThread(&gRender->Thread());
        if(gRender == nullptr){
            gRender = new AsyncRennder(this,m_capturer->VideoBuffer(),ui->openGLWidget);
            gRender->Start(this);
            connect(gRender,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
        }
        mDetectResut->SetImgSize(480,560);
        mDetectResut->StartRender();

        ui->pushButton->setText(QString::asprintf("正在采集"));
    }else{
        ui->openGLWidget->StopRender();
>>>>>>> ceaccb7db090d07a75d14c43e07a96773ac1e9a6
    }
}

@@ -108,16 +240,88 @@ void MainWindow::RenderDone()

}

cv::Mat *QImage2cvMat(QImage image)
{
    cv::Mat *mat = nullptr;
    switch(image.format())
    {
    case QImage::Format_ARGB32:
    case QImage::Format_RGB32:
    case QImage::Format_ARGB32_Premultiplied:
        mat = new cv::Mat(image.height(),
                          image.width(),
                          CV_8UC4);
        memcpy(mat->data,image.bits(),image.bytesPerLine()*image.height());
        break;
    case QImage::Format_RGB888:
        mat = new cv::Mat(image.height(), image.width(), CV_8UC3,
                          (void*)image.bits(), image.bytesPerLine());
        break;
    case QImage::Format_Indexed8:
        mat = new cv::Mat(image.height(), image.width(), CV_8UC1,
                          (void*)image.bits(), image.bytesPerLine());
        break;
    default:
        return nullptr;// unhandled format: mat would otherwise stay null
    }
    Save1BitImage(*mat,"d://tgest.png");
    return mat;
}


void MainWindow::on_pushButton_2_clicked()
{
    CPlayWidget *ptr = new CPlayWidget(this);
    CPlayWidget *ptr1 = new CPlayWidget(this);
    CPlayWidget *ptr2 = new CPlayWidget(this);

    ui->gridLayout->addWidget(ptr,0,1);
    ui->gridLayout->addWidget(ptr1,1,0);
    ui->gridLayout->addWidget(ptr2,1,1);

    QPixmap pix = ui->openGLWidget->grab();
    origin_picture->setPixmap(pix);
}

// detect
void MainWindow::on_pushButton_3_clicked()
{
    if(nullptr != origin_picture->pixmap()){
        QImage image = origin_picture->pixmap()->toImage();
        image = image.scaled(1280,720);
        cv::Mat *converted = QImage2cvMat(image);
        if(nullptr == converted)
            return;
        cv::Mat pic = *converted;
        if((ui->lineEdit->text() == "") || (ui->lineEdit_2->text() == "")){
            return;// no model files selected yet
        }
        cv::Mat *result = ssd_detect(&pic,
                                     ui->lineEdit->text().toStdString(),
                                     ui->lineEdit_2->text().toStdString()
                                     );
        this->processed_picture->setPixmap(QPixmap::fromImage(image));
        if(result != &pic)
            delete result;
        delete converted;
    }
}


void MainWindow::on_pushButton_4_clicked()
{
    if(gdetect == nullptr){
        if((ui->lineEdit->text().toStdString() == "")
                ||
           (ui->lineEdit_2->text().toStdString() == "")){
            QssMessageBox::warn("please input model file",nullptr,"please input model file",QMessageBox::Ok);
            return;
        }
        gdetect = new ASyncDetectAndRenderThread(this,mDetectResut,ui->openGLWidget->RenderWidth(),
                                                 ui->openGLWidget->RenderHeight(),
                                                 ui->lineEdit->text().toStdString(),
                                                 ui->lineEdit_2->text().toStdString());
        gdetect->Start(this);
        connect(gdetect,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
    }
}


// import model files
void MainWindow::on_pushButton_5_clicked()
{
    QString label1 = ui->lineEdit_2->text();
    QString openFile = QFileDialog::getOpenFileName(this, "0.0", "",
                                                    "*.caffemodel *.prototxt",nullptr);
    if (openFile.contains(".caffemodel")){
        ui->lineEdit->setText(openFile);
    }
    if (openFile.contains(".prototxt")){
        ui->lineEdit_2->setText(openFile);
    }
}

@@ -4,6 +4,7 @@
#include "rtc.h"
#include "api/video/i420_buffer.h"
#include "Qss.h"
#include "cplaywidget.h"

QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
@@ -23,8 +24,17 @@ private slots:
    void RenderDone();
    void on_pushButton_2_clicked();

    void on_pushButton_3_clicked();

    void on_pushButton_4_clicked();

    void on_pushButton_5_clicked();

private:
    Ui::MainWindow *ui;
    std::unique_ptr<CameraVideoSink> m_capturer;
    QLabel *origin_picture;
    QLabel *processed_picture;
    CPlayWidget *mDetectResut;
};
#endif // MAINWINDOW_H
@@ -20,9 +20,9 @@
   <string>MainWindow</string>
  </property>
  <widget class="QWidget" name="centralwidget">
   <layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,9">
   <layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,0,9">
    <item>
     <layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,0,3">
     <layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,3">
      <item>
       <widget class="QLabel" name="label">
        <property name="text">
@@ -56,13 +56,6 @@
      <item>
       <widget class="QComboBox" name="comboBox_2"/>
      </item>
      <item>
       <widget class="QPushButton" name="pushButton_2">
        <property name="text">
         <string>增加窗口</string>
        </property>
       </widget>
      </item>
      <item>
       <spacer name="horizontalSpacer">
        <property name="orientation">
@@ -78,6 +71,57 @@
      </item>
     </layout>
    </item>
    <item>
     <layout class="QHBoxLayout" name="horizontalLayout_2">
      <item>
       <widget class="QLabel" name="label_3">
        <property name="text">
         <string>模型文件: .caffemodel</string>
        </property>
       </widget>
      </item>
      <item>
       <widget class="QLineEdit" name="lineEdit"/>
      </item>
      <item>
       <widget class="QLabel" name="label_4">
        <property name="text">
         <string>.prototxt:</string>
        </property>
       </widget>
      </item>
      <item>
       <widget class="QLineEdit" name="lineEdit_2"/>
      </item>
      <item>
       <spacer name="horizontalSpacer_2">
        <property name="orientation">
         <enum>Qt::Horizontal</enum>
        </property>
        <property name="sizeHint" stdset="0">
         <size>
          <width>40</width>
          <height>20</height>
         </size>
        </property>
       </spacer>
      </item>
      <item>
       <widget class="QPushButton" name="pushButton_5">
        <property name="text">
         <string>导入</string>
        </property>
       </widget>
      </item>
      <item>
       <widget class="QPushButton" name="pushButton_4">
        <property name="text">
         <string>动态检测</string>
        </property>
       </widget>
      </item>
     </layout>
    </item>
    <item>
     <layout class="QGridLayout" name="gridLayout" rowstretch="0" columnstretch="0">
      <item row="0" column="0">
@@ -1,6 +1,6 @@
QT += core gui

include(D:\\project\\c++qt\\qsswraper\\qsswraper.pri)
include(D:\\project\\qt_project\\qsswraper\\qsswraper.pri)
RESOURCES += $$PWD/qss/qss.qrc

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
@@ -20,11 +20,12 @@ DEFINES += QT_DEPRECATED_WARNINGS NOMINMAX WEBRTC_WIN NOMINMAX WIN32_LEAN_AND_ME

INCLUDEPATH += third/include/

LIBS += -L$$PWD/third/lib libwebrtc.lib ole32.lib oleaut32.lib strmiids.lib advapi32.lib

SOURCES += \
    src/camera_video_sink.cpp \
    src/cplaywidget.cpp \
    src/cv_ssd.cpp \
    src/cv_yolo.cpp \
    src/cvhelper.cpp \
    src/main.cpp \
    src/mainwindow.cpp \
    src/video_source_impl.cpp
@@ -32,6 +33,9 @@ SOURCES += \
HEADERS += \
    src/camera_video_sink.h \
    src/cplaywidget.h \
    src/cv_ssd.h \
    src/cv_yolo.h \
    src/cvhelper.h \
    src/mainwindow.h \
    src/rtc.h \
    src/video_source_impl.h
@@ -43,3 +47,14 @@ FORMS += \
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target

CONFIG(debug, debug|release){
    message("debug mode")
    LIBS += -L$$PWD/third/lib libwebrtc.lib ole32.lib oleaut32.lib strmiids.lib
    LIBS += advapi32.lib opencv_core455d.lib
    LIBS += opencv_stitching455d.lib opencv_objdetect455d.lib opencv_videoio455d.lib
    LIBS += opencv_ml455d.lib opencv_imgcodecs455d.lib opencv_imgproc455d.lib opencv_dnn455d.lib
    LIBS += opencv_highgui455d.lib
    DEFINES += DEBUG_FLAG
    CONFIG += debug
}