Compare commits

...

10 Commits

Author SHA1 Message Date
zcy e0cf4f960f no message 2023-01-11 12:09:27 +08:00
zcy b872dbf500 no message 2023-01-11 11:05:19 +08:00
zcy 8ba473edb2 Merge branch 'master' of https://gitee.com/290198252/multimedia (conflicts: client/webrtc_capture/src/camera_video_sink.cpp, client/webrtc_capture/src/mainwindow.cpp) 2022-10-04 23:50:34 +08:00
zcy b4f0cdca74 no message 2022-10-04 23:49:46 +08:00
zcy ceaccb7db0 no message 2022-05-26 09:33:26 +08:00
zcy 9111580993 no message 2022-05-25 21:40:29 +08:00
zcy c54519de29 Switch the SSD model to file-based input 2022-05-25 21:34:18 +08:00
zcy 1ba89778f5 add dynamic detect 2022-05-23 01:20:11 +08:00
zcy ed2d2e9e09 no message 2022-05-21 20:44:46 +08:00
zcy f748b3ded8 no message 2022-05-19 18:01:35 +08:00
14 changed files with 605 additions and 95 deletions

.gitignore (vendored, +6)

@@ -41,3 +41,9 @@ client/janus_gateway_win/janus_win/x64/
 client/janus_gateway_win/.vs/
 client/janus_gateway_win/usocket_test/x64/
 client/build-webrtc_capture-Desktop_Qt_5_15_2_MSVC2019_64bit-Debug/
+client/build-webrtc_capture-Desktop_Qt_5_15_2_MSVC2019_64bit-Release/
+client/build-webrtc_capture-vvv-Debug/
+client/build-webrtc_demo-Desktop_Qt_5_12_9_MSVC2015_64bit-Debug/
+client/build-webrtc_demo-Desktop_Qt_5_12_9_MSVC2017_64bit-Debug/
+client/build-webrtc_demo-vvv-Debug/
+client/rtmp_demo/build-yuvgl-Replacement_for_Desktop_Qt_5_14_0_MSVC2017_64bit-debug/

client/webrtc_capture/src/camera_video_sink.cpp

@@ -76,20 +76,15 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
             std::chrono::system_clock::now().time_since_epoch()).count();
     static size_t cnt = 0;
-    qDebug()<<int(frame.video_frame_buffer()->type());
+    // qDebug()<<int(frame.video_frame_buffer()->type());
     rtc::scoped_refptr<webrtc::I420BufferInterface> frameBuffer =
             frame.video_frame_buffer()->ToI420();
     cnt++;
     int width = this->m_capability.width;
     int height = this->m_capability.height;
-    qDebug()<<this->Capability().height<<this->Capability().width
-           <<int(this->Capability().videoType)
-           <<int(webrtc::VideoType::kI420)
-           <<this->Capability().interlaced<<width * height*4;
-    uint8_t *data =
-            new uint8_t[width * height*4];
-    qDebug()<<width*height<<int(frameBuffer->GetI420()->type());
+    uint8_t *data = new uint8_t[width * height*4];
+    // qDebug()<<width*height<<int(frameBuffer->GetI420()->type());
     memcpy(data,frameBuffer->GetI420()->DataY(),width*height);
     memcpy(data + width*height ,frameBuffer->GetI420()->DataU(),
            width*height/4);
@@ -98,9 +93,11 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
            width*height/4);
     m_buf.PushFirst(data);
     auto timestamp_curr = std::chrono::duration_cast<std::chrono::milliseconds>(
         std::chrono::system_clock::now().time_since_epoch()).count();
     if(timestamp_curr - timestamp > 1000) {
+        qDebug()<<timestamp;
         RTC_LOG(LS_INFO) << "FPS: " << cnt<<m_buf.Size();
         cnt = 0;
         timestamp = timestamp_curr;
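The second hunk above is a once-per-second FPS log: count frames, and once more than 1000 ms have passed since the last report, print the count and reset. The same pattern in isolation (a minimal sketch, independent of WebRTC and Qt; OnFrame here is a stand-in for the sink callback, not the class method itself):

    #include <chrono>
    #include <cstdio>

    // Stand-in for a per-frame callback such as CameraVideoSink::OnFrame.
    void OnFrame() {
        using namespace std::chrono;
        static auto last = steady_clock::now(); // time of the last FPS report
        static size_t cnt = 0;                  // frames since the last report
        ++cnt;
        auto now = steady_clock::now();
        if (now - last > milliseconds(1000)) {
            std::printf("FPS: %zu\n", cnt);
            cnt = 0;
            last = now;
        }
    }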

client/webrtc_capture/src/camera_video_sink.h

@@ -2,8 +2,6 @@
 #define CAMERA_VIDEO_SINK_H
 // vcm_capturer_test.h
 #include <memory>
 #include <QObject>
 #include "modules/video_capture/video_capture.h"

client/webrtc_capture/src/cplaywidget.cpp

@@ -83,6 +83,28 @@ void CPlayWidget::OnPaintData(const rtc::scoped_refptr<webrtc::I420BufferInterface
     update();
 }
+CPlayWidget::CPlayWidget(QWidget *parent, IMG_TYPE type)
+{
+    textureUniformY = 0;
+    textureUniformU = 0;
+    textureUniformV = 0;
+    id_y = 0;
+    id_u = 0;
+    id_v = 0;
+    m_pTextureRGB = nullptr;
+    m_pBufYuv420p = nullptr;
+    m_pVSHader = NULL;
+    m_pFSHader = NULL;
+    m_pShaderProgram = NULL;
+    m_pTextureY = NULL;
+    m_pTextureU = NULL;
+    m_pTextureV = NULL;
+    m_nVideoH = 0;
+    m_nVideoW = 0;
+    mType = type;
+    m_start_render = false;
+}
 CPlayWidget::CPlayWidget(QWidget *parent):QOpenGLWidget(parent) {
     textureUniformY = 0;
     textureUniformU = 0;
@@ -100,7 +122,7 @@ CPlayWidget::CPlayWidget(QWidget *parent):QOpenGLWidget(parent) {
     m_pTextureV = NULL;
     m_nVideoH = 0;
     m_nVideoW = 0;
-    mType = TYPE_I420;
+    mType = TYPE_YUV420P;
     m_start_render = false;
 }
@@ -138,6 +160,8 @@ void CPlayWidget::OnCameraData( rtc::scoped_refptr<webrtc::I420BufferInterface>
     update();
 }
 int CPlayWidget::OnCameraData(uint8_t *p)
 {
     memcpy(m_pBufYuv420p,p,m_nVideoH*m_nVideoW/2*3);

client/webrtc_capture/src/cplaywidget.h

@@ -36,6 +36,8 @@ public:
         TYPE_I420,
         TYPE_UNSET,
     }IMG_TYPE;
+    CPlayWidget(QWidget* parent,IMG_TYPE type);
     CPlayWidget(QWidget* parent);
     ~CPlayWidget();
     int SetDataType(IMG_TYPE);

client/webrtc_capture/src/cv_ssd.cpp (new file)

@@ -0,0 +1,85 @@
#include "cv_ssd.h"
#include <QDebug>
#include <iostream>
using namespace std;
String labelFile = "ssd/labelmap_det.txt";
String modelFile = "debug/ssd/MobileNetSSD_deploy.caffemodel";
String model_text_file = "debug/ssd/MobileNetSSD_deploy.prototxt";
String objNames[] = {
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor"
};
cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file) {
if (inframe->empty() || (modelFile == "") || (model_text_file == "")) {
printf("could not load image...\n");
return inframe;
}
cv::Mat resize;
cv::resize(*inframe, resize,
           Size(480,560),
           0, 0, INTER_LINEAR); // shrink X and Y by half
cv::Mat *image2 = new cv::Mat(480,640,CV_8UC3);
cv::cvtColor(resize,*image2,COLOR_RGBA2RGB);
Mat blobImage1 = blobFromImage(*image2, 0.007843,
Size(300, 300),
Scalar(127.5, 127.5, 127.5), true, false);
//// image: the picture we feed to the network for processing or classification.
//// mean: the average value(s) to subtract from the image. For an RGB image you
//// can pass three per-channel means, or a single value applied to all three
//// channels. Subtracting the mean removes lighting differences between shots of
//// the same scene: average the R, G and B pixel values, subtract that mean from
//// every pixel, and the remaining relative values are insensitive to illumination.
//// scalefactor: after mean subtraction the remaining pixel values can be scaled;
//// the default is 1. To halve them, set scalefactor to 1/2.
//// size: the input size the network was trained with.
//// swapRB: OpenCV assumes the image channel order is BGR while the means assume
/// RGB, so set swapRB=true when R and B need to be swapped.
///
Net net = readNetFromCaffe(model_text_file, modelFile);
net.setInput(blobImage1, "data");
Mat detection = net.forward("detection_out");
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
float confidence_threshold = 0.2;
for (int i = 0; i < detectionMat.rows; i++) {
float confidence = detectionMat.at<float>(i, 2);
if (confidence > confidence_threshold) {
size_t objIndex = (size_t)(detectionMat.at<float>(i, 1));
float tl_x = detectionMat.at<float>(i, 3) * image2->cols;
float tl_y = detectionMat.at<float>(i, 4) * image2->rows;
float br_x = detectionMat.at<float>(i, 5) * image2->cols;
float br_y = detectionMat.at<float>(i, 6) * image2->rows;
Rect object_box((int)tl_x, (int)tl_y, (int)(br_x - tl_x), (int)(br_y - tl_y));
rectangle(*image2, object_box, Scalar(0, 0, 255), 2, 8, 0);
putText(*image2, format("%s", objNames[objIndex].c_str()),
Point(tl_x, tl_y), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2);
}
}
return image2;
}
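For reference, a minimal caller for the new ssd_detect entry point (a sketch: the image path is hypothetical; the function expects a 4-channel input because it converts with COLOR_RGBA2RGB internally, returns its argument unchanged on bad input, and otherwise returns a heap-allocated Mat the caller must delete):

    #include <opencv2/opencv.hpp>
    #include "cv_ssd.h"

    int main() {
        cv::Mat img = cv::imread("debug/test.jpg");  // hypothetical test image
        if (img.empty()) return 1;
        cv::cvtColor(img, img, cv::COLOR_BGR2BGRA);  // ssd_detect assumes 4 channels
        cv::Mat *out = ssd_detect(&img,
                                  "debug/ssd/MobileNetSSD_deploy.caffemodel",
                                  "debug/ssd/MobileNetSSD_deploy.prototxt");
        cv::imshow("ssd", *out);
        cv::waitKey(0);
        if (out != &img) delete out;                 // detection result is heap-allocated
        return 0;
    }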

client/webrtc_capture/src/cv_ssd.h (new file)

@@ -0,0 +1,16 @@
#ifndef CV_SSD_H
#define CV_SSD_H
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
using namespace cv;
using namespace cv::dnn;
using namespace std;
cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file);
#endif // CV_SSD_H

client/webrtc_capture/src/cv_yolo.cpp (new file)

@@ -0,0 +1,168 @@
#include "cv_yolo.h"
float confidenceThreshold = 0.25;
void test_yolo()
{
image_detection();
}
void video_detection() {
String modelConfiguration = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.cfg";
String modelBinary = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.weights";
dnn::Net net = readNetFromDarknet(modelConfiguration, modelBinary);
if (net.empty())
{
printf("Could not load net...\n");
return;
}
vector<string> classNamesVec;
ifstream classNamesFile("D:/vcprojects/images/dnn/yolov2-tiny-voc/voc.names");
if (classNamesFile.is_open())
{
string className = "";
while (std::getline(classNamesFile, className))
classNamesVec.push_back(className);
}
// VideoCapture capture(0);
VideoCapture capture;
capture.open("D:/vcprojects/images/fbb.avi");
if (!capture.isOpened()) {
printf("could not open the camera...\n");
return;
}
Mat frame;
while (capture.read(frame))
{
if (frame.empty())
break;
if (frame.channels() == 4)
cvtColor(frame, frame, COLOR_BGRA2BGR);
Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
net.setInput(inputBlob, "data");
Mat detectionMat = net.forward("detection_out");
vector<double> layersTimings;
double freq = getTickFrequency() / 1000;
double time = net.getPerfProfile(layersTimings) / freq;
ostringstream ss;
ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(0, 0, 255));
for (int i = 0; i < detectionMat.rows; i++)
{
const int probability_index = 5;
const int probability_size = detectionMat.cols - probability_index;
float *prob_array_ptr = &detectionMat.at<float>(i, probability_index);
size_t objectClass = max_element(prob_array_ptr, prob_array_ptr + probability_size) - prob_array_ptr;
float confidence = detectionMat.at<float>(i, (int)objectClass + probability_index);
if (confidence > confidenceThreshold)
{
float x = detectionMat.at<float>(i, 0);
float y = detectionMat.at<float>(i, 1);
float width = detectionMat.at<float>(i, 2);
float height = detectionMat.at<float>(i, 3);
int xLeftBottom = static_cast<int>((x - width / 2) * frame.cols);
int yLeftBottom = static_cast<int>((y - height / 2) * frame.rows);
int xRightTop = static_cast<int>((x + width / 2) * frame.cols);
int yRightTop = static_cast<int>((y + height / 2) * frame.rows);
Rect object(xLeftBottom, yLeftBottom,
xRightTop - xLeftBottom,
yRightTop - yLeftBottom);
rectangle(frame, object, Scalar(0, 255, 0));
if (objectClass < classNamesVec.size())
{
ss.str("");
ss << confidence;
String conf(ss.str());
String label = String(classNamesVec[objectClass]) + ": " + conf;
int baseLine = 0;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom),
Size(labelSize.width, labelSize.height + baseLine)),
Scalar(255, 255, 255), FILLED);
putText(frame, label, Point(xLeftBottom, yLeftBottom + labelSize.height),
FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
}
}
}
imshow("YOLOv3: Detections", frame);
if (waitKey(1) >= 0) break;
}
}
void image_detection() {
String modelConfiguration = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.cfg";
String modelBinary = "D:/vcprojects/images/dnn/yolov2-tiny-voc/yolov2-tiny-voc.weights";
dnn::Net net = readNetFromDarknet(modelConfiguration, modelBinary);
if (net.empty())
{
printf("Could not load net...\n");
return;
}
vector<string> classNamesVec;
ifstream classNamesFile("D:/vcprojects/images/dnn/yolov2-tiny-voc/voc.names");
if (classNamesFile.is_open())
{
string className = "";
while (std::getline(classNamesFile, className))
classNamesVec.push_back(className);
}
// load the image
Mat frame = imread("D:/vcprojects/images/fastrcnn.jpg");
Mat inputBlob = blobFromImage(frame, 1 / 255.F, Size(416, 416), Scalar(), true, false);
net.setInput(inputBlob, "data");
// run the forward pass
Mat detectionMat = net.forward("detection_out");
vector<double> layersTimings;
double freq = getTickFrequency() / 1000;
double time = net.getPerfProfile(layersTimings) / freq;
ostringstream ss;
ss << "detection time: " << time << " ms";
putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(0, 0, 255));
// draw the detections
for (int i = 0; i < detectionMat.rows; i++)
{
const int probability_index = 5;
const int probability_size = detectionMat.cols - probability_index;
float *prob_array_ptr = &detectionMat.at<float>(i, probability_index);
size_t objectClass = max_element(prob_array_ptr, prob_array_ptr + probability_size) - prob_array_ptr;
float confidence = detectionMat.at<float>(i, (int)objectClass + probability_index);
if (confidence > confidenceThreshold)
{
float x = detectionMat.at<float>(i, 0);
float y = detectionMat.at<float>(i, 1);
float width = detectionMat.at<float>(i, 2);
float height = detectionMat.at<float>(i, 3);
int xLeftBottom = static_cast<int>((x - width / 2) * frame.cols);
int yLeftBottom = static_cast<int>((y - height / 2) * frame.rows);
int xRightTop = static_cast<int>((x + width / 2) * frame.cols);
int yRightTop = static_cast<int>((y + height / 2) * frame.rows);
Rect object(xLeftBottom, yLeftBottom,
xRightTop - xLeftBottom,
yRightTop - yLeftBottom);
rectangle(frame, object, Scalar(0, 0, 255), 2, 8);
if (objectClass < classNamesVec.size())
{
ss.str("");
ss << confidence;
String conf(ss.str());
String label = String(classNamesVec[objectClass]) + ": " + conf;
int baseLine = 0;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom),
Size(labelSize.width, labelSize.height + baseLine)),
Scalar(255, 255, 255), FILLED);
putText(frame, label, Point(xLeftBottom, yLeftBottom + labelSize.height),
FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
}
}
}
imshow("YOLO-Detections", frame);
waitKey(0);
return;
}

client/webrtc_capture/src/cv_yolo.h (new file)

@@ -0,0 +1,19 @@
#ifndef CV_YOLO_H
#define CV_YOLO_H
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <cstdlib>
using namespace std;
using namespace cv;
using namespace cv::dnn;
void video_detection();
void image_detection();
void test_yolo();
#endif // CV_YOLO_H
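In this diff, main.cpp includes cv_yolo.h but no call site is visible, and the model and media paths are hard-coded inside cv_yolo.cpp. A minimal driver, assuming those D:/vcprojects/... paths exist (a sketch, linked against cv_yolo.cpp):

    #include "cv_yolo.h"

    int main() {
        test_yolo(); // runs image_detection(): yolov2-tiny-voc on a single image
        return 0;
    }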

client/webrtc_capture/src/main.cpp

@@ -1,7 +1,7 @@
 #include "mainwindow.h"
 #include "rtc.h"
 #include <QApplication>
+#include "cv_yolo.h"
 #include "modules/video_capture/video_capture.h"
 #include "video_source_impl.h"
 #include <QString>
@@ -11,7 +11,7 @@
 #include "camera_video_sink.h"
 #include "video_source_impl.h"
 #include <QMetaType>
+#include "cv_ssd.h"
 # pragma comment(lib, "secur32.lib")
 # pragma comment(lib, "winmm.lib")
@@ -21,6 +21,20 @@
 # pragma comment(lib, "Strmiids.lib")
 # pragma comment(lib, "User32.lib")
+int BubbleSort(int *p,int len){
+    if(nullptr == p)
+        return -1;
+    for(int i = 0;i < len - 1;i++){
+        for(int j = 0;j < len - 1;j++){
+            if(p[j] < p[j + 1]){
+                int tmp = p[j];
+                p[j] = p[j+1];
+                p[j+1] = tmp;
+            }
+        }
+    }
+}
 void EnumCapture()
 {
     rtc::WinsockInitializer winsock_init;
@@ -29,7 +43,6 @@ void EnumCapture()
     rtc::ThreadManager::Instance()->SetCurrentThread(&w32_thread);
     rtc::InitializeSSL();
     std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
         webrtc::VideoCaptureFactory::CreateDeviceInfo());
@@ -56,6 +69,11 @@
 int main(int argc, char *argv[])
 {
+    int p[5] = {3,1,49,23,23};
+    BubbleSort(p,5);
+    for(int i = 0;i< 5;i++){
+        qDebug()<<p[i]<<" ";
+    }
     qRegisterMetaType<rtc::scoped_refptr<webrtc::I420BufferInterface>>("rtc::scoped_refptr<webrtc::I420BufferInterface>");
     qRegisterMetaType<rtc::scoped_refptr<webrtc::I420BufferInterface>>("rtc::scoped_refptr<webrtc::I420BufferInterface>&");
@@ -69,7 +87,11 @@ int main(int argc, char *argv[])
     QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
     QApplication a(argc, argv);
     MainWindow w;
-    w.setWindowTitle("webrtc easy demo");
+    w.setWindowTitle("learning tool");
     w.show();
     return a.exec();
 }
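An aside on the BubbleSort added above: it is declared to return int but only returns on the null-pointer path, so control falls off the end of a value-returning function once the sort succeeds (undefined behavior), and its comparison sorts in descending order. A corrected sketch of the same routine (the missing success return added, inner bound tightened):

    int BubbleSort(int *p, int len) {
        if (nullptr == p)
            return -1;
        for (int i = 0; i < len - 1; i++) {
            for (int j = 0; j < len - 1 - i; j++) { // the sorted tail can be skipped
                if (p[j] < p[j + 1]) {              // '<' yields descending order
                    int tmp = p[j];
                    p[j] = p[j + 1];
                    p[j + 1] = tmp;
                }
            }
        }
        return 0; // success path previously fell off the end
    }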

client/webrtc_capture/src/mainwindow.cpp

@@ -8,6 +8,92 @@
 #include <opencv2/highgui.hpp>
 #include <opencv2/imgproc.hpp>
 #include "cvhelper.h"
+#include "cv_ssd.h"
+#include <list>
+#include <mutex>
+#include <QFileDialog>
+#include "Qss.h"
+class ASyncDetectAndRenderThread :public QSSASyncProcess{
+public:
+    typedef enum {
+        STATUS_STOP = 1,
+        STATUS_RUNNING = 2
+    }Status;
+    ASyncDetectAndRenderThread(QWidget * parent,CPlayWidget *render_ui,
+                               int width,int height,
+                               std::string model_path,
+                               std::string model_txt_path){
+        m_render = render_ui;
+        m_parent = parent;
+        m_status = STATUS_RUNNING;
+        m_width = width;
+        m_height = height;
+        this->m_model_path = model_path;
+        this->m_model_txt_path = model_txt_path;
+    }
+    bool Detecting(){
+        if(m_mat.size() > 0)
+            return true;
+        else
+            return false;
+    }
+    Status DetectStatus(){
+        return this->m_status;
+    }
+    void SetCvImage(uint8_t *frame){
+        cv::Mat yuv420p;
+        yuv420p.create(m_height*3/2,
+                       m_width, CV_8UC1);
+        memcpy(yuv420p.data, frame, m_height*m_width
+               *sizeof(unsigned char)*3/2);
+        cv::Mat *rgbImg = new cv::Mat;
+        cv::cvtColor(yuv420p, *rgbImg, cv::COLOR_YUV2BGR_I420);
+        std::lock_guard<std::mutex> guard(this->m_mutex);
+        m_mat.push_back(rgbImg);
+    }
+    void Run(void *) override{
+        while(m_status == STATUS_RUNNING){
+            cv::Mat *c = takeLast();
+            if(nullptr != c){
+                cv::Mat *result = ssd_detect(c,
+                                             this->m_model_path,
+                                             this->m_model_txt_path);
+                qDebug()<<result->cols<<result->rows<<result->type();
+                cv::Mat yuvData;
+                cv::cvtColor(*result, yuvData, cv::COLOR_BGR2YUV_I420);
+                this->m_render->OnCameraData(yuvData.data);
+            }
+            delete c;
+        }
+    }
+private:
+    cv::Mat *takeLast(){
+        std::lock_guard<std::mutex> guard(this->m_mutex);
+        if(m_mat.size() != 0){
+            auto ret = *m_mat.begin();
+            m_mat.pop_front();
+            return ret;
+        }
+        return nullptr;
+    }
+    std::list<cv::Mat*> m_mat;
+    std::mutex m_mutex;
+    CPlayWidget *m_render;
+    QWidget *m_parent;
+    Status m_status;
+    int m_width;
+    int m_height;
+    std::string m_model_path;     // model file path
+    std::string m_model_txt_path; // model prototxt path
+};
+ASyncDetectAndRenderThread *gdetect = nullptr;
 class AsyncRennder :public QSSASyncProcess{
 public:
@@ -16,6 +102,7 @@ public:
     this->mfbs = p;
     mUI = render_ui;
     this->state = true;
 }
 // stop rendering
 void StopRender(){
@@ -27,31 +114,17 @@
     qtimer->setSingleShot(false);
     QObject::connect(qtimer, SIGNAL(timeout()), &eventLoop, SLOT(quit()));
-    qtimer->start(3);
+    qtimer->start(22);
     while(state){
         if(mfbs ->Size() > 0){
-            uint8_t *frame = this->mfbs->TakeLast();
-            cv::Mat yuv420p;
-            yuv420p.create(mUI->RenderHeight()*3/2,
-                           mUI->RenderWidth(), CV_8UC1);
-            memcpy(yuv420p.data, frame, mUI->RenderHeight()*mUI->RenderWidth()
-                   *sizeof(unsigned char)*3/2);
-            cv::Mat rgbImg;
-            cv::cvtColor(yuv420p, rgbImg, cv::COLOR_YUV2BGR_I420);
-            // cv::Mat dst;
-            // cv::rotate(rgbImg, dst, cv::ROTATE_90_CLOCKWISE); // flip around x, upside down
-            // cv::imshow("img", dst);
-            //// cv::Mat gray, dst;
-            //// cvtColor(src, gray, COLOR_BGR2GRAY);
-            //// imshow("input", gray);
-            //// equalizeHist(gray, dst);
-            //// imshow("eq", dst);
-            // cv::waitKey(1);
+            uint8_t *frame = mfbs->TakeLast();
             mUI->OnCameraData(frame);
-            // qDebug()<<"dst size is "<<dst.size().width<<dst.size().height;
+            if(gdetect != nullptr){
+                if(!gdetect->Detecting()){
+                    gdetect->SetCvImage(frame);
+                }
+            }
             eventLoop.exec(); // render one frame
             delete frame;
         }
@@ -63,7 +136,9 @@ private:
     CPlayWidget *mUI;
 };
-AsyncRennder *gRender;
+AsyncRennder *gRender = nullptr;
 MainWindow::MainWindow(QWidget *parent)
     : QssMainWindow(parent)
@@ -83,14 +158,9 @@ MainWindow::MainWindow(QWidget *parent)
         info->GetDeviceName(i,name,100,nullptr,0,nullptr,0);
         ui->comboBox->addItem(QString::asprintf("%s",name),i);
     }
-    origin_picture = new QLabel(this);
-    processed_picture = new QLabel(this);
-    ui->gridLayout->addWidget(origin_picture,0,1);
-    origin_picture->setMaximumWidth(12000);
-    origin_picture->setMaximumHeight(12000);
-    ui->gridLayout->addWidget(processed_picture,1,0);
+    mDetectResut = new CPlayWidget(this);
+    ui->gridLayout->addWidget(mDetectResut,0,1);
 }
 MainWindow::~MainWindow()
@@ -105,30 +175,38 @@ void MainWindow::OnUpdateFrame( rtc::scoped_refptr<webrtc::I420BufferInterface>&
 void MainWindow::on_pushButton_clicked()
 {
-    int id = ui->comboBox->currentData().toInt();
-    webrtc::VideoCaptureCapability p;
+    if(ui->pushButton->text() == QString("采集")) {
+        int id = ui->comboBox->currentData().toInt();
+        webrtc::VideoCaptureCapability p;
         std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
             webrtc::VideoCaptureFactory::CreateDeviceInfo());
         char ids[128];
         info->GetDeviceName(id,nullptr,0,ids,128,nullptr,0);
        info->GetCapability(ids,id,p);
         qDebug()<<QString::asprintf("GetCapability: %d %d %d %d",id,p.width,p.height,p.maxFPS);
         m_capturer.reset(CameraVideoSink::Create(p.width, p.height, 25, id));
         if (!m_capturer) {
             qDebug()<<"error";
         }
         ui->openGLWidget->SetDataType(CPlayWidget::TYPE_YUV420P);
         ui->openGLWidget->SetImgSize(m_capturer->Capability().width,
                                      m_capturer->Capability().height);
         ui->openGLWidget->StartRender();
         // ui->openGLWidget->moveToThread(&gRender->Thread());
         if(gRender == nullptr){
             gRender = new AsyncRennder(this,m_capturer->VideoBuffer(),ui->openGLWidget);
             gRender->Start(this);
             connect(gRender,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
         }
+        mDetectResut->SetImgSize(480,560);
+        mDetectResut->StartRender();
+        ui->pushButton->setText(QString::asprintf("正在采集"));
+    }else{
+        ui->openGLWidget->StopRender();
+    }
 }
@@ -137,8 +215,6 @@ void MainWindow::RenderDone()
 }
 cv::Mat *QImage2cvMat(QImage image)
 {
     cv::Mat *mat;
@@ -153,14 +229,15 @@ cv::Mat *QImage2cvMat(QImage image)
         memcpy(mat->data,image.bits(),image.bytesPerLine()*image.height());
         break;
     case QImage::Format_RGB888:
-        mat = new cv::Mat(image.height(), image.width(), CV_8UC3, (void*)image.bits(), image.bytesPerLine());
+        mat = new cv::Mat(image.height(), image.width(), CV_8UC3,
+                          (void*)image.bits(), image.bytesPerLine());
         break;
     case QImage::Format_Indexed8:
-        mat = new cv::Mat(image.height(), image.width(), CV_8UC1, (void*)image.bits(), image.bytesPerLine());
+        mat = new cv::Mat(image.height(), image.width(), CV_8UC1,
+                          (void*)image.bits(), image.bytesPerLine());
         break;
     }
     Save1BitImage(*mat,"d://tgest.png");
     return mat;
 }
@@ -168,20 +245,58 @@
 void MainWindow::on_pushButton_2_clicked()
 {
     QPixmap pix = ui->openGLWidget->grab();
-    QImage image = pix.toImage();
-    image = image.scaled(1280,720);
     origin_picture->setPixmap(pix);
-    qDebug()<<"format is "<<image.format();
-    // cv::Mat Img;
-    // run detection
-    cv::Mat pic = *QImage2cvMat(image);
-    try{
-        cv::imshow("img", pic);
-    }catch (std::exception &e){
-        qDebug()<<e.what();
-    }
-    cv::waitKey(1);
-    this->processed_picture->setPixmap(QPixmap::fromImage(image));
 }
+void MainWindow::on_pushButton_3_clicked()
+{
+    if(nullptr != origin_picture->pixmap()){
+        QImage image = origin_picture->pixmap()->toImage();
+        image = image.scaled(1280,720);
+        cv::Mat pic = *QImage2cvMat(image);
+        if((ui->lineEdit->text() == "") || (ui->lineEdit_2->text() == "")){
+        }
+        cv::Mat *result = ssd_detect(&pic,
+                                     ui->lineEdit->text().toStdString(),
+                                     ui->lineEdit_2->text().toStdString()
+                                     );
+        this->processed_picture->setPixmap(QPixmap::fromImage(image));
+    }
+}
+void MainWindow::on_pushButton_4_clicked()
+{
+    if(gdetect == nullptr){
+        if((ui->lineEdit->text().toStdString() == "")
+                ||
+           (ui->lineEdit_2->text().toStdString() == "")){
+            QssMessageBox::warn("please input model file",nullptr,"please input model file",QMessageBox::Ok);
+            return;
+        }
+        gdetect = new ASyncDetectAndRenderThread(this,mDetectResut,ui->openGLWidget->RenderWidth(),
+                                                 ui->openGLWidget->RenderHeight(),
+                                                 ui->lineEdit->text().toStdString(),
+                                                 ui->lineEdit_2->text().toStdString());
+        gdetect->Start(this);
+        connect(gdetect,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
+    }
+}
+// import model files
+void MainWindow::on_pushButton_5_clicked()
+{
+    QString label1 = ui->lineEdit_2->text();
+    QString openFile = QFileDialog::getOpenFileName(this, "0.0", "",
+                                                    "*.caffemodel *.prototxt",nullptr);
+    if (openFile.contains(".caffemodel")){
+        ui->lineEdit->setText(openFile);
+    }
+    if (openFile.contains(".prototxt")){
+        ui->lineEdit_2->setText(openFile);
+    }
+}
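The capture thread hands frames to the detector through a mutex-guarded list (SetCvImage pushes, takeLast pops, and Detecting() makes the render loop skip submission while a frame is still queued, so detection never backs up rendering). The handoff in isolation (a minimal sketch; FrameQueue is a hypothetical name, not a type in this repository):

    #include <list>
    #include <mutex>

    // Distils the SetCvImage/takeLast pair of ASyncDetectAndRenderThread.
    template <typename T>
    class FrameQueue {
    public:
        void Push(T *item) {
            std::lock_guard<std::mutex> guard(m_mutex);
            m_items.push_back(item);
        }
        // Returns nullptr when empty, matching takeLast's contract.
        T *Pop() {
            std::lock_guard<std::mutex> guard(m_mutex);
            if (m_items.empty())
                return nullptr;
            T *ret = m_items.front();
            m_items.pop_front();
            return ret;
        }
        bool Busy() { // counterpart of Detecting(): true while a frame is pending
            std::lock_guard<std::mutex> guard(m_mutex);
            return !m_items.empty();
        }
    private:
        std::list<T*> m_items;
        std::mutex m_mutex;
    };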

client/webrtc_capture/src/mainwindow.h

@@ -4,6 +4,7 @@
 #include "rtc.h"
 #include "api/video/i420_buffer.h"
 #include "Qss.h"
+#include "cplaywidget.h"
 QT_BEGIN_NAMESPACE
 namespace Ui { class MainWindow; }
@@ -23,10 +24,17 @@ private slots:
     void RenderDone();
     void on_pushButton_2_clicked();
+    void on_pushButton_3_clicked();
+    void on_pushButton_4_clicked();
+    void on_pushButton_5_clicked();
 private:
     Ui::MainWindow *ui;
     std::unique_ptr<CameraVideoSink> m_capturer;
     QLabel *origin_picture;
     QLabel *processed_picture;
+    CPlayWidget *mDetectResut;
 };
 #endif // MAINWINDOW_H

client/webrtc_capture/src/mainwindow.ui

@@ -20,9 +20,9 @@
    <string>MainWindow</string>
   </property>
   <widget class="QWidget" name="centralwidget">
-   <layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,9">
+   <layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,0,9">
    <item>
-    <layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,0,3">
+    <layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,3">
     <item>
      <widget class="QLabel" name="label">
       <property name="text">
@@ -56,13 +56,6 @@
     <item>
      <widget class="QComboBox" name="comboBox_2"/>
     </item>
-    <item>
-     <widget class="QPushButton" name="pushButton_2">
-      <property name="text">
-       <string>拍照</string>
-      </property>
-     </widget>
-    </item>
     <item>
      <spacer name="horizontalSpacer">
       <property name="orientation">
@@ -78,6 +71,57 @@
     </item>
    </layout>
   </item>
+  <item>
+   <layout class="QHBoxLayout" name="horizontalLayout_2">
+    <item>
+     <widget class="QLabel" name="label_3">
+      <property name="text">
+       <string>模型文件: .caffemodel</string>
+      </property>
+     </widget>
+    </item>
+    <item>
+     <widget class="QLineEdit" name="lineEdit"/>
+    </item>
+    <item>
+     <widget class="QLabel" name="label_4">
+      <property name="text">
+       <string>.prototxt:</string>
+      </property>
+     </widget>
+    </item>
+    <item>
+     <widget class="QLineEdit" name="lineEdit_2"/>
+    </item>
+    <item>
+     <spacer name="horizontalSpacer_2">
+      <property name="orientation">
+       <enum>Qt::Horizontal</enum>
+      </property>
+      <property name="sizeHint" stdset="0">
+       <size>
+        <width>40</width>
+        <height>20</height>
+       </size>
+      </property>
+     </spacer>
+    </item>
+    <item>
+     <widget class="QPushButton" name="pushButton_5">
+      <property name="text">
+       <string>导入</string>
+      </property>
+     </widget>
+    </item>
+    <item>
+     <widget class="QPushButton" name="pushButton_4">
+      <property name="text">
+       <string>动态检测</string>
+      </property>
+     </widget>
+    </item>
+   </layout>
+  </item>
   <item>
    <layout class="QGridLayout" name="gridLayout" rowstretch="0" columnstretch="0">
     <item row="0" column="0">

client/webrtc_capture/webrtc_capture.pro

@@ -23,6 +23,8 @@ INCLUDEPATH += third/include/
 SOURCES += \
     src/camera_video_sink.cpp \
     src/cplaywidget.cpp \
+    src/cv_ssd.cpp \
+    src/cv_yolo.cpp \
     src/cvhelper.cpp \
     src/main.cpp \
     src/mainwindow.cpp \
@@ -31,6 +33,8 @@ SOURCES += \
 HEADERS += \
     src/camera_video_sink.h \
     src/cplaywidget.h \
+    src/cv_ssd.h \
+    src/cv_yolo.h \
     src/cvhelper.h \
     src/mainwindow.h \
     src/rtc.h \
@@ -46,8 +50,10 @@ else: unix:!android: target.path = /opt/$${TARGET}/bin
 CONFIG(debug, debug|release){
     message("debug mode")
-    LIBS += -L$$PWD/third/lib libwebrtc.lib ole32.lib oleaut32.lib strmiids.lib advapi32.lib opencv_core455d.lib
-    LIBS += opencv_stitching455d.lib opencv_objdetect455d.lib opencv_ml455d.lib opencv_imgcodecs455d.lib opencv_imgproc455d.lib
+    LIBS += -L$$PWD/third/lib libwebrtc.lib ole32.lib oleaut32.lib strmiids.lib
+    LIBS += advapi32.lib opencv_core455d.lib
+    LIBS += opencv_stitching455d.lib opencv_objdetect455d.lib opencv_videoio455d.lib
+    LIBS += opencv_ml455d.lib opencv_imgcodecs455d.lib opencv_imgproc455d.lib opencv_dnn455d.lib
     LIBS+= opencv_highgui455d.lib
     DEFINES += DEBUG_FLAG
     Qt += debug