Change the SSD model to use file-based input (model files supplied from the UI)

master
zcy 2022-05-25 21:34:18 +08:00
parent 1ba89778f5
commit c54519de29
6 changed files with 168 additions and 71 deletions

View File

@@ -93,9 +93,11 @@ void CameraVideoSink::OnFrame(const webrtc::VideoFrame& frame) {
width*height/4);
m_buf.PushFirst(data);
auto timestamp_curr = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
if(timestamp_curr - timestamp > 1000) {
qDebug()<<timestamp;
RTC_LOG(LS_INFO) << "FPS: " << cnt<<m_buf.Size();
cnt = 0;
timestamp = timestamp_curr;

View File

@@ -1,20 +1,39 @@
#include "cv_ssd.h"
#include <QDebug>
const size_t width = 1280;
const size_t height = 720;
#include <iostream>
using namespace std;
String labelFile = "ssd/labelmap_det.txt";
String modelFile = "D:/project/multimedia/client/build-webrtc_capture-Desktop_Qt_5_15_2_MSVC2019_64bit-Debug/debug/ssd/MobileNetSSD_deploy.caffemodel";
String model_text_file = "D:/project/multimedia/client/build-webrtc_capture-Desktop_Qt_5_15_2_MSVC2019_64bit-Debug/debug/ssd/MobileNetSSD_deploy.prototxt";
String modelFile = "debug/ssd/MobileNetSSD_deploy.caffemodel";
String model_text_file = "debug/ssd/MobileNetSSD_deploy.prototxt";
String objNames[] = { "background",
"aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse",
"motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor" };
String objNames[] = {
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor"
};
cv::Mat * ssd_detect(cv::Mat *inframe) {
if (inframe->empty()) {
cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file) {
if (inframe->empty() || (modelFile == "") || (model_text_file == "")) {
printf("could not load image...\n");
return inframe;
}
@@ -28,7 +47,6 @@ cv::Mat * ssd_detect(cv::Mat *inframe) {
Size(300, 300),
Scalar(127.5, 127.5, 127.5), true, false);
// Net net = readNetFromCaffe(model_text_file, modelFile);
//// image: the image that will be fed to the network for processing or classification.
//// mean: the mean value subtracted from the whole image; to subtract a different value per channel of an RGB image, three mean values can be given.
@@ -40,6 +58,7 @@ cv::Mat * ssd_detect(cv::Mat *inframe) {
//// size: the input image size the network expects from training.
//// swapRB: OpenCV assumes the image channel order is BGR, while the mean values assume RGB,
/// so set swapRB=true if R and B need to be swapped.
///
Net net = readNetFromCaffe(model_text_file, modelFile);
net.setInput(blobImage1, "data");
Mat detection = net.forward("detection_out");
@@ -56,9 +75,11 @@ cv::Mat * ssd_detect(cv::Mat *inframe) {
Rect object_box((int)tl_x, (int)tl_y, (int)(br_x - tl_x), (int)(br_y - tl_y));
rectangle(*image2, object_box, Scalar(0, 0, 255), 2, 8, 0);
putText(*image2, format("%s", objNames[objIndex].c_str()), Point(tl_x, tl_y), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2);
putText(*image2, format("%s", objNames[objIndex].c_str()),
Point(tl_x, tl_y), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2);
}
}
cv::imshow("image2",*image2);
return image2;
}
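For reference, a minimal sketch of how the reworked ssd_detect is expected to be called with explicit model paths; the image name and model paths below are placeholders, not files from this commit:
// Sketch only: calling the new ssd_detect(inframe, modelFile, model_text_file).
// The paths are placeholders; pass whatever .caffemodel/.prototxt the UI selected.
#include <opencv2/opencv.hpp>
#include "cv_ssd.h"
int main() {
    cv::Mat img = cv::imread("test.jpg");                     // any BGR test image (placeholder name)
    cv::Mat *result = ssd_detect(&img,
                                 "ssd/MobileNetSSD_deploy.caffemodel",
                                 "ssd/MobileNetSSD_deploy.prototxt");
    if (result != nullptr) {
        cv::imwrite("result.jpg", *result);                   // boxes and class labels drawn on the frame
    }
    return 0;
}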

View File

@@ -9,8 +9,7 @@ using namespace cv;
using namespace cv::dnn;
using namespace std;
cv::Mat * ssd_detect(cv::Mat*);
cv::Mat * ssd_detect(cv::Mat *inframe,std::string modelFile,std::string model_text_file);

View File

@@ -11,6 +11,7 @@
#include "cv_ssd.h"
#include <list>
#include <mutex>
#include <QFileDialog>
class ASyncDetectAndRenderThread :public QSSASyncProcess{
@@ -20,19 +21,26 @@ public:
STATUS_RUNNING = 2
}Status;
ASyncDetectAndRenderThread(QWidget * parent,CPlayWidget *render_ui,
int width,int height){
int width,int height,
std::string model_path,
std::string model_txt_path){
m_render = render_ui;
m_parent = parent;
m_status = STATUS_RUNNING;
m_width = width;
m_height = height;
this->m_model_path = model_path;
this->m_model_txt_path = model_txt_path;
}
bool DetectStatus(){
bool Detecting(){
if(m_mat.size() > 0)
return true;
else
return false;
}
Status DetectStatus(){
return this->m_status;
}
void SetCvImage(uint8_t *frame){
cv::Mat yuv420p;
yuv420p.create(m_height*3/2,
@@ -42,7 +50,6 @@ public:
cv::Mat *rgbImg = new cv::Mat;
cv::cvtColor(yuv420p, *rgbImg, cv::COLOR_YUV2BGR_I420);
std::lock_guard<std::mutex> guard(this->m_mutex);
// imshow("yuv420",yuv420p);
m_mat.push_back(rgbImg);
}
@@ -50,12 +57,18 @@ public:
while(m_status == STATUS_RUNNING){
cv::Mat *c = takeLast();
if(nullptr != c){
cv::Mat *result = ssd_detect(c);
cv::Mat *result = ssd_detect(c,
this->m_model_path,
this->m_model_txt_path);
qDebug()<<result->cols<<result->rows<<result->type();
cv::Mat yuvData;
cv::cvtColor(*result, yuvData, cv::COLOR_BGR2YUV_I420);
this->m_render->OnCameraData(yuvData.data);
}
delete c;
}
}
@@ -68,7 +81,7 @@ private:
return ret;
}
return nullptr;
}
}
std::list<cv::Mat*> m_mat;
std::mutex m_mutex;
CPlayWidget *m_render;
@@ -76,6 +89,8 @@ private:
Status m_status;
int m_width;
int m_height;
std::string m_model_path; // path to the .caffemodel file
std::string m_model_txt_path; // path to the .prototxt file
};
ASyncDetectAndRenderThread *gdetect = nullptr;
@@ -87,6 +102,7 @@ public:
this->mfbs = p;
mUI = render_ui;
this->state = true;
}
// Stop rendering
void StopRender(){
@@ -98,14 +114,14 @@ public:
qtimer->setSingleShot(false);
QObject::connect(qtimer, SIGNAL(timeout()), &eventLoop, SLOT(quit()));
qtimer->start(3);
qtimer->start(22);
while(state){
if(mfbs ->Size() > 0){
uint8_t *frame = mfbs->TakeLast();
mUI->OnCameraData(frame);
if(gdetect != nullptr){
if(!gdetect->DetectStatus()){
if(!gdetect->Detecting()){
gdetect->SetCvImage(frame);
}
}
@@ -142,16 +158,8 @@ MainWindow::MainWindow(QWidget *parent)
info->GetDeviceName(i,name,100,nullptr,0,nullptr,0);
ui->comboBox->addItem(QString::asprintf("%s",name),i);
}
origin_picture = new QLabel(this);
processed_picture = new QLabel(this);
ui->gridLayout->addWidget(origin_picture,0,1);
origin_picture->setMaximumWidth(12000);
origin_picture->setMaximumHeight(12000);
ui->gridLayout->addWidget(processed_picture,1,0);
mDetectResut = new CPlayWidget(this);
ui->gridLayout->addWidget(mDetectResut,1,1);
ui->gridLayout->addWidget(mDetectResut,0,1);
}
@@ -167,37 +175,39 @@ void MainWindow::OnUpdateFrame( rtc::scoped_refptr<webrtc::I420BufferInterface>&
void MainWindow::on_pushButton_clicked()
{
if(ui->pushButton->text() == QString("采集")) {
int id = ui->comboBox->currentData().toInt();
webrtc::VideoCaptureCapability p;
std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
webrtc::VideoCaptureFactory::CreateDeviceInfo());
int id = ui->comboBox->currentData().toInt();
webrtc::VideoCaptureCapability p;
char ids[128];
info->GetDeviceName(id,nullptr,0,ids,128,nullptr,0);
info->GetCapability(ids,id,p);
qDebug()<<QString::asprintf("GetCapability: %d %d %d %d",id,p.width,p.height,p.maxFPS);
std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
webrtc::VideoCaptureFactory::CreateDeviceInfo());
m_capturer.reset(CameraVideoSink::Create(p.width, p.height, 25, id));
if (!m_capturer) {
qDebug()<<"error";
}
ui->openGLWidget->SetDataType(CPlayWidget::TYPE_YUV420P);
ui->openGLWidget->SetImgSize(m_capturer->Capability().width,
m_capturer->Capability().height);
ui->openGLWidget->StartRender();
// ui->openGLWidget->moveToThread(&gRender->Thread());
if(gRender == nullptr){
gRender = new AsyncRennder(this,m_capturer->VideoBuffer(),ui->openGLWidget);
gRender->Start(this);
connect(gRender,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
}
mDetectResut->SetImgSize(480,560);
mDetectResut->StartRender();
char ids[128];
info->GetDeviceName(id,nullptr,0,ids,128,nullptr,0);
info->GetCapability(ids,id,p);
qDebug()<<QString::asprintf("GetCapability: %d %d %d %d",id,p.width,p.height,p.maxFPS);
m_capturer.reset(CameraVideoSink::Create(p.width, p.height, 25, id));
if (!m_capturer) {
qDebug()<<"error";
ui->pushButton->setText(QString::asprintf("正在采集"));
}else{
ui->openGLWidget->StopRender();
}
ui->openGLWidget->SetDataType(CPlayWidget::TYPE_YUV420P);
ui->openGLWidget->SetImgSize(m_capturer->Capability().width,
m_capturer->Capability().height);
ui->openGLWidget->StartRender();
// ui->openGLWidget->moveToThread(&gRender->Thread());
if(gRender == nullptr){
gRender = new AsyncRennder(this,m_capturer->VideoBuffer(),ui->openGLWidget);
gRender->Start(this);
connect(gRender,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
}
mDetectResut->SetImgSize(480,560);
mDetectResut->StartRender();
}
void MainWindow::RenderDone()
@@ -205,8 +215,6 @@ void MainWindow::RenderDone()
}
cv::Mat *QImage2cvMat(QImage image)
{
cv::Mat *mat;
@@ -247,8 +255,13 @@ void MainWindow::on_pushButton_3_clicked()
QImage image = origin_picture->pixmap()->toImage();
image = image.scaled(1280,720);
cv::Mat pic = *QImage2cvMat(image);
if((ui->lineEdit->text() == "") || (ui->lineEdit_2->text() == "")){
cv::Mat *result = ssd_detect(&pic);
}
cv::Mat *result = ssd_detect(&pic,
ui->lineEdit->text().toStdString(),
ui->lineEdit_2->text().toStdString()
);
this->processed_picture->setPixmap(QPixmap::fromImage(image));
}
}
@@ -258,10 +271,26 @@ void MainWindow::on_pushButton_4_clicked()
{
if(gdetect == nullptr){
gdetect = new ASyncDetectAndRenderThread(this,mDetectResut,ui->openGLWidget->RenderWidth(),
ui->openGLWidget->RenderHeight());
ui->openGLWidget->RenderHeight(),
ui->lineEdit->text().toStdString(),
ui->lineEdit_2->text().toStdString());
gdetect->Start(this);
connect(gdetect,&QSSASyncProcess::Done,this,&MainWindow::RenderDone);
}
}
// Import a model file (.caffemodel or .prototxt) via a file dialog
void MainWindow::on_pushButton_5_clicked()
{
QString label1 = ui->lineEdit_2->text();
QString openFile = QFileDialog::getOpenFileName(this, "0.0", "",
"*.caffemodel *.prototxt",nullptr);
if (openFile.contains(".caffemodel")){
ui->lineEdit->setText(openFile);
}
if (openFile.contains(".prototxt")){
ui->lineEdit_2->setText(openFile);
}
}
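As background for SetCvImage() and Run() above, a minimal sketch of the I420/BGR round trip the detect thread performs; the frame size is a placeholder, not taken from this commit:
// Sketch only: the I420 <-> BGR conversion used around ssd_detect.
// An I420 frame is a single-channel Mat with height*3/2 rows:
// the Y plane (height rows) followed by the subsampled U and V planes.
#include <opencv2/opencv.hpp>
int main() {
    const int width = 1280, height = 720;                    // placeholder frame size
    cv::Mat yuv420p(height * 3 / 2, width, CV_8UC1, cv::Scalar(0));
    cv::Mat bgr;
    cv::cvtColor(yuv420p, bgr, cv::COLOR_YUV2BGR_I420);      // to BGR before detection
    cv::Mat yuvOut;
    cv::cvtColor(bgr, yuvOut, cv::COLOR_BGR2YUV_I420);       // back to I420 for the renderer
    // yuvOut.data is what gets handed to CPlayWidget::OnCameraData().
    return 0;
}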

View File

@@ -28,6 +28,8 @@ private slots:
void on_pushButton_4_clicked();
void on_pushButton_5_clicked();
private:
Ui::MainWindow *ui;
std::unique_ptr<CameraVideoSink> m_capturer;

View File

@@ -20,9 +20,9 @@
<string>MainWindow</string>
</property>
<widget class="QWidget" name="centralwidget">
<layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,9">
<layout class="QVBoxLayout" name="verticalLayout_2" stretch="1,0,9">
<item>
<layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,0,0,0,3">
<layout class="QHBoxLayout" name="horizontalLayout" stretch="1,1,1,1,1,0,0,3">
<item>
<widget class="QLabel" name="label">
<property name="text">
@@ -70,13 +70,6 @@
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="pushButton_4">
<property name="text">
<string>动态检测</string>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer">
<property name="orientation">
@@ -92,6 +85,57 @@
</item>
</layout>
</item>
<item>
<layout class="QHBoxLayout" name="horizontalLayout_2">
<item>
<widget class="QLabel" name="label_3">
<property name="text">
<string>模型文件: .caffemodel</string>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="lineEdit"/>
</item>
<item>
<widget class="QLabel" name="label_4">
<property name="text">
<string>.prototxt:</string>
</property>
</widget>
</item>
<item>
<widget class="QLineEdit" name="lineEdit_2"/>
</item>
<item>
<spacer name="horizontalSpacer_2">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QPushButton" name="pushButton_5">
<property name="text">
<string>导入</string>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="pushButton_4">
<property name="text">
<string>动态检测</string>
</property>
</widget>
</item>
</layout>
</item>
<item>
<layout class="QGridLayout" name="gridLayout" rowstretch="0" columnstretch="0">
<item row="0" column="0">