Add message to rotate gimbal to specific coordinates
parent c4e3a9ceea
commit f1f08a9a21
@ -0,0 +1,240 @@
#include <iostream>
#include <thread>
#include <mutex>
#include <atomic>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <SDL2/SDL.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}

// Shared state between the capture and processing threads, guarded by mtx.
std::mutex mtx;
cv::Mat img;                  // latest decoded frame (BGR)
std::vector<cv::Rect> found;  // person detections from the HOG detector
std::vector<double> weights;  // confidence weight of each detection
unsigned target;              // index of the highest-weighted detection

// Shutdown flag; atomic because both threads read and write it.
std::atomic<bool> stop{false};

static void processFrameThread() {
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    while(!stop) {
        std::unique_lock<std::mutex> lock(mtx);

        if(!img.empty()) {
            cv::Mat gray;
            cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
            hog.detectMultiScale(gray, found, weights, 0, cv::Size(), cv::Size(), 1.1);

            if(!found.empty()) {
                // Pick the detection with the highest confidence weight
                target = 0;
                for(unsigned i = 0; i < weights.size(); i++) {
                    if(weights[i] > weights[target])
                        target = i;
                }

                // Get the center of the highest weighted rectangle
                float x = found[target].x + found[target].width / 2.0f;
                float y = found[target].y + found[target].height / 2.0f;

                // Normalize the coordinates to [-1, 1] relative to the image center
                x = 2 * (x - img.cols / 2.0f) / img.cols;
                y = 2 * (y - img.rows / 2.0f) / img.rows;

                // Get the FOV angle of the point in radians
                float FOV = 120 * (M_PI / 180);
                x = x * (FOV / 2);
                y = y * (FOV / 2);

                // Convert to degrees
                x = x * (180 / M_PI);
                y = y * (180 / M_PI);

                // Yaw/pitch offsets for the gimbal
                printf("y: %f, p: %f\n", x, y);
            }
        }

        lock.unlock();

        SDL_Delay(75);
    }
}
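
// A minimal sketch (not called anywhere above): the same pixel-to-angle mapping as
// in processFrameThread, factored into a helper for clarity. It assumes a simple
// linear model in which the camera's 120-degree field of view is spread evenly over
// both image axes; the function and parameter names here are hypothetical.
static void pixelToGimbalAngles(const cv::Rect& box, int imgCols, int imgRows,
                                float fovDegrees, float& yawDeg, float& pitchDeg) {
    // Center of the detection box in pixel coordinates
    float cx = box.x + box.width / 2.0f;
    float cy = box.y + box.height / 2.0f;

    // Normalize to [-1, 1] relative to the image center
    float nx = 2.0f * (cx - imgCols / 2.0f) / imgCols;
    float ny = 2.0f * (cy - imgRows / 2.0f) / imgRows;

    // A point at the image edge maps to +/- half the field of view, so a detection
    // centered on the right edge (nx = 1) yields a yaw of +60 degrees for a 120-degree FOV
    yawDeg   = nx * (fovDegrees / 2.0f);
    pitchDeg = ny * (fovDegrees / 2.0f);
}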

static void captureFrameThread(SDL_Window* window, const char* fname) {
    av_register_all();
    avcodec_register_all();
    AVFormatContext* pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, fname, NULL, NULL) != 0) {
        std::cerr << "Couldn't open stream\n";
        return;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        std::cerr << "Couldn't find stream information\n";
        return;
    }

    // Find the first video stream
    int videoStream = -1;
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }

    if (videoStream == -1) {
        std::cerr << "Didn't find a video stream\n";
        return;
    }

    AVCodecParameters* pCodecParameters = pFormatCtx->streams[videoStream]->codecpar;
    AVCodec* pCodec = avcodec_find_decoder(pCodecParameters->codec_id);

    if (pCodec == NULL) {
        std::cerr << "Unsupported codec\n";
        return;
    }

    AVCodecContext* pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = AV_PIX_FMT_GRAY8; // note: overwritten by avcodec_parameters_to_context() below
    avcodec_parameters_to_context(pCodecCtx, pCodecParameters);

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        std::cerr << "Could not open codec\n";
        return;
    }

    AVFrame* pFrame = av_frame_alloc();
    AVFrame* pFrameRGB = av_frame_alloc();

    // Buffer for the BGR-converted frame (3 bytes per pixel); OpenCV expects BGR channel order
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1);
    uint8_t* buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1);

    // Converter from the decoder's native pixel format to BGR24
    SwsContext* sws_ctx = sws_getContext(
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_BGR24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL);

    //AVRational time_base = pFormatCtx->streams[videoStream]->time_base;
    //AVRational frame_rate = av_guess_frame_rate(pFormatCtx, pFormatCtx->streams[videoStream], NULL);
    //uint32_t delay = (av_rescale_q(1, av_inv_q(frame_rate), time_base) / AV_TIME_BASE) * 1000;
    //printf("delay: %u\n", delay);

    SDL_SetWindowSize(window, pCodecCtx->width, pCodecCtx->height);
    SDL_Renderer* renderer = SDL_CreateRenderer(window, -1, 0);
    SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_BGR24, SDL_TEXTUREACCESS_STATIC, pCodecCtx->width, pCodecCtx->height);

    while (!stop) {
        AVPacket packet;
        if(av_read_frame(pFormatCtx, &packet) < 0) {
            stop = true;
            break;
        }

        if (packet.stream_index == videoStream) {
            // Send/receive decode loop: one packet in, zero or more frames out
            int response = avcodec_send_packet(pCodecCtx, &packet);
            if (response < 0) {
                std::cerr << "Error while sending a packet to the decoder: " << response << '\n';
                return;
            }

            while (response >= 0) {
                response = avcodec_receive_frame(pCodecCtx, pFrame);
                if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
                    break;
                } else if (response < 0) {
                    std::cerr << "Error while receiving a frame from the decoder: " << response << '\n';
                    return;
                }

                // Convert the decoded frame to BGR24 for OpenCV/SDL
                sws_scale(sws_ctx, (uint8_t const* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                std::unique_lock<std::mutex> lock(mtx);

                // Wrap the converted buffer in a cv::Mat header (no copy)
                img = cv::Mat(pCodecCtx->height, pCodecCtx->width, CV_8UC3, pFrameRGB->data[0], pFrameRGB->linesize[0]);

                // Draw the current detections, colored by confidence weight
                for (unsigned i = 0; i < found.size(); i++) {
                    rectangle(img, found[i], cv::Scalar(255 - weights[i] * 255, 0, weights[i] * 255), 3);
                }

                lock.unlock();

                SDL_UpdateTexture(texture, NULL, img.data, img.cols * 3);
                SDL_RenderClear(renderer);
                SDL_RenderCopy(renderer, texture, NULL, NULL);
                SDL_RenderPresent(renderer);
            }
        }
        av_packet_unref(&packet);

        SDL_Event event;
        while(SDL_PollEvent(&event)) {
            switch(event.type) {
                case SDL_QUIT:
                    stop = true;
                default: break;
            }
        }

        SDL_Delay(33);
    }

    av_free(buffer);
    av_frame_free(&pFrameRGB);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    SDL_DestroyTexture(texture);
    SDL_DestroyRenderer(renderer);
}
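
// Note on the FFmpeg calls above: av_register_all() and avcodec_register_all() are
// deprecated no-ops since FFmpeg 4.0 (registration happens automatically), and
// avcodec_free_context() is the preferred way to tear down the decoder, since it
// both closes the codec and frees the context. On a newer FFmpeg the cleanup could
// plausibly look like:
//
//     avcodec_free_context(&pCodecCtx);
//     avformat_close_input(&pFormatCtx);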

int main(int argc, char* argv[]) {
    if(argc < 2) {
        fprintf(stderr, "usage: %s <video stream or file>\n", argv[0]);
        return 1;
    }

    if(SDL_Init(SDL_INIT_VIDEO) < 0) {
        fprintf(stderr, "%s", SDL_GetError());
        return 1;
    }

    SDL_Window* win = SDL_CreateWindow(
        "Robomaster",
        SDL_WINDOWPOS_UNDEFINED,
        SDL_WINDOWPOS_UNDEFINED,
        800, 300,
        SDL_WINDOW_RESIZABLE);
    if(!win) {
        fprintf(stderr, "%s", SDL_GetError());
        return 1;
    }

    std::thread captureThread(captureFrameThread, win, argv[1]);
    std::thread processThread(processFrameThread);

    captureThread.join();
    processThread.join();

    SDL_Quit();

    return 0;
}
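
// Example build and run (a sketch; pkg-config module names vary by distribution,
// e.g. "opencv" vs. "opencv4", and the source and binary names are assumed here):
//
//   g++ -std=c++11 main.cpp -o robomaster -pthread \
//       $(pkg-config --cflags --libs opencv4 sdl2 libavcodec libavformat libswscale libavutil)
//
//   ./robomaster video.mp4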