// double-hit-balls/game/main_code — копия.cpp

#include "main_code.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "include/Engine.h"
#include "main_code.h"
FaceStruct::FaceStruct()
{
}

FaceStruct::FaceStruct(const std::array<cv::Point2f, LANDMARK_POINT_COUNT>& fromPreds)
{
    ApplyPreds(fromPreds);
}
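
// Recomputes center and size from the current predictions: center is the mean of all
// landmark points, size is their extent (max - min) along each axis.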
void FaceStruct::CalcFromPreds()
{
    float minX = preds[0](0);
    float maxX = preds[0](0);
    float minY = preds[0](1);
    float maxY = preds[0](1);
    float sumX = preds[0](0);
    float sumY = preds[0](1);

    for (size_t i = 1; i < LANDMARK_POINT_COUNT; i++)
    {
        if (minX > preds[i](0))
        {
            minX = preds[i](0);
        }
        if (minY > preds[i](1))
        {
            minY = preds[i](1);
        }
        if (maxX < preds[i](0))
        {
            maxX = preds[i](0);
        }
        if (maxY < preds[i](1))
        {
            maxY = preds[i](1);
        }
        sumX += preds[i](0);
        sumY += preds[i](1);
    }

    float avgX = sumX / static_cast<float>(LANDMARK_POINT_COUNT);
    float avgY = sumY / static_cast<float>(LANDMARK_POINT_COUNT);

    center = { avgX, avgY };
    size = { maxX - minX, maxY - minY };
    //center = { centerXSmoother.responsiveAnalogReadSimple(avgX), centerYSmoother.responsiveAnalogReadSimple(avgY) };
    //size = { sizeWidthSmoother.responsiveAnalogReadSimple(maxX - minX), sizeHeightSmoother.responsiveAnalogReadSimple(maxY - minY) };
}
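
// Stores fresh predictions and updates the derived box. The block commented out inside
// is an earlier temporal-smoothing pass that averaged the last few frames of
// center-relative predictions; it is kept for reference but not used.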
void FaceStruct::ApplyPreds(const std::array<cv::Point2f, LANDMARK_POINT_COUNT>& fromPreds)
{
    InnerApplyPreds(fromPreds);
    CalcFromPreds();
    /*
    for (size_t i = 0; i < LANDMARK_POINT_COUNT; i++)
    {
        preds[i] = preds[i] - center;
    }
    historicalPreds.push_back(preds);
    if (historicalPreds.size() > 4)
    {
        historicalPreds.erase(historicalPreds.begin());
    }
    for (size_t i = 0; i < LANDMARK_POINT_COUNT; i++)
    {
        preds[i] = { 0.f, 0.f };
        for (size_t x = 0; x < historicalPreds.size(); x++)
        {
            preds[i] += historicalPreds[x][i];
        }
        preds[i] = preds[i] / static_cast<float>(historicalPreds.size());
    }
    for (size_t i = 0; i < LANDMARK_POINT_COUNT; i++)
    {
        preds[i] = preds[i] + center;
    }
    */
}
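
// Copies the detector output into preds, flipping the Y axis: OpenCV uses a top-left
// origin while the renderer appears to use a bottom-left one. The hardcoded 720 assumes
// the 1280x720 capture size requested in InnerInit().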
void FaceStruct::InnerApplyPreds(const std::array<cv::Point2f, LANDMARK_POINT_COUNT>& fromPreds)
{
    for (size_t i = 0; i < LANDMARK_POINT_COUNT; i++)
    {
        preds[i] = { fromPreds[i].x, 720 - fromPreds[i].y };
    }
}
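
// File-scope state: the capture device, the landmark detector, the GL texture that
// receives each processed frame, a short history of captured frames (frameArr), and the
// bookkeeping indices (currentIndex, lastProcessedIndex, lineShift, prevPassedIndex,
// indexStep) used in InnerUpdate() to pick which buffered frame to display so that the
// video keeps pace with the slower landmark detection.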
cv::VideoCapture cap;
FaceLandmarkDetector faceLandmarkDetector;
GLuint lastFrameTexture;
std::array<FaceLandmarkStruct, MAX_FACE_COUNT> faceLandmarkArr;
cv::Mat frame;
std::vector<cv::Mat> frameArr;
int currentIndex = -1;
int lastProcessedIndex = 0;
int lineShift = 0;
int prevPassedIndex = 0;
int indexStep = 0;
std::array<FaceStruct, MAX_FACE_COUNT> faceStruct;
const std::string CONST_LOADING_BACKGROUND_BLACK = "loading_background_black";
TMyApplication* Application;
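
// Engine entry point: sets up resource paths, shaders, the emoji textures and one
// textured quad per trackable face, opens the default camera at 1280x720 and starts the
// landmark-tracking process.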
void TMyApplication::InnerInit()
{
    Application = this;

#ifdef TARGET_WIN32
#ifdef NDEBUG
    //ST::PathToResources = "resources/";
    ST::PathToResources = "../../../assets/";
#else
    ST::PathToResources = "../../../assets/";
#endif
#endif

#ifdef TARGET_IOS
    ST::PathToResources = "assets/";
#endif

    if (Console != NULL)
    {
        *Console << "APP INIT\n";
    }

    srand(static_cast<size_t>(time(NULL)));

    ResourceManager->ShaderManager.AddShader("DefaultShader", "shader1vertex.txt", "shader1fragment.txt");
    ResourceManager->ShaderManager.AddShader("FrameShader", "frameshader_vertex.txt", "frameshader_fragment.txt");
    Renderer->PushShader("DefaultShader");

    ResourceManager->TexList.AddTexture(CONST_LOADING_BACKGROUND_BLACK + ".png", CONST_LOADING_BACKGROUND_BLACK);

    // Emoji sprites used as face overlays (note: "Asset 12@20x.png" is skipped).
    auto texid = ResourceManager->TexList.AddTexture("emoji/Asset 3@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 4@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 5@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 6@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 7@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 8@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 9@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 10@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 11@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 13@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 14@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 15@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 16@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 17@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 18@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 19@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 20@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 21@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 22@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 23@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 24@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 25@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 26@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 27@20x.png");
    ResourceManager->TexList.AddTexture("emoji/Asset 28@20x.png");

    ResourceManager->FontManager.AddFont("droid_sans14", "droid_sans14_font_bitmap.png", "droid_sans14_font_charmap.txt");
    ResourceManager->FontManager.PushFont("droid_sans14");

    lastFrameTexture = ResourceManager->TexList.AddEmptyTexture("lastFrameTexture", 1280, 720);

    Renderer->SetOrthoProjection();
    Renderer->SetFullScreenViewport();

    for (size_t i = 0; i < MAX_FACE_COUNT; i++)
    {
        // Each face slot gets an emoji texture named "Asset <i+3>@20x.png" and a
        // placeholder quad that InnerUpdate() repositions every frame.
        faceRenderPair[i].first.SamplerMap[CONST_STRING_TEXTURE_UNIFORM] = "Asset " + boost::lexical_cast<std::string>(i + 3) + "@20x.png";
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(0, 0, 0));
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(0, 512, 0));
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(512, 512, 0));
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(512, 512, 0));
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(512, 0, 0));
        faceRenderPair[i].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB].push_back(Vector3f(0, 0, 0));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(0, 0));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(0, 1));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(1, 1));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(1, 1));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(1, 0));
        faceRenderPair[i].second.Data.Vec2CoordArr[CONST_STRING_TEXCOORD_ATTRIB].push_back(Vector2f(0, 0));
        faceRenderPair[i].second.RefreshBuffer();
    }

    cap = cv::VideoCapture(0);
    //cap = cv::VideoCapture("video.mp4");
    //cap = cv::VideoCapture("bp.mp4");

    // Check if camera opened successfully
    if (!cap.isOpened())
    {
        std::cout << "Error opening video stream or file" << std::endl;
    }

    // CV_CAP_PROP_* are the legacy capture property names (cv::CAP_PROP_* in OpenCV 3+).
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 1280);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 720);

    faceLandmarkDetector.StartTrackProcess();

    Inited = true;
}
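
// Stops the landmark-tracking process and releases the camera before the engine shuts down.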
void TMyApplication::InnerDeinit()
{
    faceLandmarkDetector.StopTrackProcess();
    cap.release();
    Inited = false;

    if (Console != NULL)
    {
        *Console << "APP DEINIT\n";
    }
}

// The input callbacks below are currently empty; the demo does not react to touch input.
void TMyApplication::InnerOnTapDown(Vector2f p)
{
}

void TMyApplication::InnerOnTapUp(Vector2f p)
{
}

void TMyApplication::InnerOnTapUpAfterMove(Vector2f p)
{
}

void TMyApplication::InnerOnMove(Vector2f p, Vector2f shift)
{
}

void TMyApplication::OnFling(Vector2f v)
{
}
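
// Draws the most recently uploaded camera frame as a fullscreen rectangle. The per-face
// emoji quads prepared in InnerUpdate() are currently not drawn (that loop is commented out).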
void TMyApplication::InnerDraw()
{
    glDisable(GL_DEPTH_TEST);

    //glBindTexture(GL_TEXTURE_2D, ResourceManager->TexList[CONST_LOADING_BACKGROUND_BLACK]);
    glBindTexture(GL_TEXTURE_2D, lastFrameTexture);
    Renderer->DrawRect(Vector2f(0.f, 0.f), Vector2f(Renderer->GetMatrixWidth(), Renderer->GetMatrixHeight()));

    /*
    for (size_t i = 0; i < MAX_FACE_COUNT; i++)
    {
        TRenderParamsSetter params(faceRenderPair[i].first);
        Renderer->DrawTriangleList(faceRenderPair[i].second);
    }
    */
}
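
// Per-frame update: grabs a camera frame, buffers it, asks the detector for the latest
// landmark results (which may lag a few frames behind the camera), picks the buffered
// frame matching the detector's progress, draws landmark overlays on it and uploads the
// result into lastFrameTexture. The block commented out at the top of the function is an
// earlier implementation kept for reference.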
void TMyApplication::InnerUpdate(size_t dt)
{
    /*
    cap >> lastFrame;
    if (lastFrame.empty())
    {
        return;
    }
    faceLandmarkTracker.HandleOneFrame(lastFrame, localInputStructArr, localFaceShift, localOutputStructArr);
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(t2 - FaceLandmarkTracker::lastTimePoint).count();
    FaceLandmarkTracker::lastTimePoint = t2;
    std::stringstream ofs;
    ofs << round(1000000.0 / duration);
    for (size_t index = 0; index < CONST_FACE_COUNT; index++)
    {
        if (localOutputStructArr[index].valid && localInputStructArr[index].valid)
        {
            cv::Rect oldFaceRect = localOutputStructArr[index].face;
            cv::Rect newFaceRect = localInputStructArr[index].face;
            cv::Point2f oldCenter(oldFaceRect.x + oldFaceRect.width*0.5f, oldFaceRect.y + oldFaceRect.height*0.5f);
            cv::Point2f newCenter(newFaceRect.x + newFaceRect.width*0.5f, newFaceRect.y + newFaceRect.height*0.5f);
            localFaceShift[index] = newCenter - oldCenter;
        }
        else
        {
            localFaceShift[index] = cv::Point2f(0, 0);
        }
    }
    for (size_t index = 0; index < CONST_FACE_COUNT; index++)
    {
        if (localOutputStructArr[index].valid)
        {
            faceStruct[index].ApplyPreds(localOutputStructArr[index].preds);
            for (size_t i = 0; i < CONST_PREDS_COUNT; i++)
            {
                //cv::ellipse(lastFrame, cv::Point2f(faceStruct[index].preds[i](0), 720 - faceStruct[index].preds[i](1)) + localFaceShift[index], cv::Size(2, 2), 0, 0, 360, cv::Scalar(255, 128, 128), 4, 8, 0);
            }
            Vector2f posFrom = faceStruct[index].center - faceStruct[index].size*0.5f* 1.25f;
            Vector2f posTo = faceStruct[index].center + faceStruct[index].size*0.5f* 1.25f;
            ApplyVertexCoordVec(faceRenderPair[index].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB], posFrom, posTo, 0);
            faceRenderPair[index].second.RefreshBuffer();
        }
        else
        {
            ApplyVertexCoordVec(faceRenderPair[index].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB], {0,0}, { 0,0 }, 0);
            faceRenderPair[index].second.RefreshBuffer();
        }
    }
    for (size_t index = 0; index < CONST_FACE_COUNT; index++)
    {
        if (localInputStructArr[index].valid)
        {
            //cv::rectangle(lastFrame, localInputStructArr[index].face, cv::Scalar(255, 0, 255), 4, 8, 0);
        }
    }
    */

    cap >> frame; // get a new frame from camera/video or read image
    if (frame.empty())
    {
        return;
    }

    // Keep a copy of the frame. The buffer holds at most 40 frames; lineShift counts how
    // many old frames have been dropped so buffered positions can be mapped back to
    // absolute frame indices.
    cv::Mat image = frame.clone();
    frameArr.push_back(image);
    while (frameArr.size() > 40)
    {
        lineShift++;
        frameArr.erase(frameArr.begin());
    }
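
    // Frame-lag compensation: currentIndex is the index of the frame just captured,
    // lastProcessedIndex is the newest frame the detector has finished, and indexStep
    // creeps forward while no new detector result arrives so playback does not stall.
    // For example, if the detector last finished frame 10 while the camera is already at
    // frame 14, the code below keeps showing frame 10 (plus indexStep) until a newer
    // result arrives.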
    currentIndex++;
    faceLandmarkArr = faceLandmarkDetector.GetFaceLandmarks(frame, currentIndex);

    // Find the newest frame index among the valid detector results.
    bool newProcessedIndexFound = false;
    int newProcessedIndex = 0;
    for (size_t i = 0; i < faceLandmarkArr.size(); i++)
    {
        if (faceLandmarkArr[i].valid)
        {
            if (!newProcessedIndexFound)
            {
                newProcessedIndexFound = true;
                newProcessedIndex = faceLandmarkArr[i].frameIndex;
            }
            else
            {
                if (newProcessedIndex < faceLandmarkArr[i].frameIndex)
                {
                    newProcessedIndex = faceLandmarkArr[i].frameIndex;
                }
            }
        }
    }

    if (newProcessedIndexFound && newProcessedIndex > lastProcessedIndex)
    {
        indexStep = 0;
        lastProcessedIndex = newProcessedIndex;
        prevPassedIndex = currentIndex;
    }
    else
    {
        if (prevPassedIndex > lastProcessedIndex + indexStep)
        {
            indexStep++;
        }
    }
    //std::cout << currentIndex << " " << lastProcessedIndex << " stepped:" << lastProcessedIndex + indexStep << " indexStep: " << indexStep << std::endl;

    // Pick the buffered frame that corresponds to the detector's progress, clamped to the buffer range.
    cv::Mat renderImage;
    if (lastProcessedIndex + indexStep - lineShift < 0)
    {
        renderImage = frameArr[0];
    }
    else if (lastProcessedIndex + indexStep - lineShift >= static_cast<int>(frameArr.size()))
    {
        renderImage = frameArr[frameArr.size() - 1];
    }
    else
    {
        renderImage = frameArr[lastProcessedIndex + indexStep - lineShift];
    }
    renderImage = renderImage.clone();

    cv::Rect frameRect(cv::Point(), renderImage.size()); // currently unused

    // Draw the detected face rectangles; the raw landmark markers here are commented out
    // (the landmarks are drawn below via cv::ellipse instead).
    for (size_t i = 0; i < faceLandmarkArr.size(); i++)
    {
        if (faceLandmarkArr[i].valid)
        {
            cv::rectangle(renderImage, faceLandmarkArr[i].faceRect, cv::Scalar(0, 0, 255), 1, 4, 0);
            for (size_t j = 0; j < faceLandmarkArr[i].landmarkArr.size(); j++)
            {
                //cv::circle(renderImage, faceLandmarkArr[i].landmarkArr[j], 0.1, cv::Scalar(0, 255, 255), 4, 8, 0);
            }
        }
    }

    // Update the per-face structures and emoji quads; invalid slots collapse their quad to a point.
    for (size_t index = 0; index < faceLandmarkArr.size(); index++)
    {
        if (faceLandmarkArr[index].valid)
        {
            faceStruct[index].ApplyPreds(faceLandmarkArr[index].landmarkArr);
            for (size_t i = 0; i < LANDMARK_POINT_COUNT; i++)
            {
                cv::ellipse(renderImage, cv::Point2f(faceStruct[index].preds[i](0), 720 - faceStruct[index].preds[i](1)), cv::Size(2, 2), 0, 0, 360, cv::Scalar(255, 128, 128), 4, 8, 0);
            }
            Vector2f posFrom = faceStruct[index].center - faceStruct[index].size * 0.5f * 1.35f;
            Vector2f posTo = faceStruct[index].center + faceStruct[index].size * 0.5f * 1.35f;
            ApplyVertexCoordVec(faceRenderPair[index].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB], posFrom, posTo, 0);
            faceRenderPair[index].second.RefreshBuffer();
        }
        else
        {
            ApplyVertexCoordVec(faceRenderPair[index].second.Data.Vec3CoordArr[CONST_STRING_POSITION_ATTRIB], { 0, 0 }, { 0, 0 }, 0);
            faceRenderPair[index].second.RefreshBuffer();
        }
    }
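
    // Upload the annotated frame into lastFrameTexture. The rows are copied in reverse
    // order so that the OpenCV image (top-left origin) appears upright when the texture
    // is sampled bottom-up; pixels stay in BGR order and are uploaded with GL_BGR_EXT.
    // The fixed 1280x720x3 staging buffer assumes the capture size requested in InnerInit().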
    glBindTexture(GL_TEXTURE_2D, lastFrameTexture);

    static std::array<char, 1280 * 720 * 3> buffer;
    int step = renderImage.step;
    int height = renderImage.rows;
    int width = renderImage.cols;
    int channels = 3;
    const char* data = reinterpret_cast<const char*>(renderImage.data);
    for (int i = 0; i < height; i++)
    {
        // ci indexes the source row counted from the bottom of the image.
        int ci = height - i - 1;
        memcpy(&buffer[i * width * channels], &data[ci * step], width * channels);
    }

    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGB,
        1280,
        720,
        0,
        GL_BGR_EXT,
        GL_UNSIGNED_BYTE,
        &buffer[0]);
}

bool TMyApplication::IsInited()
{
    return Inited;
}