// Track and identify people from multiple HD streams at the same time, using FaceLog6 plugins.
/*
* Copyright (C) 2015-2018 Digital Barriers plc. All rights reserved.
* Contact: http://www.digitalbarriers.com/
*
* This file is part of the Papillon SDK.
*
* You can't use, modify or distribute any part of this file without
* the explicit written agreements of Digital Barriers.
*/
#include <unordered_map>
#include <vector>
#include <PapillonCore.h>
USING_NAMESPACE_PAPILLON
/// Create a face detector configured from @p parameters.
/// Recognised keys (all optional, defaults shown below): "Detector",
/// "numDetectors", "numLocalisers", "MinDetectionSize", "MaxDetectionSize",
/// "MaxDetections", "Threshold", "ROI".
/// On failure the error is logged and @p detector is left unchanged.
void CreateFaceDetector(PDetector &detector, const PProperties &parameters) {
    P_LOG_INFO << "Creating face detector...";
    PString detectorName = "FaceDetector2";
    int32 numDetectors = 8;        // number of detector worker threads
    int32 numLocalisers = 2;       // number of localiser worker threads
    int32 minDetectionSize = 80;   // in pixels; smaller faces are ignored
    int32 maxDetectionSize = 1000; // in pixels
    int32 maxDetections = 0;
    double threshold = 0.0;
    PRectanglei roi; // region of interest (empty means whole frame)
    // Each Get() leaves the default in place when the key is absent.
    parameters.Get("Detector", detectorName);
    parameters.Get("numDetectors", numDetectors);
    parameters.Get("numLocalisers", numLocalisers);
    parameters.Get("MinDetectionSize", minDetectionSize);
    parameters.Get("MaxDetectionSize", maxDetectionSize);
    parameters.Get("MaxDetections", maxDetections);
    parameters.Get("Threshold", threshold);
    parameters.Get("ROI", roi);
    P_LOG_INFO << "Detector : " << detectorName;
    P_LOG_INFO << "MinDetectionSize : " << minDetectionSize;
    P_LOG_INFO << "MaxDetectionSize : " << maxDetectionSize;
    P_LOG_INFO << "numDetectors : " << numDetectors;
    P_LOG_INFO << "numLocalisers : " << numLocalisers;
    P_LOG_INFO << "MaxDetections : " << maxDetections;
    P_LOG_INFO << "Threshold : " << threshold;
    P_LOG_INFO << "ROI : " << roi;
    // Set up detector
    PProperties detectorProps;
    detectorProps.Set("numDetectors", numDetectors);
    // BUG FIX: original called Get() here, so numLocalisers was never passed on.
    detectorProps.Set("numLocalisers", numLocalisers);
    PResult res = PDetector::Create(detectorName, detectorProps, detector);
    if (res.Failed()) {
        P_LOG_ERROR << "Failed to create face detector:" << res;
        return;
    }
    detector.SetMinDetectionSize(PSizei(minDetectionSize, minDetectionSize));
    detector.SetMaxDetectionSize(maxDetectionSize);
    detector.SetThreshold(static_cast<float>(threshold));
    detector.SetMaxDetections(maxDetections);
    // BUG FIX: the original `if (roi.IsValid())` had no statement attached
    // (dangling if, syntax error). Apply the ROI only when one was supplied.
    // NOTE(review): assumes PDetector exposes SetROI — confirm against the SDK headers.
    if (roi.IsValid())
        detector.SetROI(roi);
}
/// Create the DNN face-template (recognition) describer.
/// @param describer   Receives the created describer.
/// @param batchSize   DNN batch size forwarded to the describer.
/// @param useGPU      Run the DNN on GPU when true, CPU otherwise.
/// @param sharedMode  When true, mark the underlying DNN as shareable.
/// Terminates the process (OrDie) if creation fails.
void CreateTemplateDescriber(PDescriber &describer, int batchSize, bool useGPU, bool sharedMode) {
    P_LOG_INFO << "Creating face recognition describer...";
    P_LOG_INFO << "describerBatchSize: " << batchSize;
    const PString gpuString = useGPU ? "gpuMode=true" : "gpuMode=false";
    P_LOG_INFO << "GPU:" << gpuString;
    const PString keyValues = "type=Face;" + gpuString + PString(";batchSize=%1").Arg(batchSize);
    PProperties dnnParameters = PProperties::CreateFromKeyValueString(keyValues);
    if (sharedMode)
        dnnParameters.Set("sharedDnn", true);
    PDescriber::Create("DescriberDnn", dnnParameters, describer).OrDie();
}
/// Create the gender-classification describer.
/// @param describer Receives the created describer.
/// @param useGPU    Run the DNN on GPU when true, CPU otherwise.
/// Terminates the process (OrDie) if creation fails.
void CreateMetaDescriber(PDescriber &describer, bool useGPU) {
    P_LOG_INFO << "Creating gender recognition describer...";
    const PString gpuString = useGPU ? "gpuMode=true" : "gpuMode=false";
    P_LOG_INFO << "GPU:" << gpuString;
    PProperties props = PProperties::CreateFromKeyValueString("type=Gender;" + gpuString);
    PDescriber::Create("DescriberDnn", props, describer).OrDie();
}
/// Tally the events in @p events: increments counter[type-string] once per event.
void ProcessEvents(PList &events, std::unordered_map<std::string, int> &counter) {
    const int numEvents = events.Size();
    for (int idx = 0; idx < numEvents; ++idx) {
        PEvent ev;
        events.Get(idx, ev);
        // operator[] default-constructs the count to 0 on first sight of a type.
        ++counter[ev.GetType().c_str()];
    }
}
class ProcessThreadC : public PRunnable {
public:
ProcessThreadC() {}
~ProcessThreadC() { /*std::cerr << "thread destructor " << (void*)this << " vf:" << m_videoFile.c_str() << "\n";*/
}
PResult Init(const PProperties &parameters, const PString &videoFile) {
// open video stream
P_LOG_INFO << "Trying to open'" << videoFile << "'";
PResult resVal = PInputVideoStream::Open(videoFile, m_ivs).LogIfError();
if (resVal.Failed()) {
P_LOG_ERROR << "Error in opening video stream:" << videoFile;
return resVal;
}
P_LOG_INFO << "Video stream is opened";
// create FaceLog
P_LOG_INFO << "Creating FaceLog6 instance";
resVal = PAnalytics::Create("FaceLog6", parameters, m_faceLog).LogIfError();
if (resVal.Failed()) {
P_LOG_ERROR << "Failed to create: FaceLog6 with parameters:" << parameters;
return resVal;
}
P_LOG_INFO << "Created FaceLog6 instance";
return PResult::C_OK;
}
virtual void Run() {
std::unordered_map<std::string, int> counter;
counter.insert({"FrameStart", 0});
counter.insert({"FrameEnd", 0});
counter.insert({"Track-In-Progress", 0});
counter.insert({"Face", 0});
counter.insert({"Sighting", 0});
// processing loop
P_LOG_INFO << "Starting main processing loop";
PFrame frame;
PList events;
while (m_ivs.GetFrame(frame).Ok()) {
if (!m_faceLog.Apply(frame, events).LogIfError().Ok()) {
P_LOG_ERROR << "Error in analytics";
break;
}
ProcessEvents(events, counter);
}
if (!m_faceLog.Finish(events).LogIfError().Ok()) {
P_LOG_ERROR << "Error in analytics";
}
ProcessEvents(events, counter);
P_LOG_INFO << "Finished processing";
}
private:
PAnalytics m_faceLog;
};
// Location of the SDK sample data: $PAPILLON_INSTALL_DIR/Data/Samples
const PString SAMPLE_DIR =
    PPath::Join(PUtils::GetEnv("PAPILLON_INSTALL_DIR"), "Data", "Samples");
void RunDemo() {
const bool useGPU = true; // use gpu
const bool useShared = true; // shared mode for describers (describers will be shared between FaceLogs)
// create list of sources
std::vector<std::string> sources;
sources.push_back(PPath::Join(SAMPLE_DIR, "busy_office.avi").c_str());
sources.push_back(PPath::Join(SAMPLE_DIR, "officeEntry.avi").c_str());
sources.push_back(PPath::Join(SAMPLE_DIR, "busy_office.avi").c_str());
sources.push_back(PPath::Join(SAMPLE_DIR, "officeEntry.avi").c_str());
PProperties parameters;
parameters.Set("GPU", useGPU);
parameters.Set("MaxFaceDetectorFR", -1.); // limit frame rate of face detector to 8 fps (to no overwhelm CPU)(if
// processing video files better disable this with -1.)
// create face detector
if (useShared) {
PDetector faceDetector;
CreateFaceDetector(faceDetector, parameters);
parameters.Set("FaceDetector", faceDetector);
}
// create face recognition describer
if (useShared) {
PDescriber faceDescriber;
CreateTemplateDescriber(faceDescriber, 4, useGPU, true);
parameters.Set("FaceRecognitionDescriber", faceDescriber);
}
// create gender describer
if (useShared) {
PDescriber genderDescriber;
CreateMetaDescriber(genderDescriber, useGPU);
parameters.Set("GenderDescriber", genderDescriber);
}
std::vector<ProcessThreadC> tasks; // important! we need to keep tasks until threads finish
std::vector<PConcurrentThread> threads;
// start processing threads
P_LOG_INFO << "-------------------- Initialising tasks";
for (auto source : sources) {
tasks.push_back(ProcessThreadC());
if (tasks.back().Init(parameters, source).LogIfError().Failed()) {
P_LOG_ERROR << "Failed to init processing thread.";
return;
}
}
P_LOG_INFO << "-------------------- Lunching processing threads";
for (auto &task : tasks) {
threads.push_back(PConcurrentThread(task));
}
// wait for threads to finish
P_LOG_INFO << "-------------------- Waiting for threads";
for (auto &it : threads) {
it.Join();
}
}
/// Entry point: run the multi-stream FaceLog6 demo and report success.
int main() {
    RunDemo();
    return 0; // all error reporting happens via logging inside RunDemo()
}