Skip to content
Snippets Groups Projects
Commit 2533f021 authored by Leonardo de Lima Gaspar's avatar Leonardo de Lima Gaspar
Browse files

SVM with HoG features implemented, seems to be working well on single tests,...

SVM with HoG features implemented; it seems to work well on single tests, but the model still needs to be serialized and connected to the timestamp generator.
parent a381638e
No related branches found
No related tags found
No related merge requests found
Showing with 72 additions and 26 deletions
......@@ -3,37 +3,83 @@
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/ml.hpp>
using namespace std;
using namespace cv;
using namespace cv::ml;
namespace fs = filesystem;
// Dimensions every dataset frame is resized to before HoG extraction.
// Must match the HOGDescriptor window size used in main().
// (Diff artifact removed: the pre-commit 480x270 values duplicated these definitions.)
int imgWidth = 256;
int imgHeight = 128;
int main(int argc, char** argv) {
int labels[2] = {0, 1};
vector<vector<float>> noFishHogVec;
vector<vector<float>> fishHogVec;
HOGDescriptor *hog = new HOGDescriptor();
HOGDescriptor *hog = new HOGDescriptor(Size(imgWidth,imgHeight), Size(16,16), Size(8,8), Size(8,8), 9);
for(auto const& file : fs::directory_iterator("dataset/noFish/")) {
vector<float> descriptor;
vector<Point> points;
hog->compute(imread(file.path().string(), IMREAD_GRAYSCALE), descriptor, Size(30,30), Size(0,0), points);
hog->compute(imread(file.path().string(), IMREAD_GRAYSCALE), descriptor, Size(32,32), Size(0,0), points);
noFishHogVec.push_back(descriptor);
}
for(auto const& file : fs::directory_iterator("dataset/fish/")) {
vector<float> descriptor;
vector<Point> points;
hog->compute(imread(file.path().string(), IMREAD_GRAYSCALE), descriptor, Size(30,30), Size(0,0), points);
hog->compute(imread(file.path().string(), IMREAD_GRAYSCALE), descriptor, Size(32,32), Size(0,0), points);
fishHogVec.push_back(descriptor);
}
cout << noFishHogVec.size() << endl;
cout << fishHogVec.size() << endl;
int totSize = noFishHogVec.size() + fishHogVec.size();
Mat trainMat(totSize, noFishHogVec[0].size(), CV_32FC1);
Mat labels(totSize, 1, CV_32SC1);
for (int i=0; i < totSize; i++) {
if(i < noFishHogVec.size()) {
for(int j=0; j<noFishHogVec[0].size(); j++) {
trainMat.at<float>(i,j) = noFishHogVec[i][j];
}
labels.at<int>(i) = 0;
}
else {
for(int j=0; j<fishHogVec[0].size(); j++) {
trainMat.at<float>(i,j) = fishHogVec[i-noFishHogVec.size()][j];
}
labels.at<int>(i) = 1;
}
}
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(trainMat, ROW_SAMPLE, labels);
// TESTING A SINGULAR PREDICTION, AND SHOWING TWO WINDOWS OF INPUTS.
// UNCOMMENT THIS WHOLE BLOCK TO TEST.
string input1 = "dataset/fish/tester.jpg";
string input2 = "dataset/fish/frame1150Myggbukta-[2021-05-21_10-47-06]-399.jpg";
vector<float> testFishHog;
vector<float> testNoFishHog;
vector<Point> points;
hog->compute(imread(input1, IMREAD_GRAYSCALE), testFishHog, Size(32,32), Size(0,0), points);
hog->compute(imread(input2, IMREAD_GRAYSCALE), testNoFishHog, Size(32,32), Size(0,0), points);
Mat testFish = Mat(testFishHog).t();
Mat testNoFish = Mat(testNoFishHog).t();
float fishResponse = svm->predict(testFish);
float noFishResponse = svm->predict(testNoFish);
cout << "(0 = no fish, 1 = fish)" << "\n";
cout << "Test image WITH fish, prediction result: " << fishResponse << "\n";
cout << "Test image WITHOUT fish, prediction result: " << noFishResponse << endl;
imshow("Input 1", imread(input1, IMREAD_GRAYSCALE));
imshow("Input 2", imread(input2, IMREAD_GRAYSCALE));
imshow("img", fishHogVec[20]);
waitKey(0);
destroyWindow("img");
}
\ No newline at end of file
......@@ -7,22 +7,19 @@ from pathlib import Path
# Skips every 1/n frames, according to this ratio.
# Maybe do random frame n*frameCount instead?
# Fraction of each video's frames to save; converted to a frame step via
# int(1/percentExtractedPerVideo), so values above 0.5 keep every frame.
percentExtractedPerVideo = 0.2
# Only the first N videos in the source folder are processed.
firstNvideos = 17
# Target frame dimensions (must match the HoG training window in the C++ side).
resizeWidth = 256
resizeHeight = 128
# Folder holding the per-video groundTruth_<name>.csv label files.
groundsPath = "src/python/groundTruth/"
### Generates a dataset of labelled images from one video (soon a whole folder) by comparing to ground truth.
### Takes about 5 minutes for a 30 minute input.
def generateDatasetFromFrames(inputVideosPath, extractionRatio):
### Generates a dataset of labelled images from the first n videos in a folder, by comparing to their respective ground truths.
def generateDatasetFromFrames(inputVideosPath):
for i in range(firstNvideos):
curVideo = os.listdir(inputVideosPath)[i]
decoder = cv2.VideoCapture(inputVideosPath + curVideo)
desiredFPS = int(cv2.CAP_PROP_FPS * extractionRatio)
sourceNoExt = os.path.splitext(curVideo)[0]
groundTruthName = "groundTruth_" + sourceNoExt + ".csv"
......@@ -38,7 +35,9 @@ def generateDatasetFromFrames(inputVideosPath, extractionRatio):
print("No ground truth file found to label from, for file {file}.\n".format(file = curVideo))
continue
step = int(decoder.get(cv2.CAP_PROP_FPS) / desiredFPS)
# NOTE: Iterates through frames in steps, using integer rounding. Hence, gives VERY rough approximation of given percentage.
# I.e. Anything above 0.5 (or 50%) will result in every single frame being saved.
step = int(1/percentExtractedPerVideo)
try:
# Frame reading loop
while True:
......@@ -50,7 +49,7 @@ def generateDatasetFromFrames(inputVideosPath, extractionRatio):
# Also does modulo step to simulate lower FPS.
frameNo = int(decoder.get(cv2.CAP_PROP_POS_FRAMES)-1)
if frameNo % step == 0:
frame = cv2.resize(frame, (resizeWidth, resizeHeight), cv2.INTER_CUBIC)
frame = cv2.resize(frame, (resizeWidth, resizeHeight), cv2.INTER_LINEAR)
curTime_s = int(decoder.get(cv2.CAP_PROP_POS_MSEC)/1000)
found = False
......@@ -73,9 +72,10 @@ def generateDatasetFromFrames(inputVideosPath, extractionRatio):
finally:
decoder.release()
groundTruthCSV.close()
print("Done.")
def main(video):
    """Entry point: extract labelled frames from the videos under `video`.

    The extraction ratio now comes from the module-level constant
    percentExtractedPerVideo instead of a parameter.
    """
    generateDatasetFromFrames(video)
if __name__ == "__main__":
    # Default source folder; the extraction ratio is the module-level constant.
    main("sourceVideos/")
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment