Commit bbddb10f authored by Anders Langlie

cleaned up code and commented

parent 0ea312e2
from cv2 import cv2
import numpy as np
import algorithm.dsift as dsift


class SalamanderImage:
    descriptors = []
    filename = ''

    def __init__(self, filename):
        self.filename = filename
        self.descriptors = self.calculate_descriptors()

    def calculate_descriptors(self):
        """
        Calculates the descriptors of the member image

        Returns: The calculated descriptors
        """
        image = cv2.imdecode(np.fromfile(self.filename, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
        if image is None:
            raise FileNotFoundError("Cannot find image file " + self.filename)
        return dsift.compute_descriptors(image)
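A minimal usage sketch (the path is hypothetical): the descriptors are computed eagerly in the constructor, so a missing file surfaces immediately as a FileNotFoundError.

from algorithm.SalamanderImage import SalamanderImage

salamander = SalamanderImage('data/salamanders/1/1_str.jpg')  # hypothetical path
print(salamander.descriptors.shape)  # dense SIFT descriptor matrix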
import os
from cv2 import cv2
import glob
from algorithm.SalamanderImage import SalamanderImage


def match_single_image(input_salamander, match_salamander):
    """
    Compares two SalamanderImages and determines if they are similar enough to be a match

    Args:
        input_salamander: SalamanderImage of the input salamander
        match_salamander: SalamanderImage of a salamander from the database

    Returns:
        Boolean value indicating whether the comparison was a match, and the number of good matches found
    """
    min_good_match = 15
    match_dist = 0.75

    match = cv2.BFMatcher().knnMatch(input_salamander.descriptors, match_salamander.descriptors, k=2)
    goodmatch = []
    ...
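The rest of the function body is elided in this diff. A sketch of the conventional Lowe ratio test it presumably performs, using the function-local match_dist and min_good_match thresholds (an assumption, not the verbatim source):

    for m, n in match:
        if m.distance < match_dist * n.distance:
            goodmatch.append(m)
    return len(goodmatch) >= min_good_match, len(goodmatch)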
def match_file_structure(input_image: str, match_directory: str):
    """
    Loops through a given directory of salamanders represented by folders containing their images, and finds
    the best match (if any) based on an input image

    Args:
        input_image: Path to the image of the input salamander
        match_directory: Path of the directory to loop through

    Returns:
        The ID of the best matched salamander, or None if no match is found
    """
    best_match = -1
    match_count = 0

    # check if input path is valid:
    if os.path.isfile(input_image):
        input_salamander = SalamanderImage(input_image)
        for folder in os.listdir(match_directory):
            name_list = glob.glob(os.path.join(match_directory, folder, "*_str.*"))
            for filename in name_list:
                res, num_matches = match_single_image(input_salamander, SalamanderImage(filename))
                if res and num_matches > match_count:
                    match_count = num_matches
                    best_match = int(folder)
    ...
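A hedged usage example; the module path and file locations are placeholders, not taken from the diff:

from algorithm.match import match_file_structure  # assumed module path

best_id = match_file_structure('uploads/query.jpg', 'database/salamanders')
if best_id is not None:
    print('Best matching salamander:', best_id)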
...
sift = cv2.SIFT_create()


def compute_descriptors(image):
    """
    Computes the descriptors of an incoming image

    Args:
        image: Image from OpenCV

    Returns:
        Descriptors of the image
    """
    dense = sift.compute(image, kp)
    des = dense[1]
    return des
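compute_descriptors relies on a module-level keypoint grid kp that is defined in the elided part of this file. A hypothetical construction of such a dense grid; the spacing and image dimensions below are assumptions, not values from the source:

from cv2 import cv2

STEP = 8  # assumed grid spacing in pixels
IMAGE_WIDTH, IMAGE_HEIGHT = 320, 120  # assumed straightened-image size
kp = [cv2.KeyPoint(x, y, STEP)
      for y in range(0, IMAGE_HEIGHT, STEP)
      for x in range(0, IMAGE_WIDTH, STEP)]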
from cv2 import cv2
import numpy as np
from algorithm.SalamanderImage import SalamanderImage

import algorithm.dsift as dsift
import algorithm.segmentation as segmentation


def create_salamander_image(filename: str):
    salamander_image = SalamanderImage(filename)
    salamander_image.filename = filename
    salamander_image.descriptors = get_descriptors(filename)
    return salamander_image


def get_descriptors(filename: str):
    image = get_straightened_image(filename)
    return calculate_descriptors(image)


# Reads, straightens, resizes the image and returns it
def get_straightened_image(filename: str):
    straightened_filename = filename  # [0:-4] + '_str.jpg'
    image = get_image(straightened_filename)
    return image


# Should return a binary image (numpy ndarray) with 1 for "Part of salamander"
# and 0 for "Not part of the salamander".
def get_segmented_image(filename: str):
    image = get_image(filename)
    return segmentation.get_salamander_mask(image)


def get_image(filename):
    img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError("Cannot find image file " + filename)
    return img


# Calculates descriptors from preprocessed image
def calculate_descriptors(image):
    return dsift.compute_descriptors(image)
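The np.fromfile + cv2.imdecode pattern used in get_image (and mirrored in the endpoints below) exists because cv2.imread cannot open paths containing non-ASCII characters on Windows; reading the raw bytes with NumPy and decoding them in memory sidesteps the path-encoding issue. For example (hypothetical path):

import numpy as np
from cv2 import cv2

path = 'C:/data/blåfjell/salamander_1.jpg'  # a path cv2.imread may fail on
img = cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)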
...
    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    reset_default_graph()

    # record cwd to return to this directory in the end:
...
    model_folder = os.path.join(cfg["project_path"],
                                str(auxiliaryfunctions.GetModelFolder(train_fraction, shuffle, cfg)))
    path_test_config = Path(model_folder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
...
    increasing_indices = np.argsort([int(m.split('-')[1]) for m in snapshots])
    snapshots = snapshots[increasing_indices]

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(model_folder, 'train', snapshots[snapshot_index])

    # Update batchsize (based on parameters in config.yaml)
    dlc_cfg['batch_size'] = 1

    sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

    # update number of outputs and adjust pandas indices
    dlc_cfg['num_outputs'] = cfg.get('num_outputs', 1)
...
    ##################################################
    # Loading the images
    ##################################################
    ny, nx, nc = np.shape(image)
    nframes = 1

    PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))

    frame = img_as_ubyte(image)
    pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
    PredictedData[0, :] = pose.flatten()

    return PredictedData[0], nframes, nx, ny
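A hedged sketch of calling run_prediction_dlc; the config path and image source are placeholders. The flattened pose vector holds an (x, y, likelihood) triplet per joint:

import numpy as np
from cv2 import cv2

image = cv2.imread('frame.png')  # (ny, nx, 3) ndarray, hypothetical file
pose, nframes, nx, ny = run_prediction_dlc('dlc_model/config.yaml', image)
joints = pose.reshape(-1, 3)  # columns: x, y, likelihood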
from cv2 import cv2
import numpy as np


def get_salamander_mask(image):
    ret, threshold = cv2.threshold(image, np.mean(image) - 40, 255, 0)
    # OpenCV 4 returns (contours, hierarchy); 3.x builds returned an extra image first
    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    largest_area, largest_contour_index = find_biggest_contour(contours, image.size)
    cv2.drawContours(image, contours[largest_contour_index], -1, (0, 0, 255), 3)
    mask = np.zeros(image.shape)
    cv2.fillPoly(mask, pts=[contours[largest_contour_index]], color=(255, 255, 255))
    return mask


def find_biggest_contour(contours, imsize):
    largest_area = 0
    largest_contour_index = -1
    i = 0
    total_contours = len(contours)
    while i < total_contours:
        area = cv2.contourArea(contours[i])
        # ignore contours covering almost the whole image
        if largest_area < area < imsize * 0.9:
            largest_area = area
            largest_contour_index = i
        i += 1
    return largest_area, largest_contour_index
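A minimal usage sketch, assuming a grayscale input (the threshold is derived from the image mean, and find_biggest_contour discards contours covering more than 90% of the image):

from cv2 import cv2
import numpy as np

gray = cv2.imread('salamander.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical path
mask = get_salamander_mask(gray)
cv2.imwrite('mask.png', mask.astype(np.uint8))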
...
def generate_map_from_bellycurve(curve, width=STRAIGTHENED_IMAGE_WIDTH):
    """
    Generates a map that can be passed to cv2.remap to extract the belly pattern

    Args:
        curve: Interpolated curve along the spine
        width: Width of the abdominal pattern

    Returns: Map generated from the curve
    """
    salamander_width = width + SHOULDER_ESTIMATION_BUFFER

    gradient = np.gradient(curve, axis=0)
...
def get_smooth_curve(points, num_points):
    """
    Takes in an array of 2-D points and returns a new set of points
    with num_points elements where the missing points are interpolated
    using cubic interpolation

    Args:
        points: Array of 2-D points to interpolate between
        num_points: Number of points in the returned curve

    Returns: Interpolated points
    """
    # Calculate distance between all the points
    distance = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1))

    # Accumulate distance to use as parameter
    accumulated_distance = np.cumsum(distance)
...
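The elided body above parameterises the curve by accumulated arc length before interpolating. A sketch of that technique with SciPy, not necessarily the exact implementation:

import numpy as np
from scipy.interpolate import interp1d

def get_smooth_curve_sketch(points, num_points):
    distance = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1))
    t = np.concatenate(([0], np.cumsum(distance)))  # arc-length parameter
    interpolator = interp1d(t, points, kind='cubic', axis=0)
    return interpolator(np.linspace(0, t[-1], num_points))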
def halfway_between(point1, point2):
    """
    Calculates the point halfway between two points

    Args:
        point1: First point
        point2: Second point

    Returns: The point halfway between the two provided points
    """
    vec = [(point2[0] - point1[0]) / 2, (point2[1] - point1[1]) / 2]
    return [point1[0] + vec[0], point1[1] + vec[1]]
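For example, halfway_between([0, 0], [2, 4]) returns [1.0, 2.0].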
...
from shutil import move
from path_constants import _ACCESS_DATABASE
from image_encoder.image_encoder import *
from cv2 import cv2
import numpy as np

"""
Endpoint for editing salamanders.
"""
...
class EditSalamander(Resource):
    ...
                basename = os.path.basename(path)
                if str(image_id) in basename:
                    image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
                    if image is None:
                        raise FileNotFoundError("Cannot find image file " + path)

                    if "str" not in basename:  # Scale only original to set size
                        height, width, _ = image.shape
...
...
from flask import request, jsonify
from flask_restful import Resource
from flask_jwt_extended import get_jwt_identity, jwt_required
from algorithm.straighten_with_dlc import straighten
from cv2 import cv2
import os
from api import limiter
from image_encoder.image_encoder import *
...
...
from api.models.dbmodels import Location, User, Salamander
import os
import glob
import numpy as np
from cv2 import cv2
from image_encoder.image_encoder import *
from path_constants import _ACCESS_DATABASE
...
class SalamanderEndpoint(Resource):
    ...
        list_of_paths = glob.glob(os.path.join(path_to_salamander_images, '*.*'))
        for path in list_of_paths:
            if "_str" not in path:
                image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
                if image is None:
                    raise FileNotFoundError("Cannot find image file " + path)

                # scaling to set size
                height, width, _ = image.shape
...
from api import app

if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0')