From 760234c89e2c055a00651243f4e7e74b24f7089d Mon Sep 17 00:00:00 2001
From: Anders Langlie <andeslan@stud.ntnu.no>
Date: Thu, 10 Jun 2021 11:34:20 +0200
Subject: [PATCH] refactor and remove sex identification
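
Remove the ImageAI-based sex/gender estimation code (algorithm/estimate_gender.py
and its demo), move the remaining demo and tooling scripts into
algorithm/ToolsDemos/, and reformat the remaining modules to 4-space indentation.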

---
 algorithm/DEMO_estimate_gender.py             |  18 -
 .../{ => ToolsDemos}/DEMO_straightening.py    |   0
 algorithm/{ => ToolsDemos}/convert_images.py  |   0
 .../dlc_create_training_dataset.py            |   0
 algorithm/{ => ToolsDemos}/dlc_launch_gui.py  |   0
 algorithm/{ => ToolsDemos}/dlc_train_model.py |   0
 algorithm/{ => ToolsDemos}/tf_find_gpu.py     |   0
 algorithm/constants.py                        |   2 +-
 algorithm/dsift.py                            |   5 +-
 algorithm/estimate_gender.py                  | 361 ------------------
 algorithm/imageprocessing.py                  |  65 ++--
 algorithm/predict_salamander_abdomen.py       |  39 +-
 algorithm/segmentation.py                     |  46 +--
 algorithm/straighten_with_dlc.py              | 349 ++++++++---------
 14 files changed, 253 insertions(+), 632 deletions(-)
 delete mode 100644 algorithm/DEMO_estimate_gender.py
 rename algorithm/{ => ToolsDemos}/DEMO_straightening.py (100%)
 rename algorithm/{ => ToolsDemos}/convert_images.py (100%)
 rename algorithm/{ => ToolsDemos}/dlc_create_training_dataset.py (100%)
 rename algorithm/{ => ToolsDemos}/dlc_launch_gui.py (100%)
 rename algorithm/{ => ToolsDemos}/dlc_train_model.py (100%)
 rename algorithm/{ => ToolsDemos}/tf_find_gpu.py (100%)
 delete mode 100644 algorithm/estimate_gender.py

diff --git a/algorithm/DEMO_estimate_gender.py b/algorithm/DEMO_estimate_gender.py
deleted file mode 100644
index 7c4ca72..0000000
--- a/algorithm/DEMO_estimate_gender.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import os
-import algorithm.estimate_gender as bb
-import cv2
-from path_constants import abs_path_temp_images
-
-
-def test():
-	img_path = os.path.join(abs_path_temp_images, "smallDSC_0605.png")
-	img = cv2.imread(img_path)
-	if None is not img:
-		gender, pred_img, objects, detected_objects = bb.estimate_image(input_image=img,
-																minimum_percentage_probability=0.1,
-																debug_img_name = img_path[0:-4] + "_pred.png",
-																debug_create_pred_img=True)
-		print(gender)
-		# print(pred_img)
-		print(objects)
-		print(detected_objects)
diff --git a/algorithm/DEMO_straightening.py b/algorithm/ToolsDemos/DEMO_straightening.py
similarity index 100%
rename from algorithm/DEMO_straightening.py
rename to algorithm/ToolsDemos/DEMO_straightening.py
diff --git a/algorithm/convert_images.py b/algorithm/ToolsDemos/convert_images.py
similarity index 100%
rename from algorithm/convert_images.py
rename to algorithm/ToolsDemos/convert_images.py
diff --git a/algorithm/dlc_create_training_dataset.py b/algorithm/ToolsDemos/dlc_create_training_dataset.py
similarity index 100%
rename from algorithm/dlc_create_training_dataset.py
rename to algorithm/ToolsDemos/dlc_create_training_dataset.py
diff --git a/algorithm/dlc_launch_gui.py b/algorithm/ToolsDemos/dlc_launch_gui.py
similarity index 100%
rename from algorithm/dlc_launch_gui.py
rename to algorithm/ToolsDemos/dlc_launch_gui.py
diff --git a/algorithm/dlc_train_model.py b/algorithm/ToolsDemos/dlc_train_model.py
similarity index 100%
rename from algorithm/dlc_train_model.py
rename to algorithm/ToolsDemos/dlc_train_model.py
diff --git a/algorithm/tf_find_gpu.py b/algorithm/ToolsDemos/tf_find_gpu.py
similarity index 100%
rename from algorithm/tf_find_gpu.py
rename to algorithm/ToolsDemos/tf_find_gpu.py
diff --git a/algorithm/constants.py b/algorithm/constants.py
index 54462b6..08d7de7 100644
--- a/algorithm/constants.py
+++ b/algorithm/constants.py
@@ -5,4 +5,4 @@ min_good_match = 15
 match_dist = 0.75
 images_basepath = "test_code/Feature_matching_extraction/test_images/straightened-manually/"
 input_images_path = "data/input"
-input_salamander_id = '00' #294,120,64,00
\ No newline at end of file
+input_salamander_id = '00'
\ No newline at end of file
diff --git a/algorithm/dsift.py b/algorithm/dsift.py
index 2d8e2a7..19ad68f 100644
--- a/algorithm/dsift.py
+++ b/algorithm/dsift.py
@@ -2,10 +2,11 @@ import numpy as np
 from cv2 import cv2
 from algorithm.constants import width, height, step_size
 
-kp = [cv2.KeyPoint(x, y, step_size) for y in range(0, width , step_size) for x in range(0, height, step_size)]
+kp = [cv2.KeyPoint(x, y, step_size) for y in range(0, width, step_size) for x in range(0, height, step_size)]
 sift = cv2.SIFT_create()
 
+
 def compute_descriptors(image):
 	dense = sift.compute(image,kp)
 	des = dense[1]
-	return des
\ No newline at end of file
+	return des
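+
+# Note: sift.compute returns (keypoints, descriptors); des is a (len(kp), 128)
+# ndarray, one 128-dimensional SIFT descriptor per keypoint on the dense grid.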
diff --git a/algorithm/estimate_gender.py b/algorithm/estimate_gender.py
deleted file mode 100644
index ba2357d..0000000
--- a/algorithm/estimate_gender.py
+++ /dev/null
@@ -1,361 +0,0 @@
-import json
-import cv2
-import numpy as np
-import os
-from multiprocessing import Process
-from multiprocessing import Manager
-from path_constants import abs_path_imageai_config
-from path_constants import abs_path_imageai_model
-from algorithm.straighten_with_dlc import _ACCESS_TF_AND_GPU_SEMA
-
-EXTRACT_DETECTED_OBJECTS = False
-MINIMUM_PERCENTAGE_PROBABILITY = 0.1
-NMS_TRESHOLD = 0.4
-DISPLAY_PERCENTAGE_PROBABILITY = True
-DISPLAY_OBJECT_NAME = True
-THREAD_SAFE = False
-
-
-def run_estimation(detection_model_path: str = None, configuration_json: str = None, input_image: np.ndarray = None,
-                   extract_detected_objects: bool = False, minimum_percentage_probability: float = 50,
-                   nms_treshold: float = 0.4,
-                   display_percentage_probability: bool = True, display_object_name: bool = True,
-                   thread_safe: bool = False, debug_img_name: str = "", debug_create_pred_img: bool = False,
-                   ret_dictionary: list = None):
-    # from tensorflow.keras.backend import clear_session
-    from tensorflow.core.protobuf.config_pb2 import ConfigProto
-    from tensorflow.python.client.session import Session
-    from imageai.Detection.YOLO.yolov3 import yolov3_main
-    from tensorflow.python.keras.backend import get_session
-    from tensorflow.keras import Input
-
-    # add to the top of your code under import tensorflow as tf
-    config = ConfigProto()
-    config.gpu_options.allow_growth = True
-    config.gpu_options.per_process_gpu_memory_fraction = 0.9
-    #	session = Session(config=config)
-    # For now the model is trained for quadratic images:
-    image_dim_size = 416
-    if detection_model_path == None or configuration_json == None:
-        for i in range(0, 4):
-            # have to append because for some reason I can't x = [1,2,3]
-            # with mulitprocessing:
-            ret_dictionary.append(None)
-        return
-    detection_model_path = os.path.abspath(detection_model_path)
-    configuration_json = os.path.abspath(configuration_json)
-    # Loading the model:
-    detection_model_json = json.load(open(configuration_json))
-
-    model_labels = detection_model_json["labels"]
-    model_anchors = detection_model_json["anchors"]
-
-    detection_utils = CustomDetectionUtils(labels=model_labels)
-
-    model = yolov3_main(Input(shape=(None, None, 3)), 3, len(model_labels))
-
-    model.load_weights(detection_model_path)
-    # estimating:
-    if model is None:
-        raise ValueError("The model was not able to load.")
-    else:
-
-        object_threshold = minimum_percentage_probability / 100
-
-        output_objects_array = []
-        detected_objects_image_array = []
-
-        image = input_image
-
-        image_frame = image.copy()
-
-        height, width, channels = image.shape
-
-        image = cv2.resize(image, (image_dim_size, image_dim_size))
-
-        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-        image = image.astype("float32") / 255.
-
-        # expand the image to batch
-        image = np.expand_dims(image, 0)
-
-        if thread_safe == True:
-            with get_session():
-                yolo_results = model.predict(image)
-        else:
-            yolo_results = model.predict(image)
-        boxes = list()
-        for idx, result in enumerate(yolo_results):
-            box_set = detection_utils.decode_netout(result[0], model_anchors[idx],
-                                                    object_threshold, image_dim_size,
-                                                    image_dim_size)
-            boxes += box_set
-        detection_utils.correct_yolo_boxes(boxes, height, width, image_dim_size, image_dim_size)
-        detection_utils.do_nms(boxes, nms_treshold)
-        all_boxes, all_labels, all_scores = detection_utils.get_boxes(boxes, model_labels,
-                                                                      object_threshold)
-        for object_box, object_label, object_score in zip(all_boxes, all_labels, all_scores):
-            each_object_details = dict()
-            each_object_details["name"] = object_label
-            each_object_details["percentage_probability"] = object_score
-            if object_box.xmin < 0:
-                object_box.xmin = 0
-            if object_box.ymin < 0:
-                object_box.ymin = 0
-            each_object_details["box_points"] = [object_box.xmin, object_box.ymin, object_box.xmax, object_box.ymax]
-            output_objects_array.append(each_object_details)
-        drawn_image = detection_utils.draw_boxes_and_caption(image_frame.copy(), all_boxes, all_labels,
-                                                             all_scores, show_names=display_object_name,
-                                                             show_percentage=display_percentage_probability)
-        if extract_detected_objects:
-            for cnt, each_object in enumerate(output_objects_array):
-                splitted_image = image_frame[each_object["box_points"][1]:each_object["box_points"][3],
-                                 each_object["box_points"][0]:each_object["box_points"][2]]
-                detected_objects_image_array.append(splitted_image.copy())
-        if debug_create_pred_img:
-            if drawn_image is not None:
-                print(os.path.abspath(debug_img_name))
-                cv2.imwrite(os.path.abspath(debug_img_name), drawn_image)
-
-        # find the best estimation:
-        seq = [x['percentage_probability'] for x in output_objects_array]
-        gender = None
-        if len(seq)>0:
-            index = seq.index(max(seq))
-            gender = output_objects_array[index]
-        # have to append because for some reason I can't x = [1,2,3]
-        # with mulitprocessing:
-        if extract_detected_objects:
-            # ret_dictionary= [gender, drawn_image, output_objects_array, detected_objects_image_array]
-            ret_dictionary.append(gender)
-            ret_dictionary.append(drawn_image)
-            ret_dictionary.append(output_objects_array)
-            ret_dictionary.append(detected_objects_image_array)
-        else:
-            ret_dictionary.append(gender)
-            ret_dictionary.append(drawn_image)
-            ret_dictionary.append(output_objects_array)
-            ret_dictionary.append(None)
-        # ret_dictionary = [gender, drawn_image, output_objects_array, None]
-
-
-def estimate_image(detection_model_path: str = abs_path_imageai_model,
-                   configuration_json: str = abs_path_imageai_config,
-                   input_image: np.ndarray = None,
-                   extract_detected_objects: bool = EXTRACT_DETECTED_OBJECTS,
-                   minimum_percentage_probability: float = MINIMUM_PERCENTAGE_PROBABILITY,
-                   nms_treshold: float = NMS_TRESHOLD,
-                   display_percentage_probability: bool = DISPLAY_PERCENTAGE_PROBABILITY,
-                   display_object_name: bool = DISPLAY_OBJECT_NAME,
-                   thread_safe: bool = THREAD_SAFE,
-                   debug_img_name: str = "",
-                   debug_create_pred_img: bool = False):
-    """
-	'estimate_image()' estimates the gender of the salamander of the given image:
-				* input_image , which can be a filepath or image numpy array in BGR
-				* extract_detected_objects (optional) , option to save each object detected individually as an image and return an array of the objects' image path.
-				* minimum_percentage_probability (optional, 30 by default) , option to set the minimum percentage probability for nominating a detected object for output.
-				* nms_threshold (optional, o.45 by default) , option to set the Non-maximum suppression for the detection
-				* display_percentage_probability (optional, True by default), option to show or hide the percentage probability of each object in the saved/returned detected image
-				* display_display_object_name (optional, True by default), option to show or hide the name of each object in the saved/returned detected image
-				* thread_safe (optional, False by default), enforce the loaded detection model works across all threads if set to true, made possible by forcing all Keras inference to run on the default graph
-				* debug_img_name (optional, "" by default), the name of the predicted image if debug_create_pred_img is True
-				* debug_create_pred_img (optional, False by default), option to create an image with the bounding box
-
-	:param detection_model_path:
-	:param input_image:
-	:return detected_objects_image_array:
-	"""
-    with _ACCESS_TF_AND_GPU_SEMA:
-        gender = "AI_sex"
-        if os.path.isfile(detection_model_path) and os.path.isfile(configuration_json):
-            manager = Manager()
-            return_dict = manager.list()
-            # return_dict =  multiprocessing.Value("d", [], lock=False)
-            args = (detection_model_path, configuration_json, input_image,
-                    extract_detected_objects, minimum_percentage_probability,
-                    nms_treshold,
-                    display_percentage_probability, display_object_name,
-                    thread_safe, debug_img_name, debug_create_pred_img, return_dict)
-            p = Process(target=run_estimation, args=args)
-            p.start()
-            p.join()
-            if return_dict[0]:
-                gender = return_dict[0]['name']
-            return gender, return_dict[1], return_dict[2], return_dict[3]
-        return gender, None, None, None
-
-class BoundBox:
-    def __init__(self, xmin, ymin, xmax, ymax, objness=None, classes=None):
-        self.xmin = xmin
-        self.ymin = ymin
-        self.xmax = xmax
-        self.ymax = ymax
-        self.objness = objness
-        self.classes = classes
-        self.label = -1
-        self.score = -1
-
-
-class CustomDetectionUtils:
-    def __init__(self, labels):
-        self.__labels = labels
-        self.__colors = []
-
-        for i in range(len(labels)):
-            color_space_values = np.random.randint(50, 255, size=(3,))
-            red, green, blue = color_space_values
-            red, green, blue = int(red), int(green), int(blue)
-            self.__colors.append([red, green, blue])
-
-    @staticmethod
-    def _sigmoid(x):
-        return 1. / (1. + np.exp(-x))
-
-    def decode_netout(self, netout, anchors, obj_thresh, net_h, net_w):
-        grid_h, grid_w = netout.shape[:2]
-        nb_box = 3
-        netout = netout.reshape((grid_h, grid_w, nb_box, -1))
-        nb_class = netout.shape[-1] - 5
-        boxes = []
-        netout[..., :2] = self._sigmoid(netout[..., :2])
-        netout[..., 4:] = self._sigmoid(netout[..., 4:])
-        netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
-        netout[..., 5:] *= netout[..., 5:] > obj_thresh
-
-        for row in range(grid_h):
-            for col in range(grid_w):
-                for b in range(nb_box):
-                    # 4th element is objectness score
-                    objectness = netout[row, col, b, 4]
-
-                    if objectness <= obj_thresh:
-                        continue
-
-                    # first 4 elements are x, y, w, and h
-                    x, y, w, h = netout[row, col, b, :4]
-                    x = (col + x) / grid_w  # center position, unit: image width
-                    y = (row + y) / grid_h  # center position, unit: image height
-                    w = anchors[2 * b + 0] * np.exp(w) / net_w  # unit: image width
-                    h = anchors[2 * b + 1] * np.exp(h) / net_h  # unit: image height
-                    # last elements are class probabilities
-                    classes = netout[row, col, b, 5:]
-                    box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, objectness, classes)
-                    boxes.append(box)
-
-        return boxes
-
-    @staticmethod
-    def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
-        new_w, new_h = net_w, net_h
-        for i in range(len(boxes)):
-            x_offset, x_scale = (net_w - new_w) / 2. / net_w, float(new_w) / net_w
-            y_offset, y_scale = (net_h - new_h) / 2. / net_h, float(new_h) / net_h
-            boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
-            boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
-            boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
-            boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
-
-    def _interval_overlap(self, interval_a, interval_b):
-        x1, x2 = interval_a
-        x3, x4 = interval_b
-        if x3 < x1:
-            if x4 < x1:
-                return 0
-            else:
-                return min(x2, x4) - x1
-        else:
-            if x2 < x3:
-                return 0
-            else:
-                return min(x2, x4) - x3
-
-    def bbox_iou(self, box1, box2):
-        intersect_w = self._interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
-        intersect_h = self._interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
-        intersect = intersect_w * intersect_h
-        w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin
-        w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin
-        union = w1 * h1 + w2 * h2 - intersect
-
-        try:
-            result = float(intersect) / float(union)
-            return result
-        except:
-            return 0.0
-
-    def do_nms(self, boxes, nms_thresh):
-        if len(boxes) > 0:
-            nb_class = len(boxes[0].classes)
-        else:
-            return
-
-        for c in range(nb_class):
-            sorted_indices = np.argsort([-box.classes[c] for box in boxes])
-
-            for i in range(len(sorted_indices)):
-                index_i = sorted_indices[i]
-
-                if boxes[index_i].classes[c] == 0: continue
-
-                for j in range(i + 1, len(sorted_indices)):
-                    index_j = sorted_indices[j]
-
-                    if self.bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
-                        boxes[index_j].classes[c] = 0
-
-    def get_boxes(self, boxes, labels, thresh):
-        v_boxes, v_labels, v_scores = list(), list(), list()
-        # enumerate all boxes
-        for box in boxes:
-            # enumerate all possible labels
-            for i in range(len(labels)):
-                # check if the threshold for this label is high enough
-                if box.classes[i] > thresh:
-                    v_boxes.append(box)
-                    v_labels.append(labels[i])
-                    v_scores.append(box.classes[i] * 100)
-            # don't break, many labels may trigger for one box
-        return v_boxes, v_labels, v_scores
-
-    def label_color(self, label):
-        """ Return a color from a set of predefined colors. Contains 80 colors in total.
-
-        Args
-            label: The label to get the color for.
-
-        Returns
-            A list of three values representing a RGB color.
-
-            If no color is defined for a certain label, the color green is returned and a warning is printed.
-        """
-        if label < len(self.__colors):
-            return self.__colors[label]
-        else:
-            return 0, 255, 0
-
-    def draw_boxes_and_caption(self, image_frame, v_boxes, v_labels, v_scores, show_names=False, show_percentage=False):
-
-        for i in range(len(v_boxes)):
-            box = v_boxes[i]
-            y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
-            width, height = x2 - x1, y2 - y1
-            class_color = self.label_color(self.__labels.index(v_labels[i]))
-
-            image_frame = cv2.rectangle(image_frame, (x1, y1), (x2, y2), class_color, 2)
-
-            label = ""
-            if show_names and show_percentage:
-                label = "%s : %.3f" % (v_labels[i], v_scores[i])
-            elif show_names:
-                label = "%s" % (v_labels[i])
-            elif show_percentage:
-                label = "%.3f" % (v_scores[i])
-
-            if show_names or show_percentage:
-                b = np.array([x1, y1, x2, y2]).astype(int)
-                cv2.putText(image_frame, label, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (200, 0, 0), 3)
-                cv2.putText(image_frame, label, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 2)
-
-        return image_frame
diff --git a/algorithm/imageprocessing.py b/algorithm/imageprocessing.py
index 522bd3c..fdc01ae 100644
--- a/algorithm/imageprocessing.py
+++ b/algorithm/imageprocessing.py
@@ -3,53 +3,48 @@ import numpy as np
 
 from algorithm.SalamanderImage import SalamanderImage
 
-#import skeletonization
-#import straightening
 import algorithm.dsift as dsift
 import algorithm.segmentation as segmentation
 
+
 def create_salamander_image(filename: str):
-	#print("Processing image " + filename)
-	salamander_image = SalamanderImage(filename)
-	
-	salamander_image.filename = filename
-	salamander_image.descriptors = get_descriptors(filename)
+    salamander_image = SalamanderImage(filename)
+
+    salamander_image.filename = filename
+    salamander_image.descriptors = get_descriptors(filename)
+
+    return salamander_image
 
-	return salamander_image
 
 def get_descriptors(filename: str):
-	image = get_straightened_image(filename)
-	return calculate_descriptors(image)
+    image = get_straightened_image(filename)
+    return calculate_descriptors(image)
 
-"""
-Reads, straightens, resizes the image and returns it as a
-"""
+
+# Reads, straightens, resizes the image and returns it
 def get_straightened_image(filename: str):
-	straightened_filename = filename#[0:-4] + '_str.jpg'
-	image = get_image(straightened_filename)
-	
-	return image
-
-"""
-Should return a binary image (numpy ndarray) with 1 for "Part of salamander"
-and 0 for "Not part of the salamander".
-"""
+    straightened_filename = filename  # [0:-4] + '_str.jpg'
+    image = get_image(straightened_filename)
+
+    return image
+
+
+# Should return a binary image (numpy ndarray) with 1 for "Part of salamander"
+# and 0 for "Not part of the salamander".
 def get_segmented_image(filename: str):
-	image = get_image(filename)
-	return segmentation.get_salamander_mask(image)
-	
+    image = get_image(filename)
+    return segmentation.get_salamander_mask(image)
+
+
 def get_image(filename):
-	#image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
+    img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
+
+    if img is None:
+        raise FileNotFoundError("Cannot find image file " + filename)
 
-	img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
+    return img
 
-	if img is None:
-		raise FileNotFoundError("Cannot find image file " + filename)
-	
-	return img
 
-"""
-Calculates descriptors from preprocessed image
-"""
+# Calculates descriptors from preprocessed image
 def calculate_descriptors(image):
-	return dsift.compute_descriptors(image)
+    return dsift.compute_descriptors(image)
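+
+
+# Illustrative usage sketch (the path here is hypothetical):
+#   salamander_image = create_salamander_image("data/input/salamander.png")
+#   salamander_image.descriptors  # dense SIFT descriptors of the straightened image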
diff --git a/algorithm/predict_salamander_abdomen.py b/algorithm/predict_salamander_abdomen.py
index 0e16712..e54cd8c 100644
--- a/algorithm/predict_salamander_abdomen.py
+++ b/algorithm/predict_salamander_abdomen.py
@@ -1,11 +1,14 @@
+# This file contains modified code from DeepLabCut's source code
+
 import os.path
 import time
 import numpy as np
 from pathlib import Path
 from skimage.util import img_as_ubyte
 
-def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
-                trainingsetindex: int =0,gputouse: int =None):
+
+def run_prediction_dlc(config: str, image: np.ndarray, shuffle: int = 1,
+                       trainingsetindex: int = 0, gputouse: int = None):
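+    # Runs a single-image DeepLabCut inference pass. Returns the flattened pose
+    # array (x, y, score for each body part), the number of frames (always 1
+    # here), and the image width and height.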
     from deeplabcutcore.pose_estimation_tensorflow.nnet import predict
     from deeplabcutcore.pose_estimation_tensorflow.config import load_config
     from tensorflow.python.framework.ops import reset_default_graph
@@ -15,8 +18,8 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
         # was potentially set during training
         del os.environ['TF_CUDNN_USE_AUTOTUNE']
 
-    if gputouse is not None: #gpu selection
-            os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
+    if gputouse is not None:  # gpu selection
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
 
     # tf.compat.v1.reset_default_graph()
     reset_default_graph()
@@ -24,21 +27,24 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
 
     cfg = auxiliaryfunctions.read_config(config)
     train_fraction = cfg['TrainingFraction'][trainingsetindex]
-    model_folder=os.path.join(cfg["project_path"],str(auxiliaryfunctions.GetModelFolder(train_fraction,shuffle,cfg)))
+    model_folder = os.path.join(cfg["project_path"],
+                                str(auxiliaryfunctions.GetModelFolder(train_fraction, shuffle, cfg)))
     path_test_config = Path(model_folder) / 'test' / 'pose_cfg.yaml'
     # print(path_test_config)
     try:
         dlc_cfg = load_config(str(path_test_config))
     except FileNotFoundError:
-        raise FileNotFoundError("It seems the model for shuffle %s and trainFraction %s does not exist."%(shuffle,train_fraction))
+        raise FileNotFoundError(
+            "It seems the model for shuffle %s and trainFraction %s does not exist." % (shuffle, train_fraction))
     # Check which snapshots are available and sort them by # iterations
     try:
-      snapshots = np.array([fn.split('.')[0]for fn in os.listdir(os.path.join(model_folder , 'train'))if "index" in fn])
+        snapshots = np.array(
+            [fn.split('.')[0] for fn in os.listdir(os.path.join(model_folder, 'train')) if "index" in fn])
     except FileNotFoundError:
-      raise FileNotFoundError("Snapshots not found!\
+        raise FileNotFoundError("Snapshots not found!\
        It seems the dataset for shuffle %s has not been trained/does not exist.\n \
        Please train it before using it to analyze videos.\n Use the function \
-       'train_network' to train the network for shuffle %s."%(shuffle,shuffle))
+       'train_network' to train the network for shuffle %s." % (shuffle, shuffle))
 
     if cfg['snapshotindex'] == 'all':
         # print("Snapshotindex is set to 'all' in the config.yaml file.\
@@ -47,7 +53,7 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
         #    For now, changing snapshot index to -1!")
         snapshot_index = -1
     else:
-        snapshot_index=cfg['snapshotindex']
+        snapshot_index = cfg['snapshotindex']
 
     increasing_indices = np.argsort([int(m.split('-')[1]) for m in snapshots])
     snapshots = snapshots[increasing_indices]
@@ -59,7 +65,7 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
     ##################################################
 
     # Check if data already was generated:
-    dlc_cfg['init_weights'] = os.path.join(model_folder , 'train', snapshots[snapshot_index])
+    dlc_cfg['init_weights'] = os.path.join(model_folder, 'train', snapshots[snapshot_index])
     trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
 
     # Update batchsize (based on parameters in config.yaml)
@@ -74,8 +80,8 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
     # update number of outputs and adjust pandas indices
     dlc_cfg['num_outputs'] = cfg.get('num_outputs', 1)
 
-    if gputouse is not None: #gpu selectinon
-            os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
+    if gputouse is not None:  # gpu selection
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
 
     ##################################################
     # Loading the images
@@ -84,8 +90,7 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
     # print("session: ", sess)
     # PredictedData,nframes,nx,ny=get_poses_deeplabcut_model(dlc_cfg, sess, inputs, outputs,image)
 
-
-    ny,nx,nc=np.shape(image)
+    ny, nx, nc = np.shape(image)
     nframes = 1
     # print("Frame dimensions: ", nx,ny)
     PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
@@ -95,16 +100,12 @@ def run_prediction_dlc(config: str,image: np.ndarray,shuffle: int =1,
     pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
     PredictedData[0, :] = pose.flatten()
 
-
     stop = time.time()
     # print("PredictedData done:\n")
 
-
     # closing the session:
     # device = cuda.get_current_device()
     # device.reset()
 
     # tf.Graph.reset_default_graph()
     return PredictedData[0], nframes, nx, ny
-
-
diff --git a/algorithm/segmentation.py b/algorithm/segmentation.py
index 722cea6..3e530cf 100644
--- a/algorithm/segmentation.py
+++ b/algorithm/segmentation.py
@@ -1,33 +1,35 @@
 from cv2 import cv2
 import numpy as np
 
+
 def get_salamander_mask(image):
-	ret, threshold = cv2.threshold(image, np.mean(image)-40, 255, 0)
+    ret, threshold = cv2.threshold(image, np.mean(image) - 40, 255, 0)
+
+    i, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
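+    # NOTE: the three-value unpacking above assumes the OpenCV 3.x findContours
+    # API; OpenCV 4.x returns only (contours, hierarchy).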
+
+    largest_area, largest_contour_index = find_biggest_contour(contours, image.size)
 
-	i, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+    cv2.drawContours(image, contours[largest_contour_index], -1, (0, 0, 255), 3)
 
-	largest_area, largest_contour_index = find_biggest_contour(contours, image.size)
+    mask = np.zeros(image.shape)
 
-	cv2.drawContours(image, contours[largest_contour_index], -1, (0, 0, 255), 3)
+    cv2.fillPoly(mask, pts=[contours[largest_contour_index]], color=(255, 255, 255))
 
-	mask = np.zeros(image.shape)
+    return mask
 
-	cv2.fillPoly(mask, pts =[contours[largest_contour_index]], color=(255,255,255))
-	
-	return mask
 
 def find_biggest_contour(contours, imsize):
-	largest_area = 0
-	largest_contour_index = -1
-
-	i = 0
-	total_contours = len(contours)
-	while (i < total_contours ):
-		area = cv2.contourArea(contours[i])
-		if(area > largest_area and area < imsize*0.9):
-			largest_area = area
-			largest_contour_index = i
-
-		i+=1
-			
-	return largest_area, largest_contour_index
+    largest_area = 0
+    largest_contour_index = -1
+
+    i = 0
+    total_contours = len(contours)
+    while i < total_contours:
+        area = cv2.contourArea(contours[i])
+        if largest_area < area < imsize * 0.9:
+            largest_area = area
+            largest_contour_index = i
+
+        i += 1
+
+    return largest_area, largest_contour_index
diff --git a/algorithm/straighten_with_dlc.py b/algorithm/straighten_with_dlc.py
index 756e66c..cdf9861 100644
--- a/algorithm/straighten_with_dlc.py
+++ b/algorithm/straighten_with_dlc.py
@@ -15,8 +15,8 @@ LEAST_DEEPLABCUT_SCORE = 0.00
 # Only one thread is allowed to use tf and the gpu at once:
 _ACCESS_TF_AND_GPU_SEMA = Semaphore(1)
 
-# These are constants that determine the size of the straigthened image. They need to
-# be the same size and the same dimention:
+# These are constants that determine the size of the straightened image. They need to
+# be the same size and the same dimension:
 STRAIGTHENED_IMAGE_WIDTH = 320
 STRAIGTHENED_IMAGE_HEIGHT = 120
 STRAIGTHENED_IMAGE_ASPECT_RATIO = STRAIGTHENED_IMAGE_HEIGHT / STRAIGTHENED_IMAGE_WIDTH
@@ -32,197 +32,198 @@ MINIMUM_MID_POINT_DISTANCE = 30
 # FUNCTIONS
 ########################################
 def straighten(image):
-	"""
-	Takes the image and tries to find the abdomen of a salamander by locating
-	4 points down the abdomen of the salamander. Then we make a curve that
-	follows those 4 points. Each point will be given a score that tells how
-	confident the AI is with its estimation.
-
-	Parameters:
-	----------
-	image: cv2 image NEEDS to be RGB not BGR
-		Full path of the config.yaml file as a string.
-	----------
-	Returns:
-	-------
-	straightened_image: The straightened image of the abdomen. None if straightening fails.
-
-	cropped_image: A potentially cropped version of the original image.
-
-	points_spine: The spine points that were found by the AI.
-
-	points_shoulder: The shoulder points that were found by the AI.
-
-	score: How confident the AI was at each point.
-	-------
-	"""
-	with _ACCESS_TF_AND_GPU_SEMA:
-		start_time = time.time()
-		height, width, _ = image.shape
-		# find the largest dimension:
-		larger_dim = max(width, height)
-		cropped_image = image
-		# reduce size of image by the largest dimension, if it exceeds a given constant:
-		if larger_dim > MAX_DEEPLABCUT_IMAGE_SIZE:
-			factor = MAX_DEEPLABCUT_IMAGE_SIZE / larger_dim
-			cropped_image = cv2.resize(cropped_image, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)
-
-		prediction, _, x, y = psa.run_prediction_dlc(config=abs_path_dlc_config, image=cropped_image, gputouse=GPUID)
-
-		# points is an array of coordinates to use to straighten the image:
-		# We use the prediction from deeplabcut for the points. There are two things to take note of.
-		# The predictions can be negative, so we ensure that negative values are set as 0 with max(0,num)
-		# the structure of prediction is: shape(1,12) content: [bp1_x, bp1_y, bp1_score, bp2_x, bp2_y, bp2_score
-		# bp3_x, bp3_y, bp3_score, bp4_x, bp4_y, bp4_score] 'bp' = body_part.
-		# we are using the score to determine if the image is valid and skipping the score for points:
-		score = np.array([prediction[2], prediction[5], prediction[8], prediction[11], prediction[14], prediction[17]])
-		if score[0] < LEAST_DEEPLABCUT_SCORE or score[1] < LEAST_DEEPLABCUT_SCORE or score[
-			2] < LEAST_DEEPLABCUT_SCORE or score[3] < LEAST_DEEPLABCUT_SCORE:
-			print("straighten failed because the score (", score, " found is below ", LEAST_DEEPLABCUT_SCORE)
-			return None, None, None, None, score
-
-		# getting new width and height:
-		height, width, _ = cropped_image.shape
-		# ensuring each point is between 0 and width/height:
-		points_spine = np.array([
-			(min(width, max(0, int(prediction[0]))), min(height, max(0, int(prediction[1])))),
-			(min(width, max(0, int(prediction[3]))), min(height, max(0, int(prediction[4])))),
-			(min(width, max(0, int(prediction[6]))), min(height, max(0, int(prediction[7])))),
-			(min(width, max(0, int(prediction[9]))), min(height, max(0, int(prediction[10]))))
-		])
-
-		# Used to check if the point 2 is closer to point 1. This will also be used later
-		# on to determine if the two points in the middle have been swapped.
-		dist_2_1 = np.linalg.norm(points_spine[1] - points_spine[0])
-		# print("dist_2_1: ", dist_2_1)
-		dist_2_3 = np.linalg.norm(points_spine[1] - points_spine[2])
-		# check if the two middle points are equal:
-		if dist_2_3 < MINIMUM_MID_POINT_DISTANCE:
-			print("the middle points needs to be moved")
-			dist_2_4 = np.linalg.norm(points_spine[1] - points_spine[3])
-			# print("dist_2_4: ", dist_2_4)
-
-			# if point 2 needs to move towards the 1 point:
-			if dist_2_1 > dist_2_4:
-				print("moving the 2 point torwards the 1")
-				points_spine[1] = halfway_between(point1=points_spine[1], point2=points_spine[0])
-			# else the 3 point needs to be moved towards the 4 point:
-			else:
-				print("moving the 3 point torwards the 4")
-				points_spine[2] = halfway_between(point1=points_spine[2], point2=points_spine[3])
-		del dist_2_3
-
-		# Shoulder points are cannot be a part of the spine points as the spine points are sent
-		# to a function that expects all the points to represent a line:
-		points_shoulder = np.array([
-			(min(width, max(0, int(prediction[12]))), min(height, max(0, int(prediction[13])))),
-			(min(width, max(0, int(prediction[15]))), min(height, max(0, int(prediction[16]))))
-		])
-		# Validating spine points:
-		# This would mean that either the first or last point are above each other.
-		# or the image is really small. This is ugly, but we can't just check for duplicates:
-		if np.linalg.norm(points_spine[0] - points_spine[1]) < MINIMUM_MID_POINT_DISTANCE or \
-				np.linalg.norm(points_spine[0] - points_spine[2]) < MINIMUM_MID_POINT_DISTANCE or \
-				np.linalg.norm(points_spine[0] - points_spine[3]) < MINIMUM_MID_POINT_DISTANCE or \
-				np.linalg.norm(points_spine[3] - points_spine[1]) < MINIMUM_MID_POINT_DISTANCE or \
-				np.linalg.norm(points_spine[3] - points_spine[2]) < MINIMUM_MID_POINT_DISTANCE:
-			# print("predicted points were not correct and we can't proceed with the straightening")
-			return None, None, None, None, score
-
-		# checking if the points in the middle needs to be swapped:
-		dist_2_1 = np.linalg.norm(points_spine[1] - points_spine[0])
-		dist_3_1 = np.linalg.norm(points_spine[2] - points_spine[0])
-
-		if dist_2_1 > dist_3_1:
-			points_spine[[1, 2], :] = points_spine[[2, 1], :]
-			# print("Two middle points need to be swapped, ", dist_3_1, " ", dist_2_1)
-		del dist_3_1
-		del dist_2_1
-
-		# Finding shoulder width because the pattern on the salamander is usually as wide as
-		# its shoulders. This will be used to stretch the pattern to the image borders. If
-		# we don't do this a lot of the image will be of either the salamanders side (that
-		# does not have a pattern) or background:
-		shoulder_width = math.sqrt(
-			(points_shoulder[1][0] - points_shoulder[0][0]) ** 2 + (points_shoulder[1][1] - points_shoulder[0][1]) ** 2)
-		# print("shoulder width: ", shoulder_width)
-		if shoulder_width < MINIMUM_SHOULDER_WIDTH:
-			shoulder_width = 2 * (math.sqrt(
-				(points_shoulder[1][0] - points_spine[0][0]) ** 2 + (points_shoulder[1][1] - points_spine[0][1]) ** 2))
-			# print("correcting: ", shoulder_width)
-			if shoulder_width < MINIMUM_SHOULDER_WIDTH:
-				shoulder_width = STRAIGTHENED_IMAGE_HEIGHT
-
-		start = time.time()
-		curve = get_smooth_curve(points_spine, STRAIGTHENED_IMAGE_WIDTH)
-		end = time.time()
-		# print("Bicubic interpolation of spine took " + str(end - start) + "s")
-
-		map = generate_map_from_bellycurve(curve, shoulder_width)
-		straightened_image = cv2.remap(cropped_image, map[:, :, 0], map[:, :, 1], cv2.INTER_LINEAR)
-
-		end_time = time.time()
-		# print("Straightening took " + str(end_time - start_time) + "s")
-		return straightened_image, cropped_image, points_spine, points_shoulder, score
+    """
+    Takes the image and tries to find the abdomen of a salamander by locating
+    4 points down the abdomen of the salamander. Then we make a curve that
+    follows those 4 points. Each point is given a score that tells how
+    confident the AI is in its estimation.
+
+    Parameters:
+    ----------
+    image: cv2 image, which NEEDS to be RGB, not BGR
+        The image of the salamander to be straightened.
+    ----------
+    Returns:
+    -------
+    straightened_image: The straightened image of the abdomen. None if straightening fails.
+
+    cropped_image: A potentially cropped version of the original image.
+
+    points_spine: The spine points that were found by the AI.
+
+    points_shoulder: The shoulder points that were found by the AI.
+
+    score: How confident the AI was at each point.
+    -------
+    """
+    with _ACCESS_TF_AND_GPU_SEMA:
+        start_time = time.time()
+        height, width, _ = image.shape
+        # find the largest dimension:
+        larger_dim = max(width, height)
+        cropped_image = image
+        # reduce size of image by the largest dimension, if it exceeds a given constant:
+        if larger_dim > MAX_DEEPLABCUT_IMAGE_SIZE:
+            factor = MAX_DEEPLABCUT_IMAGE_SIZE / larger_dim
+            cropped_image = cv2.resize(cropped_image, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)
+
+        prediction, _, x, y = psa.run_prediction_dlc(config=abs_path_dlc_config, image=cropped_image, gputouse=GPUID)
+
+        # points is an array of coordinates used to straighten the image.
+        # We use the prediction from deeplabcut for the points. There are two things to take note of:
+        # the predictions can be negative, so we ensure that negative values are set to 0 with max(0, num),
+        # and the structure of prediction is: shape(1,18) content: [bp1_x, bp1_y, bp1_score, bp2_x, bp2_y,
+        # bp2_score, ..., bp6_x, bp6_y, bp6_score] 'bp' = body_part (4 spine points, then 2 shoulder points).
+        # we use the score to determine whether the image is valid and skip the scores when extracting the points:
+        score = np.array([prediction[2], prediction[5], prediction[8], prediction[11], prediction[14], prediction[17]])
+        if score[0] < LEAST_DEEPLABCUT_SCORE or score[1] < LEAST_DEEPLABCUT_SCORE or \
+                score[2] < LEAST_DEEPLABCUT_SCORE or score[3] < LEAST_DEEPLABCUT_SCORE:
+            print("straighten failed because the score (", score, ") found is below ", LEAST_DEEPLABCUT_SCORE)
+            return None, None, None, None, score
+
+        # getting new width and height:
+        height, width, _ = cropped_image.shape
+        # ensuring each point is between 0 and width/height:
+        points_spine = np.array([
+            (min(width, max(0, int(prediction[0]))), min(height, max(0, int(prediction[1])))),
+            (min(width, max(0, int(prediction[3]))), min(height, max(0, int(prediction[4])))),
+            (min(width, max(0, int(prediction[6]))), min(height, max(0, int(prediction[7])))),
+            (min(width, max(0, int(prediction[9]))), min(height, max(0, int(prediction[10]))))
+        ])
+
+        # Used to check if point 2 is closer to point 1. This will also be used later
+        # on to determine if the two points in the middle have been swapped.
+        dist_2_1 = np.linalg.norm(points_spine[1] - points_spine[0])
+        # print("dist_2_1: ", dist_2_1)
+        dist_2_3 = np.linalg.norm(points_spine[1] - points_spine[2])
+        # check if the two middle points are too close to each other:
+        if dist_2_3 < MINIMUM_MID_POINT_DISTANCE:
+            print("the middle points need to be moved")
+            dist_2_4 = np.linalg.norm(points_spine[1] - points_spine[3])
+            # print("dist_2_4: ", dist_2_4)
+
+            # if point 2 needs to move towards point 1:
+            if dist_2_1 > dist_2_4:
+                print("moving point 2 towards point 1")
+                points_spine[1] = halfway_between(point1=points_spine[1], point2=points_spine[0])
+            # else point 3 needs to be moved towards point 4:
+            else:
+                print("moving point 3 towards point 4")
+                points_spine[2] = halfway_between(point1=points_spine[2], point2=points_spine[3])
+        del dist_2_3
+
+        # Shoulder points cannot be part of the spine points, as the spine points are sent
+        # to a function that expects all the points to represent a line:
+        points_shoulder = np.array([
+            (min(width, max(0, int(prediction[12]))), min(height, max(0, int(prediction[13])))),
+            (min(width, max(0, int(prediction[15]))), min(height, max(0, int(prediction[16]))))
+        ])
+        # Validating spine points:
+        # This would mean that the first or last point sits on top of another point,
+        # or the image is really small. This is ugly, but we can't just check for duplicates:
+        if np.linalg.norm(points_spine[0] - points_spine[1]) < MINIMUM_MID_POINT_DISTANCE or \
+                np.linalg.norm(points_spine[0] - points_spine[2]) < MINIMUM_MID_POINT_DISTANCE or \
+                np.linalg.norm(points_spine[0] - points_spine[3]) < MINIMUM_MID_POINT_DISTANCE or \
+                np.linalg.norm(points_spine[3] - points_spine[1]) < MINIMUM_MID_POINT_DISTANCE or \
+                np.linalg.norm(points_spine[3] - points_spine[2]) < MINIMUM_MID_POINT_DISTANCE:
+            # print("predicted points were not correct and we can't proceed with the straightening")
+            return None, None, None, None, score
+
+        # checking if the points in the middle need to be swapped:
+        dist_2_1 = np.linalg.norm(points_spine[1] - points_spine[0])
+        dist_3_1 = np.linalg.norm(points_spine[2] - points_spine[0])
+
+        if dist_2_1 > dist_3_1:
+            points_spine[[1, 2], :] = points_spine[[2, 1], :]
+        # print("Two middle points need to be swapped, ", dist_3_1, " ", dist_2_1)
+        del dist_3_1
+        del dist_2_1
+
+        # Finding shoulder width because the pattern on the salamander is usually as wide as
+        # its shoulders. This will be used to stretch the pattern to the image borders. If
+        # we don't do this, a lot of the image will be of either the salamander's side (which
+        # does not have a pattern) or the background:
+        shoulder_width = math.sqrt(
+            (points_shoulder[1][0] - points_shoulder[0][0]) ** 2 + (points_shoulder[1][1] - points_shoulder[0][1]) ** 2)
+        # print("shoulder width: ", shoulder_width)
+        if shoulder_width < MINIMUM_SHOULDER_WIDTH:
+            shoulder_width = 2 * (math.sqrt(
+                (points_shoulder[1][0] - points_spine[0][0]) ** 2 + (points_shoulder[1][1] - points_spine[0][1]) ** 2))
+            # print("correcting: ", shoulder_width)
+            if shoulder_width < MINIMUM_SHOULDER_WIDTH:
+                shoulder_width = STRAIGTHENED_IMAGE_HEIGHT
+
+        start = time.time()
+        curve = get_smooth_curve(points_spine, STRAIGTHENED_IMAGE_WIDTH)
+        end = time.time()
+        # print("Bicubic interpolation of spine took " + str(end - start) + "s")
+
+        map = generate_map_from_bellycurve(curve, shoulder_width)
+        straightened_image = cv2.remap(cropped_image, map[:, :, 0], map[:, :, 1], cv2.INTER_LINEAR)
+
+        end_time = time.time()
+        # print("Straightening took " + str(end_time - start_time) + "s")
+        return straightened_image, cropped_image, points_spine, points_shoulder, score
 
 
 def generate_map_from_bellycurve(curve, width=STRAIGTHENED_IMAGE_WIDTH):
-	'''
-	Generates a map that can be passed to cv2.remap to extract the belly pattern
-	'''
-	salamander_length = np.linalg.norm(curve[len(curve) - 1] - curve[0])
+    """
+    Generates a map that can be passed to cv2.remap to extract the belly pattern
+    """
 
-	# salamander_width = salamander_length * STRAIGHTENED_IMAGE_ASPECT_RATIO
-	salamander_width = width + SHOULDER_ESTIMATION_BUFFER
-	gradient = np.gradient(curve, axis=0)
+    # salamander_length = np.linalg.norm(curve[len(curve) - 1] - curve[0])
 
-	gradientlength = np.linalg.norm(gradient, axis=1)
+    # salamander_width = salamander_length * STRAIGHTENED_IMAGE_ASPECT_RATIO
+    salamander_width = width + SHOULDER_ESTIMATION_BUFFER
+    gradient = np.gradient(curve, axis=0)
 
-	tu = np.divide(gradient, gradientlength.reshape((STRAIGTHENED_IMAGE_WIDTH, 1)))
+    gradientlength = np.linalg.norm(gradient, axis=1)
 
-	su = np.ndarray(tu.shape)
+    tu = np.divide(gradient, gradientlength.reshape((STRAIGTHENED_IMAGE_WIDTH, 1)))
 
-	# Invert the vectors
-	for i in range(0, len(tu)):
-		su[i, 1] = tu[i, 0]
-		su[i, 0] = -tu[i, 1]
+    su = np.ndarray(tu.shape)
 
-	s = np.linspace(-1, 1, STRAIGTHENED_IMAGE_HEIGHT).reshape((STRAIGTHENED_IMAGE_HEIGHT, 1, 1))
+    # Invert the vectors
+    for i in range(0, len(tu)):
+        su[i, 1] = tu[i, 0]
+        su[i, 0] = -tu[i, 1]
 
-	map = s * su * (salamander_width / 2) + curve
+    s = np.linspace(-1, 1, STRAIGTHENED_IMAGE_HEIGHT).reshape((STRAIGTHENED_IMAGE_HEIGHT, 1, 1))
 
-	return map.astype('float32')
+    map = s * su * (salamander_width / 2) + curve
+
+    return map.astype('float32')
 
 
 def get_smooth_curve(points, num_points):
-	'''
-	Takes in an array of 2-D points and returns a new set of points
-	with numpoints elements where the missing points are interpolated
-	using cubic interpolation
-	'''
-	# Calculate distance between all the points
-	distance = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1))
-	# print("distance, ", distance.shape)
-	# Accumulate distance to use as parameter
-	accumulated_distance = np.cumsum(distance)
+    """
+    Takes in an array of 2-D points and returns a new set of points
+    with num_points elements where the missing points are interpolated
+    using cubic interpolation
+    """
+    # Calculate distance between all the points
+    distance = np.sqrt(np.sum(np.diff(points, axis=0) ** 2, axis=1))
+    # print("distance, ", distance.shape)
+    # Accumulate distance to use as parameter
+    accumulated_distance = np.cumsum(distance)
 
-	# Insert starting point
-	accumulated_distance = np.insert(accumulated_distance, 0, 0)
+    # Insert starting point
+    accumulated_distance = np.insert(accumulated_distance, 0, 0)
 
-	# Make it go from 0 to 1
-	accumulated_distance /= accumulated_distance[-1]
+    # Make it go from 0 to 1
+    accumulated_distance /= accumulated_distance[-1]
 
-	alpha = np.linspace(0, 1, num_points)
+    alpha = np.linspace(0, 1, num_points)
 
-	f = interp1d(accumulated_distance, points, kind='cubic', axis=0)
+    f = interp1d(accumulated_distance, points, kind='cubic', axis=0)
 
-	inter_points = f(alpha)
-	return inter_points
+    inter_points = f(alpha)
+    return inter_points
 
 
 def halfway_between(point1, point2):
-	"""
-	halfway_between() calculates a point between two points:
-	"""
-	vec = [(point2[0] - point1[0]) / 2, (point2[1] - point1[1]) / 2]
-	return [point1[0] + vec[0], point1[1] + vec[1]]
+    """
+    halfway_between() calculates the point halfway between two points.
+    """
+    vec = [(point2[0] - point1[0]) / 2, (point2[1] - point1[1]) / 2]
+    return [point1[0] + vec[0], point1[1] + vec[1]]
-- 
GitLab