diff --git a/Test.m b/Test.m
index 531fef47b228886abfba5747600822e84ad84faf..44b7d7b37181f344706def9edc557eb428e2d7b1 100644
--- a/Test.m
+++ b/Test.m
@@ -14,20 +14,25 @@ img = imread("colorHistogram/Edvard_Munch_-_Despair_(1894).jpg");
 % "visual" contains visualization object that can be used in plot method
 [hog, visual] = histogramOrientedGradients(img);
 
-figure,
-imshow(img),
-hold on,
-plot(visual),
-title("Hog features of image");
+%figure,
+%imshow(img),
+%hold on,
+%plot(visual),
+%title("Hog features of image");
 
 % Using Cellsize this time
 [hog2, visual2] = histogramOrientedGradients(img, 32);
 
-figure,
-imshow(img),
+%figure,
+%imshow(img),
+%hold on,
+%plot(visual2),
+%title("Hog features using Cellsize");
+%% Scale-Invariant Feature Transform - Test of implementation
+%Can look at this https://www.analyticsvidhya.com/blog/2019/10/detailed-guide-powerful-sift-technique-image-matching-python/
+points = sift(img);
+
+imshow(img);
 hold on,
-plot(visual2),
-title("Hog features using Cellsize");
-%% Section 2 Title
-% Description of second code block
-b = 2;
+plot(points);
+
diff --git a/sift.m b/sift.m
new file mode 100644
index 0000000000000000000000000000000000000000..23ce43f5d996b5af5f413fb549b6df3b6377f150
--- /dev/null
+++ b/sift.m
@@ -0,0 +1,7 @@
+function siftPoints = sift(image)
+    %Convert RGB input to greyscale; detectSIFTFeatures expects a 2-D intensity image
+    greyImage = rgb2gray(image);
+    %Use inbuilt detectSIFTFeatures function (Computer Vision Toolbox) to find SIFT keypoints
+    siftPoints = detectSIFTFeatures(greyImage);
+end
+