diff --git a/filter_images_V1.py b/filter_images_V1.py
index 29e006c36404b668d61df3eee3d7c4047289acf9..1be6379ff0e00b112386c429b3a6ab1f34c50c1a 100644
--- a/filter_images_V1.py
+++ b/filter_images_V1.py
@@ -69,10 +69,9 @@ def filter_image(image):
     image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
 
     # Minimum filter: Rauschen entfernen
-    # Closing operation elliptical shaped kernels
-    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))                        # use another kernal shapes 
-    # perform erosion on the image
-    image = cv2.erode(image, kernel)
+    # Erosion with a rectangular kernel; disabled for now, other kernel shapes can be tried here
+    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
+    #image = cv2.erode(image, kernel)
 
     points = find_points(image)
     overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
@@ -87,7 +86,7 @@ def filter_image(image):
     #        pts = np.array([[x1, y1], [x2, y2]], np.int32)
     #        #cv2.polylines(overlay, [pts], False, (255, 0, 0), thickness=1)
 
-    return overlay
+    return overlay, image
 
 def sharpenImg(image):
     # median of all the pixels under the kernel area
@@ -156,13 +155,13 @@ while capture.isOpened():
         
         if last is not None:
             diff = cv2.absdiff(filter, last) # Difference
-            overlay = filter_image(diff)
+            overlay, image = filter_image(diff)
 
             result = overlay_imgs(frame, overlay)
             
             #function_img=calculate_function(overlay,result)
 
-            cv2.imshow(window, result)
+            cv2.imshow(window, image)
 
         last = filter
     
diff --git a/filter_images_V2.py b/filter_images_V2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c6f8b61e09955bcdf8c0074c035e837c79ea42
--- /dev/null
+++ b/filter_images_V2.py
@@ -0,0 +1,133 @@
+import cv2
+import numpy as np
+from datetime import datetime
+from collections import deque
+
+VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
+capture = cv2.VideoCapture(VIDEO)
+
+last = None
+frame = None
+diff = None
+window = 'Filter'
+
+# Settings
+min_threshold = 30
+max_threshold = 110
+img_threshold = 100
+line_threshold = 30
+
+def overlay_imgs(base, top):
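+    # Composite the non-black pixels of `top` onto `base` using a binary mask
+    # derived from the overlay.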
+    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
+
+    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
+    mask_inv = cv2.bitwise_not(mask)
+    # Black out the overlay region in the base image
+    img1_bg = cv2.bitwise_and(base, base, mask=mask_inv)
+    # Take only the non-black region of the overlay image
+    img2_fg = cv2.bitwise_and(top, top, mask=mask)
+
+    return cv2.add(img1_bg, img2_fg)
+
+def filter_image(image):
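+    # Edge-detect the difference image; returns the (currently empty) BGR
+    # overlay for drawing and the processed edge map itself.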
+    global min_threshold, max_threshold, line_threshold, result, window
+    image = image.copy()
+    # Use canny edge detection
+    image = cv2.Canny(image, min_threshold, max_threshold)
+
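+    # Morphological closing (dilation followed by erosion) bridges small gaps
+    # in the detected edges.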
+    # kernel = np.ones((5,5),np.uint8)
+    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
+    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
+
+    #lines = cv2.HoughLinesP(image, cv2.HOUGH_PROBABILISTIC, np.pi/360, line_threshold, minLineLength=10, maxLineGap=20)
+
+    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
+
+    #if lines is not None:
+    #    for line in lines:
+    #        x1,y1,x2,y2 = line[0]
+    #        pts = np.array([[x1, y1], [x2, y2]], np.int32)
+    #        cv2.polylines(overlay, [pts], False, (255, 0, 0), thickness=3)
+
+    return overlay, image
+
+# def img_callback(val):
+#     global img_threshold, result
+
+#     img_threshold = val
+#     if result is not None:
+#         filter_image(result)
+
+# def max_callback(val):
+#     global max_threshold, result
+
+#     max_threshold = val
+#     if result is not None:
+#         filter_image(result)
+
+def line_callback(val):
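+    # Trackbar callback: update the line threshold and re-run the filter on
+    # the most recent difference image.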
+    global line_threshold, frame, diff
+
+    line_threshold = val
+    if diff is not None:
+        overlay, _ = filter_image(diff)
+
+        result = overlay_imgs(frame, overlay)
+        cv2.imshow(window, result)
+
+
+cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
+cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)
+
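+# Keep the last few grayscale frames; consecutive absolute differences are
+# accumulated into a single motion mask in the main loop below.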
+buffer = deque(maxlen=4)
+mix = 0.8
+
+while capture.isOpened():
+
+    ret, frame = capture.read()
+    if ret == True:
+        frame = cv2.resize(frame, (1280, 720))
+
+        filter = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+        filter = cv2.medianBlur(filter, 3)
+        buffer.append(filter)
+        if len(buffer) >= 1:
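+            # Accumulate the absolute differences of consecutive buffered
+            # frames into one motion mask (addWeighted saturates at 255).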
+            diff = np.zeros(filter.shape, np.uint8)
+            for i in range(0, len(buffer) - 1):
+                diff_frame = cv2.absdiff(buffer[i], buffer[i+1]) # Difference
+                diff = cv2.addWeighted(diff, 1.0, diff_frame, 1.0, 0.0)
+                #diff = cv2.add(diff, diff_frame)
+
+            overlay, image = filter_image(diff)
+            result = overlay_imgs(frame, overlay)
+
+            cv2.imshow(window, image)
+
+    
+        code = cv2.waitKey(33)
+        if code & 0xFF == ord('s'):
+            now = datetime.now()
+            timestamp = now.strftime("%d_%m_%Y_%H_%M_%S")
+            cv2.imwrite(f'Filter/IMG_{timestamp}.png', result)
+
+        if code & 0xFF == ord('q'):
+            break
+
+    else:
+        break
+
+capture.release()
+
+cv2.destroyAllWindows()
\ No newline at end of file