Skip to content
Snippets Groups Projects
Commit c0d8c3fe authored by Joel Steffens's avatar Joel Steffens
Browse files

Aufbereitung für Abgabe: Kommentare für Hauptprogramm ergänzt

parent 88559f8a
No related branches found
No related tags found
No related merge requests found
Showing
with 109 additions and 1004 deletions
<mxfile host="65bd71144e" scale="2" border="0">
<diagram id="lPL2x5aQqY1zRR2xWvco" name="Seite-1">
<mxGraphModel dx="1326" dy="740" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1169" pageHeight="827" math="0" shadow="0">
<root>
<mxCell id="0"/>
<mxCell id="1" parent="0"/>
<mxCell id="2" value="Bildframe" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="70" y="180" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="3" value="Bildframe" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="80" y="190" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="12" style="edgeStyle=none;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fontColor=#000000;" parent="1" source="4" target="5" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="4" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Bildframe&lt;br&gt;&lt;b&gt;&lt;font style=&quot;font-size: 14px;&quot; color=&quot;#b3b3b3&quot;&gt;(1280, 720, 3)&lt;/font&gt;&lt;/b&gt;&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="90" y="200" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="13" style="edgeStyle=none;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fontColor=#000000;" parent="1" source="5" target="6" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="5" value="&lt;font style=&quot;font-size: 14px;&quot; color=&quot;#000000&quot;&gt;Filter A&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#dae8fc;strokeColor=#6c8ebf;" parent="1" vertex="1">
<mxGeometry x="260" y="200" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="14" style="edgeStyle=none;html=1;fontColor=#000000;" parent="1" source="6" target="7" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="6" value="&lt;font style=&quot;font-size: 14px;&quot; color=&quot;#000000&quot;&gt;Filter B&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="410" y="200" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="18" style="edgeStyle=none;html=1;fontColor=#000000;" parent="1" source="7" target="17" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="7" value="&lt;font style=&quot;font-size: 14px;&quot; color=&quot;#000000&quot;&gt;Filter C&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="560" y="200" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="9" style="edgeStyle=none;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;fontColor=#000000;" parent="1" source="8" target="5" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="10" style="edgeStyle=none;html=1;fontColor=#000000;" parent="1" source="8" target="6" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="11" style="edgeStyle=none;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;fontColor=#000000;" parent="1" source="8" target="7" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="8" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Zustand&lt;/font&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;fontColor=#000000;" parent="1" vertex="1">
<mxGeometry x="440" y="80" width="60" height="80" as="geometry"/>
</mxCell>
<mxCell id="15" value="Bildframe" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="730" y="180" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="16" value="Bildframe" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="740" y="190" width="120" height="60" as="geometry"/>
</mxCell>
<mxCell id="17" value="&lt;font style=&quot;font-size: 14px;&quot;&gt;Resultat&lt;/font&gt;" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="750" y="200" width="120" height="60" as="geometry"/>
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>
\ No newline at end of file
Diagramme/Pipeline.png

48.2 KiB

Filter/IMG_23_05_2023_17_26_27.png

6.82 KiB

Filter/IMG_23_05_2023_17_32_16.png

7.49 KiB

Screenshots/Fenster_OpenCV_06_09_2023.png

1.06 MiB

Screenshots/Filter_3_06_09_2023.png

225 KiB

Screenshots/IMG_07_09_2023_16_08_04.png

1.18 MiB

Screenshots/IMG_07_09_2023_16_09_08.png

1.13 MiB

##############################################
# Modul: Computer Vision (SoSe23)
# Dozent: Prof. Dr-Ing. Gerhardt
#
# Erkennung eines Wasserstrahls
#
# Autoren: - Joel Steffens
# - Mohammend Ka
# - Midras Lappe
##############################################
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import datetime
import pandas as pd
import filters
import sys

# Command line: the video file is mandatory, seconds to skip optional.
argc = len(sys.argv)
if argc < 2 or argc > 3:
    print("Syntax: ./detect_waterstream.py <Video> [Seconds]")
    exit(1)

videoFile = sys.argv[1]
skipSeconds = 0
if argc == 3:
    skipSeconds = int(sys.argv[2])

# Open the capture exactly once, from the CLI argument. (The previous
# version also opened a hard-coded VIDEO path first and immediately
# discarded that capture; the duplicate `from datetime import datetime`
# has been removed as well.)
capture = cv2.VideoCapture(videoFile)
capture.set(cv2.CAP_PROP_POS_FRAMES, 30 * skipSeconds)  # assumes 30 fps

# State shared between the main loop and the filter pipeline.
last = None
frame = None
diff = None
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30
spanne = 2            # half-width of the x-window for median smoothing
def overlay_imgs(base, top):
topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
......@@ -36,41 +43,41 @@ def overlay_imgs(base, top):
return cv2.add(img1_bg, img2_fg)
def nothing_cb(val):
    """No-op trackbar callback; values are polled in the main loop instead."""
    pass

# Create the OpenCV preview window.
cv2.namedWindow(window, cv2.WINDOW_NORMAL)

# Trackbars for the tunable filter parameters.
cv2.createTrackbar('Canny_Min', window, 90, 255, nothing_cb)
cv2.createTrackbar('Canny_Max', window, 150, 255, nothing_cb)
cv2.createTrackbar('Diff_Mix', window, 40, 100, nothing_cb)
cv2.createTrackbar('HSV_Min', window, 50, 180, nothing_cb)
cv2.createTrackbar('HSV_Max', window, 100, 180, nothing_cb)

# Filter pipeline: (display name, filter function) stages applied in order.
# NOTE(review): the scrape interleaved pre- and post-commit lines here
# (e.g. filters.medianBlur vs. filters.median_blur); resolved to the
# committed (renamed) versions.
pipeline = [
    ('Original', filters.none),
    #('GreenChannel', filters.greenfilter),
    ('Gray scale', filters.grayscale),
    ('Contrast and Blur', filters.median_blur),
    ('Video Diff Multiple', filters.video_absdiff2),
    ('Green filter abs', filters.green_absfilter),
    ('Canny edge detection', filters.filter_canny),
    ('Morph close', filters.filter_close),
    # ('Gitter Filter', filters.gitter),
    # ('Morph open', filters.filter_open),
    ('Einzelne Rauswerfen', filters.sortOut),
    ('Point extraction', filters.points_extract),
    ('Polyfit lines', filters.points_overlay)
]

state = {}  # empty dictionary holding shared filter state

info = {
    'abs_diff': 2,          # default: 2+1 frames for the difference
    'dim': (1920, 1080),    # video dimensions
    'params': {}            # trackbar-driven filter parameters
}

result = None
......@@ -81,7 +88,7 @@ while capture.isOpened():
if ret == True:
frame, _ = filters.resize(None, frame, None)
# Apply
# Trackbar-Parameter auslesen und setzen
info['params']['mix'] = cv2.getTrackbarPos('Diff_Mix', window) / 100.0
info['params']['canny_min'] = cv2.getTrackbarPos('Canny_Min', window)
info['params']['canny_max'] = cv2.getTrackbarPos('Canny_Max', window)
......@@ -89,6 +96,7 @@ while capture.isOpened():
info['params']['hsv_max'] = cv2.getTrackbarPos('HSV_Max', window)
result = frame
image = result
for i, (name, filter) in enumerate(pipeline):
# Apply each filter
result, overlay = filter(info, result, state)
......@@ -107,26 +115,29 @@ while capture.isOpened():
cv2.imshow('Filter', image)
# Tastatur abfragen
code = cv2.waitKey(33) & 0xFF
# Option 1: Speichern (s)
if code == ord('s'):
now = datetime.now()
str = now.strftime("%d_%m_%Y_%H_%M_%S")
cv2.imwrite(f'Filter/IMG_{str}.png', result)
cv2.imwrite(f'Screenshots/IMG_{str}.png', image)
# Option 2: Nächster Filter (d)
elif code == ord('d'):
visible_filter_idx += 1
if visible_filter_idx >= len(pipeline):
visible_filter_idx = len(pipeline) - 1
# Option 3: Vorheriger Filter (a)
elif code == ord('a'):
visible_filter_idx -= 1
if visible_filter_idx < 0:
visible_filter_idx = 0
# Option 4: Programm beenden (q)
elif code & 0xFF == ord('q'):
break
else:
break
# Aufräumen
capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
from datetime import datetime
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # difference between consecutive frames
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100   # NOTE(review): referenced only in commented-out code
line_threshold = 30   # HoughLinesP vote threshold (trackbar-controlled)
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    # Build a binary mask from a grayscale copy of the overlay:
    # every pixel brighter than 10 counts as drawn content.
    top_gray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    _, mask = cv2.threshold(top_gray, 10, 255, cv2.THRESH_BINARY)
    inverse = cv2.bitwise_not(mask)
    # Black out the overlay region in the base image ...
    background = cv2.bitwise_and(base, base, mask=inverse)
    # ... and keep only the drawn region of the overlay.
    foreground = cv2.bitwise_and(top, top, mask=mask)
    return cv2.add(background, foreground)
def filter_image(image):
    """Detect line segments in a difference image.

    Returns a black BGR overlay with every detected segment drawn in
    blue (OpenCV BGR order).
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Canny edge detection on the difference image.
    image = cv2.Canny(image, min_threshold, max_threshold)
    # Close small gaps in the edges with an elliptical kernel.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    # Bug fix: HoughLinesP's second parameter is rho (accumulator distance
    # resolution in pixels), not a method flag. Passing
    # cv2.HOUGH_PROBABILISTIC silently used the enum's numeric value as rho.
    lines = cv2.HoughLinesP(image, 1, np.pi/360, line_threshold, minLineLength=10, maxLineGap=20)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            pts = np.array([[x1, y1], [x2, y2]], np.int32)
            cv2.polylines(overlay, [pts], False, (255, 0, 0), thickness=3)
    return overlay
# def img_callback(val):
# global img_threshold, result
# img_threshold = val
# if result is not None:
# filter_image(result)
# def max_callback(val):
# global max_threshold, result
# max_threshold = val
# if result is not None:
# filter_image(result)
def line_callback(val):
    """Trackbar callback: store the new Hough vote threshold and redraw."""
    global line_threshold, frame, diff
    line_threshold = val
    # Re-run the filter on the last difference image, if one exists yet.
    if diff is not None:
        overlay = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
# Preview window plus a trackbar for the Hough line threshold.
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    # NOTE(review): OpenCV decodes frames as BGR, so COLOR_RGB2GRAY swaps
    # the red/blue weights - confirm this is intended.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # renamed: `filter` shadowed the builtin
    #frame = frame[:,:,2] # Blue Channel
    gray = cv2.medianBlur(gray, 3)
    if last is not None:
        diff = cv2.absdiff(gray, last)  # frame-to-frame difference
        overlay = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
    last = gray
    code = cv2.waitKey(33) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import datetime
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # difference between consecutive frames
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30   # trackbar-controlled line threshold
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    # Every pixel brighter than 10 counts as drawn overlay content.
    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Black out the overlay area in the base image.
    img1_bg = cv2.bitwise_and(base,base,mask = mask_inv)
    # Take only the drawn region from the overlay image.
    img2_fg = cv2.bitwise_and(top,top,mask = mask)
    return cv2.add(img1_bg, img2_fg)
def plot_points(x, y, px, py):
    """Debug helper: scatter raw points (px, py) in red plus the fitted curve (x, y)."""
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.scatter(px, py, c='r')
    ax.plot(x, y)
    ax.set_xlim([0, 1280])
    # y-axis inverted to match image coordinates (origin top-left).
    ax.set_ylim([720, 0])
    plt.show()
def find_points(image):
    """Fit a 4th-degree polynomial through all non-zero pixels of `image`.

    Returns int32 (x, y) samples of the fitted curve across the occupied
    x-range, or None if the image has no set pixels.
    """
    indices = np.where(image > 0)
    if indices[1].size > 0:
        # Only the x extremes are needed for the sample range; min/max
        # avoids sorting the whole coordinate array as the original did.
        x_min = indices[1].min()
        x_max = indices[1].max()
        # Fit y as a function of x (columns are x, rows are y).
        p = np.polyfit(indices[1], indices[0], 4)
        x = np.arange(x_min, x_max, 1)
        y = np.polyval(p, x)
        points = np.column_stack((x, y)).astype(np.int32)
        return points
    return None
def filter_image(image):
    """Edge-filter a difference image and fit a polynomial through the edges.

    Returns (overlay, edges): `overlay` is a black BGR image with the fitted
    curve drawn in blue (BGR order), `edges` the processed edge image.
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Canny edge detection on the difference image.
    image = cv2.Canny(image, min_threshold, max_threshold)
    # Close small holes with an elliptical kernel.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    # Minimum filter: erode with a rectangular kernel to remove noise.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    image = cv2.erode(image, kernel)
    # (A second, unused kernel assignment and dead commented-out Hough code
    # were removed here.)
    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # Bug fix: find_points returns None for an empty edge image; the
    # original passed [None] straight to polylines, which raises.
    if points is not None:
        cv2.polylines(overlay, [points], False, (255, 0, 0), thickness=3)
    return overlay, image
def sharpenImg(image):
    """Sharpen via unsharp masking: image*1.5 - medianBlur(image)*0.5."""
    # The median filter removes fine detail ...
    smoothed = cv2.medianBlur(image, 5)
    # ... subtracting it from a boosted original accentuates the edges.
    return cv2.addWeighted(image, 1.5, smoothed, -0.5, 0.0)
# def img_callback(val):
# global img_threshold, result
# img_threshold = val
# if result is not None:
# filter_image(result)
# def max_callback(val):
# global max_threshold, result
# max_threshold = val
# if result is not None:
# filter_image(result)
def line_callback(val):
    """Trackbar callback: store the new line threshold and refresh the view."""
    global line_threshold, frame, diff
    line_threshold = val
    if diff is not None:
        # Bug fix: filter_image returns (overlay, edge image); the original
        # bound the whole tuple to `overlay`, making overlay_imgs fail.
        overlay, _ = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
def calculate_function(overlay, image):
    """Fit a 4th-degree polynomial through the overlay's non-zero pixels and
    draw it onto `image` in place; returns the overlay unchanged.

    NOTE(review): polyfit is called with (rows, cols), i.e. axes swapped
    compared to find_points - confirm this is intended.
    """
    indices = np.where(overlay > [0])
    if((indices[0].size>0)):
        p=np.polyfit(indices[0],indices[1], 4)
        # Sample the polynomial across the full image width.
        x = np.arange(0, overlay.shape[1], 1)
        y=np.polyval(p,x)
        points=np.column_stack((x,y)).astype(np.int32)
        # Drop rows containing negative (off-image) coordinates.
        indices = np.where(points < 0)
        points = np.delete(points,indices,axis=0)
        print(points)
        cv2.polylines(image, [points], False, (255, 0, 0), thickness=3)
    # plt.figure()
    # plt.plot(x,y)
    # plt.savefig("test.png")
    # plt.show()
    # time.sleep(20)
    # print (p)
    return overlay
# Preview window plus a trackbar for the line threshold.
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

result = None
while capture.isOpened():
    # ret reports whether a frame could be read.
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    # NOTE(review): frames are BGR; COLOR_RGB2GRAY swaps the red/blue
    # weights - confirm this is intended.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # renamed: `filter` shadowed the builtin
    gray = sharpenImg(gray)
    if last is not None:
        diff = cv2.absdiff(gray, last)  # frame-to-frame difference
        overlay, image = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        #function_img=calculate_function(overlay,result)
        cv2.imshow('Filter', image)
        cv2.imshow('Rsult', result)  # NOTE(review): window-title typo kept so existing windows keep working
    last = gray
    code = cv2.waitKey(33) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
from datetime import datetime
from collections import deque
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # accumulated frame difference
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30   # trackbar-controlled line threshold
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    # Every pixel brighter than 10 counts as drawn overlay content.
    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Black out the overlay area in the base image.
    img1_bg = cv2.bitwise_and(base,base,mask = mask_inv)
    # Take only the drawn region from the overlay image.
    img2_fg = cv2.bitwise_and(top,top,mask = mask)
    return cv2.add(img1_bg, img2_fg)
def find_points(image):
    """Fit a 4th-degree polynomial through all non-zero pixels of `image`.

    Returns int32 (x, y) samples of the fitted curve across the occupied
    x-range, or None if the image has no set pixels.
    """
    indices = np.where(image > 0)
    if indices[1].size > 0:
        # Only the x extremes are needed for the sample range; min/max
        # avoids sorting the whole coordinate array as the original did.
        x_min = indices[1].min()
        x_max = indices[1].max()
        # Fit y as a function of x (columns are x, rows are y).
        p = np.polyfit(indices[1], indices[0], 4)
        x = np.arange(x_min, x_max, 1)
        y = np.polyval(p, x)
        points = np.column_stack((x, y)).astype(np.int32)
        return points
    return None
def filter_image(image):
    """Canny + closing on a difference image, then fit a polynomial.

    Returns (overlay, edges): `overlay` is a black BGR image with the fitted
    curve drawn on it, `edges` the processed edge image.
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Canny edge detection on the difference image.
    image = cv2.Canny(image, min_threshold, max_threshold)
    # Close small holes with an elliptical kernel.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # Bug fix: find_points returns None for an empty edge image; the
    # original passed [None] straight to polylines, which raises.
    if points is not None:
        cv2.polylines(overlay, [points], False, (255, 0, 0), thickness=2)
    return overlay, image
# def img_callback(val):
# global img_threshold, result
# img_threshold = val
# if result is not None:
# filter_image(result)
# def max_callback(val):
# global max_threshold, result
# max_threshold = val
# if result is not None:
# filter_image(result)
def line_callback(val):
    """Trackbar callback: store the new line threshold and refresh the view."""
    global line_threshold, frame, diff
    line_threshold = val
    if diff is not None:
        # Bug fix: filter_image returns (overlay, edge image); the original
        # bound the whole tuple to `overlay`, making overlay_imgs fail.
        overlay, _ = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

# Ring buffer of the last 4 preprocessed frames for the multi-frame difference.
buffer = deque(maxlen=4)
mix = 0.8  # NOTE(review): currently unused - confirm before removing

while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # renamed: `filter` shadowed the builtin
    gray = cv2.medianBlur(gray, 3)
    buffer.append(gray)
    # NOTE(review): always true after append; with one frame the inner loop
    # simply runs zero times and diff stays black.
    if len(buffer) >= 1:
        diff = np.zeros((gray.shape[0], gray.shape[1], 1), np.uint8)
        # Accumulate pairwise differences of consecutive buffered frames.
        for i in range(0, len(buffer) - 1):
            diff_frame = cv2.absdiff(buffer[i], buffer[i+1])
            diff = cv2.addWeighted(diff, 1.0, diff_frame, 1.0, 0.0)
        overlay, image = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, image)
        cv2.imshow('overlay', overlay)
        cv2.imshow('result', result)
    code = cv2.waitKey(33) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import datetime
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # difference between consecutive frames
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30   # trackbar-controlled line threshold
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    # Every pixel brighter than 10 counts as drawn overlay content.
    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Black out the overlay area in the base image.
    img1_bg = cv2.bitwise_and(base,base,mask = mask_inv)
    # Take only the drawn region from the overlay image.
    img2_fg = cv2.bitwise_and(top,top,mask = mask)
    return cv2.add(img1_bg, img2_fg)
def plot_points(x, y, px, py):
    """Debug helper: scatter raw points (px, py) in red plus the fitted curve (x, y)."""
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.scatter(px, py, c='r')
    ax.plot(x, y)
    ax.set_xlim([0, 1280])
    # y-axis inverted to match image coordinates (origin top-left).
    ax.set_ylim([720, 0])
    plt.show()
def find_points(image):
    """Fit a 4th-degree polynomial through all non-zero pixels of `image`.

    Returns int32 (x, y) samples of the fitted curve across the occupied
    x-range, or None if the image has no set pixels.
    """
    indices = np.where(image > 0)
    if indices[1].size > 0:
        # Only the x extremes are needed for the sample range; min/max
        # avoids sorting the whole coordinate array as the original did.
        x_min = indices[1].min()
        x_max = indices[1].max()
        # Fit y as a function of x (columns are x, rows are y).
        p = np.polyfit(indices[1], indices[0], 4)
        x = np.arange(x_min, x_max, 1)
        y = np.polyval(p, x)
        points = np.column_stack((x, y)).astype(np.int32)
        return points
    return None
def filter_image(image):
    """Morphologically clean a difference image, run Canny, and fit a
    polynomial through the edge pixels.

    Returns (overlay, edges): `overlay` is a black BGR image with the fitted
    curve drawn on it, `edges` the processed edge image.
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Small rectangular kernel for opening/closing.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
    # Opening removes isolated noise pixels.
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # Closing fills small holes.
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    # Dilate to thicken the remaining structures.
    kernel2 = np.ones((3,3), np.uint8)
    image = cv2.dilate(image, kernel2, iterations = 3)
    # Erode again with the small kernel.
    image = cv2.erode(image, kernel)
    # Edge detection on the cleaned mask.
    image = cv2.Canny(image, min_threshold, max_threshold)
    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # Bug fix: find_points returns None for an empty edge image; the
    # original passed [None] straight to polylines, which raises.
    if points is not None:
        cv2.polylines(overlay, [points], False, (255, 0, 0), thickness=7)
    return overlay, image
def sharpenImg(image):
    """Sharpen via unsharp masking: image*1.5 - medianBlur(image)*0.5."""
    # The median filter removes fine detail ...
    smoothed = cv2.medianBlur(image, 5)
    # ... subtracting it from a boosted original accentuates the edges.
    return cv2.addWeighted(image, 1.5, smoothed, -0.5, 0.0)
def line_callback(val):
    """Trackbar callback: store the new line threshold and refresh the view."""
    global line_threshold, frame, diff
    line_threshold = val
    if diff is not None:
        # Bug fix: filter_image returns (overlay, edge image); the original
        # bound the whole tuple to `overlay`, making overlay_imgs fail.
        overlay, _ = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
def calculate_function(overlay, image):
    """Fit a 4th-degree polynomial through the overlay's non-zero pixels and
    draw it onto `image` in place; returns the overlay unchanged.

    NOTE(review): polyfit is called with (rows, cols), i.e. axes swapped
    compared to find_points - confirm this is intended.
    """
    indices = np.where(overlay > [0])
    if((indices[0].size>0)):
        p=np.polyfit(indices[0],indices[1], 4)
        # Sample the polynomial across the full image width.
        x = np.arange(0, overlay.shape[1], 1)
        y=np.polyval(p,x)
        points=np.column_stack((x,y)).astype(np.int32)
        # Drop rows containing negative (off-image) coordinates.
        indices = np.where(points < 0)
        points = np.delete(points,indices,axis=0)
        print(points)
        cv2.polylines(image, [points], False, (255, 0, 0), thickness=3)
    # plt.figure()
    # plt.plot(x,y)
    # plt.savefig("test.png")
    # plt.show()
    # time.sleep(20)
    # print (p)
    return overlay
# Preview window plus a trackbar for the line threshold.
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

result = None
while capture.isOpened():
    # ret reports whether a frame could be read.
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    # NOTE(review): frames are BGR; COLOR_RGB2GRAY swaps the red/blue
    # weights - confirm this is intended.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # renamed: `filter` shadowed the builtin
    gray = sharpenImg(gray)
    if last is not None:
        diff = cv2.absdiff(gray, last)  # frame-to-frame difference
        overlay, image = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        #function_img=calculate_function(overlay,result)
        cv2.imshow('image', image)
        cv2.imshow(window, result)
    last = gray
    code = cv2.waitKey(33) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import datetime
import pandas as pd
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230414_13_41_55_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # difference between consecutive frames
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30   # trackbar-controlled line threshold
spanne = 2            # half-width of the x-window for median smoothing in find_points
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    # Every pixel brighter than 10 counts as drawn overlay content.
    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Black out the overlay area in the base image.
    img1_bg = cv2.bitwise_and(base,base,mask = mask_inv)
    # Take only the drawn region from the overlay image.
    img2_fg = cv2.bitwise_and(top,top,mask = mask)
    return cv2.add(img1_bg, img2_fg)
def plot_points(x, y, px, py):
    """Debug helper: scatter raw points (px, py) in red plus the fitted curve (x, y)."""
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.scatter(px, py, c='r')
    ax.plot(x, y)
    ax.set_xlim([0, 1280])
    # y-axis inverted to match image coordinates (origin top-left).
    ax.set_ylim([720, 0])
    plt.show()
def find_points(image):
    """Fit a 6th-degree polynomial through the non-zero pixels of `image`.

    For every unique x column the median y within a +/-`spanne` window is
    taken first to suppress outliers. Returns int32 (x, y) samples of the
    fit, or None if the image has no set pixels.
    """
    indices = np.where(image > 0)
    if (indices[1].size > 0):
        x_so = indices[1]   # column (x) coordinates
        y_so = indices[0]   # row (y) coordinates
        list_xy = np.column_stack((x_so, y_so)).astype(np.int32)
        # list_xy = np.sort(list_xy, axis=0)
        # print( list_xy)
        df = pd.DataFrame(list_xy,columns=['x','y'])
        df = df.sort_values(by=['x'], ascending=True)
        n_list = []
        for el in df.x.unique():
            # Median y over the window [el - spanne, el + spanne].
            med = (df.y.where(df.x >= el-spanne).where(el+spanne >= df.x)).median()
            n_list.append([el,med])
        n_list = np.array(n_list)
        p = np.polyfit(n_list[:,0], n_list[:,1], 6)
        # Sample the fit between the smallest and largest observed x.
        x = np.arange(n_list[:,0][0], n_list[:,0][-1], 1)
        y = np.polyval(p,x)
        points = np.column_stack((x, y)).astype(np.int32)
        return points
    return None
def filter_image(image):
    """Clean up a difference image, detect edges and fit a polynomial.

    Returns (overlay, edges): `overlay` is a black BGR image with the fitted
    curve drawn on it, `edges` the processed edge image.
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Tiny elliptical kernel; a (1,1) element makes the opening a near no-op.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1,1))
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # NOTE(review): passing plain tuples like (3,3)/(5,5) as the kernel
    # makes OpenCV interpret them as a tiny kernel array, not a 3x3/5x5
    # structuring element - kept byte-identical here, but confirm intent.
    image = cv2.erode(image, (3,3))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, (5,5))
    # Dilate slightly to reconnect the stream contour.
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2))
    image = cv2.dilate(image, kernel2, iterations = 1)
    # Edge detection on the cleaned image.
    image = cv2.Canny(image, min_threshold, max_threshold)
    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # Bug fix: find_points returns None for an empty edge image; the
    # original passed [None] straight to polylines, which raises.
    if points is not None:
        cv2.polylines(overlay, [points], False, (255, 0, 0), thickness=6)
    return overlay, image
def sharpenImg(image):
    """Sharpen via unsharp masking: image*1.5 - medianBlur(image)*0.5."""
    # The median filter removes fine detail ...
    smoothed = cv2.medianBlur(image, 7)
    # ... subtracting it from a boosted original accentuates the edges.
    return cv2.addWeighted(image, 1.5, smoothed, -0.5, 0.0)
def line_callback(val):
    """Trackbar callback: store the new line threshold and refresh the view."""
    global line_threshold, frame, diff
    line_threshold = val
    if diff is not None:
        # Bug fix: filter_image returns (overlay, edge image); the original
        # bound the whole tuple to `overlay`, making overlay_imgs fail.
        overlay, _ = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
# Preview window plus a trackbar for the line threshold.
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

result = None
while capture.isOpened():
    # ret reports whether a frame could be read.
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    # Sharpen the full-colour frame (no grayscale conversion in this
    # version); renamed from `filter`, which shadowed the builtin.
    sharpened = sharpenImg(frame)
    if last is not None:
        diff = cv2.absdiff(sharpened, last)  # frame-to-frame difference
        cv2.imshow("diff", diff)
        overlay, image = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow("image", image)
        cv2.imshow("result", result)
    last = sharpened
    code = cv2.waitKey(10) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import datetime
import pandas as pd
# Input video to analyse.
VIDEO = 'Videomaterial/WIN_20230602_14_42_19_Pro.mp4'
capture = cv2.VideoCapture(VIDEO)

# State shared between the main loop and the trackbar callback.
last = None    # previous preprocessed frame
frame = None   # current raw frame
diff = None    # difference between consecutive frames
window = 'Filter'

# Settings
min_threshold = 30    # Canny lower threshold
max_threshold = 110   # Canny upper threshold
img_threshold = 100
line_threshold = 30   # trackbar-controlled line threshold
spanne = 2            # half-width of the x-window for median smoothing in find_points
def overlay_imgs(base, top):
    """Composite the non-black pixels of `top` over `base` and return the result."""
    topgray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    # Every pixel brighter than 10 counts as drawn overlay content.
    _, mask = cv2.threshold(topgray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Black out the overlay area in the base image.
    img1_bg = cv2.bitwise_and(base,base,mask = mask_inv)
    # Take only the drawn region from the overlay image.
    img2_fg = cv2.bitwise_and(top,top,mask = mask)
    return cv2.add(img1_bg, img2_fg)
def plot_points(x, y, px, py):
    """Debug helper: scatter raw points (px, py) in red plus the fitted curve (x, y)."""
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.scatter(px, py, c='r')
    ax.plot(x, y)
    ax.set_xlim([0, 1280])
    # y-axis inverted to match image coordinates (origin top-left).
    ax.set_ylim([720, 0])
    plt.show()
def find_points(image):
    """Fit a 6th-degree polynomial through the non-zero pixels of `image`.

    The y-values are median-smoothed over a +/-`spanne` x-window, sampling
    only every second unique x column. Returns int32 (x, y) samples of the
    fit, or None if the image has no set pixels.
    """
    indices = np.where(image > 0)
    if (indices[1].size > 0):
        x_so = indices[1]   # column (x) coordinates
        y_so = indices[0]   # row (y) coordinates
        list_xy = np.column_stack((x_so, y_so)).astype(np.int32)
        # list_xy = np.sort(list_xy, axis=0)
        # print( list_xy)
        df = pd.DataFrame(list_xy,columns=['x','y'])
        df = df.sort_values(by=['x'], ascending=True)
        n_list = []
        df_un = df.x.unique()
        # Only every second unique x column, to speed up the median loop.
        for el in df_un[::2]:
            # Median y over the window [el - spanne, el + spanne].
            med = (df.y.where(df.x >= el-spanne).where(el+spanne >= df.x)).median()
            n_list.append([el,med])
        n_list = np.array(n_list)
        p = np.polyfit(n_list[:,0], n_list[:,1], 6)
        # Sample the fit between the smallest and largest observed x.
        x = np.arange(n_list[:,0][0], n_list[:,0][-1], 1)
        y = np.polyval(p,x)
        points = np.column_stack((x, y)).astype(np.int32)
        return points
    return None
def filter_image(image):
    """Clean up a difference image, detect edges and fit a polynomial.

    Returns (overlay, edges): `overlay` is a black BGR image with the fitted
    curve drawn on it, `edges` the processed edge image.
    """
    global min_threshold, max_threshold, line_threshold, result, window
    image = image.copy()
    # Tiny elliptical kernel; a (1,1) element makes the opening a near no-op.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1,1))
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # NOTE(review): passing plain tuples like (3,3)/(5,5) as the kernel
    # makes OpenCV interpret them as a tiny kernel array, not a 3x3/5x5
    # structuring element - kept byte-identical here, but confirm intent.
    image = cv2.erode(image, (3,3))
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, (5,5))
    # Dilate slightly to reconnect the stream contour.
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2))
    image = cv2.dilate(image, kernel2, iterations = 1)
    # Edge detection on the cleaned image.
    image = cv2.Canny(image, min_threshold, max_threshold)
    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # Bug fix: find_points returns None for an empty edge image; the
    # original passed [None] straight to polylines, which raises.
    if points is not None:
        cv2.polylines(overlay, [points], False, (255, 0, 0), thickness=6)
    return overlay, image
def sharpenImg(image):
    """Sharpen via unsharp masking: image*1.5 - medianBlur(image)*0.5."""
    # The median filter removes fine detail ...
    smoothed = cv2.medianBlur(image, 7)
    # ... subtracting it from a boosted original accentuates the edges.
    return cv2.addWeighted(image, 1.5, smoothed, -0.5, 0.0)
def line_callback(val):
    """Trackbar callback: store the new line threshold and refresh the view."""
    global line_threshold, frame, diff
    line_threshold = val
    if diff is not None:
        # Bug fix: filter_image returns (overlay, edge image); the original
        # bound the whole tuple to `overlay`, making overlay_imgs fail.
        overlay, _ = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow(window, result)
# Preview window plus a trackbar for the line threshold.
cv2.namedWindow(window, cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar('Line Threshold: ', window, line_threshold, 100, line_callback)

result = None
while capture.isOpened():
    # ret reports whether a frame could be read.
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    # Sharpen the full-colour frame (no grayscale conversion in this
    # version); renamed from `filter`, which shadowed the builtin.
    sharpened = sharpenImg(frame)
    if last is not None:
        diff = cv2.absdiff(sharpened, last)  # frame-to-frame difference
        cv2.imshow("diff", diff)
        overlay, image = filter_image(diff)
        result = overlay_imgs(frame, overlay)
        cv2.imshow("image", image)
        cv2.imshow("result", result)
    last = sharpened
    code = cv2.waitKey(10) & 0xFF
    if code == ord('s'):
        # Timestamped save; `stamp` avoids shadowing the builtin `str`.
        stamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Filter/IMG_{stamp}.png', result)
    elif code == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
\ No newline at end of file
......@@ -15,6 +15,7 @@ def none(info, image, state):
bufferOrginal.append(image)
return image, False
def plot_points(x, y, px, py):
# currentfig
# if currentfig is not None:
......@@ -185,7 +186,7 @@ def grayscale(info, image, state):
res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
return res, False
def medianBlur(info, image, state):
def median_blur(info, image, state):
# median of all the pixels under the kernel area
blur = cv.medianBlur(image, 7)
# adding tow images
......@@ -302,8 +303,8 @@ def polyfit(n_list,n=4):
return None
min_threshold = 30
max_threshold = 110
#min_threshold = 30
#max_threshold = 110
def filter_canny(info, image, state):
min_threshold = info['params']['canny_min']
......@@ -397,7 +398,7 @@ def video_absdiff2(info, image, state):
if len(buffer) >= 2:
diff = np.zeros((image.shape[0], image.shape[1], 1), np.uint8)
for i in range(0, len(buffer) - 1):
diff_frame = cv.absdiff(buffer[i], buffer[i+1]) # Difference
diff_frame = cv.absdiff(buffer[i], buffer[i+1])
diff = cv.addWeighted(diff, 1.0, diff_frame, 1.0 - mix, 0.0)
return diff, False
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment