##############################################
# Module: Computer Vision (SoSe23)
# Lecturer: Prof. Dr.-Ing. Gerhardt
#
# Detection of a water jet
#
# Authors: - Joel Steffens
#          - Mohammad Khaleeliyeh
#          - Midras Lappe
##############################################
import cv2 as cv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
from collections import deque

currentfig = None


# Pass-through filter; it only records the original frame in a ring buffer
def none(info, image, state):
    if 'bufferOrginal' not in state:
        state['bufferOrginal'] = deque(maxlen=info['abs_diff'])

    bufferOrginal = state['bufferOrginal']
    bufferOrginal.append(image)
    return image, False


def plot_points(x, y, px, py):
    # currentfig
    # if currentfig is not None:
    #     plt.close(currentfig)
    plt.close()
    fig = plt.figure()
    # currentfig = fig
    ax = fig.add_subplot()
    ax.scatter(px, py, c='r')
    ax.plot(x, y)
    ax.set_xlim([0, 1280])
    ax.set_ylim([720, 0])
    plt.show()


# Grid filter ("Gitter"): divide the 1280x720 frame into 40x40 cells, keep cells
# with enough edge pixels and at least three occupied neighbour cells, and fit a
# trend line per remaining cell (results are plotted for debugging).
def gitter(info, image, state):
    list_of_lines = []

    # image range ((1280, 720))
    fig = plt.figure()
    ax = fig.add_subplot()
    ax.set_xlim([0, 1280])
    ax.set_ylim([720, 0])
    ax.grid(True, which='major')

    diff_x = 40
    diff_y = 40
    threshold = 5
    haspoints = np.zeros([int(1280 / diff_x), int(720 / diff_y)])

    ax.set_xticks(range(0, 1280, diff_x))
    ax.set_yticks(range(0, 720, diff_y))

    # First pass: mark every grid cell that contains enough non-zero pixels
    for x in range(0, 1280, diff_x):
        for y in range(0, 720, diff_y):
            subimg = image[y:y+diff_y, x:x+diff_x]
            indices = np.where(subimg > 0)
            if indices[1].size > threshold:
                haspoints[int(x / diff_x), int(y / diff_y)] = 1

    neighbours = findneighbours(haspoints)

    # Second pass: fit a trend line in every cell with more than two occupied neighbours
    for x in range(0, 1280, diff_x):
        for y in range(0, 720, diff_y):
            subimg = image[y:y+diff_y, x:x+diff_x]
            indices = np.where(subimg > 0)
            if neighbours[int(x / diff_x), int(y / diff_y)] > 2:
                x_so = indices[1] + x
                y_so = indices[0] + y
                list_xy = np.column_stack((x_so, y_so)).astype(np.int32)

                result = trendline(list_xy, x, x + diff_x, y, y + diff_y)
                if result is None:
                    print('Poly xy none')
                    continue

                complete, poly_xy = result
                list_of_lines.append(poly_xy)

                ax.scatter(x_so, y_so, c='r')
                #ax.plot(complete[:,0], complete[:,1], 'g')
                ax.plot(poly_xy[:,0], poly_xy[:,1], 'b')
                #print(complete)
                #plot_points(poly_xy[:,0], poly_xy[:,1], x_so, y_so)

    return image, False


# Sort-out filter: blank every 40x40 cell that has fewer than three occupied
# neighbour cells, removing isolated noise from the edge image.
def sortOut(info, image, state):
    # image range ((1280, 720))
    diff_x = 40
    diff_y = 40
    threshold = 30
    haspoints = np.zeros([int(1280 / diff_x), int(720 / diff_y)])

    for x in range(0, 1280, diff_x):
        for y in range(0, 720, diff_y):
            subimg = image[y:y+diff_y, x:x+diff_x]
            indices = np.where(subimg > 0)
            if indices[1].size > threshold:
                haspoints[int(x / diff_x), int(y / diff_y)] = 1

    neighbours = findneighbours(haspoints)

    for x in range(0, 1280, diff_x):
        for y in range(0, 720, diff_y):
            if neighbours[int(x / diff_x), int(y / diff_y)] < 3:
                image[y:y+diff_y, x:x+diff_x] = 0

    return image, False


# Count, for every occupied grid cell, how many of the surrounding cells are occupied
def findneighbours(haspoints):
    #print(haspoints)
    #flat = np.ravel(haspoints)
    # Kernel filter for the neighbourhood (surrounding cells)
    kernel = np.ones([3, 3])  # 3x3 kernel of ones
    # Sum over the surrounding cells via 2D convolution
    result = signal.convolve2d(haspoints, kernel, mode='same')
    result[result < 3] = 0
    # Only keep counts for cells that are occupied themselves
    result = result * haspoints
    return result
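

# Illustrative sketch (not from the original code): neighbour counting on a tiny
# occupancy grid. Each occupied cell receives the number of occupied cells in its
# 3x3 neighbourhood (including itself); counts below 3 are zeroed, so the isolated
# cell in the lower-right corner drops out while the cluster keeps its counts.
def _demo_findneighbours():
    grid = np.array([[1, 1, 0],
                     [1, 0, 0],
                     [0, 0, 1]])
    return findneighbours(grid)
    # -> array of floats: [[3, 3, 0],
    #                      [3, 0, 0],
    #                      [0, 0, 0]]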


# Remove green areas from the image (cut the green colour range)
def greenfilter(info, image, state):
    # Convert the image to the HSV colour space
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)

    # Define the green colour range in HSV
    lower_green = np.array([40, 10, 10])
    upper_green = np.array([80, 240, 240])

    # Create a mask that selects the green colour range
    mask = cv.inRange(hsv, lower_green, upper_green)

    # Invert the mask to exclude the green colour range
    inverted_mask = cv.bitwise_not(mask)

    # Apply the mask to the image
    res = cv.bitwise_and(image, image, mask=inverted_mask)
    return res, False


# Build the green mask for a single frame, with hue bounds taken from the parameters
def greenfilter_mask(image, info):
    min_threshold = info['params']['hsv_min']
    max_threshold = info['params']['hsv_max']

    # Convert the image to the HSV colour space
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)

    # Define the green colour range in HSV
    lower_green = np.array([min_threshold, 10, 10])
    upper_green = np.array([max_threshold, 240, 240])

    # Create a mask that selects the green colour range
    mask = cv.inRange(hsv, lower_green, upper_green)
    return mask
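

# Illustrative sketch (not part of the original filters): shows the info/params
# structure that greenfilter_mask expects. The concrete hue bounds below are just
# example values, not calibrated ones; the keys 'params', 'hsv_min' and 'hsv_max'
# are the ones read above.
def _demo_greenfilter_mask():
    dummy = np.zeros((720, 1280, 3), np.uint8)    # black BGR test frame
    info = {'params': {'hsv_min': 40, 'hsv_max': 80}}
    mask = greenfilter_mask(dummy, info)          # 0/255 mask of "green" pixels
    return mask                                   # all zeros for the black frame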


# Combine the green masks of all buffered frames and remove those areas from the current image
def green_absfilter(info, image, state):
    bufferOrginal = state['bufferOrginal']

    res = None
    for img in bufferOrginal:
        mask = greenfilter_mask(img, info)
        if res is None:
            res = mask
        else:
            res = cv.bitwise_or(res, mask)

    res_inv = cv.bitwise_not(res)
    res_img = cv.bitwise_and(image, image, mask=res_inv)
    return res_img, False


# Scale the image to different dimensions
def resize(info, image, state):
    res = cv.resize(image, info['scaled'])
    return res, False


# Convert the image to grayscale
def grayscale(info, image, state):
    res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    return res, False


# Increase the sharpness of the image (unsharp masking)
def median_blur(info, image, state):
    # Apply a median blur to the image
    blur = cv.medianBlur(image, 7)

    # Combine both images: weight the original up and subtract the blur
    sharp = cv.addWeighted(image, 1.5, blur, -0.5, 0.0)
    return sharp, False


spanne = 5


# Extract one representative point per x position: for every second unique x value,
# take the median y of all edge pixels within +/- 'spanne' pixels in x.
def find_points(image):
    indices = np.where(image > 0)
    if indices[1].size > 0:
        x_so = indices[1]
        y_so = indices[0]
        list_xy = np.column_stack((x_so, y_so)).astype(np.int32)
        # list_xy = np.sort(list_xy, axis=0)
        # print(list_xy)
        df = pd.DataFrame(list_xy, columns=['x', 'y'])
        df = df.sort_values(by=['x'], ascending=True)

        n_list = []
        df_un = df.x.unique()
        for el in df_un[::2]:
            med = (df.y.where(df.x >= el - spanne).where(el + spanne >= df.x)).median()
            n_list.append([el, med])

        n_list = np.array(n_list).astype(np.int32)
        return n_list

    return None
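

# Illustrative sketch (not from the original code): find_points on a tiny synthetic
# edge image. Non-zero pixels are grouped around every second unique x value
# (+/- 'spanne' pixels) and the median y of each group is returned.
def _demo_find_points():
    img = np.zeros((720, 1280), np.uint8)
    img[100:105, 200] = 255   # short vertical run of edge pixels at x = 200
    img[110:115, 210] = 255   # and another one at x = 210
    return find_points(img)   # -> [[200, 102]]: x = 200 with the median y of 100..104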


def polyfit2(n_list, xs, xe, ys, ye):
    if n_list is not None:
        p = np.polyfit(n_list[:, 0], n_list[:, 1], 1)
        x = np.arange(xs, xe, 1)
        y = np.polyval(p, x)

        points = np.column_stack((x, y)).astype(np.int32)
        points = points[(y >= ys) & (y <= ye)]
        return points

    return None


# Fit a straight line through the points and clip it to the rectangle
# x1 <= x <= x2, y1 <= y <= y2 (one grid cell). Returns the four candidate
# intersection points and the two chosen end points, or None.
def trendline(n_list, x1, x2, y1, y2):
    if n_list is not None and len(n_list) >= 2:
        m, b = np.polyfit(n_list[:, 0], n_list[:, 1], 1)
        # f_n(x) = m*x + b
        # f_i(y) = (y - b) / m
        # x1 < x2 and y1 < y2

        # First two candidates (left border x1 and right border x2)
        y_f1 = m*x1 + b  # (x1, y_f1)
        y_f2 = m*x2 + b  # (x2, y_f2)

        # Last two candidates (top border y1 and bottom border y2)
        if m != 0:
            x_f1 = (y1 - b) / m  # (x_f1, y1)
            x_f2 = (y2 - b) / m  # (x_f2, y2)
        else:
            x_f1 = x1
            x_f2 = x2

        # Intersection with the vertical border (left)
        if y_f1 >= y1 and y_f1 <= y2:
            p1 = [x1, y_f1]
        # Intersection with the horizontal border (top)
        elif x_f1 >= x1 and x_f1 <= x2:
            p1 = [x_f1, y1]
        # Intersection with the horizontal border (bottom)
        elif x_f2 >= x1 and x_f2 <= x2:
            p1 = [x_f2, y2]
        else:
            raise ValueError('Not possible')

        # Intersection with the vertical border (right)
        if y_f2 >= y1 and y_f2 <= y2:
            p2 = [x2, y_f2]
        # Intersection with the horizontal border (top)
        elif x_f1 >= x1 and x_f1 <= x2:
            p2 = [x_f1, y1]
        # Intersection with the horizontal border (bottom)
        elif x_f2 >= x1 and x_f2 <= x2:
            p2 = [x_f2, y2]
        else:
            raise ValueError('Not possible')

        return [np.array([[x1, y_f1], [x2, y_f2], [x_f1, y1], [x_f2, y2]]), np.array([p1, p2])]

    return None
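

# Illustrative sketch (not from the original code): clip the fitted trend line to one
# 40x40 grid cell. With points lying on y = x inside the cell starting at (80, 80),
# the clipped segment runs (up to floating point error) from (80, 80) to (120, 120).
def _demo_trendline():
    pts = np.array([[80, 80], [100, 100], [120, 120]])
    candidates, segment = trendline(pts, 80, 120, 80, 120)
    return segment            # -> approximately [[80, 80], [120, 120]]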


# Fit an n-th degree polynomial to the points and sample it over their x range
def polyfit(n_list, n=4):
    if n_list is not None:
        p = np.polyfit(n_list[:, 0], n_list[:, 1], n)
        x = np.arange(n_list[:, 0][0], n_list[:, 0][-1], 1)
        y = np.polyval(p, x)

        points = np.column_stack((x, y)).astype(np.int32)
        return points

    return None


# Former global Canny thresholds; the filters now read them from info['params']
#min_threshold = 30
#max_threshold = 110


# Canny edge detection with thresholds taken from the parameters
def filter_canny(info, image, state):
    min_threshold = info['params']['canny_min']
    max_threshold = info['params']['canny_max']
    image = cv.Canny(image, min_threshold, max_threshold)
    return image, False


# Morphological opening with a 3x3 elliptic structuring element (removes small specks)
def filter_open(info, image, state):
    ksize = (3, 3)
    # kernel = np.ones(ksize, np.uint8)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, ksize)
    image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel)
    return image, False


# Morphological closing with a 3x3 elliptic structuring element (fills small holes)
def filter_close(info, image, state):
    ksize = (3, 3)
    # kernel = np.ones(ksize, np.uint8)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, ksize)
    image = cv.morphologyEx(image, cv.MORPH_CLOSE, kernel)
    return image, False


# Extract representative points from the edge image and draw them into a blank overlay
def points_extract(info, image, state):
    points = find_points(image)
    if points is None:
        return None, False

    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # points holds (x, y) pairs; the overlay is indexed as [row, column] = [y, x]
    for px, py in points:
        if py < image.shape[0] and px < image.shape[1]:
            overlay[py, px] = (255, 0, 0)
    #cv.polylines(overlay, [points], False, (255, 0, 0), thickness=1)

    state['points'] = points
    return overlay, False


# Fit a polynomial through the previously extracted points and draw it as an overlay
def points_overlay(info, image, state):
    points = state['points']
    poly_points = polyfit(points)

    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    # draw the fitted curve on the overlay
    cv.polylines(overlay, [poly_points], False, (255, 0, 0), thickness=5)
    return overlay, True


# Combined filter: morphology, Canny edge detection and point extraction in one step
def filter_all(info, image, state):
    image = image.copy()

    # construct an elliptic kernel and apply an opening operation
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (1, 1))
    image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel)

    # perform erosion on the image with a 3x3 kernel
    image = cv.erode(image, np.ones((3, 3), np.uint8))

    # Closing operation / closing small holes (5x5 kernel)
    # image = cv.morphologyEx(image, cv.MORPH_CLOSE, (1,1))
    image = cv.morphologyEx(image, cv.MORPH_CLOSE, np.ones((5, 5), np.uint8))
    # image = cv.morphologyEx(image, cv.MORPH_CLOSE, (4,4))

    # dilation with a 2x2 elliptic kernel
    kernel2 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2, 2))
    image = cv.dilate(image, kernel2, iterations=1)

    # Use Canny edge detection (thresholds from the parameters)
    min_threshold = info['params']['canny_min']
    max_threshold = info['params']['canny_max']
    image = cv.Canny(image, min_threshold, max_threshold)

    points = find_points(image)
    overlay = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    if points is not None:
        # draw the extracted points as a polyline on the overlay
        cv.polylines(overlay, [points], False, (255, 0, 0), thickness=6)
    return overlay, True


# Accumulate the absolute differences between consecutive frames in the buffer
def video_absdiff2(info, image, state):
    if 'buffer' not in state:
        state['buffer'] = deque(maxlen=info['abs_diff'])

    buffer = state['buffer']
    mix = info['params']['mix']
    buffer.append(image)

    if len(buffer) >= 2:
        # accumulator with the same shape and type as the input frames
        diff = np.zeros_like(image)
        for i in range(0, len(buffer) - 1):
            diff_frame = cv.absdiff(buffer[i], buffer[i+1])
            # blend each consecutive-frame difference in with weight (1 - mix)
            diff = cv.addWeighted(diff, 1.0, diff_frame, 1.0 - mix, 0.0)
        return diff, False
    else:
        return None, False


# Absolute difference between the current frame and the previous one
def video_absdiff(info, image, state):
    if 'last' not in state or state['last'] is None:
        state['last'] = image
        return None, False

    diff = cv.absdiff(image, state['last'])
    state['last'] = image
    return diff, False
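

# Hedged sketch (not part of the original file): every filter above follows the same
# convention, taking (info, image, state) and returning (image, flag), where the flag
# appears to mark overlay outputs. The real pipeline driver is not in this module, so
# the chain order and the parameter values below are only assumptions for illustration;
# the dict keys match the ones the filters above actually read.
def _demo_pipeline(frame):
    info = {
        'scaled': (1280, 720),
        'abs_diff': 5,
        'params': {'hsv_min': 40, 'hsv_max': 80, 'canny_min': 30, 'canny_max': 110},
    }
    state = {}
    img = frame
    for filt in (resize, none, green_absfilter, grayscale, median_blur, filter_canny, sortOut):
        result, _is_overlay = filt(info, img, state)
        if result is None:  # some filters need more frames before producing output
            return None
        img = result
    return img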