##############################################
# Modul: Computer Vision (SoSe23)
# Dozent: Prof. Dr-Ing. Gerhardt
#
# Erkennung eines Wasserstrahls
#
# Autoren: - Joel Steffens
#          - Mohammad Khaleeliyeh
#          - Midras Lappe
##############################################
import cv2
import filters
import sys
from datetime import datetime

# Parse command-line arguments: a video file is required, the number of
# seconds to skip at the start of the video is optional.
argc = len(sys.argv)
if argc < 2 or argc > 3:
    print("Syntax: ./detect_waterstream.py <Video> [Seconds]")
    # sys.exit instead of the interactive-only `exit` helper
    sys.exit(1)

videoFile = sys.argv[1]
skipSeconds = 0
if argc == 3:
    try:
        skipSeconds = int(sys.argv[2])
    except ValueError:
        # Friendly message instead of a raw traceback on e.g. "abc"
        print("Syntax: ./detect_waterstream.py <Video> [Seconds]")
        sys.exit(1)

# Open the video source and read its basic properties.
capture = cv2.VideoCapture(videoFile)

width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))    # frame width in pixels
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame height in pixels
fps = int(capture.get(cv2.CAP_PROP_FPS))              # frames per second
frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))   # total frame count

# Seek past the first `skipSeconds` seconds of the video.
skipFrame = fps * skipSeconds
capture.set(cv2.CAP_PROP_POS_FRAMES, skipFrame)

print(f"Reading video file: {videoFile}")
print(f"Data: {width}x{height}@{fps} Hz")
print(f"Skipping to frame {skipFrame} of total {frames} frames")

# State shared with the processing loop below.
last = None        # previous frame (not used in this section)
frame = None       # current frame read from the video
diff = None        # difference image (not used in this section)
window = 'Filter'  # name of the OpenCV display window

def overlay_imgs(base, top):
    """Overlay the non-black content of `top` onto `base`.

    A binary mask is derived from the grayscale version of `top`
    (threshold 10): wherever the overlay is (nearly) black, the base
    image shows through; everywhere else the overlay wins.
    """
    gray = cv2.cvtColor(top, cv2.COLOR_RGB2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    inverse = cv2.bitwise_not(mask)

    # Keep the base only where the overlay is black ...
    background = cv2.bitwise_and(base, base, mask=inverse)
    # ... and the overlay only where it actually has content.
    foreground = cv2.bitwise_and(top, top, mask=mask)

    return cv2.add(background, foreground)

def nothing_cb(val):
    """No-op trackbar callback; cv2.createTrackbar requires one, the
    value is read on demand via cv2.getTrackbarPos instead."""

# Create a new OpenCV window
cv2.namedWindow(window, cv2.WINDOW_NORMAL)
# Create trackbars for the tunable filter parameters
cv2.createTrackbar('Canny_Min', window, 90, 255, nothing_cb)
cv2.createTrackbar('Canny_Max', window, 150, 255, nothing_cb)
cv2.createTrackbar('Diff_Mix', window, 40, 100, nothing_cb)
cv2.createTrackbar('HSV_Min', window, 50, 180, nothing_cb)
cv2.createTrackbar('HSV_Max', window, 100, 180, nothing_cb)

# Definition of the filter pipeline stages. Each entry is
# (display name, filter function); a filter is called as
# filter(info, image, state) and returns (result, overlay_flag).
pipeline = [
    ('Original', filters.none),
    #('GreenChannel', filters.greenfilter),
    ('Gray scale', filters.grayscale),
    ('Contrast and Blur', filters.median_blur),
    ('Video Diff Multiple', filters.video_absdiff2),
    ('Green filter abs', filters.green_absfilter),
    ('Canny edge detection', filters.filter_canny),
    ('Morph close', filters.filter_close),
    # ('Gitter Filter', filters.gitter),
    # ('Morph open', filters.filter_open),
    ('Einzelne Rauswerfen', filters.sortOut),
    ('Point extraction', filters.points_extract),
    ('Polyfit lines', filters.points_overlay)
]

state = {}               # empty dictionary used as shared filter state

info = {
    'abs_diff': 2,          # default: 2+1 frames
    'dim': (width, height), # dimensions of the video
    'scaled': (1280, 720),  # scaled size used by the filters
    'params': {}            # empty dictionary for the filter parameters
}

result = None            # output of the most recent filter stage
visible_filter_idx = 0   # index of the pipeline stage currently shown
# Main loop: read frames, run the filter pipeline, display the currently
# selected stage and react to keyboard input until the video ends or the
# user quits.
while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        break  # end of video (or read error)

    frame, _ = filters.resize(info, frame, None)

    # Read the current trackbar positions into the filter parameters
    info['params']['mix'] = cv2.getTrackbarPos('Diff_Mix', window) / 100.0
    info['params']['canny_min'] = cv2.getTrackbarPos('Canny_Min', window)
    info['params']['canny_max'] = cv2.getTrackbarPos('Canny_Max', window)
    info['params']['hsv_min'] = cv2.getTrackbarPos('HSV_Min', window)
    info['params']['hsv_max'] = cv2.getTrackbarPos('HSV_Max', window)

    result = frame
    image = result
    # `stage` instead of `filter` — avoid shadowing the builtin
    for i, (name, stage) in enumerate(pipeline):
        result, overlay = stage(info, result, state)

        if result is None:
            break  # stage produced no output; stop the pipeline here

        if visible_filter_idx == i:
            image = result.copy()

            # Blend the stage output over the original frame if requested
            if overlay:
                image = overlay_imgs(frame, image)

            cv2.putText(image, f'Filter #{i}: {name}', (10, 30), cv2.FONT_HERSHEY_PLAIN,
                        1, (255, 0, 0), 2, cv2.LINE_AA)
            # Use the `window` constant instead of a hard-coded name
            cv2.imshow(window, image)

    # Poll the keyboard (~30 Hz display rate)
    code = cv2.waitKey(33) & 0xFF
    # Option 1: save a screenshot (s)
    if code == ord('s'):
        # `timestamp` instead of `str` — the original shadowed the builtin
        # NOTE(review): assumes a 'Screenshots' directory exists — confirm
        timestamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        cv2.imwrite(f'Screenshots/IMG_{timestamp}.png', image)
    # Option 2: next filter (d)
    elif code == ord('d'):
        visible_filter_idx = min(visible_filter_idx + 1, len(pipeline) - 1)
    # Option 3: previous filter (a)
    elif code == ord('a'):
        visible_filter_idx = max(visible_filter_idx - 1, 0)
    # Option 4: quit (q) — `code` is already masked with 0xFF above
    elif code == ord('q'):
        break

# Clean up: release the video source and close all OpenCV windows
capture.release()
cv2.destroyAllWindows()