#!/usr/bin/env python3
import cv2
import gettext
from itertools import count
from importlib import import_module
import os
# Needed for utf-8 text
from PIL import ImageFont, ImageDraw, Image
from send2trash import send2trash
import signal
import sys
import subprocess
import time
from timeit import default_timer as timer
import tomllib
import numpy as np

# Run from SSH: make sure an X display is set so the preview window can open.
# Use os.environ (not os.putenv): putenv only affects child processes and is
# not reflected by later os.getenv() calls in this process.
if not os.getenv('DISPLAY'):
    os.environ['DISPLAY'] = ':0'

running_from_folder = os.path.realpath(__file__)
# Session folders are named with single letters A-Z
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']

# Global state shared by code later in the file
index = 0
playhead = 0
playback = 0
# Replaced by the [CAMERA] config section (a dict) when a config file is found
camera_settings = 0
camera_status = []

# l10n
# TODO: Update fr translation
LOCALE = os.getenv('LANG', 'en_EN')
_ = gettext.translation('template', localedir='locales', languages=[LOCALE]).gettext

# Config
# defaults
# Fallback values for every supported [DEFAULT] option.
project_settings_defaults = {
    'cam_type': "webcam",
    'apply_settings_on_startup': True,
    'use_date_for_folder': False,
    'file_extension': 'png',
    'jpg_quality': 90,
    'projects_folder': '',
    'onion_skin_onstartup': False,
    'onionskin_alpha_default': 0.4,
    'fullscreen_bool': True,
    'screen_w': 1920,
    'screen_h': 1080,
    'framerate': 16,
    'ffmpeg_path': None,
    'v4l2-ctl_path': None,
    'export_options': 'scale=1920:-1,crop=1920:1080:0:102',
}

# Fallback values for the [CAMERA] section.
camera_current_settings_defaults = {
    'cam_w': 800,
    'cam_h': 600,
    'vflip': 0,
    'hflip': 0,
    'auto_exposure': 0,
    'white_balance_auto_preset': 0,
}

# Load from file
config_locations = ["./", "~/.", "~/.config/"]
config_found_msg = _("No configuration file found, using defaults.")
project_settings = project_settings_defaults
camera_current_settings = camera_current_settings_defaults
for location in config_locations:
    # Optional config files, ~ is expanded to $HOME on *nix, %USERPROFILE% on windows.
    # NOTE: plain string concatenation is intentional here: "~/." + "config.toml"
    # must give the hidden file "~/.config.toml" (os.path.join would give
    # "~/./config.toml" instead). The previous code checked the joined path but
    # opened the concatenated one, so it could look for one file and open another.
    config_path = os.path.expanduser(location + 'config.toml')
    if os.path.exists(config_path):
        with open(config_path, 'rb') as config_file:
            loaded_config = tomllib.load(config_file)
            if 'CAMERA' in loaded_config:
                camera_settings = loaded_config['CAMERA']
            if 'DEFAULT' in loaded_config:
                # Merge over the defaults so keys missing from the user's file
                # keep their fallback value instead of raising KeyError later.
                project_settings = {**project_settings_defaults, **loaded_config['DEFAULT']}
            config_found_msg = _("Found configuration file in {}").format(os.path.expanduser(location))
print(config_found_msg)

class webcam():
    """Generic UVC webcam backend: preview/capture via OpenCV, settings via v4l2-ctl."""
    def __init__(self):
        # Per-setting v4l2 control ranges; 'value' tracks the current state.
        self.camera_current_settings = {
            'auto_exposure': dict(min=1, max=3, step=2, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
            'white_balance_temperature': dict(min=0, max=10000, step=1000, default=4000, value=8000),
            'white_balance_automatic' : dict(min=0, max=1, step=1, default=0, value=0), # Auto wb
            'power_line_frequency': dict(min=0, max=2, step=1, default=2, value=2), # power_line_frequency
            'sharpness': dict(min=-100, max=100, step=1, default=0, value=10),
        }
        # Map generic config names to v4l2 control names.
        self.cam_settings_map = {
                        'white_balance_auto_preset': 'white_balance_temperature',
                        'white_balance_automatic': 'white_balance_automatic',
                        'auto_exposure':'auto_exposure',
                        'anti_flicker' : 'power_line_frequency',
                        'lenspos' : 'sharpness',
                        }
        self.has_liveview = True
        # UI frame (what gets displayed)
        self.frame = None
        # Onionskin frame (previous capture blended into the preview)
        self.o_frame = None
        # Overlay frame
        self.overlay = None
        # Original full-resolution frame kept for saving
        self.og_frame = None
        self.onionskin = project_settings['onion_skin_onstartup']
        self.onionskin_was_on = self.onionskin
        self.liveview_only = False
        self.lenspos = None
        # Test first device
        if not self.test_device(0):
            print(_("No camera device found. Exiting..."))
            sys.exit()
        try:
            self.cam = cv2.VideoCapture(0)
            self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_settings['cam_w'])
            self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_settings['cam_h'])
        except Exception:
            # Narrowed from a bare 'except:', which would also have swallowed
            # KeyboardInterrupt / SystemExit.
            sys.exit()

    def test_device(self, source):
        """Return True when video device *source* can be opened."""
        self.cap = cv2.VideoCapture(source)
        if self.cap is None or not self.cap.isOpened():
            print(_("Warning: unable to open video source: {}").format(source))
            return False
        self.cap.release()
        return True

    def capture_preview(self):
        """Grab a frame, keep the original for saving and build the on-screen
        preview (live view, onionskin blend, or last saved frame)."""
        ret, overlay = self.cam.read()
        if not ret:
            print(_("Failed to grab frame."))
            return False
        # Keep original pic in memory for full-quality saving
        self.og_frame = overlay.copy()
        # Resize preview to fit screen
        overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
        if self.liveview_only:
            # Don't mix it
            self.frame = overlay
            return True
        if self.onionskin:
            # calculate and apply alpha
            # NOTE(review): assumes self.o_frame holds a previous frame here —
            # confirm callers seed it before enabling onionskin.
            alpha = project_settings['onionskin_alpha_default']
            beta = (1.0 - alpha)
            self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
            return True
        self.frame = self.o_frame
        return True

    def capture_frame(self, img_path):
        """Write the last grabbed full-resolution frame to img_path.
        Returns the cv2.imwrite status (truthy on success)."""
        if project_settings['file_extension'] == 'jpg':
            capture_ok = cv2.imwrite(img_path, self.og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
        else:
            capture_ok = cv2.imwrite(img_path, self.og_frame)
        return capture_ok

    def increment_setting(self, setting:str, inc:int=-1):
        """Step *setting* by its step size (inc=1 up, inc=-1 down), wrapping to
        the opposite bound when the new value would leave [min, max]."""
        if setting not in self.camera_current_settings:
            return
        s = self.camera_current_settings[setting]
        valid = range(s['min'], s['max'] + 1)
        if inc == -1:
            s['value'] = s['value'] - s['step'] if s['value'] - s['step'] in valid else s['max']
        elif inc == 1:
            s['value'] = s['value'] + s['step'] if s['value'] + s['step'] in valid else s['min']

    def build_v4l2_cmd(self, to_set:list=None, inc:int=-1):
        """Build the v4l2-ctl command line. With to_set None, reset every
        control to its default; otherwise increment each listed setting
        (items may be (name, inc) tuples) and emit its new value."""
        cmd = '{} -d /dev/video0'.format(project_settings['v4l2-ctl_path'])
        args = []
        value = inc
        if to_set is None:
            # Apply defaults to every known control.
            # (Previously this branch and the one below shared a confusing outer
            # loop that was only used as a one-shot wrapper when to_set was given.)
            for setting in self.camera_current_settings:
                self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['default']
                cmd += ' -c {}={}'
                args.append(setting)
                args.append(self.camera_current_settings[setting]['value'])
        else:
            # Increment settings in to_set
            for setting in to_set:
                if type(setting) is tuple:
                    # A tuple carries its own increment; it stays in effect
                    # for the following plain entries, as before.
                    value = setting[1]
                    setting = setting[0]
                if setting in self.cam_settings_map:
                    setting = self.cam_settings_map[setting]
                    print(setting)
                if setting in self.camera_current_settings:
                    self.increment_setting(setting, value)
                    cmd += ' -c {}={}'
                    args.append(setting)
                    args.append(self.camera_current_settings[setting]['value'])
                else:
                    print(_("Unknown setting!"))
        return cmd.format(*args)

    def run_v4l2_ctl(self, cmd):
        """Spawn v4l2-ctl with *cmd*; returns the Popen object, or False when
        no v4l2-ctl path is configured.
        NOTE(review): cmd.split(' ') breaks if the configured path contains
        spaces — confirm acceptable or switch to shlex.split."""
        if project_settings['v4l2-ctl_path'] is None:
            return False
        print(cmd)
        v4l2_ctl_process = subprocess.Popen(cmd.split(' '))
        return v4l2_ctl_process

    def apply_setting(self, to_set:list=None, inc:int=-1):
        """Build and run the v4l2-ctl command; returns the settings dict."""
        cmd = self.build_v4l2_cmd(to_set, inc)
        self.run_v4l2_ctl(cmd)
        return self.camera_current_settings

    def flip_image(self):
        """Rotate both the preview and the frame kept for saving by 180°."""
        self.frame = cv2.flip(self.frame, -1)
        # Also flip original file
        self.og_frame = cv2.flip(self.og_frame, -1)

    def focus(self, direction:str='-'):
        # TODO(review): *direction* is ignored here (always increments), unlike
        # the picam/dslr backends — confirm whether that is intentional.
        self.apply_setting(['lenspos'], 1)

    def reset_picture_settings(self):
        """Reset every control to its default via v4l2-ctl."""
        self.apply_setting()

    def close(self):
        """Release the OpenCV capture device."""
        self.cam.release()


class showmewebcam(webcam):
    """Show-me-webcam camera (Raspberry Pi webcam firmware), configured over serial."""

    def __init__(self):
        # Imported lazily: only this backend needs serial access.
        self.serialutils = import_module('serialutils')
        super().__init__()

        def control(lo, hi, step, default, value=None):
            # Helper building one v4l2 control descriptor.
            return dict(min=lo, max=hi, step=step, default=default,
                        value=default if value is None else value)

        self.camera_current_settings = {
            'auto_exposure': control(0, 1, 1, camera_settings['auto_exposure']),
            'white_balance_auto_preset': control(0, 9, 1, camera_settings['white_balance_auto_preset']),
            'horizontal_flip': control(0, 1, 1, camera_settings['hflip']),
            'vertical_flip': control(0, 1, 1, camera_settings['vflip']),
            'video_bitrate': control(25000000, 25000000, 10000, camera_settings['video_bitrate']),
            'power_line_frequency': control(0, 2, 1, 2),
            'sharpness': control(-100, 100, 1, 0, value=10),
            'exposure_time_absolute': control(1, 1000, 30, 1000),
        }
        self.cam_settings_map = {
            'anti_flicker': 'power_line_frequency',
            'lenspos': 'sharpness',
            'exposure': 'exposure_time_absolute',
        }

    def apply_setting(self, to_set:list=None, inc:int=-1):
        """Build the v4l2 command and send it to the camera over its serial port."""
        command = self.build_v4l2_cmd(to_set, inc)
        self.serialutils.send_serial_cmd(self.serialutils.find_cam_port(), command)
        return self.camera_current_settings

    def flip_image(self):
        """Flip in-camera by toggling both flip controls."""
        self.apply_setting(['horizontal_flip', 'vertical_flip'])


class picam():
    """Raspberry Pi camera backend (Pi Cam V3) driven through picamera2."""
    def __init__(self):
        # Per-setting ranges; 'value' tracks the current state.
        self.camera_current_settings = {
            'auto_exposure': dict(min=0, max=4, step=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
            'white_balance_auto_preset': dict(min=0, max=7, step=1, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
            'horizontal_flip': dict(min=0, max=1, step=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
            'vertical_flip': dict(min=0, max=1, step=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
            'anti_flicker': dict(min=0, max=20000, step=1000, default=0, value=0),
            'exposure' : dict(min=0, max=50000, step=1000, default=0, value=0)
        }
        # Map generic config name to specific picamera setting name
        self.cam_settings_map = {
                            'white_balance_auto_preset': 'AwbMode',
                            'auto_exposure':'AeExposureMode',
                            'exposure':'ExposureTime',
                            # ~ 'anti_flicker' : 'AeFlickerMode',
                            'anti_flicker' : 'AeFlickerPeriod',
                            'lenspos' : 'LensPosition',
                            }
        self.has_liveview = True
        # UI frame (what gets displayed)
        self.frame = None
        # Onionskin frame (previous capture blended into the preview)
        self.o_frame = None
        # Overlay frame
        self.overlay = None
        # Original full-resolution frame kept for saving
        self.og_frame = None
        self.onionskin = project_settings['onion_skin_onstartup']
        self.onionskin_was_on = self.onionskin
        self.liveview_only = False
        # Pi Cam V3 setup; imported lazily so other backends work without picamera2
        self.Picamera2 = getattr(import_module('picamera2'), 'Picamera2')
        self.Metadata = getattr(import_module('picamera2'), 'Metadata')
        self.Transform = getattr(import_module('libcamera'), 'Transform')
        # Cam setup
        self.cam = self.Picamera2()
        self.picam_config = self.cam.create_video_configuration(main={"format": 'RGB888',"size": (camera_settings['cam_w'], camera_settings['cam_h'])})
        self.picam_config["transform"] = self.Transform(vflip=self.camera_current_settings['vertical_flip']['value'],hflip=self.camera_current_settings['horizontal_flip']['value'])
        self.cam.configure(self.picam_config)
        # Autofocus, get lens position and switch to manual mode
        # Set Af mode to Auto then Manual (0). Default is Continuous (2), Auto is 1
        self.cam.set_controls({'AfMode':1})
        self.cam.start()
        self.cam.autofocus_cycle()
        self.lenspos = self.cam.capture_metadata()['LensPosition']
        # Set focus, wb, exp to manual
        self.camera_default_settings = {'AfMode': 0,
                               'AwbEnable': 1,
                               'AwbMode': self.camera_current_settings['white_balance_auto_preset']['default'],
                               # Disable Autoexposure
                               'AeEnable': 1,
                               'ExposureTime': 0,
                               'AeExposureMode': self.camera_current_settings['auto_exposure']['default'],
                               # Enable flicker avoidance due to mains
                               # AeFlickerModeEnum { FlickerOff = 0, FlickerManual = 1, FlickerAuto = 2 }
                               'AeFlickerMode': 1,
                               # Mains 50hz = 10000, 60hz = 8333
                               # ~ 'AeFlickerPeriod': 8333,
                               'AeFlickerPeriod': self.camera_current_settings['anti_flicker']['default'],
                               # Format is (min, max, default) in ms
                               # here: (60fps, 12fps, None)
                               # ~ 'FrameDurationLimits':(16666,83333,None)
                              }
        self.cam.set_controls(self.camera_default_settings)
        # Get current exposure value and store it in settings
        metadata = self.Metadata(self.cam.capture_metadata())
        self.camera_current_settings['exposure']['value'] = self.camera_current_settings['exposure']['default'] = metadata.ExposureTime

    def test_device(self, source):
        """No-op: picamera2 probes the device itself."""
        pass

    # Same as in webcam() class
    def capture_preview(self):
        """Grab a frame, keep the original for saving and build the on-screen
        preview (live view, onionskin blend, or last saved frame)."""
        overlay = self.cam.capture_array("main")
        # Keep original pic in memory
        self.og_frame = overlay.copy()
        # Resize preview to fit screen
        overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
        if self.liveview_only:
            # Don't mix it
            self.frame = overlay
            return True
        if self.onionskin:
            # calculate and apply alpha
            # NOTE(review): assumes self.o_frame holds a previous frame here —
            # confirm callers seed it before enabling onionskin.
            alpha = project_settings['onionskin_alpha_default']
            beta = (1.0 - alpha)
            self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
            return True
        self.frame = self.o_frame
        return True

    # Same as in webcam() class
    def capture_frame(self, img_path):
        """Write the last grabbed full-resolution frame to img_path.
        Returns the cv2.imwrite status (truthy on success)."""
        if project_settings['file_extension'] == 'jpg':
            capture_ok = cv2.imwrite(img_path, self.og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
        else:
            capture_ok = cv2.imwrite(img_path, self.og_frame)
        return capture_ok

    def increment_setting(self, setting:str, inc:int=1):
        """Step *setting* (inc=1 up, inc=-1 down), wrapping at the bounds, then
        push the exposure-related controls. Returns True when *setting* is known."""
        if setting in self.camera_current_settings:
            s = self.camera_current_settings[setting]
            valid = range(s['min'], s['max'] + 1)
            if inc == -1:
                s['value'] = s['value'] - s['step'] if s['value'] - s['step'] in valid else s['max']
            elif inc == 1:
                s['value'] = s['value'] + s['step'] if s['value'] + s['step'] in valid else s['min']
            # Special cases
            # Auto exposure: index 4 re-enables full auto exposure.
            # Fixed key: this previously tested 'autoexposure', which is not a
            # key of camera_current_settings ('auto_exposure' is), so the
            # AeEnable:1 branch was unreachable.
            if setting == 'auto_exposure' and self.camera_current_settings['auto_exposure']['value'] == 4:
                self.cam.set_controls({'AeEnable': 1})
            else:
                self.cam.set_controls({'AeEnable': 0})
                self.cam.set_controls({"AeExposureMode": self.camera_current_settings['auto_exposure']['value']})
            return True

    def apply_setting(self, to_set:list=None, inc:int=0):
        """Increment (when inc != 0) then push each setting in to_set to the camera."""
        set_controls = False
        if to_set is not None:
            for setting in to_set:
                if inc != 0:
                    set_controls = self.increment_setting(setting, inc)
                if set_controls:
                    self.cam.set_controls({self.cam_settings_map[setting] : self.camera_current_settings[setting]['value']})

    def flip_image(self):
        """Toggle both flips and reconfigure; requires a camera stop/start."""
        self.cam.stop()
        self.increment_setting('vertical_flip')
        self.increment_setting('horizontal_flip')
        self.picam_config["transform"] = self.Transform(vflip=self.camera_current_settings['vertical_flip']['value'],hflip=self.camera_current_settings['horizontal_flip']['value'])
        self.cam.configure(self.picam_config)
        self.cam.start()

    def focus(self, direction:str='-'):
        """Nudge the manual lens position by 0.2 in the given direction."""
        if direction == '+':
            self.lenspos += 0.2
        else:
            self.lenspos -= 0.2
        # Set AfMode to Manual
        self.cam.set_controls({'AfMode': 0, 'LensPosition': self.lenspos})
        # Format AFTER the gettext lookup: formatting first would make the
        # msgid vary at runtime and never match the catalog.
        print(_("-Lens pos: {}").format(self.lenspos))

    def reset_picture_settings(self):
        """Push the startup defaults back to the camera and re-read exposure."""
        # ~ for setting in self.camera_default_settings:
            # ~ self.cam.set_controls({setting : self.camera_default_settings[setting]})
        self.cam.set_controls(self.camera_default_settings)
        metadata = self.Metadata(self.cam.capture_metadata())
        self.camera_current_settings['exposure']['value'] = self.camera_current_settings['exposure']['default'] = metadata.ExposureTime

    def close(self):
        """Shut down the picamera2 instance."""
        self.cam.close()


class dslr():
    """DSLR backend driven through gphoto2 (no live view; capture only)."""
    def __init__(self):
        # Imported lazily so the other backends work without gphoto2 installed.
        self.gp = import_module('gphoto2')
        # Each 'value' is an index into the camera's choice list for that setting.
        self.camera_current_settings = {
            'capturemode' : dict(min=0, max=4, step=1, default=0, value=1),     # 0: single,1: burst,2:Timer,3:2S Remote,4:Quick remote
            'imagesize' : dict(min=0, max=2, step=1, default=2, value=2),       # 0:L, 1:M, 2: S (1936x1296)
            'imagequality' : dict(min=0, max=2, step=1, default=2, value=2),       # 0 JPEG basic 1 JPEG normal 2 JPEG fine  3 raw 4 raw+jpg
            'whitebalance' : dict(min=0, max=7, step=1, default=2, value=1),    # 0 Automatic 1 Daylight 2 Fluorescent 3 Tungsten 4 Flash 5 Cloudy 6 Shade 7 Preset
            'capturetarget' : dict(min=0, max=1, step=1, default=0, value=0),   # Internal memory
            'iso' : dict(min=0, max=5, default=0, step=1, value=0),   # 0:100, 5:3200
            'shutterspeed' : dict(min=0, max=51, step=1, default=30, value=30),   # 0 : 1/4000, 51: 30s
            # ~ 'manualfocusdrive' : dict(min=0, max=1, step=1, default=0, value=0),   # Trigger autofocus # manualfocusdrive
        }
        # Map generic config name to specific gphoto2 setting name
        self.cam_settings_map = {
                            'white_balance_auto_preset': 'whitebalance',
                            'auto_exposure':'iso',
                            'anti_flicker' : 'imagesize',
                            'lenspos' : 'shutterspeed',
                            }
        # UI frame (what gets displayed)
        self.frame = None
        # Onionskin frame
        self.o_frame = None
        # Overlay frame
        self.overlay = None
        # Original frame for saving
        self.og_frame = None
        self.has_liveview = False
        # TODO: check if DSLR has live feed and set accordingly
        # ~ self.onionskin = project_settings['onion_skin_onstartup']
        self.onionskin = False
        self.onionskin_was_on = self.onionskin
        self.liveview_only = False
        self.lenspos = None
        # When True, the next downloaded frame is rotated 180° before use
        self.flip_img = False
        # Guards against re-entrant captures while one is in progress
        self.cam_busy = False
        self.camera_current_config = None
        self.cam = self.init_camera()

    def init_camera(self):
        """Connect to the camera and cache its config tree; None when absent."""
        cam = self.gp.check_result(self.gp.gp_camera_new())
        try:
            self.gp.check_result(self.gp.gp_camera_init(cam))
            # get configuration tree
            self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(cam))
        except Exception:
            # Narrowed from a bare 'except:' (gphoto2 raises GPhoto2Error here).
            print(_("No camera found."))
            cam = None
            # Fixed typo: this assigned 'self.current_camera_config', leaving a
            # stale tree in self.camera_current_config after a failed init.
            self.camera_current_config = None
        return cam

    def test_device(self, source):
        """No-op: device probing is handled by init_camera()."""
        pass

    def capture_preview(self):
        # TODO : check DSLR has preview/live feed
        pass

    def find_file_ext(self, gp_name:str, full_path:str):
        """Rewrite *full_path* so its extension matches the file the camera
        actually produced (*gp_name*); returns gp_name unchanged when
        full_path has no recognised image extension."""
        # TODO: use re to sub png with jpg ?
        # extract dir path
        dirname = os.path.dirname(full_path)
        # extract filename from path
        new_name = os.path.basename(full_path)
        # if the path doesn't contain file name, return camera's FS filename
        if not full_path.endswith(('.jpg', '.JPG', '.raw')):
            return gp_name
        suffix = gp_name.split('.')[-1].lower()
        prefix = new_name.split('.')[:-1]
        prefix.insert(len(prefix), suffix)
        return os.path.join(dirname, '.'.join(prefix))

    def check_status_value(self, config, value, optimal_value=None):
        """Read *value* from the camera config tree. With *optimal_value*
        (a mapping), also return the matching choice as [current, choice]."""
        cur_check = self.gp.check_result(self.gp.gp_widget_get_child_by_name(config, value))
        cur_check_value = self.gp.check_result(self.gp.gp_widget_get_value(cur_check))
        if optimal_value is not None:
            cur_check_choice = self.gp.check_result(self.gp.gp_widget_get_choice(cur_check, optimal_value[value]))
            return [cur_check_value, cur_check_choice]
        else:
            return cur_check_value

    def capture_frame(self, img_path):
        """Trigger the shutter and download the image to (a corrected) img_path.
        Returns True on success, False on failure or while a capture is pending."""
        if self.cam is None:
            self.cam = self.init_camera()
            if self.cam is None:
                # Still no camera: bail out instead of crashing below.
                return False
        if not self.cam_busy:
            # CHECK: Should we init and close dslr for each frame ?
            # Check battery level (value looks like '50%'; strip the suffix)
            battery_level = int(self.check_status_value(self.camera_current_config, 'batterylevel')[:-1])
            if battery_level < 10:
                print("Battery level is too low, shutter disabled.")
                return False
            try:
                self.cam_busy = True
                file_path = self.cam.capture(self.gp.GP_CAPTURE_IMAGE)
                print('Camera file path: {0}/{1}'.format(file_path.folder, file_path.name))
                # We don't want to download a jpg or raw from the dslr and save it as a false *.png.
                img_path = self.find_file_ext(file_path.name, img_path)
                print('Copying image to', img_path)
                camera_file = self.cam.file_get(
                            file_path.folder,
                            file_path.name,
                            self.gp.GP_FILE_TYPE_NORMAL
                            )
                capture_ok = camera_file.save(img_path)
            except self.gp.GPhoto2Error as ex:
                print(ex)
                # See http://gphoto.org/doc/api/gphoto2-result_8h.html for error codes
                # Cam was turned off/on: drop the handle and reconnect
                if ex.code == self.gp.GP_ERROR_IO_USB_FIND or ex.code == self.gp.GP_ERROR_IO_USB_CLAIM:
                    self.cam.exit()
                    self.cam = self.init_camera()
                # ~ if ex.code == self.gp.GP_ERROR_CAMERA_BUSY:
                    # ~ print("Camera Busy.")
                self.cam_busy = False
                return False
            # Flip image if needed
            if self.flip_img:
                frm = cv2.imread(img_path)
                frm = cv2.flip(frm, -1)
                cv2.imwrite(img_path, frm)
            # Update frame
            self.frame = cv2.imread(img_path)
            self.cam_busy = False
            return True
        # A capture is already in progress
        return False

    def apply_gphoto_setting(self, setting:str):
        """Write one setting's current value into the cached config tree."""
        # Get corresponding setting name if possible
        if setting in self.cam_settings_map:
            setting = self.cam_settings_map[setting]
        # Try to apply setting
        if setting in self.camera_current_settings:
            print(setting)
            select_setting = self.camera_current_settings[setting]
            # find the $setting config item
            try:
                # Get widget with name $setting
                cur_setting = self.gp.check_result(self.gp.gp_widget_get_child_by_name(self.camera_current_config, setting))
                # Get a list of available choices
                choices = list(self.gp.check_result(self.gp.gp_widget_get_choices(cur_setting)))
                # Build dict with name/value equivalence
                choices_dict = {choices.index(i):i for i in list(choices)}
                # If new_value exists in list, apply
                if select_setting['value'] in choices_dict:
                    cur_setting_choice = self.gp.check_result(self.gp.gp_widget_get_choice(cur_setting, select_setting['value']))
                    # set config value
                    self.gp.check_result(self.gp.gp_widget_set_value(cur_setting, cur_setting_choice))
            except Exception:
                # Narrowed from a bare 'except:'
                print("Configuration error while setting {} to {}".format(setting, select_setting))

    def increment_setting(self, setting:str, inc:int=-1):
        """Step *setting* by one step (inc=1 up, inc=-1 down), wrapping to the
        opposite bound when the new value would leave [min, max]."""
        if setting not in self.camera_current_settings:
            return
        s = self.camera_current_settings[setting]
        valid = range(s['min'], s['max'] + 1)
        if inc == -1:
            s['value'] = s['value'] - s['step'] if s['value'] - s['step'] in valid else s['max']
        elif inc == 1:
            s['value'] = s['value'] + s['step'] if s['value'] + s['step'] in valid else s['min']

    def apply_setting(self, to_set:list=None, inc:int=0):
        """Apply settings to the camera: all of them (to_set None) or only the
        listed ones; inc != 0 increments each before applying. Returns the
        gphoto2 status of the final config write."""
        if self.cam is None:
            self.cam = self.init_camera()
        self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(self.cam))
        # iterate over the settings dictionary
        if to_set is None:
            for setting in self.camera_current_settings:
                if inc:
                    self.increment_setting(setting, inc)
                self.apply_gphoto_setting(setting)
        else:
            # Get corresponding setting name if possible
            for setting in to_set:
                if setting in self.cam_settings_map:
                    setting = self.cam_settings_map[setting]
                if inc:
                    self.increment_setting(setting, inc)
                self.apply_gphoto_setting(setting)
        # validate config
        status = self.gp.check_result(self.gp.gp_camera_set_config(self.cam, self.camera_current_config))
        # close camera
        # ~ self.cam.exit()
        return status

    def flip_image(self):
        """No live flip: mark the next downloaded frame for 180° rotation."""
        self.flip_img = True

    def focus(self, direction:str='-'):
        """Step the 'lenspos'-mapped setting (shutterspeed) up or down."""
        if direction == '-':
            self.apply_setting(['shutterspeed'], -1)
        elif direction == '+':
            self.apply_setting(['shutterspeed'], 1)

    def reset_picture_settings(self):
        """Restore every setting to its default and push the config."""
        if self.cam is None:
            self.cam = self.init_camera()
        self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(self.cam))
        for setting in self.camera_current_settings:
            self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['default']
            # TODO: use self.apply_setting() instead
            self.apply_gphoto_setting(setting)
        status = self.gp.check_result(self.gp.gp_camera_set_config(self.cam, self.camera_current_config))

    def close(self):
        """Release the gphoto2 connection."""
        if self.cam is not None:
            self.cam.exit()


def get_cam_class(camera_type):
    """Instantiate the camera backend named by *camera_type*.

    Returns the backend instance, or None for an unknown type.
    (A duplicate, unreachable 'webcam' branch was removed.)
    """
    if camera_type == 'webcam':
        return webcam()
    if camera_type == 'showmewebcam':
        return showmewebcam()
    if camera_type == 'picam':
        return picam()
    if camera_type == 'dslr':
        return dslr()
    return None


def generate_text_image(text:str, screen_w, screen_h, bullets=False):
    """Render *text* centered on a black screen_w x screen_h image.

    When *bullets* is True, also draw the green "Yes" / red "No" dots used by
    the yes/no prompt. Returns the image as a numpy array (for cv2.imshow).
    """
    text_image = Image.new('RGB',
                            (screen_w, screen_h),
                            (0,0,0)
                          )
    text_image_draw = ImageDraw.Draw(text_image)
    font = None
    if text is not None or bullets:
        # Load the font whenever anything must be drawn. The bullets branch
        # needs it too: previously the font was only loaded when text was not
        # None, so bullets=True with text=None raised NameError.
        font = ImageFont.truetype("Tuffy_Bold.ttf", int(screen_w/32))
    if text is not None:
        lines = text.split('\n')
        # The widest line decides the horizontal centering of the paragraph
        longest_line = max(lines, key=len)
        font_len = font.getlength(longest_line)
        text_image_draw.multiline_text((screen_w/2 - font_len/2, screen_h/3 ),
                                        text,
                                        fill=(255, 255, 255),
                                        font=font,
                                        align='center',
                                        spacing=20
                                        )
    if bullets:
        dot_radius = screen_w/24
        x_unit = (screen_w/32)
        y_unit = (screen_h/32)
        green_dot = (x_unit*14, y_unit*24)
        red_dot   = (green_dot[0]+x_unit*4, green_dot[1])
        # Green dot
        text_image_draw.circle(green_dot,
                         dot_radius,
                         fill=(0,255,0),
                         outline=None,
                         width=1
                         )
        text_image_draw.text(green_dot,
                            _("Yes"),
                            fill=(0, 0, 0),
                            font=font,
                            anchor="mm",
                            spacing=20
                            )
        # Red dot — (0,0,255) is blue in PIL's RGB but presumably reads as red
        # once shown through cv2.imshow's BGR interpretation; confirm intent.
        text_image_draw.circle(red_dot,
                         dot_radius,
                         fill=(0,0,255),
                         outline=None,
                         width=1
                         )
        text_image_draw.text(red_dot,
                            _("No"),
                            fill=(0, 0, 0),
                            font=font,
                            anchor="mm",
                            spacing=20
                            )
    text_image = np.array(text_image)
    return text_image


def askyesno(text:str):
    """Show a fullscreen yes/no prompt and block until a key is pressed.

    SPACE (32) or keypad 0 (48) count as "yes"; any other key is "no".
    """
    prompt = generate_text_image(text, project_settings['screen_w'], project_settings['screen_h'], bullets=True)
    cv2.imshow("StopiCV", prompt)
    # Block until the user answers
    key = cv2.waitKey(0) % 256
    return key in (32, 48)


def find_letter_after(letter:str, date=False):
    """Return the next session letter, or a timestamp-based folder name.

    Falls back to a 'Y-M-D_H-M-S' string when a date folder is requested,
    the letter is unknown, or the alphabet is exhausted.
    """
    if not date and letter in alphabet and alphabet.index(letter) < len(alphabet) - 1:
        return alphabet[alphabet.index(letter) + 1]
    # No next letter available (or date requested): use current local time
    t = time.localtime()
    return '{}-{}-{}_{}-{}-{}'.format(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)


def get_projects_folder():
    """Return the path of the folder that holds all session folders.

    Uses the module-level ``projects_folder`` override when set; otherwise
    joins the user's home directory with the configured subfolder name
    (or a default name). Creates the folder when it is missing.

    Returns:
        str: path to the projects folder, or False when a non-directory
        file already occupies that path.
    """
    if projects_folder:
        project_folder = projects_folder
    else:
        # Build <home>/<configured or default subfolder>
        home = os.path.expanduser('~')
        if project_settings['projects_folder'] != '':
            subfolder = project_settings['projects_folder']
        else:
            subfolder = 'Stopmotion Projects'
        project_folder = os.path.join(home, subfolder)
    # Create folder if it doesn't exist
    if not os.path.exists(project_folder):
        os.mkdir(project_folder)
    elif not os.path.isdir(project_folder):
        # A plain file occupies the path; we cannot create the folder
        return False
    return project_folder


def get_session_folder():
    """Locate or create the folder for the current shooting session.

    Scans the projects folder for one-letter session folders. When one
    exists, offers to resume it; otherwise picks the next letter (or a
    dated name when use_date_for_folder is set). Updates the module-level
    ``next_letter``. Returns the session path, or False on failure.
    """
    global next_letter
    project_folder = get_projects_folder()
    if not project_folder:
        return False
    # Previous sessions are folders whose name is a single letter
    existing_sessions = sorted(
        entry for entry in os.listdir(project_folder)
        if len(entry) == 1 and entry in alphabet
    )
    if existing_sessions:
        last_letter = existing_sessions[-1]
        # By default, move on to a brand new session folder
        next_letter = find_letter_after(last_letter, project_settings['use_date_for_folder'])
        if next_letter is False:
            return False
        if not project_settings['use_date_for_folder']:
            # A previous session folder was found; offer to resume it
            resume_session = askyesno(_("A previous session was found in\n {},\n resume shooting ?").format(os.path.join(project_folder, last_letter)))
            if resume_session:
                next_letter = last_letter
    elif not project_settings['use_date_for_folder']:
        next_letter = 'A'
    else:
        next_letter = find_letter_after('A', project_settings['use_date_for_folder'])
    session_path = os.path.join(project_folder, next_letter)
    if not os.path.exists(session_path):
        os.mkdir(session_path)
    print(_("Using {} as session folder.").format(session_path))
    return session_path


def get_frames_list(folder:str):
    """Return the sorted list of captured frame filenames in `folder`.

    Frames are files starting with the project letter and ending with the
    configured extension. New files found on disk are appended to the
    module-level img_list in place (the returned list is that same list).
    When no frame exists yet, returns a single placeholder name numbered
    -001 so the next capture starts at 0000.
    """
    # Alias on purpose: appends mutate the module-level list
    frames = img_list
    for entry in os.listdir(folder):
        if entry.startswith(project_letter) and entry.endswith(project_settings['file_extension']):
            if entry not in frames:
                frames.append(entry)
    if not frames:
        # Fake name set to -001 so the file count starts at 000
        return ["{}.{:04d}.{}".format(next_letter, -1, project_settings['file_extension'])]
    frames.sort()
    return frames


def get_frame_by_idx(folder:str, index:int):
    """Return the frame filename at `index`, split on dots.

    Filename pattern is A.0001.JPG, so the result is [letter, count, ext].
    """
    # Refresh the file list before indexing into it
    frames = get_frames_list(folder)
    return frames[index].split('.')

def get_last_frame(folder:str):
    """Return the newest frame's filename, split on dots.

    Filename pattern is A.0001.JPG, so the result is [letter, count, ext].
    """
    # Refresh the file list; sorted names put the newest frame last
    frames = get_frames_list(folder)
    return frames[-1].split('.')


def get_before_last_frame(folder:str):
    """Return the second-to-last frame's filename.

    Unlike get_last_frame/get_frame_by_idx, the raw name (A.0001.JPG
    pattern) is returned, not a split list.
    """
    frames = get_frames_list(folder)
    return frames[-2]


def get_onionskin_frame(folder:str, index:int=-1):
    """Load the frame to overlay as onionskin, resized to screen size.

    Uses the last frame by default, or the frame at `index` when given.
    Falls back to the module-level blank_image when the file is missing.
    """
    if index == -1:
        parts = get_last_frame(folder)
    else:
        parts = get_frame_by_idx(folder, index)
    prev_name = '.'.join(parts)
    prev_path = os.path.join(savepath, prev_name)
    if not os.path.exists(os.path.expanduser(prev_path)):
        # Image does not exist: use the blank placeholder instead
        return blank_image
    frm = cv2.imread(prev_path)
    return cv2.resize(frm, (project_settings['screen_w'], project_settings['screen_h']))


def return_next_frame_number(last_frame_name):
    """Build the filename following `last_frame_name`.

    Args:
        last_frame_name: a filename split on dots, [prefix, count, ext].

    Returns:
        str: prefix.NNNN.ext with the count incremented and zero-padded.
    """
    prefix, filecount, ext = last_frame_name
    return "{}.{:04d}.{}".format(prefix, int(filecount) + 1, ext)


def update_image(img_list, img_index):
    """Load and return the frame at img_index, resized to the screen size.

    Returns 0 when img_list is empty (callers guard on list length before
    displaying), and the module-level blank_image when the file is missing
    on disk.
    """
    if not img_list:
        return 0
    filename = img_list[img_index]
    full_path = os.path.join(savepath, filename)
    if not os.path.exists(os.path.expanduser(full_path)):
        return blank_image
    img = cv2.imread(full_path)
    return cv2.resize(img, (project_settings['screen_w'], project_settings['screen_h']))


def next_frame(img_index, loop=True):
    """Advance to the next frame; wraps around when loop is True."""
    new_index = check_range(img_index + 1, loop)
    return new_index, update_image(img_list, new_index)


def previous_frame(img_index):
    """Step back one frame; range handling is delegated to check_range."""
    new_index = check_range(img_index - 1)
    return new_index, update_image(img_list, new_index)


def last_frame(img_index):
    """Jump to the newest frame of the animation."""
    final_index = len(img_list) - 1
    return final_index, update_image(img_list, final_index)


def first_frame(img_index):
    """Jump back to the very first frame of the animation."""
    return 0, update_image(img_list, 0)


def clean_img_list(folder_path):
    """Drop entries from the module-level img_list whose file no longer exists.

    Bug fix: the previous version iterated over `img_list_copy = img_list`,
    which is an alias rather than a copy, so removing an element while
    iterating skipped the following entry. Iterate over a real copy
    (list(img_list)) so every entry is checked.
    """
    files_on_disk = set(os.listdir(folder_path))
    for file in list(img_list):
        if file not in files_on_disk:
            img_list.remove(file)


def check_range(x, loop=True):
    """Clamp or wrap index `x` into the valid range of img_list.

    With loop=True, out-of-range indices wrap around (below 0 goes to the
    last frame, past the end goes to 0); otherwise they clamp to the edges.
    """
    last = len(img_list) - 1
    if x < 0:
        return last if loop else 0
    if x > last:
        return 0 if loop else last
    return x


def batch_rename(folder:str):
    """Renumber every frame in `folder` sequentially from 0000.

    Frames are processed in sorted order, so targets never collide with a
    not-yet-renamed file. Returns the refreshed frame list.
    """
    frame_list = get_frames_list(folder)
    for position, name in enumerate(frame_list):
        src = os.path.join(folder, name)
        if not os.path.exists(src):
            print(_("{} does not exist").format(str(name)))
            continue
        new_name = "{}.{:04d}.{}".format(project_letter, position, project_settings['file_extension'])
        os.rename(src, os.path.join(folder, new_name))
    return get_frames_list(folder)


def batch_rename_reversed(folder:str, from_index:int=0):
    """Shift frame numbers up by one from `from_index` onwards.

    Walks the frame list from the end while the counter counts DOWN from
    len(frame_list), so each rename targets a number that is not taken yet;
    processing in ascending order would overwrite existing files. Used by
    insert_frame to free a slot for a new frame. Returns the refreshed
    frame list.
    """
    # Counter starts at the highest new frame number and decreases
    frame_list = get_frames_list(os.path.realpath(folder))
    print(frame_list[from_index:])
    counter = (".%04i." % x for x in count(len(frame_list), -1))
    for i in reversed(frame_list[from_index:]):
        if os.path.exists(os.path.join(folder, i)):
            cnt = next(counter)
            os.rename(os.path.join(folder, i), os.path.join(folder, "{}{}{}".format(project_letter, cnt, project_settings['file_extension'])))
    return get_frames_list(folder)


def offset_dictvalues(from_index=0):
    """Shift the values of img_list up by one slot from `from_index` onwards.

    NOTE(review): this function treats img_list as a dict (dict(), .keys(),
    .values()), but everywhere else in this file img_list is a plain list of
    filenames — calling this as-is would raise AttributeError. Presumably a
    leftover from an earlier dict-based implementation; verify before use.
    """
    dict_copy = dict(img_list)
    for i in range(from_index, len(dict_copy)):
        if i < len(img_list)-1:
            img_list[list(img_list.keys())[i]] = list(img_list.values())[i+1]
        else:
            # Last slot has no successor: clear it
            img_list[list(img_list.keys())[i]] = None


def remove_frame(img_list:list, img_index:int):
    """Send the frame at img_index to the trash and renumber the rest.

    Returns (img_list, img_index, image) where image is the frame now at
    the (possibly clamped) index, or blank_image when nothing remains or
    the file was already gone.
    """
    if not img_list:
        return img_list, 0, blank_image
    folder_path = os.path.realpath(savepath)
    frame_name = img_list[img_index]
    frame_path = os.path.join(folder_path, frame_name)
    if not os.path.exists(frame_path):
        return img_list, img_index, blank_image
    print(_("Removing {}").format(frame_path))
    # Move the file to the trash rather than deleting outright
    send2trash(frame_path)
    img_list.remove(frame_name)
    # Renumber the remaining files and refresh the list
    img_list = batch_rename(folder_path)
    clean_img_list(folder_path)
    # Clamp (not wrap) the index into the shorter list
    img_index = check_range(img_index, False)
    return img_list, img_index, update_image(img_list, img_index)

def insert_frame(img_list:list, frame_name:str, img_index:int):
    """Free the slot for frame_name at img_index by shifting later frames up.

    Only renames files on disk (via batch_rename_reversed); the caller is
    expected to capture the new frame into the freed slot afterwards.
    Returns (img_list, img_index), or (img_list, 0) when the index is past
    the end of the list.
    """
    if img_index >= len(img_list):
        return img_list, 0
    folder_path = os.path.realpath(savepath)
    print(_("Inserting {}").format(os.path.join(folder_path, frame_name)))
    # Shift every frame after the insertion point up by one
    img_list = batch_rename_reversed(folder_path, img_index + 1)
    return img_list, img_index

def testDevice(source):
    """Return True when OpenCV can open video device `source`, else False.

    Prints a warning and returns False when the device cannot be opened.
    """
    capture = cv2.VideoCapture(source)
    if capture is None or not capture.isOpened():
        print(_("Warning: unable to open video source: {}").format(source))
        return False
    # Release immediately: this is only a probe
    capture.release()
    return True


def signal_handler(sig, frame):
    """SIGINT handler: flag Ctrl-C so the main loop can exit cleanly."""
    global ctrlc_pressed
    ctrlc_pressed = True


def parse_export_options(options:str, vflip:int=0, hflip:int=0):
    """Append ffmpeg flip filters to the filter chain when flips are active.

    Args:
        options: comma-separated ffmpeg -vf filter chain.
        vflip/hflip: truthy to append the corresponding filter.
    """
    flips = [name for name, active in (('vflip', vflip), ('hflip', hflip)) if active]
    return ','.join([options] + flips) if flips else options


def export_animation(input_filename, export_filename):
    """Spawn ffmpeg to assemble the captured frames into an H.264 mp4.

    Reads the module-level input_options (format, framerate) and
    output_options (filter chain). Returns the Popen handle so the caller
    can poll/wait on it, or False when no ffmpeg path is configured.
    """
    input_format, framerate = input_options
    if project_settings['ffmpeg_path'] is None:
        return False
    command = [
        project_settings['ffmpeg_path'],
        '-v', 'quiet',
        '-y',
        '-f', input_format,
        '-r', framerate,
        '-i', input_filename,
        '-vf', output_options,
        # yuv420p keeps the file playable in Quicktime
        '-pix_fmt', 'yuv420p',
        '-vcodec', 'libx264',
        # crf 18 is considered visually lossless
        '-crf', '18',
        export_filename,
    ]
    return subprocess.Popen(command)


# Instantiate the camera backend selected in configuration
cam = get_cam_class(project_settings['cam_type'])

if cam is None:
    print(_("Wrong camera type in configuration."))
    # Leave the message visible for a moment before exiting
    time.sleep(1)
    sys.exit()


def main(args):
    """Main application loop: live preview, capture, playback and export.

    Polls the OpenCV window for key presses and dispatches to the camera
    backend (cam) and the frame-management helpers. Relies on module-level
    state: img_list, savepath, cam, project_settings, input/output options.

    Args:
        args: command line arguments (currently unused).
    """
    global img_list

    playback = False
    first_playback = True
    playhead = 0
    loop_playback = True
    # Start on the most recently captured frame
    index = len(img_list)-1
    playhead = index

    if project_settings['apply_settings_on_startup']:
        cam.apply_setting()

    # Initial display: onionskin of the last captured frame
    cam.frame = get_onionskin_frame(savepath)
    cam.o_frame = cam.frame.copy()

    # Duration of the previous loop iteration; used to hold the playback fps
    loop_delta = 0
    while True:
        start = timer()
        if playback:
            # Onionskin is turned off during playback and restored afterwards
            if cam.onionskin:
                cam.onionskin = False
                cam.onionskin_was_on = True
            if first_playback:
                playhead = 0
                first_playback = False
            # Play all frames
            if playhead < len(img_list)-1:
                playhead, img = next_frame(playhead, loop_playback)
                cv2.imshow("StopiCV", img)
                # Calculate framerate according to loop execution time
                frame_interval = 1.0/project_settings['framerate']-loop_delta
                if frame_interval < 0:
                    frame_interval = 0
                time.sleep(frame_interval)
            else:
                # Playback finished: return to the current frame
                playhead = index
                img = update_image(img_list, index)
                first_playback = True
                playback = False
                # Restore onionskin
                if cam.onionskin_was_on:
                    cam.onionskin = True
            loop_playback = False

        if not playback:
            if cam.has_liveview:
                cam.capture_preview()
            cv2.imshow("StopiCV", cam.frame)

        # Keycodes are compared modulo 256 to ignore modifier bits
        k = cv2.waitKey(1)
        # Key l / kp 5
        if (k%256 == 108) or (k%256 == 53) or (k%256 == 181):
            print(_("Liveview only"))
            # Toggle liveview
            cam.liveview_only = not cam.liveview_only
            # set onionskin back on when liveview_only false
            if cam.liveview_only == False:
                cam.onionskin = True
            else:
                cam.onionskin = False
        # Key o / kp slash
        elif (k%256 == 111) or (k%256 == 47) or (k%256 == 175):
            print(_("Onionskin toggle"))
            # Toggle onionskin
            cam.onionskin = not cam.onionskin
            # liveview_only is always off when onionskin is true
            if cam.onionskin:
                cam.liveview_only = False
        # Key w / 7 - cycle wb
        elif (k%256 == 119) or (k%256 == 55) or (k%256 == 183):
            print(_("White balance mode"))
            cam.apply_setting(['white_balance_auto_preset'], 1)
        # Key x / 1 - cycle exposure
        elif (k%256 == 120) or (k%256 == 49) or (k%256 == 177):
            print(_("Exp. mode"))
            cam.apply_setting(['auto_exposure'], 1)
        # Key f / 3 - flip image
        elif (k%256 == 102) or (k%256 == 51) or (k%256 == 179):
            print(_("Flip image"))
            cam.flip_image()
        # Key up, kp 8
        elif (k%256 == 82) or (k%256 == 56) or (k%256 == 184):
            print(_("Last frame"))
            if len(img_list):
                if playback:
                    playback = False
                index, cam.frame = last_frame(index)
                cam.o_frame = cam.frame.copy()
        # Key down , kp 2
        elif (k%256 == 84) or (k%256 == 50) or (k%256 == 178):
            print(_("First frame"))
            if len(img_list):
                if playback:
                    playback = False
                index, cam.frame = first_frame(index)
                cam.o_frame = cam.frame.copy()
        # Key left, kp 4
        elif (k%256 == 81) or (k%256 == 52) or (k%256 == 180):
            print(_("Prev. frame"))
            # Display previous frame
            if len(img_list):
                if playback:
                    playback = False
                index, cam.frame = previous_frame(index)
                cam.o_frame = cam.frame.copy()
        # Key right, kp 6
        elif (k%256 == 83) or (k%256 == 54) or (k%256 == 182):
            print(_("Next frame"))
            # Display next frame
            if len(img_list):
                if playback:
                    playback = False
                index, cam.frame = next_frame(index)
                cam.o_frame = cam.frame.copy()
        # Key r / keypad 9 - reset wb,exp
        elif (k%256 == 114) or (k%256 == 57) or (k%256 == 185) :
            print(_("Reset camera settings"))
            cam.reset_picture_settings()
        # Key e / keypad * - export; file names come from module-level globals
        elif (k%256 == 101) or (k%256 == 42) or (k%256 == 170) :
            print(_("Export"))
            ffmpeg_process = export_animation(input_filename, export_filename)
        # Key Return, kp return
        elif (k%256 == 13) or (k%256 == 141) :
            print(_("Playback"))
            playhead = index
            loop_playback = True
            playback = not playback
        # Key remove frame - backspace, del, numpad_minus
        elif (k%256 == 8) or (k%256 == 45) or (k == 255) or (k%256 == 173) :
            # Remove frame
            print(_("Remove frame"))
            img_list, index, cam.frame = remove_frame(img_list, index)
            cam.o_frame = cam.frame.copy()
        # Focus +/- with a,z
        elif (k%256 == 97):
            cam.focus('+')
            print(_("+Lens pos: {}".format(cam.lenspos)))
        elif (k%256 == 122):
            cam.focus('-')
            print(_("-Lens pos: {}".format(cam.lenspos)))
        # Set anti-flicker mode with q
        elif (k%256 == 113):
            print(_("Anti-flicker mode"))
            cam.apply_setting(['anti_flicker'], True)
        # Exposure : key S or maj A
        elif (k%256 == 115) or (k%256 == 65):
            print(_("Inc. exposure"))
            cam.apply_setting(['exposure'], 1)
        # Exposure: key Maj Q
        elif (k%256 == 90):
            print(_("Dec. exposure"))
            cam.apply_setting(['exposure'], -1)
        # SPACE or numpad 0 pressed
        elif (k%256 == 32) or (k%256 == 48) or (k%256 == 176):
            print(_("Capture frame"))
            # ~ img_name = return_next_frame_number(get_last_frame(savepath))
            img_name = return_next_frame_number(img_list[index].split('.'))
            # Name already taken: shift later frames up to insert in place
            if img_name in img_list:
                insert_frame(img_list, img_name, index)
            else:
                img_name = return_next_frame_number(get_last_frame(savepath))
            img_path = os.path.join(savepath, img_name)
            capture_ok = cam.capture_frame(img_path)
            if capture_ok:
                print(_("File {} written.").format(img_path))
                # Special case when we've no frame yet
                if len(img_list) and (img_list[index] == '{letter}.-001.{ext}'.format(letter=project_letter, ext=project_settings['file_extension'])):
                    img_list[index] = img_name
                else:
                    index += 1
                cam.frame = get_onionskin_frame(savepath, index)
                cam.o_frame = cam.frame.copy()
            else:
                print(_("Error during capture. Try again."))
            # Display a message if capture was not successful
            # This happens when you try to take too much pictures in a short span of time with the DSLR.
            # With a long exposure, gphoto will sometimes throw a GP_ERROR_CAMERA_BUSY (-110) error.
            # In this case, the user should try to take the picture again and it should work, so the need
            # for a message in UI is not obvious, and can be confusing.
            # Disabled for now
            # ~ else:
                # ~ cam.frame = generate_text_image(_("Error during capture."),
                            # ~ project_settings['screen_w'], project_settings['screen_h']
                            # ~ )
                # ~ cam.o_frame = cam.frame.copy()

        # Quit app
        elif k%256 == 27:
            # ESC pressed
            print(_("Escape hit, exiting..."))
            break
        elif ctrlc_pressed:
            print(_("Ctrl-C hit, exiting..."))
            break
        elif cv2.getWindowProperty("StopiCV", cv2.WND_PROP_AUTOSIZE) == -1:
            print(_("Window was closed, exiting..."))
            # ~ pass
            break
        # REMOVE : Debug print keycode
        elif k==-1:  # no key pressed (waitKey returns -1), don't print it
            continue
        else:
            print(k) # else print its value
        end = timer()
        loop_delta = end - start

    # If an export was started, give ffmpeg a chance to finish before quitting
    if 'ffmpeg_process' in locals():
         if ffmpeg_process.poll() is None:
            print(_("Ffmpeg is still running.\n Waiting for task to complete."))
            msg = generate_text_image(_("Ffmpeg is still running.\n Waiting for task to complete."),
                                project_settings['screen_w'], project_settings['screen_h']
                                )
            cv2.imshow("StopiCV", msg)
            # Force window refresh
            cv2.pollKey()
            try:
                ffmpeg_process.wait(timeout=20)
            except:
                print(_("Terminating running process..."))
                ffmpeg_process.terminate()
    cam.close()
    cv2.destroyAllWindows()

# Fullscreen OpenCV window without toolbar, keeping the image aspect ratio
cv2.namedWindow("StopiCV", cv2.WINDOW_GUI_NORMAL)
cv2.setWindowProperty("StopiCV", cv2.WND_PROP_OPENGL, cv2.WINDOW_OPENGL)
cv2.setWindowProperty("StopiCV", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty("StopiCV", cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)

ctrlc_pressed = False
projects_folder = project_settings['projects_folder']
# Placeholder; get_session_folder() computes the real session letter
next_letter = 'Z'
img_list = []
savepath = get_session_folder()
onionskin = project_settings['onion_skin_onstartup']
blank_image = generate_text_image(_("No images yet! Start shooting..."), project_settings['screen_w'], project_settings['screen_h'])

# Session letter is the name of the session folder ('A', 'B', ...)
if len(savepath):
    project_letter = savepath.split(os.sep)[-1]
else:
    project_letter = 'A'
img_list = get_frames_list(savepath)
index = len(img_list)-1

# Export settings
input_filename = "{folder}{sep}{letter}.%04d.{ext}".format(folder=savepath, sep=os.sep, letter=project_letter, ext=project_settings['file_extension'])
input_options = ["image2", str(project_settings['framerate'])]
# Bug fix: the format string had no placeholder, so the session letter was
# silently discarded and every export got the same literal file name.
output_filename = "{filename}.mp4".format(filename=project_letter)
output_options = project_settings['export_options']

export_filename = os.path.join(savepath, output_filename)

if __name__ == '__main__':
    # Trap Ctrl-C so the main loop can exit and clean up (see signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    sys.exit(main(sys.argv[1:]))