diff --git a/config.toml b/config.toml
index a4bfa28..6acea19 100644
--- a/config.toml
+++ b/config.toml
@@ -1,6 +1,6 @@
 [DEFAULT]
 # Camera type - can be : showmewebcam, picam, webcam, dslr
-cam_type = 'dslr'
+cam_type = 'webcam'
 use_date_for_folder = false
 file_extension = 'jpg'
 jpg_quality = 88
@@ -15,8 +15,8 @@ ffmpeg_path = '/usr/bin/ffmpeg'
 v4l2-ctl_path = '/usr/bin/v4l2-ctl'
 export_options = 'scale=1920:-1,crop=1920:1080'
 [CAMERA]
-cam_w = 1920
-cam_h = 1080
+cam_w = 1280
+cam_h = 960
 vflip = 0
 hflip = 0
 auto_exposure = 1
diff --git a/frame_opencv.py b/frame_opencv.py
index 7337195..fd4491d 100644
--- a/frame_opencv.py
+++ b/frame_opencv.py
@@ -28,6 +28,7 @@ camera_settings = 0
 camera_status = []
 
 # l10n
+# TODO: Update fr translation
 LOCALE = os.getenv('LANG', 'en_EN')
 _ = gettext.translation('template', localedir='locales', languages=[LOCALE]).gettext
 
@@ -79,15 +80,27 @@ print(config_found_msg)
 class webcam():
     def __init__(self):
         self.camera_current_settings = {
-            'auto_exposure': dict(min=0, max=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
-            'white_balance_auto_preset': dict(min=0, max=9, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
-            'horizontal_flip': dict(min=0, max=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
-            'vertical_flip': dict(min=0, max=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
-            'video_bitrate': dict(min=25000000, max=25000000, default=camera_settings['video_bitrate'], value=camera_settings['video_bitrate']),
+            'auto_exposure': dict(min=1, max=3, step=2, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+            'white_balance_temperature': dict(min=0, max=10000, step=1000, default=4000, value=8000),
+            'white_balance_automatic' : dict(min=0, max=1, step=1, default=0, value=0), # Auto wb
+            'power_line_frequency': dict(min=0, max=2, step=1, default=2, value=2), # power_line_frequency
         }
+        self.cam_settings_map = {
+            'white_balance_auto_preset': 'white_balance_temperature',
+            'white_balance_automatic': 'white_balance_automatic',
+            'auto_exposure':'auto_exposure',
+            'anti_flicker' : 'power_line_frequency',
+            'lenspos' : 'sharpness',
+        }
         self.has_liveview = True
+        # UI frame
         self.frame = None
+        # Onionskin frame
+        self.o_frame = None
+        # Overlay frame
         self.overlay = None
+        # Original frame for saving
+        self.og_frame = None
         self.onionskin = project_settings['onion_skin_onstartup']
         self.onionskin_was_on = self.onionskin
         self.liveview_only = False
@@ -110,27 +123,28 @@ class webcam():
             return False
         self.cap.release()
         return True
-    
+
     def capture_preview(self):
         ret, overlay = self.cam.read()
         if not ret:
             print(_("Failed to grab frame."))
             return False
-        # Resize preview
-        frame = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
+        # Resize preview to fit screen
+        overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
         if self.liveview_only:
             # Don't mix it
-            self.frame = frame
-            # ~ return frame
+            self.frame = overlay
+            return True
        if self.onionskin:
             # Keep original pic in memory
             self.og_frame = overlay.copy()
             # calculate and apply alpha
             alpha = project_settings['onionskin_alpha_default']
             beta = (1.0 - alpha)
-            frame = cv2.addWeighted(frame, alpha, overlay, beta, 0)
-            # ~ self.frame = frame
-            # ~ return frame
+            self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
+            return True
+        self.frame = self.o_frame
+        return True
 
     def capture_frame(self, img_path):
         if project_settings['file_extension'] == 'jpg':
@@ -139,48 +153,62 @@ class webcam():
             capture_ok = cv2.imwrite(img_path, self.og_frame)
         return capture_ok
 
-    def increment_setting(self, setting:str):
-        if self.camera_current_settings[setting]['value']+1 in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
-            self.camera_current_settings[setting]['value'] += 1
+    def increment_setting(self, setting:str, value:int=-1):
+        # If value has default -1 value, increment setting
+        if value == -1:
+            if setting in self.camera_current_settings:
+                if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+                    self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
+                else:
+                    self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+        # Apply special cases
         else:
-            self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+            pass
 
     def build_v4l2_cmd(self, to_set:list=None):
-        self.cmd = '{v4l2-ctl_path} -d /dev/video0'.format(project_settings['v4l2-ctl_path'])
-        self.args = []
+        cmd = '{} -d /dev/video0'.format(project_settings['v4l2-ctl_path'])
+        args = []
+        value = -1
         for setting in self.camera_current_settings:
             if to_set is None:
                 # Apply defaults
                 self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['default']
-                self.cmd += ' -c {}={}'
-                self.args.append(setting)
-                self.args.append(self.camera_current_settings[setting]['value'])
+                cmd += ' -c {}={}'
+                args.append(setting)
+                args.append(self.camera_current_settings[setting]['value'])
             else:
                 # Increment settings in to_set
                 for setting in to_set:
+                    if type(setting) is tuple:
+                        value = setting[1]
+                        setting = setting[0]
+                    if setting in self.cam_settings_map:
+                        setting = self.cam_settings_map[setting]
                     if setting in self.camera_current_settings:
-                        self.increment_setting(setting)
-                        self.cmd += ' -c {}={}'
-                        self.args.append(setting)
-                        self.args.append(self.camera_current_settings[setting]['value'])
+                        self.increment_setting(setting, value)
+                        cmd += ' -c {}={}'
+                        args.append(setting)
+                        args.append(self.camera_current_settings[setting]['value'])
                     else:
                         print(_("Unknown setting!"))
                         break
-        return self.cmd, self.args
+        return cmd.format(*args)
 
-    def run_v4l2_ctl(self):
+    def run_v4l2_ctl(self, cmd):
         if project_settings['v4l2-ctl_path'] is None:
             return False
-        v4l2_ctl_process = subprocess.Popen(self.cmd.split(' '))
-        return ffmpeg_process
+        print(cmd)
+        v4l2_ctl_process = subprocess.Popen(cmd.split(' '))
+        return v4l2_ctl_process
 
-    def apply_setting(self, to_set:list=None):
-        self.cmd, self.args = self.build_v4l2_cmd(to_set)
-        run_v4l2_ctl(self.cmd)
+    def apply_setting(self, to_set:list=None, inc:bool=False):
+        cmd = self.build_v4l2_cmd(to_set)
+        self.run_v4l2_ctl(cmd)
         return self.camera_current_settings
 
     def flip_image(self):
-        self.apply_setting(['vertical_flip','horizontal_flip'])
+        self.frame = cv2.flip(self.frame, -1)
+        self.og_frame = cv2.flip(self.og_frame, -1)
 
     def focus(self, direction:str='-'):
         pass
@@ -196,24 +224,45 @@ class showmewebcam(webcam):
     def __init__(self):
         self.serialutils = __import__('serialutils')
         super().__init__()
+        self.camera_current_settings = {
+            'auto_exposure': dict(min=0, max=1, step=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+            'white_balance_auto_preset': dict(min=0, max=9, step=1, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
+            'horizontal_flip': dict(min=0, max=1, step=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
+            'vertical_flip': dict(min=0, max=1, step=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
+            'video_bitrate': dict(min=25000000, max=25000000, step=10000, default=camera_settings['video_bitrate'], value=camera_settings['video_bitrate']),
+        }
 
-    def apply_setting(self, to_set:list=None):
-        self.cmd, self.args = self.build_v4l2_cmd(to_set)
-        self.serialutils.send_serial_cmd(self.serialutils.find_cam_port(), self.cmd.format(*self.args))
+    def apply_setting(self, to_set:list=None, inc:bool=False):
+        cmd = self.build_v4l2_cmd(to_set)
+        self.serialutils.send_serial_cmd(self.serialutils.find_cam_port(), cmd)
         return self.camera_current_settings
 
+
 class picam():
     def __init__(self):
         self.camera_current_settings = {
-            'auto_exposure': dict(min=0, max=4, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
-            'white_balance_auto_preset': dict(min=0, max=7, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
-            'horizontal_flip': dict(min=0, max=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
-            'vertical_flip': dict(min=0, max=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
-            'anti_flicker': dict(min=0, max=2, default=1, value=1),
+            'auto_exposure': dict(min=0, max=4, step=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+            'white_balance_auto_preset': dict(min=0, max=7, step=1, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
+            'horizontal_flip': dict(min=0, max=1, step=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
+            'vertical_flip': dict(min=0, max=1, step=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
+            'anti_flicker': dict(min=0, max=2, step=1, default=1, value=1),
         }
+        # Map generic config name to specific picamera setting name
+        self.cam_settings_map = {
+            'white_balance_auto_preset': 'AwbMode',
+            'auto_exposure':'AeExposureMode',
+            'anti_flicker' : 'AeFlickerMode',
+            'lenspos' : 'LensPosition',
+        }
         self.has_liveview = True
+        # UI frame
         self.frame = None
+        # Onionskin frame
+        self.o_frame = None
+        # Overlay frame
         self.overlay = None
+        # Original frame for saving
+        self.og_frame = None
         self.onionskin = project_settings['onion_skin_onstartup']
         self.onionskin_was_on = self.onionskin
         self.liveview_only = False
@@ -251,29 +300,23 @@ class picam():
             # ~ 'FrameDurationLimits':(16666,83333,None)
             }
             self.cam.set_controls(self.camera_default_settings)
-            # Map generic config name to specific picamera setting name
-            self.cam_settings_map = {
-                'white_balance_auto_preset': 'AwbMode',
-                'auto_exposure':'AeExposureMode',
-                'anti_flicker' : 'AeFlickerMode',
-                'lenspos' : 'LensPosition',
-            }
         except:
             pass
 
     def test_device(self, source):
         pass
 
+    # Same as in webcam() class
     def capture_preview(self):
         ret, overlay = self.cam.read()
         if not ret:
             print(_("Failed to grab frame."))
             return False
-        # Resize preview
-        self.frame = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
+        # Resize preview to fit screen
+        overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
         if self.liveview_only:
             # Don't mix it
-            frame = self.frame
+            self.frame = overlay
             return True
         if self.onionskin:
             # Keep original pic in memory
             self.og_frame = overlay.copy()
@@ -281,10 +324,12 @@ class picam():
             # calculate and apply alpha
             alpha = project_settings['onionskin_alpha_default']
             beta = (1.0 - alpha)
-            frame = cv2.addWeighted(self.frame, alpha, overlay, beta, 0)
-            # ~ self.frame = frame
-            # ~ return frame
-
+            self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
+            return True
+        self.frame = self.o_frame
+        return True
+
+    # Same as in webcam() class
     def capture_frame(self, img_path):
         if project_settings['file_extension'] == 'jpg':
             capture_ok = cv2.imwrite(img_path, self.og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
@@ -293,28 +338,30 @@ class picam():
         return capture_ok
 
     def increment_setting(self, setting:str):
-        if self.camera_current_settings[setting]['value']+1 in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
-            self.camera_current_settings[setting]['value'] += 1
-        else:
-            self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
-        # Special cases
-        # Autoexposure
-        if setting == 'autoexposure' and self.camera_current_settings['autoexposure']['value'] == 4:
-            self.cam.set_controls({'AeEnable': 1})
-        else:
-            self.cam.set_controls({'AeEnable': 0})
-            self.cam.set_controls({"AeExposureMode": self.camera_current_settings['auto_exposure']['value']})
-        # Antiflicker
-        if setting == 'anti_flicker' and self.camera_current_settings['anti_flicker']['value'] == 0:
-            self.cam.set_controls({'AeFlickerMode': 0})
-        elif self.camera_current_settings['anti_flicker']['value'] == 1:
-            self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':8333})
-        else:
-            self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':10000})
+        if setting in self.camera_current_settings:
+            if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+                self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
+            else:
+                self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+            # Special cases
+            # Autoexposure
+            if setting == 'autoexposure' and self.camera_current_settings['autoexposure']['value'] == 4:
+                self.cam.set_controls({'AeEnable': 1})
+            else:
+                self.cam.set_controls({'AeEnable': 0})
+                self.cam.set_controls({"AeExposureMode": self.camera_current_settings['auto_exposure']['value']})
+            # Antiflicker
+            if setting == 'anti_flicker' and self.camera_current_settings['anti_flicker']['value'] == 0:
+                self.cam.set_controls({'AeFlickerMode': 0})
+            elif self.camera_current_settings['anti_flicker']['value'] == 1:
+                self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':8333})
+            else:
+                self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':10000})
 
-    def apply_setting(self, to_set:list=None):
+    def apply_setting(self, to_set:list=None, inc:bool=False):
         for setting in to_set:
-            self.increment_setting(setting)
+            if inc:
+                self.increment_setting(setting)
             self.cam.set_controls({self.cam_settings_map[setting] : self.camera_current_settings[setting]['value']})
 
     def flip_image(self):
@@ -361,7 +408,14 @@ class dslr():
             'anti_flicker' : 'imagesize',
             'lenspos' : 'shutterspeed',
         }
+        # UI frame
         self.frame = None
+        # Onionskin frame
+        self.o_frame = None
+        # Overlay frame
+        self.overlay = None
+        # Original frame for saving
+        self.og_frame = None
         self.has_liveview = False
         # TODO: check if DSLR has live feed and set accordingly
         # ~ self.onionskin = project_settings['onion_skin_onstartup']
@@ -438,9 +492,11 @@ class dslr():
             return False
         # ~ camera.exit()
         # Update frame
-        frame = cv2.imread(img_path)
+        self.frame = cv2.imread(img_path)
         # ~ frame = cv2.resize(frame, (project_settings['screen_w'], project_settings['screen_h']))
-        return capture_ok
+        # ~ if capture_ok is None:
+            # ~ return True
+        return True
 
     def apply_gphoto_setting(self, setting:str):
         # Get corresponding setting name if possible
@@ -476,8 +532,8 @@ class dslr():
 
     def increment_setting(self, setting:str):
         if setting in self.camera_current_settings:
-            if self.camera_current_settings[setting]['value']+1 in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
-                self.camera_current_settings[setting]['value'] += 1
+            if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+                self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
             else:
                 self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
@@ -520,8 +576,11 @@ class dslr():
 
     def close(self):
         self.cam.exit()
 
+
 def get_cam_class(camera_type):
-    if camera_type == 'showmewebcam':
+    if camera_type == 'webcam':
+        return webcam()
+    elif camera_type == 'showmewebcam':
         return showmewebcam()
     elif camera_type == 'picam':
         return picam()
@@ -903,7 +962,8 @@ def main(args):
 
     cam.apply_setting()
 
-    frame = get_onionskin_frame(savepath)
+    cam.frame = get_onionskin_frame(savepath)
+    cam.o_frame = cam.frame.copy()
 
     loop_delta = 0
     while True:
@@ -936,8 +996,8 @@ def main(args):
 
         if not playback:
            if cam.has_liveview:
-                frame = cam.capture_preview()
-                cv2.imshow("StopiCV", frame)
+                cam.capture_preview()
+                cv2.imshow("StopiCV", cam.frame)
 
         k = cv2.waitKey(1)
         # Key l / kp 5
@@ -971,6 +1031,7 @@ def main(args):
                 if playback:
                     playback = False
                 index, frame = last_frame(index)
+                cam.o_frame = cam.frame.copy()
         # Key down , kp 2
         elif (k%256 == 84) or (k%256 == 50) or (k%256 == 178):
             print(_("First frame"))
@@ -978,6 +1039,7 @@ def main(args):
                 if playback:
                     playback = False
                 index, frame = first_frame(index)
+                cam.o_frame = cam.frame.copy()
         # Key left, kp 4
         elif (k%256 == 81) or (k%256 == 52) or (k%256 == 180):
             print(_("Prev. frame"))
frame")) @@ -985,7 +1047,8 @@ def main(args): if len(img_list): if playback: playback = False - index, frame = previous_frame(index) + index, cam.frame = previous_frame(index) + cam.o_frame = cam.frame.copy() # Key right, kp 6 elif (k%256 == 83) or (k%256 == 54) or (k%256 == 182): print(_("Next frame")) @@ -993,7 +1056,8 @@ def main(args): if len(img_list): if playback: playback = False - index, frame = next_frame(index) + index, cam.frame = next_frame(index) + cam.o_frame = cam.frame.copy() # Key r / keypad 9 - reset wb,exp elif (k%256 == 114) or (k%256 == 57) or (k%256 == 185) : print(_("Reset camera settings")) @@ -1012,7 +1076,8 @@ def main(args): elif (k%256 == 8) or (k%256 == 45) or (k == 255) or (k%256 == 173) : # Remove frame print(_("Remove frame")) - img_list, index, frame = remove_frame(img_list, index) + img_list, index, cam.frame = remove_frame(img_list, index) + cam.o_frame = cam.frame.copy() # Focus +/- with a,z elif (k%256 == 97): cam.focus('+') @@ -1036,14 +1101,15 @@ def main(args): img_list[index] = img_name else: index += 1 - print(index) # Display a message if capture was not successfull - if capture_ok is not None: - frame = generate_text_image(_("Error during capture."), + if not capture_ok: + cam.frame = generate_text_image(_("Error during capture."), project_settings['screen_w'], project_settings['screen_h'] ) + cam.o_frame = cam.frame.copy() else: - frame = get_onionskin_frame(savepath) + cam.frame = get_onionskin_frame(savepath) + cam.o_frame = cam.frame.copy() # ~ frame = cam.frame # Quit app elif k%256 == 27: diff --git a/readme.md b/readme.md index b7e546c..b16f246 100644 --- a/readme.md +++ b/readme.md @@ -8,7 +8,7 @@ Seconde version du script python [stopi](https://git.arthus.net/arthus/stopi) destiné à être utilisé avec une télécommande [picote](/arthus/picote/src/branch/picamera). - + Cette version utilise opencv et libcamera.Elle fonctionne avec une webcam ou un module vidéo Picamera (v1,v2 ou v3). Encore une fois, l'objectif est de créer un logiciel simple et minimaliste dans son interface, dont les caractéristiques sont les suivantes : @@ -33,7 +33,7 @@ Dans un terminal : 1. Installer les dépendances suivantes : ``` # Avec une distribution basée sur Debian (Ubuntu, Mint...) - sudo apt install --no-install-recommends --no-install-suggests git ffmpeg python3-tk python3-pip python3-venv libtiff5-dev libtopenjp2 libopenjp2-7-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev python3-tk python3-dev libopenblas-dev libatlas-base-dev libhdf5-dev libhdf5-serial-dev libatlas-base-dev libjasper-dev libqtgui4 libqt4-test + sudo apt install --no-install-recommends --no-install-suggests git ffmpeg python3-pip python3-venv libtiff5-dev libtopenjp2 libopenjp2-7-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev python3-tk python3-dev libopenblas-dev libatlas-base-dev libhdf5-dev libhdf5-serial-dev libatlas-base-dev libjasper-dev libqtgui4 libqt4-test v4l-utils ``` - (Optionnel) Pour installer un environnement graphique minimal sur une [installation console](https://debian-facile.org/doc:install:installation-minimale) : `sudo apt install --no-install-recommends --no-install-suggests openbox xserver-xorg xinit pcmanfm gmrun lxterminal hsetroot unclutter plymouth plymouth-themes` 2. Cloner le dépôt dans le dossier de votre choix : `git clone https://git.arthus.net/arthus/stopi2.git`