Compare commits: picamera_s...gphoto (13 commits)

| SHA1 |
|---|
| 5fef8eed03 |
| 52d058047a |
| 77c17f11ec |
| 92059f7ffc |
| 3b8a3cd197 |
| 4cead89259 |
| e03d5e7872 |
| d254b6307b |
| ab586d29ca |
| 8bc52eae04 |
| c302f17087 |
| fb842cdb69 |
| d8cc388279 |
config.toml (13 lines changed)
@@ -1,6 +1,6 @@
 [DEFAULT]
-cam_is_picam = true
-cam_is_showmewebcam = false
+# Camera type - can be : showmewebcam, picam, webcam, dslr
+cam_type = 'webcam'
 use_date_for_folder = false
 file_extension = 'jpg'
 jpg_quality = 88
@@ -8,14 +8,15 @@ projects_folder = ''
 onion_skin_onstartup = true
 onionskin_alpha_default = 0.5
 fullscreen_bool = true
-screen_w = 1440
-screen_h = 900
+screen_w = 1920
+screen_h = 1080
 framerate = 16
 ffmpeg_path = '/usr/bin/ffmpeg'
+v4l2-ctl_path = '/usr/bin/v4l2-ctl'
 export_options = 'scale=1920:-1,crop=1920:1080'
 [CAMERA]
-cam_w = 1920
-cam_h = 1080
+cam_w = 1280
+cam_h = 960
 vflip = 0
 hflip = 0
 auto_exposure = 1
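The single `cam_type` string replaces the two `cam_is_*` booleans above. A minimal sketch, not taken from the repository, of how such a key can be read with `tomllib` and used to pick one of the camera backends that `frame_opencv.py` defines below; the file path and the fallback default are assumptions:

```python
# Illustrative sketch only: reading the new 'cam_type' key from config.toml.
import tomllib

DEFAULTS = {"cam_type": "webcam"}  # hypothetical subset of the script's defaults

with open("config.toml", "rb") as f:          # assumed path
    config = tomllib.load(f)

settings = {**DEFAULTS, **config.get("DEFAULT", {})}
# frame_opencv.py maps this string to a class: webcam, showmewebcam, picam or dslr
print("Selected camera backend:", settings["cam_type"])
```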
frame_opencv.py (794 lines changed)
@@ -2,6 +2,7 @@
 import cv2
 import gettext
 from itertools import count
+from importlib import import_module
 import os
 # Needed for utf-8 text
 from PIL import ImageFont, ImageDraw, Image
@@ -13,7 +14,6 @@ import time
 from timeit import default_timer as timer
 import tomllib
 import numpy as np
-import serialutils
 
 # Run from SSH
 if not os.getenv('DISPLAY'):
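Dropping the unconditional `import serialutils` in favour of `importlib.import_module` means each optional dependency (serialutils, picamera2, libcamera, gphoto2) is only imported when the matching backend is instantiated. A small self-contained sketch of that pattern; the helper name is made up:

```python
# Illustrative sketch of the lazy-import pattern used by the camera classes.
from importlib import import_module

def load_optional(name: str):
    # Returns the module, or None if the optional dependency is not installed.
    try:
        return import_module(name)
    except ImportError as err:
        print(f"Optional dependency '{name}' not available: {err}")
        return None

gp = load_optional("gphoto2")  # only needed for the dslr backend
```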
@@ -22,16 +22,21 @@ if not os.getenv('DISPLAY'):
 running_from_folder = os.path.realpath(__file__)
 alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
 
+index = 0
+playhead = 0
+playback = 0
+camera_settings = 0
+camera_status = []
 
 # l10n
+# TODO: Update fr translation
 LOCALE = os.getenv('LANG', 'en_EN')
 _ = gettext.translation('template', localedir='locales', languages=[LOCALE]).gettext
 
 # Config
 # defaults
 project_settings_defaults = {
-'cam_is_picam': True,
-'cam_is_showmewebcam': False,
+'cam_type': "webcam",
 'use_date_for_folder': False,
 'file_extension':'png',
 'jpg_quality':90,
@@ -43,6 +48,7 @@ project_settings_defaults = {
 'screen_h' : 1080,
 'framerate' : 16,
 'ffmpeg_path' : None,
+'v4l2-ctl_path' : None,
 'export_options' : 'scale=1920:-1,crop=1920:1080:0:102',
 }
 
@@ -72,54 +78,522 @@ for location in config_locations:
 config_found_msg = _("Found configuration file in {}").format(os.path.expanduser(location))
 print(config_found_msg)
 
-if project_settings['cam_is_showmewebcam']:
-camera_current_settings = {
-'auto_exposure': dict(min=0, max=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
-'white_balance_auto_preset': dict(min=0, max=9, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
-'horizontal_flip': dict(min=0, max=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
-'vertical_flip': dict(min=0, max=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
-'video_bitrate': dict(min=25000000, max=25000000, default=camera_settings['video_bitrate'], value=camera_settings['video_bitrate']),
+class webcam():
+def __init__(self):
+self.camera_current_settings = {
+'auto_exposure': dict(min=1, max=3, step=2, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+'white_balance_temperature': dict(min=0, max=10000, step=1000, default=4000, value=8000),
+'white_balance_automatic' : dict(min=0, max=1, step=1, default=0, value=0), # Auto wb
+'power_line_frequency': dict(min=0, max=2, step=1, default=2, value=2), # power_line_frequency
 }
-else: # cam is picam
-camera_current_settings = {
-'auto_exposure': dict(min=0, max=4, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
-'white_balance_auto_preset': dict(min=0, max=7, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
-'horizontal_flip': dict(min=0, max=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
-'vertical_flip': dict(min=0, max=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
-'anti_flicker': dict(min=0, max=2, default=1, value=1),
+self.cam_settings_map = {
+'white_balance_auto_preset': 'white_balance_temperature',
+'white_balance_automatic': 'white_balance_automatic',
+'auto_exposure':'auto_exposure',
+'anti_flicker' : 'power_line_frequency',
+'lenspos' : 'sharpness',
 }
+self.has_liveview = True
+# UI frame
+self.frame = None
+# Onionskin_frame
+self.o_frame = None
+# Overlay frame
+self.overlay = None
+# Original frame for saving
+self.og_frame = None
+self.onionskin = project_settings['onion_skin_onstartup']
+self.onionskin_was_on = self.onionskin
+self.liveview_only = False
+self.lenspos = None
+# Test first device
+if not self.test_device(0):
+print(_("No camera device found. Exiting..."))
+sys.exit()
+try:
+self.cam = cv2.VideoCapture(0)
+self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_settings['cam_w'])
+self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_settings['cam_h'])
+except:
+sys.exit()
+
+def test_device(self, source):
+self.cap = cv2.VideoCapture(source)
+if self.cap is None or not self.cap.isOpened():
+print(_("Warning: unable to open video source: {}").format(source))
+return False
+self.cap.release()
+return True
+
-def apply_cam_setting(cam_settings:dict, to_set:list=None):
+def capture_preview(self):
-cmd = 'v4l2-ctl -d /dev/video0'
+ret, overlay = self.cam.read()
+if not ret:
+print(_("Failed to grab frame."))
+return False
+# Resize preview to fit screen
+overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
+if self.liveview_only:
+# Don't mix it
+self.frame = overlay
+return True
+if self.onionskin:
+# Keep original pic in memory
+self.og_frame = overlay.copy()
+# calculate and apply alpha
+alpha = project_settings['onionskin_alpha_default']
+beta = (1.0 - alpha)
+self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
+return True
+self.frame = self.o_frame
+return True
+
+def capture_frame(self, img_path):
+if project_settings['file_extension'] == 'jpg':
+capture_ok = cv2.imwrite(img_path, self.og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
+else:
+capture_ok = cv2.imwrite(img_path, self.og_frame)
+return capture_ok
+
+def increment_setting(self, setting:str, value:int=-1):
+# If value has default -1 value, increment setting
+if value == -1:
+if setting in self.camera_current_settings:
+if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
+else:
+self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+# Apply special cases
+else:
+pass
+
+def build_v4l2_cmd(self, to_set:list=None):
+cmd = '{} -d /dev/video0'.format(project_settings['v4l2-ctl_path'])
 args = []
-for setting in cam_settings:
+value = -1
+for setting in self.camera_current_settings:
 if to_set is None:
 # Apply defaults
-cam_settings[setting]['value'] = cam_settings[setting]['default']
+self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['default']
 cmd += ' -c {}={}'
 args.append(setting)
-args.append(cam_settings[setting]['value'])
+args.append(self.camera_current_settings[setting]['value'])
 else:
 # Increment settings in to_set
 for setting in to_set:
-if setting in cam_settings:
+if type(setting) is tuple:
-if cam_settings[setting]['value']+1 in range(cam_settings[setting]['min'],cam_settings[setting]['max']+1):
+value = setting[1]
-cam_settings[setting]['value'] += 1
+setting = setting[0]
-else:
+if setting in self.cam_settings_map:
-cam_settings[setting]['value'] = cam_settings[setting]['min']
+setting = self.cam_settings_map[setting]
+if setting in self.camera_current_settings:
+self.increment_setting(setting, value)
 cmd += ' -c {}={}'
 args.append(setting)
-args.append(cam_settings[setting]['value'])
+args.append(self.camera_current_settings[setting]['value'])
 else:
 print(_("Unknown setting!"))
 break
-if project_settings['cam_is_showmewebcam']:
+return cmd.format(*args)
-serialutils.send_serial_cmd(serialutils.find_cam_port(), cmd.format(*args))
+def run_v4l2_ctl(self, cmd):
+if project_settings['v4l2-ctl_path'] is None:
+return False
+print(cmd)
+v4l2_ctl_process = subprocess.Popen(cmd.split(' '))
+return v4l2_ctl_process
+
+def apply_setting(self, to_set:list=None, inc:bool=False):
+cmd = self.build_v4l2_cmd(to_set)
+self.run_v4l2_ctl(cmd)
+return self.camera_current_settings
+
+def flip_image(self):
+self.frame = cv2.flip(self.frame, -1)
+self.og_frame = cv2.flip(self.og_frame, -1)
+
+def focus(self, direction:str='-'):
+pass
+
+def reset_picture_settings(self):
+self.apply_setting()
+
+def close(self):
+self.cam.release()
+
+
+class showmewebcam(webcam):
+def __init__(self):
+self.serialutils = import_module('serialutils')
+super().__init__()
+self.camera_current_settings = {
+'auto_exposure': dict(min=0, max=1, step=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+'white_balance_auto_preset': dict(min=0, max=9, step=1, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
+'horizontal_flip': dict(min=0, max=1, step=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
+'vertical_flip': dict(min=0, max=1, step=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
+'video_bitrate': dict(min=25000000, max=25000000, step=10000, default=camera_settings['video_bitrate'], value=camera_settings['video_bitrate']),
+}
+
+def apply_setting(self, to_set:list=None, inc:bool=False):
+self.cmd, self.args = self.build_v4l2_cmd(to_set)
+self.serialutils.send_serial_cmd(self.serialutils.find_cam_port(), self.cmd.format(*self.args))
+return self.camera_current_settings
+
+class picam():
+def __init__(self):
+self.camera_current_settings = {
+'auto_exposure': dict(min=0, max=4, step=1, default=camera_settings['auto_exposure'], value=camera_settings['auto_exposure']),
+'white_balance_auto_preset': dict(min=0, max=7, step=1, default=camera_settings['white_balance_auto_preset'], value=camera_settings['white_balance_auto_preset']),
+'horizontal_flip': dict(min=0, max=1, step=1, default=camera_settings['hflip'], value=camera_settings['hflip']),
+'vertical_flip': dict(min=0, max=1, step=1, default=camera_settings['vflip'], value=camera_settings['vflip']),
+'anti_flicker': dict(min=0, max=2, step=1, default=1, value=1),
+}
+# Map generic config name to specific picamera setting name
+self.cam_settings_map = {
+'white_balance_auto_preset': 'AwbMode',
+'auto_exposure':'AeExposureMode',
+'anti_flicker' : 'AeFlickerMode',
+'lenspos' : 'LensPosition',
+}
+self.has_liveview = True
+# UI frame
+self.frame = None
+# Onionskin_frame
+self.o_frame = None
+# Overlay frame
+self.overlay = None
+# Original frame for saving
+self.og_frame = None
+self.onionskin = project_settings['onion_skin_onstartup']
+self.onionskin_was_on = self.onionskin
+self.liveview_only = False
+# Pi Cam V3 setup
+self.Picamera2 = getattr(import_module('picamera2'), 'Picamera2')
+self.Transform = getattr(import_module('libcamera'), 'Transform')
+# Cam setup
+self.cam = self.Picamera2()
+self.picam_config = self.cam.create_video_configuration(main={"format": 'RGB888',"size": (camera_settings['cam_w'], camera_settings['cam_h'])})
+self.picam_config["transform"] = self.Transform(vflip=self.camera_current_settings['vertical_flip']['value'],hflip=self.camera_current_settings['horizontal_flip']['value'])
+
+self.cam.configure(self.picam_config)
+# Autofocus, get lens position and switch to manual mode
+# Set Af mode to Auto then Manual (0). Default is Continuous (2), Auto is 1
+self.cam.set_controls({'AfMode':1})
+self.cam.start()
+self.cam.autofocus_cycle()
+self.lenspos = self.cam.capture_metadata()['LensPosition']
+# Set focus, wb, exp to manual
+self.camera_default_settings = {'AfMode': 0,
+'AwbEnable': 1,
+'AwbMode': self.camera_current_settings['white_balance_auto_preset']['default'],
+'AeEnable': 1,
+'AeExposureMode': self.camera_current_settings['auto_exposure']['default'],
+# Enable flicker avoidance due to mains
+'AeFlickerMode': 1,
+# Mains 50hz = 10000, 60hz = 8333
+# ~ 'AeFlickerPeriod': 8333,
+'AeFlickerPeriod': 10000,
+# Format is (min, max, default) in ms
+# here: (60fps, 12fps, None)
+# ~ 'FrameDurationLimits':(16666,83333,None)
+}
+self.cam.set_controls(self.camera_default_settings)
+
+def test_device(self, source):
+pass
+
+# Same as in webcam() class
+def capture_preview(self):
+overlay = self.cam.capture_array("main")
+# Resize preview to fit screen
+overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
+if self.liveview_only:
+# Don't mix it
+self.frame = overlay
+return True
+if self.onionskin:
+# Keep original pic in memory
+self.og_frame = overlay.copy()
+# calculate and apply alpha
+alpha = project_settings['onionskin_alpha_default']
+beta = (1.0 - alpha)
+self.frame = cv2.addWeighted(self.o_frame, alpha, overlay, beta, 0)
+return True
+self.frame = self.o_frame
+return True
+
+# Same as in webcam() class
+def capture_frame(self, img_path):
+if project_settings['file_extension'] == 'jpg':
+capture_ok = cv2.imwrite(img_path, self.og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
 else:
-# TODO: v4l2 support
+capture_ok = cv2.imwrite(img_path, self.og_frame)
-print(_("Camera function not supported."))
+return capture_ok
-return cam_settings
+
+def increment_setting(self, setting:str):
+if setting in self.camera_current_settings:
+if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
+else:
+self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+# Special cases
+# Autoexposure
+if setting == 'autoexposure' and self.camera_current_settings['autoexposure']['value'] == 4:
+self.cam.set_controls({'AeEnable': 1})
+else:
+self.cam.set_controls({'AeEnable': 0})
+self.cam.set_controls({"AeExposureMode": self.camera_current_settings['auto_exposure']['value']})
+# Antiflicker
+if setting == 'anti_flicker' and self.camera_current_settings['anti_flicker']['value'] == 0:
+self.cam.set_controls({'AeFlickerMode': 0})
+elif self.camera_current_settings['anti_flicker']['value'] == 1:
+self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':8333})
+else:
+self.cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':10000})
+
+def apply_setting(self, to_set:list=None, inc:bool=False):
+if to_set is not None:
+for setting in to_set:
+if inc:
+self.increment_setting(setting)
+self.cam.set_controls({self.cam_settings_map[setting] : self.camera_current_settings[setting]['value']})
+
+def flip_image(self):
+self.cam.stop()
+self.picam_config["transform"] = self.Transform(vflip=self.camera_current_settings['vertical_flip']['value'],hflip=self.camera_current_settings['horizontal_flip']['value'])
+self.cam.configure(self.picam_config)
+self.cam.start()
+
+def focus(self, direction:str='-'):
+if direction == '+':
+self.lenspos += 0.2
+else:
+self.lenspos -= 0.2
+# Set AfMode to Manual
+self.cam.set_controls({'AfMode': 0, 'LensPosition': self.lenspos})
+print(_("-Lens pos: {}".format(self.lenspos)))
+
+def reset_picture_settings(self):
+for setting in self.camera_default_settings:
+self.cam.set_controls({setting : self.camera_default_settings[setting]})
+
+def close(self):
+self.cam.close()
+
+class dslr():
+def __init__(self):
+# ~ import gphoto2 as gp
+self.gp = import_module('gphoto2')
+self.camera_current_settings = {
+'capturemode' : dict(min=0, max=4, step=1, default=0, value=1), # 0: single,1: burst,2:Timer,3:2S Remote,4:Quick remote
+'imagesize' : dict(min=0, max=2, step=1, default=2, value=2), # 0:L, 1:M, 2: S (1936x1296)
+'imagequality' : dict(min=0, max=2, step=1, default=2, value=2), # 0 JPEG basic 1 JPEG normal 2 JPEG fine 3 raw 4 raw+jpg
+'whitebalance' : dict(min=0, max=7, step=1, default=2, value=1), # 0 Automatic 1 Daylight 2 Fluorescent 3 Tungsten 4 Flash 5 Cloudy 6 Shade 7 Preset
+'capturetarget' : dict(min=0, max=1, step=1, default=0, value=0), # Internal memory
+'iso' : dict(min=0, max=5, default=0, step=1, value=0), # 0:100, 5:3200
+'shutterspeed' : dict(min=0, max=51, step=1, default=0, value=20), # 0 : 1/4000, 51: 30s
+'manualfocusdrive' : dict(min=0, max=1, step=1, default=0, value=0), # Trigger autofocus # manualfocusdrive
+}
+# Map generic config name to specific picamera setting name
+self.cam_settings_map = {
+'white_balance_auto_preset': 'whitebalance',
+'auto_exposure':'iso',
+'anti_flicker' : 'imagesize',
+'lenspos' : 'shutterspeed',
+}
+# UI frame
+self.frame = None
+# Onionskin_frame
+self.o_frame = None
+# Overlay frame
+self.overlay = None
+# Original frame for saving
+self.og_frame = None
+self.has_liveview = False
+# TODO: check if DSLR has live feed and set accordingly
+# ~ self.onionskin = project_settings['onion_skin_onstartup']
+self.onionskin = False
+self.onionskin_was_on = self.onionskin
+self.liveview_only = False
+self.lenspos = None
+self.cam_busy = False
+self.cam = self.gp.check_result(self.gp.gp_camera_new())
+try:
+self.gp.check_result(self.gp.gp_camera_init(self.cam))
+# get configuration tree
+self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(self.cam))
+except:
+print(_("No camera found."))
+self.cam.exit()
+self.camera = None
+self.current_camera_config = None
+
+def test_device(self, source):
+pass
+
+def capture_preview(self):
+# TODO : check DSLR has preview/live feed
+pass
+
+def find_file_ext(self, gp_name:str, full_path:str):
+# TODO: use re to sub png with jpg ?
+# extract dir path
+dirname = os.path.dirname(full_path)
+# extract filename from path
+new_name = os.path.basename(full_path)
+# if the path doesn't contain file name, return camera's FS filename
+if not full_path.endswith(('.jpg', '.JPG', '.raw')):
+return gp_name
+suffix = gp_name.split('.')[-1]
+prefix = new_name.split('.')[:-1]
+prefix.insert(len(prefix), suffix)
+return os.path.join(dirname, '.'.join(prefix))
+
+def check_status_value(self, config, value, optimal_value=None):
+cur_check = self.gp.check_result(self.gp.gp_widget_get_child_by_name(config, value))
+cur_check_value = self.gp.check_result(self.gp.gp_widget_get_value(cur_check))
+if optimal_value is not None:
+cur_check_choice = self.gp.check_result(self.gp.gp_widget_get_choice(cur_check, optimal_value[value]))
+return [cur_check_value, cur_check_choice]
+else:
+return cur_check_value
+
+def capture_frame(self, img_path):
+if not self.cam_busy:
+# CHECK: Should we init and close dslr for each frame ?
+# Check battery level
+battery_level = int(self.check_status_value(self.camera_current_config, 'batterylevel')[:-1])
+if battery_level < 10:
+print("Battery level is too low, shutter disabled.")
+return False
+try:
+self.cam_busy = True
+file_path = self.cam.capture(self.gp.GP_CAPTURE_IMAGE)
+print('Camera file path: {0}/{1}'.format(file_path.folder, file_path.name))
+# We don't want to download a jpg or raw from the dslr and save it as a false *.png.
+img_path = self.find_file_ext(file_path.name, img_path)
+print('Copying image to', img_path)
+except Exception as e:
+print(e)
+try:
+camera_file = self.cam.file_get(
+file_path.folder,
+file_path.name,
+self.gp.GP_FILE_TYPE_NORMAL
+)
+except:
+print("Camera error. Check Battery and try restarting the camera.")
+return False
+try:
+capture_ok = camera_file.save(img_path)
+except:
+print('File access error.')
+return False
+# ~ camera.exit()
+self.cam_busy = False
+# Update frame
+cam.frame = cv2.imread(img_path)
+# ~ frame = cv2.resize(frame, (project_settings['screen_w'], project_settings['screen_h']))
+# ~ if capture_ok is None:
+# ~ return True
+return True
+else:
+pass
+
+def apply_gphoto_setting(self, setting:str):
+# Get corresponding setting name if possible
+if setting in self.cam_settings_map:
+setting = self.cam_settings_map[setting]
+# Try to apply setting
+if setting in self.camera_current_settings:
+print(setting)
+select_setting = self.camera_current_settings[setting]
+# find the $setting config item
+try:
+# Get widget with name $setting
+cur_setting = self.gp.check_result(self.gp.gp_widget_get_child_by_name(self.camera_current_config, setting))
+# Get a list of available choices
+choices = list(self.gp.check_result(self.gp.gp_widget_get_choices(cur_setting)))
+# Build dict with name/value equivalence
+choices_dict = {choices.index(i):i for i in list(choices)}
+# Increment mode : current value is increased or looped
+# ~ if inc:
+# Get current setting value
+# ~ new_value = gp.check_result(gp.gp_widget_get_value(cur_setting))
+# Check current value + 1 is in range
+# ~ if choices.index(new_value) in range(0, self.camera_current_settings[setting]['max']+1):
+# Apply or loop value accordingly
+# ~ pass
+# If new_value exists in list, apply
+if select_setting['value'] in choices_dict:
+cur_setting_choice = self.gp.check_result(self.gp.gp_widget_get_choice(cur_setting, select_setting['value']))
+# set config value
+self.gp.check_result(self.gp.gp_widget_set_value(cur_setting, cur_setting_choice))
+except:
+print("Configuration error while setting {} to {}".format(setting, select_setting))
+
+def increment_setting(self, setting:str):
+if setting in self.camera_current_settings:
+if self.camera_current_settings[setting]['value'] + self.camera_current_settings[setting]['step'] in range(self.camera_current_settings[setting]['min'],self.camera_current_settings[setting]['max']+1):
+self.camera_current_settings[setting]['value'] += self.camera_current_settings[setting]['step']
+else:
+self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['min']
+
+def apply_setting(self, to_set:list=None, inc:bool=False):
+self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(self.cam))
+# iterate over the settings dictionary
+if to_set is None:
+for setting in self.camera_current_settings:
+if inc:
+self.increment_setting(setting)
+self.apply_gphoto_setting(setting)
+else:
+# Get corresponding setting name if possible
+for setting in to_set:
+if setting in self.cam_settings_map:
+setting = self.cam_settings_map[setting]
+if inc:
+self.increment_setting(setting)
+self.apply_gphoto_setting(setting)
+# validate config
+status = self.gp.check_result(self.gp.gp_camera_set_config(self.cam, self.camera_current_config))
+# close camera
+# ~ self.cam.exit()
+return status
+
+def flip_image(self):
+self.frame = cv2.flip(self.frame, -1)
+
+def focus(self, direction:str='-'):
+self.apply_setting(['shutterspeed'], True)
+
+def reset_picture_settings(self):
+self.camera_current_config = self.gp.check_result(self.gp.gp_camera_get_config(self.cam))
+for setting in self.camera_current_settings:
+self.camera_current_settings[setting]['value'] = self.camera_current_settings[setting]['default']
+# TODO: use self.apply_setting() instead
+self.apply_gphoto_setting(setting)
+status = self.gp.check_result(self.gp.gp_camera_set_config(self.cam, self.camera_current_config))
+
+def close(self):
+self.cam.exit()
+
+def get_cam_class(camera_type):
+if camera_type == 'webcam':
+return webcam()
+elif camera_type == 'showmewebcam':
+return showmewebcam()
+elif camera_type == 'picam':
+return picam()
+elif camera_type == 'dslr':
+return dslr()
+elif camera_type == 'webcam':
+# ~ return webcam()
+pass
+else:
+return None
+
+
 def generate_text_image(text:str, screen_w, screen_h, bullets=False):
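With this refactor every backend exposes the same small interface (`capture_preview`, `capture_frame`, `apply_setting`, `flip_image`, `focus`, `close`), so the main loop only talks to whatever `get_cam_class()` returns. A hedged usage sketch, assuming the names defined above are in scope; the frame count and file names are made up:

```python
# Illustrative only: driving any backend through the shared interface.
cam = get_cam_class('webcam')            # or 'showmewebcam', 'picam', 'dslr'
if cam is None:
    raise SystemExit("Wrong camera type in configuration.")

cam.apply_setting()                      # push defaults via v4l2-ctl, serial or gphoto2
for n in range(3):                       # made-up frame count
    if cam.has_liveview:
        cam.capture_preview()            # refreshes cam.frame (with onion skin if enabled)
    cam.capture_frame(f"A.{n:04d}.jpg")  # hypothetical file names
cam.close()
```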
@@ -278,6 +752,19 @@ def get_frames_list(folder:str):
 return existing_animation_files
 
 
+def get_frame_by_idx(folder:str, index:int):
+# Refresh file list
+existing_animation_files = get_frames_list(folder)
+# Get last file
+# Filename pattern is A.0001.JPG
+if index and index in range(len(existing_animation_files)):
+frm = cv2.imread(os.path.join(folder, existing_animation_files[index]))
+frm = cv2.resize(frm, (project_settings['screen_w'], project_settings['screen_h']))
+return frm
+else:
+return generate_text_image(_("Image not found."), project_settings['screen_w'], project_settings['screen_h'])
+
+
 def get_last_frame(folder:str):
 # Refresh file list
 existing_animation_files = get_frames_list(folder)
@@ -286,13 +773,21 @@ def get_last_frame(folder:str):
 return existing_animation_files[-1].split('.')
 
 
-def get_onionskin_frame(folder:str, index=None):
+def get_before_last_frame(folder:str):
+# Refresh file list
+existing_animation_files = get_frames_list(folder)
+# Get last file
+# Filename pattern is A.0001.JPG
+return existing_animation_files[-2]
+
+
+def get_onionskin_frame(folder:str):
 prev_image = get_last_frame(folder)
 prev_image = '.'.join(prev_image)
 if os.path.exists( os.path.expanduser(os.path.join(savepath, prev_image))):
 frm = cv2.imread(os.path.join(savepath, prev_image))
 frm = cv2.resize(frm, (project_settings['screen_w'], project_settings['screen_h']))
-# Imge does not exist, load blank image
+# Img does not exist, load blank image
 else:
 frm = blank_image
 return frm
@@ -315,6 +810,7 @@ def update_image(img_list, img_index):
 img = blank_image
 return img
 
+
 def next_frame(img_index, loop=True):
 img_index = check_range(img_index+1, loop)
 return img_index, update_image(img_list, img_index)
@@ -351,7 +847,7 @@ def check_range(x, loop=True):
 return len(img_list)-1
 else:
 return 0
-elif x >= len(img_list)-1:
+elif x > len(img_list)-1:
 if loop:
 return 0
 else:
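The change from `>=` to `>` lets the playhead reach the final index before wrapping. A standalone rewrite of the wrap-around rule, an assumption based only on the fragments visible in this hunk:

```python
# Illustrative sketch of the index wrap-around behaviour (not the repo's code).
def wrap_index(x: int, length: int, loop: bool = True) -> int:
    if x < 0:
        return length - 1 if loop else 0
    if x > length - 1:
        return 0 if loop else length - 1
    return x

assert wrap_index(5, 5) == 0    # one past the end wraps to the first frame
assert wrap_index(4, 5) == 4    # the last frame is now reachable
assert wrap_index(-1, 5) == 4   # stepping back from the first frame wraps to the last
```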
@@ -442,69 +938,43 @@ def export_animation(input_filename, export_filename):
 # Fix for Quicktime
 '-pix_fmt', 'yuv420p',
 '-vcodec', 'libx264',
+# Visually lossless export
+'-crf', '18',
 export_filename,
 ])
 return ffmpeg_process
 
 
+cam = get_cam_class(project_settings['cam_type'])
+
+if cam is None:
+print(_("Wrong camera type in configuration."))
+time.sleep(1)
+sys.exit()
 
 
 def main(args):
+global img_list
 
-global onionskin, liveview_only, playback, loop_playback, playhead, index, img_list, first_playback, camera_current_settings
+playback = False
+first_playback = True
+playhead = 0
+loop_playback = True
+index = len(img_list)-1
+playhead = index
+
-if not project_settings['cam_is_picam']:
+cam.apply_setting()
-if not testDevice(0):
-print(_("No camera device found. Exiting..."))
-return 1
-cam = cv2.VideoCapture(0)
-cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_settings['cam_w'])
-cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_settings['cam_h'])
-else:
-# Pi Cam V3 setup
-from picamera2 import Picamera2
-from libcamera import Transform
-cam = Picamera2()
-picam_config = cam.create_video_configuration(main={"format": 'RGB888',"size": (camera_settings['cam_w'], camera_settings['cam_h'])})
-# ~ picam_config["transform"] = Transform(hflip=camera_settings['hflip'], vflip=camera_settings['vflip'])
-picam_config["transform"] = Transform(vflip=camera_current_settings['vertical_flip']['value'],hflip=camera_current_settings['horizontal_flip']['value'])
 
-cam.configure(picam_config)
+cam.frame = get_onionskin_frame(savepath)
-# Autofocus, get lens position and switch to manual mode
+cam.o_frame = cam.frame.copy()
-# Set Af mode to Auto then Manual (0). Default is Continuous (2), Auto is 1
-cam.set_controls({'AfMode':1})
-cam.start()
-cam.autofocus_cycle()
-cam_lenspos = cam.capture_metadata()['LensPosition']
-# Set focus, wb, exp to manual
-cam.set_controls({'AfMode': 0,
-'AwbEnable': 1,
-'AwbMode': camera_current_settings['white_balance_auto_preset']['default'],
-'AeEnable': 1,
-'AeExposureMode': camera_current_settings['auto_exposure']['default'],
-# Enable flicker avoidance due to mains
-'AeFlickerMode': 1,
-# Mains 50hz = 10000, 60hz = 8333
-# ~ 'AeFlickerPeriod': 8333,
-'AeFlickerPeriod': 10000,
-# Format is (min, max, default) in ms
-# here: (60fps, 12fps, None)
-# ~ 'FrameDurationLimits':(16666,83333,None)
-})
-# ~ cam.stop()
-
-frame = get_onionskin_frame(savepath, index)
-
-if project_settings['cam_is_showmewebcam']:
-# Apply config camera settings
-camera_current_settings = apply_cam_setting(camera_current_settings)
-time.sleep(.5)
 
 loop_delta = 0
 while True:
 start = timer()
 if playback:
-if onionskin:
+if cam.onionskin:
-onionskin = False
+cam.onionskin = False
-onionskin_was_on = True
+cam.onionskin_was_on = True
 if first_playback:
 playhead = 0
 first_playback = False
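The export now adds `-crf 18` to the libx264 encode for a visually lossless file. The full argument order of `export_animation()` is not visible in this hunk; the sketch below is an illustrative reconstruction that combines the options shown here with the input and export settings defined at the bottom of the file:

```python
# Illustrative reconstruction of the ffmpeg call built by export_animation().
import subprocess

ffmpeg_cmd = [
    '/usr/bin/ffmpeg',                       # project_settings['ffmpeg_path']
    '-f', 'image2', '-framerate', '16',      # input_options (assumed placement)
    '-i', 'A.%04d.jpg',                      # input_filename pattern
    '-vf', 'scale=1920:-1,crop=1920:1080',   # export_options (assumed placement)
    '-pix_fmt', 'yuv420p',                   # fix for Quicktime
    '-vcodec', 'libx264',
    '-crf', '18',                            # visually lossless export
    'A.mp4',                                 # output_filename
]
ffmpeg_process = subprocess.Popen(ffmpeg_cmd)
```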
@@ -523,84 +993,40 @@ def main(args):
 first_playback = True
 playback = False
 # Restore onionskin
-if 'onionskin_was_on' in locals():
+if cam.onionskin_was_on:
-onionskin = True
+cam.onionskin = True
 loop_playback = False
 
-if liveview_only:
+if not playback:
-# ~ onionskin = False
+if cam.has_liveview:
-if not project_settings['cam_is_picam']:
+cam.capture_preview()
-ret, overlay = cam.read()
+cv2.imshow("StopiCV", cam.frame)
-if not ret:
-print(_("Failed to grab frame."))
-break
-else:
-overlay = cam.capture_array("main")
-# Resize preview
-overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
-cv2.imshow("StopiCV", overlay)
-# ~ else:
-# ~ onionskin = True
-
-if onionskin:
-if not project_settings['cam_is_picam']:
-ret, overlay = cam.read()
-if not ret:
-print(_("Failed to grab frame."))
-break
-else:
-overlay = cam.capture_array("main")
-og_frame = overlay.copy()
-# Resize preview
-overlay = cv2.resize(overlay, (project_settings['screen_w'], project_settings['screen_h']))
-# Apply onionskin
-alpha = project_settings['onionskin_alpha_default']
-beta = (1.0 - alpha)
-overlay = cv2.addWeighted(frame, alpha, overlay, beta, 0)
-cv2.imshow("StopiCV", overlay)
-
-if not playback and not onionskin and not liveview_only:
-cv2.imshow("StopiCV", frame)
-
 k = cv2.waitKey(1)
 # Key l / kp 5
 if (k%256 == 108) or (k%256 == 53) or (k%256 == 181):
 print(_("Liveview only"))
 # Toggle liveview
-liveview_only = not liveview_only
+cam.liveview_only = not cam.liveview_only
-onionskin = not onionskin
+cam.onionskin = not cam.onionskin
 # Key o / kp slash
 elif (k%256 == 111) or (k%256 == 47) or (k%256 == 175):
 print(_("Onionskin toggle"))
 # Toggle onionskin
-onionskin = not onionskin
+cam.onionskin = not cam.onionskin
-liveview_only = False
+cam.liveview_only = False
 # Key w / 7 - cycle wb
 elif (k%256 == 119) or (k%256 == 55) or (k%256 == 183):
 print(_("White balance mode"))
-camera_current_settings = apply_cam_setting(camera_current_settings, ['white_balance_auto_preset'])
+cam.apply_setting(['white_balance_auto_preset'], True)
-if project_settings['cam_is_picam']:
-cam.set_controls({'AwbMode': camera_current_settings['white_balance_auto_preset']['value']})
 # Key x / 1 - cycle exposure
 elif (k%256 == 120) or (k%256 == 49) or (k%256 == 177):
 print(_("Exp. mode"))
-camera_current_settings = apply_cam_setting(camera_current_settings, ['auto_exposure'])
+cam.apply_setting(['auto_exposure'], True)
-if project_settings['cam_is_picam']:
-print(camera_current_settings['auto_exposure']['value'])
-if camera_current_settings['auto_exposure']['value'] == 4:
-cam.set_controls({'AeEnable': 1})
-else:
-cam.set_controls({'AeEnable': 0})
-cam.set_controls({"AeExposureMode": camera_current_settings['auto_exposure']['value']})
 # Key f / 3 - flip image
 elif (k%256 == 102) or (k%256 == 51) or (k%256 == 179):
 print(_("Flip image"))
-camera_current_settings = apply_cam_setting(camera_current_settings, ['vertical_flip','horizontal_flip'])
+cam.flip_image()
-if project_settings['cam_is_picam']:
-cam.stop()
-picam_config["transform"] = Transform(vflip=camera_current_settings['vertical_flip']['value'],hflip=camera_current_settings['horizontal_flip']['value'])
-cam.configure(picam_config)
-cam.start()
 # Key up, kp 8
 elif (k%256 == 82) or (k%256 == 56) or (k%256 == 184):
 print(_("Last frame"))
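Each action is matched against `cv2.waitKey(1) % 256` several times because the same command is bound to a letter key and to a keypad key (with and without NumLock, hence the +128 variants). A minimal standalone sketch of that pattern; the window contents and bindings are illustrative:

```python
# Illustrative sketch of the modulo-256 key matching used in the main loop.
import cv2
import numpy as np

cv2.imshow("keytest", np.zeros((90, 160, 3), dtype=np.uint8))
while True:
    k = cv2.waitKey(1)
    if k == -1:
        continue                      # no key pressed during this tick
    if k % 256 in (108, 53, 181):     # 'l', '5', keypad 5
        print("liveview toggle")
    elif k % 256 == 27:               # ESC quits
        break
cv2.destroyAllWindows()
```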
@@ -608,6 +1034,7 @@ def main(args):
 if playback:
 playback = False
 index, frame = last_frame(index)
+cam.o_frame = cam.frame.copy()
 # Key down , kp 2
 elif (k%256 == 84) or (k%256 == 50) or (k%256 == 178):
 print(_("First frame"))
@@ -615,6 +1042,7 @@ def main(args):
 if playback:
 playback = False
 index, frame = first_frame(index)
+cam.o_frame = cam.frame.copy()
 # Key left, kp 4
 elif (k%256 == 81) or (k%256 == 52) or (k%256 == 180):
 print(_("Prev. frame"))
@@ -622,7 +1050,8 @@ def main(args):
 if len(img_list):
 if playback:
 playback = False
-index, frame = previous_frame(index)
+index, cam.frame = previous_frame(index)
+cam.o_frame = cam.frame.copy()
 # Key right, kp 6
 elif (k%256 == 83) or (k%256 == 54) or (k%256 == 182):
 print(_("Next frame"))
@@ -630,23 +1059,12 @@ def main(args):
 if len(img_list):
 if playback:
 playback = False
-index, frame = next_frame(index)
+index, cam.frame = next_frame(index)
+cam.o_frame = cam.frame.copy()
 # Key r / keypad 9 - reset wb,exp
 elif (k%256 == 114) or (k%256 == 57) or (k%256 == 185) :
 print(_("Reset camera settings"))
-camera_current_settings = apply_cam_setting(camera_current_settings)
+cam.reset_picture_settings()
-if project_settings['cam_is_picam']:
-if camera_current_settings['auto_exposure']['default'] == 4:
-cam.set_controls({'AeEnable': 0})
-else:
-cam.set_controls({'AeEnable': 1})
-cam.set_controls({"AeExposureMode": camera_current_settings['auto_exposure']['default']})
-cam.set_controls({'AwbMode': camera_current_settings['white_balance_auto_preset']['default']})
-cam.stop()
-picam_config["transform"] = Transform(vflip=camera_current_settings['vertical_flip']['default'],hflip=camera_current_settings['horizontal_flip']['default'])
-cam.configure(picam_config)
-cam.start()
-
 # Key e / keypad *
 elif (k%256 == 101) or (k%256 == 42) or (k%256 == 170) :
 print(_("Export"))
@@ -661,49 +1079,41 @@ def main(args):
 elif (k%256 == 8) or (k%256 == 45) or (k == 255) or (k%256 == 173) :
 # Remove frame
 print(_("Remove frame"))
-img_list, index, frame = remove_frame(img_list, index)
+img_list, index, cam.frame = remove_frame(img_list, index)
-# TODO: replace keys with rotary encoder
+cam.o_frame = cam.frame.copy()
 # Focus +/- with a,z
-elif (k%256 == 97) and project_settings['cam_is_picam']:
+elif (k%256 == 97):
-cam_lenspos += 0.2
+cam.focus('+')
-# Set AfMode to Manual
+print(_("+Lens pos: {}".format(cam.lenspos)))
-cam.set_controls({'AfMode': 0, 'LensPosition': cam_lenspos})
+elif (k%256 == 122):
-print(_("+Lens pos: {}".format(cam_lenspos)))
+cam.focus('-')
-elif (k%256 == 122) and project_settings['cam_is_picam']:
+print(_("-Lens pos: {}".format(cam.lenspos)))
-cam_lenspos -= 0.2
-# Set AfMode to Manual
-cam.set_controls({'AfMode': 0, 'LensPosition': cam_lenspos})
-print(_("-Lens pos: {}".format(cam_lenspos)))
 # Set anti-flicker mode with q
-elif (k%256 == 113) and project_settings['cam_is_picam']:
+elif (k%256 == 113):
-# Set AfMode to Manual
+print(_("Anti-flicker mode"))
-camera_current_settings = apply_cam_setting(camera_current_settings, ['anti_flicker'])
+cam.apply_setting(['anti_flicker'], True)
-if camera_current_settings['anti_flicker']['value'] == 0:
-cam.set_controls({'AeFlickerMode': 0})
-elif camera_current_settings['anti_flicker']['value'] == 1:
-cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':8333})
-else:
-cam.set_controls({'AeFlickerMode': 1, 'AeFlickerPeriod':10000})
-print(camera_current_settings['anti_flicker']['value'])
-# ~ elif (k%256 == 115) and project_settings['cam_is_picam']:
-# ~ # Set AfMode to Manual
-# ~ cam.set_controls({'AeFlickerMode': 0, 'AeFlickerPeriod': 8333})
-# Take pic
 # SPACE or numpad 0 pressed
 elif (k%256 == 32) or (k%256 == 48) or (k%256 == 176):
 print(_("Capture frame"))
 img_name = return_next_frame_number(get_last_frame(savepath))
 img_path = os.path.join(savepath, img_name)
-if project_settings['file_extension'] == 'jpg':
+capture_ok = cam.capture_frame(img_path)
-cv2.imwrite(img_path, og_frame, [int(cv2.IMWRITE_JPEG_QUALITY), project_settings['jpg_quality']])
-else:
-cv2.imwrite(img_path, og_frame)
 print(_("File {} written.").format(img_path))
+# Special case when we've no frame yet
 if len(img_list) and (img_list[index] == '{letter}.-001.{ext}'.format(letter=project_letter, ext=project_settings['file_extension'])):
 img_list[index] = img_name
 else:
 index += 1
-frame = get_onionskin_frame(savepath, index)
+# Display a message if capture was not successfull
+if not capture_ok:
+cam.frame = generate_text_image(_("Error during capture."),
+project_settings['screen_w'], project_settings['screen_h']
+)
+cam.o_frame = cam.frame.copy()
+else:
+cam.frame = get_onionskin_frame(savepath)
+cam.o_frame = cam.frame.copy()
+# ~ frame = cam.frame
 # Quit app
 elif k%256 == 27:
 # ESC pressed
@@ -738,9 +1148,6 @@ def main(args):
 except:
 print(_("Terminating running process..."))
 ffmpeg_process.terminate()
-if not project_settings["cam_is_picam"]:
-cam.release()
-else:
 cam.close()
 cv2.destroyAllWindows()
 
@@ -756,10 +1163,6 @@ img_list = []
 savepath = get_session_folder()
 onionskin = project_settings['onion_skin_onstartup']
 liveview_only = False
-playback = False
-first_playback = True
-playhead = 0
-loop_playback = True
 blank_image = generate_text_image(_("No images yet! Start shooting..."), project_settings['screen_w'], project_settings['screen_h'])
 
 if len(savepath):
@@ -772,7 +1175,6 @@ index = len(img_list)-1
 # Export settings
 input_filename = "{folder}{sep}{letter}.%04d.{ext}".format(folder=savepath, sep=os.sep, letter=project_letter, ext=project_settings['file_extension'])
 input_options = ["image2", str(project_settings['framerate'])]
-# ~ output_filename = "{folder}{sep}{filename}.mp4".format(folder=projects_folder, sep=os.sep, filename=savepath.split(os.sep)[-1])
 output_filename = "{filename}.mp4".format(filename=project_letter)
 output_options = project_settings['export_options']
 
readme.md (118 lines changed)
@@ -1,28 +1,52 @@
 # Stopi2
 
-## libcamera branch
+## gphoto / DSLR branch
 
-**This is the branch that restores support for rpi-libcamera-compatible devices (Raspicam modules v1, v2 and v3).**
+**This is the branch that restores support for [gphoto](http://gphoto.org/doc/remote)-compatible devices.**
-**Using the [matching branch of the picote remote](/arthus/picote/src/branch/picamera), you can adjust the camera module's focus with a [rotary encoder](https://fr.wikipedia.org/wiki/Codeur_rotatif).**
 
 <a style="max-height: 300px;display: inline-block;" href="./stopi2/raw/branch/master/stopi_station.jpg"><img src="./stopi_station.jpg"/><a/>
 
 Second version of the [stopi](https://git.arthus.net/arthus/stopi) Python script, meant to be used with a [picote](/arthus/picote/src/branch/picamera) remote.
 
-This version uses opencv and libcamera. It works with a webcam or a Picamera video module (v1, v2 or v3).
+This version uses opencv, libcamera and gphoto.
+It works with a webcam, a Picamera video module (v1, v2 or v3) or a DSLR.
 
 Once again, the goal is to build a simple piece of software with a minimalist interface, with the following characteristics:
 
 * Full-screen display of the images with no interface: every function is driven by a few keyboard keys.
 * [Onion skinning](https://fr.wikipedia.org/wiki/Pelure_d'oignon#Sciences_et_techniques) between the last frame and the video feed.
-* A single configuration file to set the options (resolution, frames per second, vertical/horizontal mirroring, ...)
+* A single [configuration file](/arthus/stopi2/wiki/configuration) to set the options (resolution, frames per second, vertical/horizontal mirroring, ...)
 * Automatic camera configuration at startup (exposure and white balance)
+* Control of some camera settings from the remote/keyboard (white balance, image rotation, focus (picam V3), exposure...)
 * Animation preview
 * Video export with [ffmpeg](https://ffmpeg.org/)
 * Localized interface (English and French available for now.)
 
 ## Test bench
 
-This script has been tested with a V4L2-compatible webcam, a ["showmewebcam"](https://github.com/showmewebcam/showmewebcam) built from an rpi 0 and a v2 (8Mp) camera module, and a regular computer running [Debian](https://debian.org) and an [RPI 4B](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/) fitted with a [Picamera V3](https://www.raspberrypi.com/products/camera-module-3/) module.
+This script has been tested with a V4L2-compatible webcam, a ["showmewebcam"](https://github.com/showmewebcam/showmewebcam) built from an rpi 0 and a v2 (8Mp) camera module, and a regular computer running [Debian](https://debian.org), as well as an [RPI 4B](https://www.raspberrypi.com/products/raspberry-pi-4-model-b/) fitted with a [Picamera V3](https://www.raspberrypi.com/products/camera-module-3/) module.
 
+Here is a summary of the tests performed:
+
+| Machine \ Camera type | Webcam | [Showmewebcam](https://github.com/showmewebcam/showmewebcam) | RPI Camera module V1 (5MP) | [RPI Camera module V3](https://www.raspberrypi.com/products/camera-module-3/) (12MP) | [DSLR](http://gphoto.org/doc/remote) (Nikon D3000/D40x) |
+| --- | --- | --- | --- | --- | --- |
+| Raspberry Pi 3B+ (Debian 12) | ✓ | ✓ | ✓ | ✓ | ✓ |
+| Raspberry Pi 4B (Debian 12) | ✓ | ✓ | ✓ | ✓ | ✓ |
+| PC Linux (Debian, Manjaro) | ✓ | ✓ | N/A | N/A | ✓ |
+
+## Documentation
+
+The documentation is available [in the wiki](/arthus/stopi2/wiki/).
+
+## Roadmap
+
+Additional features are planned:
+
+* Update the FR translation
+* DSLR: use the camera's live view (if I can find a compatible model to develop against)
+
+## Contributions
+
 Contributions and bug reports are welcome!
 
 ## Installation
@ -33,18 +57,21 @@ Dans un terminal :
1. Install the following dependencies:
```
# On a Debian-based distribution (Ubuntu, Mint...)
sudo apt install --no-install-recommends --no-install-suggests git ffmpeg python3-pip python3-venv libtiff5-dev libopenjp2-7 libopenjp2-7-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev libharfbuzz-dev libfribidi-dev libxcb1-dev python3-tk python3-dev libopenblas-dev libatlas-base-dev libhdf5-dev libhdf5-serial-dev libatlas-base-dev libjasper-dev v4l-utils
```
- (Optional) To install a minimal graphical environment on top of a [console-only install](https://debian-facile.org/doc:install:installation-minimale): `sudo apt install --no-install-recommends --no-install-suggests openbox xserver-xorg xinit pcmanfm gmrun lxterminal hsetroot unclutter plymouth plymouth-themes`
2. Clone the repository into the folder of your choice: `git clone https://git.arthus.net/arthus/stopi2.git`
3. Enter the project directory: `cd stopi2`
4. Create a Python virtual environment (venv): `python -m venv ./`
- Update the packaging tools inside the venv: `MAKEFLAGS="-j$(nproc)" pip install -vvv --upgrade pip setuptools wheel`
- (Optional) When using a "raspicam", add the `--system-site-packages` flag so the venv can access the picamera2 module, and install the corresponding `python3-picamera2` library.
5. Activate the virtual environment with `source bin/activate`
6. Install the python dependencies (~150MB): `MAKEFLAGS="-j$(nproc)" pip install -r requirements.txt`
7. Make the script executable: `chmod +x stopi2.sh`
8. Run the script: `./stopi2.sh`

[See the wiki for more details](/arthus/stopi2/wiki/python_environnement)

## Key functions

Ideally the software is driven with a [picote](/arthus/picote) remote control, but it can also be operated from a keyboard/numeric keypad.
@ -68,75 +95,18 @@ L'idéal est d'utiliser une télécommande [picote](/arthus/picote) mais le logi
| Show only the video feed | ② | L key, or 3 on the numeric keypad |
| Rotate the video capture by 180° | Alt + ② | F key, or 5 on the numeric keypad |

If you use a [picote remote control with a rotary encoder (or potentiometer)](/arthus/picote/src/branch/picamera), the following functions are also available:

| Function | Button | Keyboard |
| --- | --- | --- |
| Focus - | ↶ | A key |
| Focus + | ↷ | Z key |
| Anti-flicker mode | Alt+↶ | Q key |
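For context, these key bindings are read in a full-screen OpenCV loop with `cv2.waitKey`; the sketch below is illustrative only (window name, bindings and variables are assumptions, not the project's actual code):

```
import numpy as np
import cv2

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for the live feed
rotate_180 = False
cv2.namedWindow('stopi2', cv2.WND_PROP_FULLSCREEN)

while True:
    shown = cv2.rotate(frame, cv2.ROTATE_180) if rotate_180 else frame
    cv2.imshow('stopi2', shown)
    key = cv2.waitKey(25) & 0xFF      # poll one key press, if any
    if key == ord('f'):               # toggle the 180° rotation of the capture
        rotate_180 = not rotate_180
    elif key == 27:                   # Esc: leave the loop
        break

cv2.destroyAllWindows()
```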
## Kiosk installation

To build an animation kiosk from a minimal Debian install ([see the wiki](/arthus/stopi2/wiki/kiosk)):

1. Install Debian as a console system with a minimal graphical environment, for example:

```sudo apt install --no-install-recommends --no-install-suggests openbox xserver-xorg xinit hsetroot unclutter```

2. Follow the [installation steps](#installation) above.

3. Enable automatic login of your user at boot:

```
sudo systemctl edit getty@tty1.service
# Add the following content to the file that is created:
[Service]
ExecStart=
ExecStart=-/sbin/agetty -o '-p -f -- \\u' --skip-login --nonewline --noissue --noclear --autologin arthus %I $TERM
```

4. Add this content to '~/.bash_login':

```
if [ -z "$DISPLAY" ] && [ "$XDG_VTNR" = 1 ]; then
  exec startx &>/dev/null
fi
```

5. Add this content to '~/.xinitrc':

```
#!/bin/sh

# /etc/X11/xinit/xinitrc
#
# global xinitrc file, used by all X sessions started by xinit (startx)

# invoke global X session script
. /etc/X11/Xsession

exec openbox-session
```

6. Add this content to '~/.config/openbox/autostart.sh':

```
#!/bin/env bash
# Change X keyboard mapping
setxkbmap fr
# Set background color
hsetroot -solid "#8393CC"
# Hide mouse after 0.2 seconds
unclutter -idle 0.2 &
# Start script
/home/$USER/stopi2.sh &
```

After rebooting, the graphical session should start automatically.

## 'Silent' boot

For a boot that shows no text before the graphical session ([see the wiki](/arthus/stopi2/wiki/demarrage_silencieux)), add the following lines to `/etc/default/grub`:

```
GRUB_HIDDEN_TIMEOUT=5
GRUB_HIDDEN_TIMEOUT_QUIET=true
GRUB_GFXMODE=1920x1080
```

and change the line:

`GRUB_CMDLINE_LINUX_DEFAULT="quiet"`

to:

`GRUB_CMDLINE_LINUX_DEFAULT="quiet splash loglevel=3"`

Then configure plymouth: `sudo plymouth-set-default-theme`

Apply the changes with `sudo update-grub`.

## Raspberry Pi OS

With Raspberry Pi OS, simply add the following options to '/boot/firmware/cmdline.txt':

`loglevel=3 vt.global_cursor_default=0 logo.nologo consoleblank=3 quiet`
@ -3,3 +3,4 @@ numpy
pyserial
pillow
opencv-python
gphoto2
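Since the dependencies now include the gphoto2 Python bindings, a DSLR still capture typically looks like the sketch below (the local file name is an assumption, not the project's actual logic):

```
import gphoto2 as gp

# Illustrative capture with python-gphoto2: trigger a still,
# then download it from the camera's memory to the current folder.
camera = gp.Camera()
camera.init()
shot = camera.capture(gp.GP_CAPTURE_IMAGE)     # path of the file left on the camera
photo = camera.file_get(shot.folder, shot.name, gp.GP_FILE_TYPE_NORMAL)
photo.save('capture_0001.jpg')                 # assumed local file name
camera.exit()
```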
@ -9,6 +9,9 @@ def find_cam_port():
    return None

def send_serial_cmd(cam_port, cmd:str, clear=True):
    if cam_port is None:
        print("No com. port found.")
        return False
    con = serial.Serial(cam_port, baudrate=115200)
    if clear:
        append = b'\rclear\r'
@ -16,3 +19,11 @@ def send_serial_cmd(cam_port, cmd:str, clear=True):
        append = b'\r'
    con.write(str.encode(cmd) + append)
    con.close()

def main():
    cmd = "/usr/bin/v4l2-ctl --all"
    send_serial_cmd(find_cam_port(), cmd)

if __name__ == '__main__':
    import sys
    sys.exit(main())