Added relogin schedule

Fixes #47
2023-10-09 23:18:04 +02:00
parent 41a085c475
commit 53dae0aaaf
7 changed files with 50 additions and 54 deletions

View File

@@ -1,11 +1,12 @@
#!/usr/bin/env python3
import os
import kopf
import schedule
import time
import threading
from utils.utils import command_wrapper, unlock_bw
from utils.utils import command_wrapper, unlock_bw, sync_bw

@kopf.on.startup()
def bitwarden_signin(logger, **kwargs):
    if 'BW_HOST' in os.environ:
        try:
@@ -18,3 +19,29 @@ def bitwarden_signin(logger, **kwargs):
        logger.info("BW_HOST not set. Assuming SaaS installation")
    command_wrapper(logger, "login --apikey")
    unlock_bw(logger)
def run_continuously(interval=30):
    # Poll the schedule in a background thread until the returned Event is set.
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        @classmethod
        def run(cls):
            while not cease_continuous_run.is_set():
                schedule.run_pending()
                time.sleep(interval)

    continuous_thread = ScheduleThread()
    continuous_thread.start()
    return cease_continuous_run

@kopf.on.startup()
def load_schedules(logger, **kwargs):
    bitwarden_signin(logger)  # ensure a session exists before the first scheduled run
    logger.info("Loading schedules")
    bw_relogin_interval = float(os.environ.get('BW_RELOGIN_INTERVAL', 3600))
    bw_sync_interval = float(os.environ.get('BW_SYNC_INTERVAL', 900))
    schedule.every(bw_relogin_interval).seconds.do(bitwarden_signin, logger=logger)
    logger.info(f"relogin scheduled every {bw_relogin_interval} seconds")
    schedule.every(bw_sync_interval).seconds.do(sync_bw, logger=logger)
    logger.info(f"sync scheduled every {bw_sync_interval} seconds")
    stop_run_continuously = run_continuously()
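
The run_continuously helper above follows the "run in the background" recipe from the schedule library's documentation: jobs are registered on the module-level scheduler, and a worker thread polls run_pending() until the returned Event is set. A minimal standalone sketch of the same pattern, outside kopf (the heartbeat job is made up for illustration):

#!/usr/bin/env python3
import schedule
import threading
import time

def heartbeat():
    # Hypothetical job standing in for bitwarden_signin / sync_bw.
    print("still alive")

def run_continuously(interval=1):
    cease = threading.Event()

    class ScheduleThread(threading.Thread):
        @classmethod
        def run(cls):
            while not cease.is_set():
                schedule.run_pending()  # run any jobs that are due
                time.sleep(interval)

    ScheduleThread().start()
    return cease

schedule.every(2).seconds.do(heartbeat)
stop = run_continuously()
time.sleep(7)  # let a few runs fire
stop.set()     # stop the background thread

Note that load_schedules never sets the Event it gets back, so in the operator the scheduler thread simply runs for the lifetime of the pod.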

View File

@@ -2,10 +2,6 @@ import os
import json
import subprocess
import distutils.util
from datetime import datetime, timezone, timedelta
from dateutil import parser
from dateutil.tz import tzutc

tzinfos = {"CDT": tzutc()}

bw_sync_interval = float(os.environ.get('BW_SYNC_INTERVAL', 900))
@@ -30,42 +26,17 @@ def sync_bw(logger, force=False):
        _sync(logger)
        return

    last_sync = last_sync_bw(logger)
    now = datetime.now(tzutc())
    sync_interval = timedelta(seconds=bw_sync_interval)
    bw_is_out_of_sync_interval = (now - last_sync) >= sync_interval
    global_force_sync = bool(distutils.util.strtobool(
        os.environ.get('BW_FORCE_SYNC', "false")))
    needs_sync = force or global_force_sync or bw_is_out_of_sync_interval
    logger.debug(f"last_sync: {last_sync}")
    logger.debug(
        f"force: {force}, global_force_sync: {global_force_sync}, "
        f"bw_is_out_of_sync_interval: {bw_is_out_of_sync_interval}, needs_sync: {needs_sync}")
    if needs_sync:
        if global_force_sync:
            logger.debug("Running forced sync")
        else:
            logger.debug("Running scheduled sync")
        status_output = _sync(logger)
        logger.info(f"Sync successful {status_output}")
def last_sync_bw(logger):
    null_datetime_string = "0001-01-01T00:00:00.000Z"
    # returns: {"success":true,"data":{"object":"string","data":"2023-09-22T13:50:09.995Z"}}
    last_sync_output = command_wrapper(
        logger, command="sync --last", use_success=False)
    if not last_sync_output or not last_sync_output.get("success"):
        logger.error("Error getting last sync time.")
        return parser.parse(null_datetime_string, tzinfos=tzinfos)
    # if no sync has happened yet, the api returns null;
    # fall back to a date long in the past
    last_sync_string = last_sync_output.get(
        "data", {}).get("data") or null_datetime_string
    last_sync = parser.parse(last_sync_string, tzinfos=tzinfos)
    return last_sync
def unlock_bw(logger):
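
One caveat on the BW_FORCE_SYNC handling above: distutils.util.strtobool still works here, but distutils is deprecated (PEP 632) and removed from the standard library in Python 3.12, so this parsing will eventually need a replacement. A possible drop-in sketch with the same truthy/falsy vocabulary (env_flag is a hypothetical helper name, not part of this repo):

import os

def env_flag(name, default="false"):
    # Accepts the same values as distutils.util.strtobool did.
    value = os.environ.get(name, default).strip().lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")

global_force_sync = env_flag("BW_FORCE_SYNC")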