Merge pull request #48 from Lerentis/Lerentis/issue47

commit 94bc6b10b1
Tobias Trabelsi, 2023-10-09 23:24:26 +02:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
7 changed files with 50 additions and 54 deletions


@@ -4,7 +4,7 @@ LABEL org.opencontainers.image.source=https://github.com/Lerentis/bitwarden-crd-
 LABEL org.opencontainers.image.description="Kubernetes Operator to create k8s secrets from bitwarden"
 LABEL org.opencontainers.image.licenses=MIT
-ARG PYTHON_VERSION=3.11.5-r0
+ARG PYTHON_VERSION=3.11.6-r0
 ARG PIP_VERSION=23.1.2-r0
 ARG GCOMPAT_VERSION=1.1.0-r1
 ARG LIBCRYPTO_VERSION=3.1.2-r0


@@ -173,13 +173,6 @@ please note that the rendering engine for this template is jinja2, with an addit
 ## Configurations parameters
-The operator uses the bitwarden cli in the background and does not communicate to the api directly. The cli mirrors the credential store locally but doesn't sync it on every get request. Instead it will sync each secret every 15 minutes (900 seconds). You can adjust the interval by setting `BW_SYNC_INTERVAL` in the values. If you're secrets update very very frequently, you can force the operator to do a sync before each get by setting `BW_FORCE_SYNC="true"`. You might run into rate limits if you do this too frequent.
+The operator uses the bitwarden cli in the background and does not communicate to the api directly. The cli mirrors the credential store locally but doesn't sync it on every get request. Instead it will sync each secret every 15 minutes (900 seconds). You can adjust the interval by setting `BW_SYNC_INTERVAL` in the values. If your secrets update very very frequently, you can force the operator to do a sync before each get by setting `BW_FORCE_SYNC="true"`. You might run into rate limits if you do this too frequent.
+Additionally the bitwarden cli session may expire at some time. In order to create a new session, the login command is triggered from time to time. In what interval exactly can be configured with the env `BW_RELOGIN_INTERVAL` which defaults to 3600s.
-## Short Term Roadmap
-- [ ] support more types
-- [x] offer option to use a existing secret in helm chart
-- [x] host chart on gh pages
-- [x] write release pipeline
-- [x] maybe extend spec to offer modification of keys as well
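
The settings described in the README paragraph above are plain environment variables. A minimal sketch of reading them with the defaults stated there (900 s sync interval, 3600 s relogin interval, force sync off); the helper name is illustrative and not taken from the operator's source:

# Purely illustrative: read the documented settings with their README defaults.
import os
from distutils.util import strtobool

def read_bw_settings():
    return {
        "sync_interval": float(os.environ.get("BW_SYNC_INTERVAL", 900)),        # seconds
        "relogin_interval": float(os.environ.get("BW_RELOGIN_INTERVAL", 3600)),  # seconds
        "force_sync": bool(strtobool(os.environ.get("BW_FORCE_SYNC", "false"))),
    }

print(read_bw_settings())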


@@ -4,9 +4,9 @@ description: Deploy the Bitwarden CRD Operator
 type: application
-version: "v0.8.0"
-appVersion: "0.7.0"
+version: "v0.9.0"
+appVersion: "0.8.0"
 keywords:
   - operator
@@ -96,9 +96,11 @@ annotations:
   artifacthub.io/containsSecurityUpdates: "false"
   artifacthub.io/changes: |
     - kind: changed
-      description: "Take care to sync with bitwarden before getting a secret, added BW_SYNC_INTERVAL and BW_FORCE_SYNC envs to control sync."
-    - kind: fixed
-      description: "Downgrade bitwarden cli due to segfault on newer versions"
+      description: "Unified scheduled none crd related operations (bw sync and login)"
+    - kind: added
+      description: "Added relogin interval which can be finetuned with env `BW_RELOGIN_INTERVAL`. defaults to 3600 seconds"
+    - kind: chanced
+      description: "Updated python to 3.11.6-r0"
   artifacthub.io/images: |
     - name: bitwarden-crd-operator
-      image: ghcr.io/lerentis/bitwarden-crd-operator:0.7.0
+      image: ghcr.io/lerentis/bitwarden-crd-operator:0.8.0


@@ -27,6 +27,8 @@ fullnameOverride: ""
 #    value: "define_it"
 #  - name: BW_PASSWORD
 #    value: "define_id"
+## - name: BW_RELOGIN_INTERVAL
+##   value: "3600"
 externalConfigSecret:
   enabled: false


@@ -1,3 +1,4 @@
 kopf==1.36.2
 kubernetes==26.1.0
 Jinja2==3.1.2
+schedule==1.2.1
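
The newly pinned `schedule` package is the job registry that the operator code further below builds on. A minimal sketch of its basic API, assuming the library's documented interface (the job itself is illustrative):

# Minimal sketch of the schedule API the operator relies on; the job is illustrative.
import time
import schedule

def job():
    print("tick")

schedule.every(2).seconds.do(job)   # register a recurring job

for _ in range(5):                  # drive the scheduler for a few iterations
    schedule.run_pending()          # run any jobs whose interval has elapsed
    time.sleep(1)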


@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
 import os
 import kopf
+import schedule
+import time
+import threading
-from utils.utils import command_wrapper, unlock_bw
+from utils.utils import command_wrapper, unlock_bw, sync_bw
-@kopf.on.startup()
 def bitwarden_signin(logger, **kwargs):
     if 'BW_HOST' in os.environ:
         try:
@@ -18,3 +19,29 @@ def bitwarden_signin(logger, **kwargs):
         logger.info("BW_HOST not set. Assuming SaaS installation")
         command_wrapper(logger, "login --apikey")
     unlock_bw(logger)
+
+
+def run_continuously(interval=30):
+    cease_continuous_run = threading.Event()
+
+    class ScheduleThread(threading.Thread):
+        @classmethod
+        def run(cls):
+            while not cease_continuous_run.is_set():
+                schedule.run_pending()
+                time.sleep(interval)
+
+    continuous_thread = ScheduleThread()
+    continuous_thread.start()
+    return cease_continuous_run
+
+
+@kopf.on.startup()
+def load_schedules(logger, **kwargs):
+    bitwarden_signin(logger)
+    logger.info("Loading schedules")
+    bw_relogin_interval = float(os.environ.get('BW_RELOGIN_INTERVAL', 3600))
+    bw_sync_interval = float(os.environ.get('BW_SYNC_INTERVAL', 900))
+    schedule.every(bw_relogin_interval).seconds.do(bitwarden_signin, logger=logger)
+    logger.info(f"relogin scheduled every {bw_relogin_interval} seconds")
+    schedule.every(bw_sync_interval).seconds.do(sync_bw, logger=logger)
+    logger.info(f"sync scheduled every {bw_relogin_interval} seconds")
+    stop_run_continuously = run_continuously()
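
The added `run_continuously` helper mirrors the background-runner recipe from the `schedule` documentation: a worker thread keeps calling `schedule.run_pending()` until the returned `threading.Event` is set. `load_schedules` only keeps that event in a local variable, but the same event is what would stop the worker on shutdown. A self-contained sketch of that stop mechanism in isolation (job and timings are illustrative, not from the operator):

# Illustrative: how the Event returned by a run_continuously-style helper stops the worker.
import threading
import time
import schedule

def run_continuously(interval=1):
    stop_event = threading.Event()

    def worker():
        while not stop_event.is_set():
            schedule.run_pending()
            time.sleep(interval)

    threading.Thread(target=worker, daemon=True).start()
    return stop_event

schedule.every(1).seconds.do(lambda: print("job ran"))
stop = run_continuously()
time.sleep(3)   # let the job fire a few times
stop.set()      # signal the worker thread to exit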


@@ -2,10 +2,6 @@ import os
 import json
 import subprocess
 import distutils
-from datetime import datetime, timezone, timedelta
-from dateutil import parser
-from dateutil.tz import tzutc
-
-tzinfos = {"CDT": tzutc()}
 bw_sync_interval = float(os.environ.get(
     'BW_SYNC_INTERVAL', 900))
@@ -30,42 +26,17 @@ def sync_bw(logger, force=False):
         _sync(logger)
         return
-    last_sync = last_sync_bw(logger)
-    now = datetime.now(tzutc())
-    sync_interval = timedelta(seconds=bw_sync_interval)
-    bw_is_out_of_sync_inverval = (now - last_sync) >= sync_interval
     global_force_sync = bool(distutils.util.strtobool(
         os.environ.get('BW_FORCE_SYNC', "false")))
-    needs_sync = force or global_force_sync or bw_is_out_of_sync_inverval
-    logger.debug(f"last_sync: {last_sync}")
-    logger.debug(
-        f"force: {force}, global_force_sync: {global_force_sync}, bw_is_out_of_sync_inverval: {bw_is_out_of_sync_inverval}, needs_sync: {needs_sync}")
-    if needs_sync:
+    if global_force_sync:
+        logger.debug("Running forced sync")
+        status_output = _sync(logger)
+        logger.info(f"Sync successful {status_output}")
+    else:
+        logger.debug("Running scheduled sync")
         status_output = _sync(logger)
         logger.info(f"Sync successful {status_output}")
-
-
-def last_sync_bw(logger):
-    null_datetime_string = "0001-01-01T00:00:00.000Z"
-    # retruns: {"success":true,"data":{"object":"string","data":"2023-09-22T13:50:09.995Z"}}
-    last_sync_output = command_wrapper(
-        logger, command="sync --last", use_success=False)
-    # if not last_sync_output:
-    #     return parser.parse(null_datetime_string, tzinfos=tzinfos)
-    if not last_sync_output or not last_sync_output.get("success"):
-        logger.error("Error getting last sync time.")
-        return parser.parse(null_datetime_string, tzinfos=tzinfos)
-    # in case no sync was done yet, null is returned from api
-    # use some long ago date...
-    last_sync_string = last_sync_output.get(
-        "data").get("data", null_datetime_string)
-    last_sync = parser.parse(last_sync_string, tzinfos=tzinfos)
-    return last_sync
 def unlock_bw(logger):
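
With the timestamp bookkeeping removed, `sync_bw` no longer queries the CLI for its last sync time: every call now performs a sync, `BW_FORCE_SYNC` only changes whether the run is logged as forced or scheduled, and the actual interval is enforced by the `schedule` jobs registered in `load_schedules`. A simplified, self-contained sketch of the resulting control flow (the `_sync` stub stands in for the operator's helper that shells out to `bw sync`; this is a sketch, not the file's exact content):

# Simplified sketch of sync_bw after this change (not the exact file content).
import logging
import os
from distutils.util import strtobool

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("bitwarden-crd-operator")

def _sync(logger):
    # Stand-in for the helper that shells out to `bw sync`.
    logger.debug("bw sync")
    return "ok"

def sync_bw(logger, force=False):
    if force:
        _sync(logger)   # caller explicitly requested a sync
        return
    global_force_sync = bool(strtobool(os.environ.get("BW_FORCE_SYNC", "false")))
    logger.debug("Running forced sync" if global_force_sync else "Running scheduled sync")
    status_output = _sync(logger)   # a sync now happens on every call either way
    logger.info(f"Sync successful {status_output}")

sync_bw(logger)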