commit 6ebb1dd45b

21  .github/workflows/filecheck.yml  vendored  Normal file

@@ -0,0 +1,21 @@
name: File Presence QC
on:
  pull_request:
    branches:
      - master
      - dev

jobs:
  build:
    name: Check Files
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: check existence
        run: |
          for pathname in blueprints/*; do test -e $pathname/readme.md || { echo "File missing: $pathname/readme.md"; error="true"; }; done
          for pathname in blueprints/*; do test -e $pathname/install.sh || { echo "File missing: $pathname/install.sh"; error="true"; }; done
          for pathname in blueprints/*; do test -e $pathname/update.sh || { echo "File missing: $pathname/update.sh"; error="true"; }; done
          for pathname in blueprints/*; do test -e $pathname/config.yml || { echo "File missing: $pathname/config.yml"; error="true"; }; done
          if [ "${error}" == "true" ]; then echo "Missing files detected" && exit 1; fi
        shell: bash

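The same presence check can be run locally before opening a PR; a minimal sketch, assuming it is run from the repository root and mirroring the four loops in the workflow above:

```
# Check every blueprint for the files the workflow requires.
for pathname in blueprints/*; do
  for required in readme.md install.sh update.sh config.yml; do
    test -e "$pathname/$required" || echo "File missing: $pathname/$required"
  done
done
```
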
5  .github/workflows/shellcheck.yml  vendored

@@ -1,5 +1,5 @@
 # This is a workflow to run shellcheck on all scripts
-name: Shellcheck Workflow
+name: Shell Linter QC
 
 # Controls when the action will run. Triggers the workflow on push or pull request
 # events but only for the master branch
@@ -10,7 +10,8 @@ on:
       - dev
 
 jobs:
-  shellcheck:
+  Shellcheck:
+    runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v1
      - name: Shell Linter

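The linter can also be run locally; a rough equivalent of the workflow, assuming shellcheck is installed and run from the repo root (where the new .shellcheckrc should be picked up automatically):

```
# Lint all blueprint scripts before pushing.
shellcheck blueprints/*/*.sh
```
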
23  .github/workflows/wiki.yml  vendored  Normal file

@@ -0,0 +1,23 @@
name: Publish docs via GitHub Pages
on:
  push:
    branches:
      - master

jobs:
  build:
    name: Deploy docs
    runs-on: ubuntu-latest
    steps:
      - name: Checkout master
        uses: actions/checkout@v1
        with:
          ref: 'master'
      - name: rename-readme
        run: |
          for pathname in blueprints/*/readme.md; do ! cp "$pathname" "docs/blueprints/$( basename "$( dirname "$pathname" )" ).md"; done
        shell: bash
      - name: Deploy docs
        uses: mhausenblas/mkdocs-deploy-gh-pages@master
        env:
          GITHUB_TOKEN: ${{ secrets.WIKI_GH_PAT }}

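For one concrete file, the nested basename/dirname expansion in the rename-readme step works like this (illustrative value):

```
pathname="blueprints/bitwarden/readme.md"   # example input matched by blueprints/*/readme.md
dirname "$pathname"                         # -> blueprints/bitwarden
basename "$(dirname "$pathname")"           # -> bitwarden
# so the copy target becomes docs/blueprints/bitwarden.md
```
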
2  .gitignore  vendored

@@ -1 +1 @@
-config.yml
+/config.yml

1  .shellcheckrc  Normal file

@@ -0,0 +1 @@
disable=SC2034

3  blueprints/bitwarden/config.yml  Normal file

@@ -0,0 +1,3 @@
blueprint:
  bitwarden:
    pkgs: sqlite3 nginx git sudo vim-tiny bash node npm python27-2.7.17_1 mariadb104-client

0  jails/bitwarden/includes/bitwarden.rc.conf → blueprints/bitwarden/includes/bitwarden.rc.conf  Executable file → Normal file

119  blueprints/bitwarden/install.sh  Executable file

@@ -0,0 +1,119 @@
#!/usr/local/bin/bash
# This file contains the install script for bitwarden

# Initialise defaults
JAIL_IP="jail_${1}_ip4_addr"
JAIL_IP="${!JAIL_IP%/*}"
HOST_NAME="jail_${1}_host_name"

DB_DATABASE="jail_${1}_db_database"
DB_DATABASE="${!DB_DATABASE:-$1}"

DB_USER="jail_${1}_db_user"
DB_USER="${!DB_USER:-$DB_DATABASE}"

# shellcheck disable=SC2154
INSTALL_TYPE="jail_${1}_db_type"
INSTALL_TYPE="${!INSTALL_TYPE:-mariadb}"

DB_JAIL="jail_${1}_db_jail"
# shellcheck disable=SC2154
DB_HOST="jail_${!DB_JAIL}_ip4_addr"
DB_HOST="${!DB_HOST%/*}:3306"

# shellcheck disable=SC2154
DB_PASSWORD="jail_${1}_db_password"
DB_STRING="mysql://${DB_USER}:${!DB_PASSWORD}@${DB_HOST}/${DB_DATABASE}"
# shellcheck disable=SC2154
ADMIN_TOKEN="jail_${1}_admin_token"

if [ -z "${!DB_PASSWORD}" ]; then
  echo "db_password can't be empty"
  exit 1
fi

if [ -z "${!DB_JAIL}" ]; then
  echo "db_jail can't be empty"
  exit 1
fi

if [ -z "${!JAIL_IP}" ]; then
  echo "ip4_addr can't be empty"
  exit 1
fi

if [ -z "${!ADMIN_TOKEN}" ]; then
  ADMIN_TOKEN=$(openssl rand -base64 16)
fi

# install latest rust version, pkg version is outdated and can't build bitwarden_rs
iocage exec "${1}" "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y"

# Install Bitwarden_rs
iocage exec "${1}" mkdir -p /usr/local/share/bitwarden/src
iocage exec "${1}" git clone https://github.com/dani-garcia/bitwarden_rs/ /usr/local/share/bitwarden/src
TAG=$(iocage exec "${1}" "git -C /usr/local/share/bitwarden/src tag --sort=v:refname | tail -n1")
iocage exec "${1}" "git -C /usr/local/share/bitwarden/src checkout ${TAG}"
#TODO replace with: cargo build --features mysql --release
if [ "${INSTALL_TYPE}" == "mariadb" ]; then
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo build --features mysql --release"
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo install diesel_cli --no-default-features --features mysql"
else
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo build --features sqlite --release"
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo install diesel_cli --no-default-features --features sqlite-bundled"
fi


iocage exec "${1}" cp -r /usr/local/share/bitwarden/src/target/release /usr/local/share/bitwarden/bin

# Download and install webvault
WEB_RELEASE_URL=$(curl -Ls -o /dev/null -w "%{url_effective}" https://github.com/dani-garcia/bw_web_builds/releases/latest)
WEB_TAG="${WEB_RELEASE_URL##*/}"
iocage exec "${1}" "fetch http://github.com/dani-garcia/bw_web_builds/releases/download/$WEB_TAG/bw_web_$WEB_TAG.tar.gz -o /usr/local/share/bitwarden"
iocage exec "${1}" "tar -xzvf /usr/local/share/bitwarden/bw_web_$WEB_TAG.tar.gz -C /usr/local/share/bitwarden/"
iocage exec "${1}" rm /usr/local/share/bitwarden/bw_web_"$WEB_TAG".tar.gz

# shellcheck disable=SC2154
if [ -f "/mnt/${global_dataset_config}/${1}/ssl/bitwarden-ssl.crt" ]; then
  echo "certificate exists... skipping cert generation"
else
  echo "No ssl certificate present, generating self-signed certificate"
  if [ ! -d "/mnt/${global_dataset_config}/${1}/ssl" ]; then
    echo "cert folder not existing... creating..."
    iocage exec "${1}" mkdir /config/ssl
  fi
  openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" -keyout /mnt/"${global_dataset_config}"/"${1}"/ssl/bitwarden-ssl.key -out /mnt/"${global_dataset_config}"/"${1}"/ssl/bitwarden-ssl.crt
fi

if [ -f "/mnt/${global_dataset_config}/${1}/bitwarden.log" ]; then
  echo "Reinstall of Bitwarden detected... using existing config and database"
elif [ "${INSTALL_TYPE}" == "mariadb" ]; then
  echo "No config detected, doing clean install, utilizing the Mariadb database ${DB_HOST}"
  iocage exec "${!DB_JAIL}" mysql -u root -e "CREATE DATABASE ${DB_DATABASE};"
  iocage exec "${!DB_JAIL}" mysql -u root -e "GRANT ALL ON ${DB_DATABASE}.* TO ${DB_USER}@${JAIL_IP} IDENTIFIED BY '${!DB_PASSWORD}';"
  iocage exec "${!DB_JAIL}" mysqladmin reload
else
  echo "No config detected, doing clean install."
fi

iocage exec "${1}" "pw user add bitwarden -c bitwarden -u 725 -d /nonexistent -s /usr/bin/nologin"
iocage exec "${1}" chown -R bitwarden:bitwarden /usr/local/share/bitwarden /config
iocage exec "${1}" mkdir /usr/local/etc/rc.d /usr/local/etc/rc.conf.d
# shellcheck disable=SC2154
cp "${SCRIPT_DIR}"/blueprints/bitwarden/includes/bitwarden.rc /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/bitwarden
cp "${SCRIPT_DIR}"/blueprints/bitwarden/includes/bitwarden.rc.conf /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden
echo 'export DATABASE_URL="'"${DB_STRING}"'"' >> /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden
echo 'export ADMIN_TOKEN="'"${!ADMIN_TOKEN}"'"' >> /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden

if [ "${!ADMIN_TOKEN}" == "NONE" ]; then
  echo "Admin_token set to NONE, disabling admin portal"
else
  echo "Admin_token set and admin portal enabled"
  iocage exec "${1}" echo "${DB_NAME} Admin Token is ${!ADMIN_TOKEN}" > /root/"${1}"_admin_token.txt
fi

iocage exec "${1}" chmod u+x /usr/local/etc/rc.d/bitwarden
iocage exec "${1}" sysrc "bitwarden_enable=YES"
iocage exec "${1}" service bitwarden restart
echo "Jail ${1} finished Bitwarden install."
echo "Admin Token is ${!ADMIN_TOKEN}"

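The `jail_${1}_*` lookups in this script rely on bash indirect expansion: it first builds the name of a configuration variable, then reads its value with `${!var}`. A stripped-down sketch of that pattern (the values here are made up; in the real scripts they come from the parsed config), which is presumably also why SC2034 is disabled in .shellcheckrc:

```
set -- bitwarden                      # $1 is the jail name, as install.sh expects
jail_bitwarden_db_password="secret"   # hypothetical value normally provided by the config parser
DB_PASSWORD="jail_${1}_db_password"   # DB_PASSWORD holds the NAME of the variable...
echo "${!DB_PASSWORD}"                # ...and indirect expansion prints its value: secret
```
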
5  jails/bitwarden/readme.md → blueprints/bitwarden/readme.md  Executable file → Normal file

@@ -1,4 +1,7 @@
-# Original README from the Bitwarden_rs github:
+# Bitwarden_RS
 
+
+## Original README from the Bitwarden_rs github:
+
 https://github.com/dani-garcia/bitwarden_rs
 

100  blueprints/bitwarden/update.sh  Executable file

@@ -0,0 +1,100 @@
#!/usr/local/bin/bash
# This file contains the update script for bitwarden
# Because Bitwarden is built from source or downloaded directly into its execution dir,
# the update script is very similar to the install script

# Initialise defaults
JAIL_IP="jail_${1}_ip4_addr"
JAIL_IP="${!JAIL_IP%/*}"
HOST_NAME="jail_${1}_host_name"
DB_DATABASE="jail_${1}_db_database"
DB_USER="jail_${1}_db_user"
# shellcheck disable=SC2154
INSTALL_TYPE="jail_${1}_db_type"
INSTALL_TYPE="${!INSTALL_TYPE:-mariadb}"
DB_JAIL="jail_${1}_db_jail"
DB_JAIL="${!DB_JAIL}"
# shellcheck disable=SC2154
DB_HOST="jail_${DB_JAIL}_ip4_addr"
DB_HOST="${!DB_HOST%/*}:3306"
# shellcheck disable=SC2154
DB_PASSWORD="jail_${1}_db_password"
DB_STRING="mysql://${!DB_USER}:${!DB_PASSWORD}@${DB_HOST}/${!DB_DATABASE}"
# shellcheck disable=SC2154
ADMIN_TOKEN="jail_${1}_admin_token"

if [ -z "${!DB_USER}" ]; then
  echo "db_user can't be empty"
  exit 1
fi

if [ -z "${!DB_DATABASE}" ]; then
  echo "db_database can't be empty"
  exit 1
fi

if [ -z "${!DB_PASSWORD}" ]; then
  echo "db_password can't be empty"
  exit 1
fi

if [ -z "${DB_JAIL}" ]; then
  echo "db_jail can't be empty"
  exit 1
fi

if [ -z "${!JAIL_IP}" ]; then
  echo "ip4_addr can't be empty"
  exit 1
fi

if [ -z "${!ADMIN_TOKEN}" ]; then
  ADMIN_TOKEN=$(openssl rand -base64 16)
fi

iocage exec "${1}" service bitwarden stop

# install latest rust version, pkg version is outdated and can't build bitwarden_rs
iocage exec "${1}" "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y"

# Install Bitwarden_rs
iocage exec "${1}" "git -C /usr/local/share/bitwarden/src fetch"
TAG=$(iocage exec "${1}" "git -C /usr/local/share/bitwarden/src tag --sort=v:refname | tail -n1")
iocage exec "${1}" "git -C /usr/local/share/bitwarden/src checkout ${TAG}"
#TODO replace with: cargo build --features mysql --release
if [ "${INSTALL_TYPE}" == "mariadb" ]; then
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo build --features mysql --release"
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo install diesel_cli --no-default-features --features mysql"
else
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo build --features sqlite --release"
  iocage exec "${1}" "cd /usr/local/share/bitwarden/src && $HOME/.cargo/bin/cargo install diesel_cli --no-default-features --features sqlite-bundled"
fi


iocage exec "${1}" cp -r /usr/local/share/bitwarden/src/target/release /usr/local/share/bitwarden/bin

# Download and install webvault
WEB_RELEASE_URL=$(curl -Ls -o /dev/null -w "%{url_effective}" https://github.com/dani-garcia/bw_web_builds/releases/latest)
WEB_TAG="${WEB_RELEASE_URL##*/}"
iocage exec "${1}" "fetch http://github.com/dani-garcia/bw_web_builds/releases/download/$WEB_TAG/bw_web_$WEB_TAG.tar.gz -o /usr/local/share/bitwarden"
iocage exec "${1}" "tar -xzvf /usr/local/share/bitwarden/bw_web_$WEB_TAG.tar.gz -C /usr/local/share/bitwarden/"
iocage exec "${1}" rm /usr/local/share/bitwarden/bw_web_"$WEB_TAG".tar.gz

iocage exec "${1}" chown -R bitwarden:bitwarden /usr/local/share/bitwarden /config
# shellcheck disable=SC2154
cp "${SCRIPT_DIR}"/blueprints/"${1}"/includes/bitwarden.rc /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/bitwarden
cp "${SCRIPT_DIR}"/blueprints/"${1}"/includes/bitwarden.rc.conf /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden
echo 'export DATABASE_URL="'"${DB_STRING}"'"' >> /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden
echo 'export ADMIN_TOKEN="'"${!ADMIN_TOKEN}"'"' >> /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.conf.d/bitwarden

if [ "${!ADMIN_TOKEN}" == "NONE" ]; then
  echo "Admin_token set to NONE, disabling admin portal"
else
  echo "Admin_token set and admin portal enabled"
  iocage exec "${1}" echo "${DB_NAME} Admin Token is ${!ADMIN_TOKEN}" > /root/"${1}"_admin_token.txt
fi


iocage exec "${1}" chmod u+x /usr/local/etc/rc.d/bitwarden
iocage exec "${1}" service bitwarden restart
echo "Jail ${1} finished Bitwarden update."
echo "Admin Token is ${!ADMIN_TOKEN}"

3  blueprints/influxdb/config.yml  Normal file

@@ -0,0 +1,3 @@
blueprint:
  influxdb:
    pkgs: influxdb

586  blueprints/influxdb/includes/influxd.conf  Normal file

@@ -0,0 +1,586 @@
### Welcome to the InfluxDB configuration file.

# The values in this file override the default values used by the system if
# a config option is not specified. The commented out lines are the configuration
# field and the default value used. Uncommenting a line and changing the value
# will change the value used at runtime when the process is restarted.

# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
# reporting-disabled = false

# Bind address to use for the RPC service for backup and restore.
# bind-address = "127.0.0.1:8088"

###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[meta]
# Where the metadata/raft database is stored
dir = "/config/db/meta"

# Automatically create a default retention policy when creating a database.
# retention-autocreate = true

# If log messages are printed for the meta service
# logging-enabled = true

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[data]
# The directory where the TSM storage engine stores TSM files.
dir = "/config/db/data"

# The directory where the TSM storage engine stores WAL files.
wal-dir = "/config/db/wal"

# The amount of time that a write will wait before fsyncing. A duration
# greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
# disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
# Values in the range of 0-100ms are recommended for non-SSD disks.
# wal-fsync-delay = "0s"


# The type of shard index to use for new shards. The default is an in-memory index that is
# recreated at startup. A value of "tsi1" will use a disk based index that supports higher
# cardinality datasets.
# index-version = "inmem"

# Trace logging provides more verbose output around the tsm engine. Turning
# this on can provide more useful output for debugging tsm engine issues.
# trace-logging-enabled = false

# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true

# Validates incoming writes to ensure keys only have valid unicode characters.
# This setting will incur a small overhead because every key must be checked.
# validate-keys = false

# Settings for the TSM engine

# CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# cache-max-memory-size = "1g"

# CacheSnapshotMemorySize is the size at which the engine will
# snapshot the cache and write it to a TSM file, freeing up memory
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# cache-snapshot-memory-size = "25m"

# CacheSnapshotWriteColdDuration is the length of time at
# which the engine will snapshot the cache and write it to
# a new TSM file if the shard hasn't received writes or deletes
# cache-snapshot-write-cold-duration = "10m"

# CompactFullWriteColdDuration is the duration at which the engine
# will compact all TSM files in a shard if it hasn't received a
# write or delete
# compact-full-write-cold-duration = "4h"

# The maximum number of concurrent full and level compactions that can run at one time. A
# value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
# than 0 limits compactions to that value. This setting does not apply
# to cache snapshotting.
# max-concurrent-compactions = 0

# CompactThroughput is the rate limit in bytes per second that we
# will allow TSM compactions to write to disk. Note that short bursts are allowed
# to happen at a possibly larger value, set by CompactThroughputBurst
# compact-throughput = "48m"

# CompactThroughputBurst is the rate limit in bytes per second that we
# will allow TSM compactions to write to disk.
# compact-throughput-burst = "48m"

# If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
# TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
# It might help users who have slow disks in some cases.
# tsm-use-madv-willneed = false

# Settings for the inmem index

# The maximum series allowed per database before writes are dropped. This limit can prevent
# high cardinality issues at the database level. This limit can be disabled by setting it to
# 0.
# max-series-per-database = 1000000

# The maximum number of tag values per tag that are allowed before writes are dropped. This limit
# can prevent high cardinality tag values from being written to a measurement. This limit can be
# disabled by setting it to 0.
# max-values-per-tag = 100000

# Settings for the tsi1 index

# The threshold, in bytes, when an index write-ahead log file will compact
# into an index file. Lower sizes will cause log files to be compacted more
# quickly and result in lower heap usage at the expense of write throughput.
# Higher sizes will be compacted less frequently, store more series in-memory,
# and provide higher write throughput.
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# max-index-log-file-size = "1m"

# The size of the internal cache used in the TSI index to store previously
# calculated series results. Cached results will be returned quickly from the cache rather
# than needing to be recalculated when a subsequent query with a matching tag key/value
# predicate is executed. Setting this value to 0 will disable the cache, which may
# lead to query performance issues.
# This value should only be increased if it is known that the set of regularly used
# tag key/value predicates across all measurements for a database is larger than 100. An
# increase in cache size may lead to an increase in heap usage.
series-id-set-cache-size = 100

###
### [coordinator]
###
### Controls the clustering service configuration.
###

[coordinator]
# The default time a write request will wait until a "timeout" error is returned to the caller.
# write-timeout = "10s"

# The maximum number of concurrent queries allowed to be executing at one time. If a query is
# executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
# by setting it to 0.
# max-concurrent-queries = 0

# The maximum time a query is allowed to execute before being killed by the system. This limit
# can help prevent run away queries. Setting the value to 0 disables the limit.
# query-timeout = "0s"

# The time threshold when a query will be logged as a slow query. This limit can be set to help
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
# log-queries-after = "0s"

# The maximum number of points a SELECT can process. A value of 0 will make
# the maximum point count unlimited. This will only be checked every second so queries will not
# be aborted immediately when hitting the limit.
# max-select-point = 0

# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
# count unlimited.
# max-select-series = 0

# The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum
# number of buckets unlimited.
# max-select-buckets = 0

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###

[retention]
# Determines whether retention policy enforcement is enabled.
# enabled = true

# The interval of time when retention policy enforcement checks run.
# check-interval = "30m"

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.

[shard-precreation]
# Determines whether shard pre-creation service is enabled.
# enabled = true

# The interval of time when the check to pre-create new shards runs.
# check-interval = "10m"

# The default period ahead of the endtime of a shard group that its successor
# group is created.
# advance-period = "30m"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.

[monitor]
# Whether to record statistics internally.
# store-enabled = true

# The destination database for recorded statistics
# store-database = "_internal"

# The interval at which to record statistics
# store-interval = "10s"

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###

[http]
# Determines whether HTTP endpoint is enabled.
# enabled = true

# Determines whether the Flux query endpoint is enabled.
# flux-enabled = false

# Determines whether the Flux query logging is enabled.
# flux-log-enabled = false

# The bind address used by the HTTP service.
# bind-address = ":8086"

# Determines whether user authentication is enabled over HTTP/HTTPS.
# auth-enabled = false

# The default realm sent back when issuing a basic auth challenge.
# realm = "InfluxDB"

# Determines whether HTTP request logging is enabled.
# log-enabled = true

# Determines whether the HTTP write request logs should be suppressed when the log is enabled.
# suppress-write-log = false

# When HTTP request logging is enabled, this option specifies the path where
# log entries should be written. If unspecified, the default is to write to stderr, which
# intermingles HTTP logs with internal InfluxDB logging.
#
# If influxd is unable to access the specified path, it will log an error and fall back to writing
# the request log to stderr.
# access-log-path = ""

# Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
# a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
# If multiple filters are used, then only one has to match. The default is to have no filters which
# will cause every request to be printed.
# access-log-status-filters = []

# Determines whether detailed write logging is enabled.
# write-tracing = false

# Determines whether the pprof endpoint is enabled. This endpoint is used for
# troubleshooting and monitoring.
# pprof-enabled = true

# Enables authentication on pprof endpoints. Users will need admin permissions
# to access the pprof endpoints when this setting is enabled. This setting has
# no effect if either auth-enabled or pprof-enabled are set to false.
# pprof-auth-enabled = false

# Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
# This is only needed to debug startup issues.
# debug-pprof-enabled = false

# Enables authentication on the /ping, /metrics, and deprecated /status
# endpoints. This setting has no effect if auth-enabled is set to false.
# ping-auth-enabled = false

# Determines whether HTTPS is enabled.
# https-enabled = false

# The SSL certificate to use when HTTPS is enabled.
# https-certificate = "/config/ssl/influxdb.pem"

# Use a separate private key location.
# https-private-key = ""

# The JWT auth shared secret to validate requests using JSON web tokens.
# shared-secret = ""

# The default chunk size for result sets that should be chunked.
# max-row-limit = 0

# The maximum number of HTTP connections that may be open at once. New connections that
# would exceed this limit are dropped. Setting this value to 0 disables the limit.
# max-connection-limit = 0

# Enable http service over unix domain socket
# unix-socket-enabled = false

# The path of the unix domain socket.
# bind-socket = "/var/run/influxdb.sock"

# The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
# max-body-size = 25000000

# The maximum number of writes processed concurrently.
# Setting this to 0 disables the limit.
# max-concurrent-write-limit = 0

# The maximum number of writes queued for processing.
# Setting this to 0 disables the limit.
# max-enqueued-write-limit = 0

# The maximum duration for a write to wait in the queue to be processed.
# Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
# enqueued-write-timeout = 0

###
### [logging]
###
### Controls how the logger emits logs to the output.
###

[logging]
# Determines which log encoder to use for logs. Available options
# are auto, logfmt, and json. auto will use a more user-friendly
# output format if the output terminal is a TTY, but the format is not as
# easily machine-readable. When the output is a non-TTY, auto will use
# logfmt.
# format = "auto"

# Determines which level of logs will be emitted. The available levels
# are error, warn, info, and debug. Logs that are equal to or above the
# specified level will be emitted.
# level = "info"

# Suppresses the logo output that is printed when the program is started.
# The logo is always suppressed if STDOUT is not a TTY.
# suppress-logo = false

###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###

[subscriber]
# Determines whether the subscriber service is enabled.
# enabled = true

# The default timeout for HTTP writes to subscribers.
# http-timeout = "30s"

# Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
# signed certificates.
# insecure-skip-verify = false

# The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
# ca-certs = ""

# The number of writer goroutines processing the write channel.
# write-concurrency = 40

# The number of in-flight writes buffered in the write channel.
# write-buffer-size = 1000


###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###

[[graphite]]
# Determines whether the graphite endpoint is enabled.
# enabled = false
# database = "graphite"
# retention-policy = ""
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# Flush if this many points get buffered
# batch-size = 5000

# number of batches that may be pending in memory
# batch-pending = 10

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# udp-read-buffer = 0

### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
# separator = "."

### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from metric
# tags = ["region=us-east", "zone=1c"]

### Each template line requires a template pattern. It can have an optional
### filter before the template and separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas and no spaces
### similar to the line protocol format. There can be only one default template.
# templates = [
# "*.app env.service.resource.measurement",
# # Default template
# "server.*",
# ]

###
### [collectd]
###
### Controls one or many listeners for collectd data.
###

[[collectd]]
# enabled = false
# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
#
# The collectd service supports either scanning a directory for multiple types
# db files, or specifying a single db file.
# typesdb = "/usr/local/share/collectd"
#
# security-level = "none"
# auth-file = "/etc/collectd/auth_file"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# Flush if this many points get buffered
# batch-size = 5000

# Number of batches that may be pending in memory
# batch-pending = 10

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "10s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0

# Multi-value plugins can be handled two ways.
# "split" will parse and store the multi-value plugin data into separate measurements
# "join" will parse and store the multi-value plugin as a single multi-value measurement.
# "split" is the default behavior for backward compatibility with previous versions of influxdb.
# parse-multivalue-plugin = "split"
###
### [opentsdb]
###
### Controls one or many listeners for OpenTSDB data.
###

[[opentsdb]]
# enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate= "/config/ssl/influxdb.pem"

# Log an error for every malformed point.
# log-point-errors = true

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching.

# Flush if this many points get buffered
# batch-size = 1000

# Number of batches that may be pending in memory
# batch-pending = 5

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
# enabled = false
# bind-address = ":8089"
# database = "udp"
# retention-policy = ""

# InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
# precision = ""

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# Flush if this many points get buffered
# batch-size = 5000

# Number of batches that may be pending in memory
# batch-pending = 10

# Will flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
# Determines whether the continuous query service is enabled.
# enabled = true

# Controls whether queries are logged when executed by the CQ service.
# log-enabled = true

# Controls whether queries are logged to the self-monitoring data store.
# query-stats-enabled = false

# interval for how often continuous queries will be checked if they need to run
# run-interval = "1s"

###
### [tls]
###
### Global configuration settings for TLS in InfluxDB.
###

[tls]
# Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
# for a list of available ciphers, which depends on the version of Go (use the query
# SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
# the default settings from Go's crypto/tls package.
# ciphers = [
# "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
# "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
# ]

# Minimum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# min-version = "tls1.2"

# Maximum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# max-version = "tls1.2"

31  blueprints/influxdb/install.sh  Executable file

@@ -0,0 +1,31 @@
#!/usr/local/bin/bash
# This script installs the current release of InfluxDB

#####
#
# Init and Mounts
#
#####

# Initialise variables
# shellcheck disable=SC2154
JAIL_IP="jail_${1}_ip4_addr"
JAIL_IP="${!JAIL_IP%/*}"
INCLUDES_PATH="${SCRIPT_DIR}/blueprints/influxdb/includes"

# Mount and configure proper configuration location
# shellcheck disable=SC2154
cp -rf "${INCLUDES_PATH}/influxd.conf" "/mnt/${global_dataset_config}/${1}/influxd.conf"
iocage exec "${1}" mkdir -p /config/db/data /config/db/meta /config/db/wal
iocage exec "${1}" chown -R influxd:influxd /config/db
iocage exec "${1}" sysrc influxd_conf="/config/influxd.conf"
iocage exec "${1}" sysrc influxd_enable="YES"

# Start influxdb and wait for it to startup
iocage exec "${1}" service influxd start
sleep 15

# Done!
echo "Installation complete!"
echo "You may connect InfluxDB plugins to the InfluxDB jail at http://${JAIL_IP}:8086."
echo ""

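An optional post-install check, not part of the script: InfluxDB 1.x answers its /ping endpoint with HTTP 204 once it is up, so the jail can be probed from the host, assuming the IP from the config is reachable:

```
curl -s -o /dev/null -w "%{http_code}\n" "http://${JAIL_IP}:8086/ping"   # expect 204
```
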
262
blueprints/influxdb/readme.md
Normal file
262
blueprints/influxdb/readme.md
Normal file
@ -0,0 +1,262 @@
|
|||||||
|
# InfluxDB
|
||||||
|
|
||||||
|
## Original README from the influxdb github:
|
||||||
|
|
||||||
|
https://github.com/influxdata/influxdb
|
||||||
|
|
||||||
|
# InfluxDB [![CircleCI](https://circleci.com/gh/influxdata/influxdb.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb)
|
||||||
|
[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack)
|
||||||
|
|
||||||
|
|
||||||
|
InfluxDB is an open source time series platform. This includes APIs for storing and querying data, processing it in the background for ETL or monitoring and alerting purposes, user dashboards, and visualizing and exploring the data and more. The master branch on this repo now represents the latest InfluxDB, which now includes functionality for Kapacitor (background processing) and Chronograf (the UI) all in a single binary.
|
||||||
|
|
||||||
|
The list of InfluxDB Client Libraries that are compatible with the latest version can be found in [our documentation](https://v2.docs.influxdata.com/v2.0/reference/api/client-libraries/).
|
||||||
|
|
||||||
|
If you are looking for the 1.x line of releases, there are branches for each minor version as well as a `master-1.x` branch that will contain the code for the next 1.x release. The master-1.x [working branch is here](https://github.com/influxdata/influxdb/tree/master-1.x). The [InfluxDB 1.x Go Client can be found here](https://github.com/influxdata/influxdb1-client).
|
||||||
|
|
||||||
|
## State of the Project
|
||||||
|
|
||||||
|
The latest InfluxDB 1.x is the stable release and recommended for production use. The InfluxDB that is on the master branch is currently in the beta stage. This means that it is still **NOT** recommended for production usage. There may be breaking API changes, breaking changes in the [Flux language](https://github.com/influxdata/flux), changes in the underlying storage format that will require you to delete all your data, and significant changes to the UI. The beta is intended for feature exploration and gathering feedback on the available feature set. It **SHOULD NOT** be used for performance testing, benchmarks, or other stress tests.
|
||||||
|
|
||||||
|
Additional features will arrive during the beta period until we reach general availability (GA). We will be cutting versioned releases at least every two weeks starting in the first release. There will also be nightly builds based off the latest code in master.
|
||||||
|
|
||||||
|
Once we close on the final feature set of what will be in the first GA release of InfluxDB in the 2.x line, we will move into the release candidate (RC) phase. At that point, we do not expect there to be breaking changes to the API or Flux language. We may still need to make a breaking change prior to GA due to some unforseen circumstance, but it would need to be extremely important and will be clearly communicated via the changelog and all available channels.
|
||||||
|
|
||||||
|
Our current plans are to release RCs suitable for production usage, but we will re-evaluate in consultation with the community as the cycle progresses. During the RC period, we will focus on feedback from users, bug fixes, performance, and additive features (where time permits).
|
||||||
|
|
||||||
|
### What you can expect in the Beta and RC Phases
|
||||||
|
|
||||||
|
#### Beta
|
||||||
|
**Releases every two weeks or as needed**
|
||||||
|
|
||||||
|
Planned additions include:
|
||||||
|
- Compatibility layer with 1.x including: 1.x HTTP Write API and HTTP Read API support for InfluxQL
|
||||||
|
- Import Bulk Data from 1.x - convert TSM from 1.x to 2.x
|
||||||
|
- Performance tuning, stability improvements, and fine tuning based on community feedback.
|
||||||
|
- Finalization of supported client libraries starting with JavaScript and Go.
|
||||||
|
|
||||||
|
#### RC
|
||||||
|
**As needed**
|
||||||
|
|
||||||
|
Planned activities include:
|
||||||
|
- Performance tuning, stability improvements, and fine-tuning based on community feedback.
|
||||||
|
|
||||||
|
### What is **NOT** planned?
|
||||||
|
- Migration of users/security permissions from InfluxDB v1.x to 2.x. ACTION REQUIRED: Re-establish users and permissions within the new unified security model which now spans the underlying database and user interface.
|
||||||
|
- Migration of Continuous Queries. ACTION REQUIRED: These will need to be re-implemented as Flux tasks.
|
||||||
|
- Direct support by InfluxDB for CollectD, StatsD, Graphite, or UDP. ACTION REQUIRED: Leverage Telegraf 1.9+ along with the InfluxDB v2.0 output plugin to translate these protocols/formats.
|
||||||
|
|
||||||
|
## Installing from Source
|
||||||
|
|
||||||
|
We have nightly and weekly versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/).
|
||||||
|
|
||||||
|
## Building From Source
|
||||||
|
|
||||||
|
This project requires Go 1.13 and Go module support.
|
||||||
|
|
||||||
|
Set `GO111MODULE=on` or build the project outside of your `GOPATH` for it to succeed.
|
||||||
|
|
||||||
|
The project also requires a recent stable version of Rust. We recommend using [rustup](https://rustup.rs/) to install Rust.
|
||||||
|
|
||||||
|
If you are getting an `error loading module requirements` error with `bzr executable file not found in $PATH”` on `make`, then you need to ensure you have `bazaar`, `protobuf`, and `yarn` installed.
|
||||||
|
|
||||||
|
- OSX: `brew install bazaar yarn`
|
||||||
|
- Linux (Arch): `pacman -S bzr protobuf yarn`
|
||||||
|
- Linux (Ubuntu): `apt install bzr protobuf-compiler yarnpkg`
|
||||||
|
|
||||||
|
**NB:** For RedHat, there are some extra steps:
|
||||||
|
|
||||||
|
1. You must enable the [EPEL](https://fedoraproject.org/wiki/EPEL)
|
||||||
|
2. You must add the `yarn` [repository](https://yarnpkg.com/lang/en/docs/install/#centos-stable)
|
||||||
|
|
||||||
|
For information about modules, please refer to the [wiki](https://github.com/golang/go/wiki/Modules).
|
||||||
|
|
||||||
|
A successful `make` run results in two binaries, with platform-dependent paths:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ make
|
||||||
|
...
|
||||||
|
env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx ./cmd/influx
|
||||||
|
env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd ./cmd/influxd
|
||||||
|
```
|
||||||
|
|
||||||
|
`influxd` is the InfluxDB service.
|
||||||
|
`influx` is the CLI management tool.
|
||||||
|
|
||||||
|
Start the service.
|
||||||
|
Logs to stdout by default:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd
|
||||||
|
```
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
For a complete getting started guide, please see our full [online documentation site](https://v2.docs.influxdata.com/v2.0/).
|
||||||
|
|
||||||
|
To write and query data or use the API in any way, you'll need to first create a user, credentials, organization and bucket.
|
||||||
|
Everything in InfluxDB is organized under a concept of an organization. The API is designed to be multi-tenant.
|
||||||
|
Buckets represent where you store time series data.
|
||||||
|
They're synonymous with what was previously in InfluxDB 1.x a database and retention policy.
|
||||||
|
|
||||||
|
The simplest way to get set up is to point your browser to [http://localhost:9999](http://localhost:9999) and go through the prompts.
|
||||||
|
|
||||||
|
**Note**: Port 9999 will be used during the beta phases of development of InfluxDB v2.0.
|
||||||
|
This should allow a v2.0-beta instance to be run alongside a v1.x instance without interfering on port 8086.
|
||||||
|
InfluxDB will thereafter continue to use 8086.
|
||||||
|
|
||||||
|
You can also get set up from the CLI using the subcommands `influx user`, `influx auth`, `influx org` and `influx bucket`,
|
||||||
|
or do it all in one breath with `influx setup`:
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup
|
||||||
|
Welcome to InfluxDB 2.0!
|
||||||
|
Please type your primary username: marty
|
||||||
|
|
||||||
|
Please type your password:
|
||||||
|
|
||||||
|
Please type your password again:
|
||||||
|
|
||||||
|
Please type your primary organization name.: InfluxData
|
||||||
|
|
||||||
|
Please type your primary bucket name.: telegraf
|
||||||
|
|
||||||
|
Please type your retention period in hours.
|
||||||
|
Or press ENTER for infinite.: 72
|
||||||
|
|
||||||
|
|
||||||
|
You have entered:
|
||||||
|
Username: marty
|
||||||
|
Organization: InfluxData
|
||||||
|
Bucket: telegraf
|
||||||
|
Retention Period: 72 hrs
|
||||||
|
Confirm? (y/n): y
|
||||||
|
|
||||||
|
UserID Username Organization Bucket
|
||||||
|
033a3f2c5ccaa000 marty InfluxData Telegraf
|
||||||
|
Your token has been stored in /Users/marty/.influxdbv2/credentials
|
||||||
|
```
|
||||||
|
|
||||||
|
You may get into a development loop where `influx setup` becomes tedious.
|
||||||
|
Some added flags can help:
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup --username marty --password F1uxKapacit0r85 --org InfluxData --bucket telegraf --retention 168 --token where-were-going-we-dont-need-roads --force
|
||||||
|
```
|
||||||
|
|
||||||
|
`~/.influxdbv2/credentials` contains your auth token.
|
||||||
|
Most `influx` commands read the token from this file path by default.
|
||||||
|
|
||||||
|
You may need the organization ID and bucket ID later:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ influx org find
|
||||||
|
ID Name
|
||||||
|
033a3f2c708aa000 InfluxData
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
$ influx bucket find
|
||||||
|
ID Name Retention Organization OrganizationID
|
||||||
|
033a3f2c710aa000 telegraf 72h0m0s InfluxData 033a3f2c708aa000
|
||||||
|
```
|
||||||
|
|
||||||
|
Write to measurement `m`, with tag `v=2`, in bucket `telegraf`, which belongs to organization `InfluxData`:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx write --org InfluxData --bucket telegraf --precision s "m v=2 $(date +%s)"
|
||||||
|
```
|
||||||
|
|
||||||
|
Write the same point using `curl`:
|
||||||
|
|
||||||
|
```
|
||||||
|
curl --header "Authorization: Token $(cat ~/.influxdbv2/credentials)" --data-raw "m v=2 $(date +%s)" "http://localhost:9999/api/v2/write?org=InfluxData&bucket=telegraf&precision=s"
|
||||||
|
```
|
||||||
|
|
||||||
|
Read that back with a simple Flux query:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query -o InfluxData 'from(bucket:"telegraf") |> range(start:-1h)'
|
||||||
|
Result: _result
|
||||||
|
Table: keys: [_start, _stop, _field, _measurement]
|
||||||
|
_start:time _stop:time _field:string _measurement:string _time:time _value:float
|
||||||
|
------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
|
||||||
|
2019-12-30T22:19:39.043918000Z 2019-12-30T23:19:39.043918000Z v m 2019-12-30T23:17:02.000000000Z 2
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the fancy REPL:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx repl -o InfluxData
|
||||||
|
> from(bucket:"telegraf") |> range(start:-1h)
|
||||||
|
Result: _result
|
||||||
|
Table: keys: [_start, _stop, _field, _measurement]
|
||||||
|
_start:time _stop:time _field:string _measurement:string _time:time _value:float
|
||||||
|
------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
|
||||||
|
2019-12-30T22:22:44.776351000Z 2019-12-30T23:22:44.776351000Z v m 2019-12-30T23:17:02.000000000Z 2
|
||||||
|
>
|
||||||
|
```


## Introducing Flux

Flux is an MIT-licensed data scripting language (previously named IFQL) used for querying time series data from InfluxDB. The source for Flux is [available on GitHub](https://github.com/influxdata/flux). Learn more about Flux from [CTO Paul Dix's presentation](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language).
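
As a hedged taste of the language, the same CLI can run a slightly richer pipeline; the `filter` and `aggregateWindow` calls below are standard Flux functions but are an illustrative sketch, not taken from this README:

```
$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query -o InfluxData '
from(bucket:"telegraf")
  |> range(start: -1h)
  |> filter(fn: (r) => r._measurement == "m")
  |> aggregateWindow(every: 10m, fn: mean)'
```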

## Contributing to the Project

InfluxDB is an [MIT licensed](LICENSE) open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing](CONTRIBUTING.md) guide if you're interested in helping out. Also, join us on our [Community Slack Workspace](https://influxdata.com/slack) if you have questions or comments for our engineering teams.

## CI and Static Analysis

### CI

All pull requests will run through CI, which is currently hosted by Circle.
Community contributors should be able to see the outcome of this process by looking at the checks on their PR.
Please fix any issues to ensure a prompt review from members of the team.

The InfluxDB project is used internally in a number of proprietary InfluxData products, and as such, PRs and changes need to be tested internally.
This can take some time, and is not really visible to community contributors.

### Static Analysis

This project uses the following static analysis tools; a local run is sketched after this list.
Failure during the running of any of these tools results in a failed build.
Generally, code must be adjusted to satisfy these tools, though there are exceptions.

- [go vet](https://golang.org/cmd/vet/) checks for Go code that should be considered incorrect.
- [go fmt](https://golang.org/cmd/gofmt/) checks that Go code is correctly formatted.
- [go mod tidy](https://tip.golang.org/cmd/go/#hdr-Add_missing_and_remove_unused_modules) ensures that the source code and go.mod agree.
- [staticcheck](http://next.staticcheck.io/docs/) checks for things like: unused code, code that can be simplified, code that is incorrect and code that will have performance issues.
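
A minimal local pass over the same checks, assuming a Go toolchain and a `staticcheck` binary on your PATH (CI may pin different versions or invoke them through its own targets):

```
# Run the same static analysis tools locally before pushing.
go vet ./...
gofmt -l .
go mod tidy
staticcheck ./...
```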

### staticcheck

If your PR fails `staticcheck` it is easy to dig into why it failed, and also to fix the problem.
First, take a look at the error message in Circle under the `staticcheck` build section, e.g.,

```
tsdb/tsm1/encoding.gen.go:1445:24: func BooleanValues.assertOrdered is unused (U1000)
tsdb/tsm1/encoding.go:172:7: receiver name should not be an underscore, omit the name if it is unused (ST1006)
```

Next, take a [look here](http://next.staticcheck.io/docs/checks) for some clarification on the error code that you have received, e.g., `U1000`.
The docs will tell you what's wrong, and often what you need to do to fix the issue.

#### Generated Code

Sometimes generated code will contain unused code, or code that occasionally fails a different check.
`staticcheck` allows for [entire files](http://next.staticcheck.io/docs/#ignoring-problems) to be ignored, though it's not ideal.
A linter directive, in the form of a comment, must be placed within the generated file.
This is problematic because it will be erased if the file is re-generated.
Until a better solution comes about, below is the list of generated files that need an ignores comment.
If you re-generate a file and find that `staticcheck` has failed, please see the list below for what you need to put back:

| File | Comment |
| :--------------------: | :--------------------------------------------------------------: |
| query/promql/promql.go | //lint:file-ignore SA6001 Ignore all unused code, it's generated |

#### End-to-End Tests

CI also runs end-to-end tests. These test the integration between the influx server and the UI. You can run them locally in two steps:

- Start the server in "testing mode" by running `make run-e2e`.
- Run the tests with `make e2e`.
6
blueprints/influxdb/update.sh
Executable file
6
blueprints/influxdb/update.sh
Executable file
@ -0,0 +1,6 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for influxdb
|
||||||
|
|
||||||
|
iocage exec "$1" service influxd stop
|
||||||
|
# InfluxDB is updated during PKG update, this file is mostly just a placeholder
|
||||||
|
iocage exec "$1" service influxd restart
|
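Since the comment above notes that the InfluxDB binary itself comes from the package system, a hedged sketch of the step that actually brings in a newer version (the `influxdb` package name is an assumption about the FreeBSD package used here) would be:

```
# Hypothetical illustration only: refresh the package index in the jail and
# upgrade the influxdb package before the service is started again.
iocage exec "$1" pkg update
iocage exec "$1" pkg upgrade -y influxdb
```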
3
blueprints/jackett/config.yml
Normal file
3
blueprints/jackett/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
jackett:
|
||||||
|
pkgs: mono
|
14
blueprints/jackett/install.sh
Executable file
14
blueprints/jackett/install.sh
Executable file
@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for jackett
|
||||||
|
|
||||||
|
iocage exec "$1" "fetch https://github.com/Jackett/Jackett/releases/download/v0.11.502/Jackett.Binaries.Mono.tar.gz -o /usr/local/share"
|
||||||
|
iocage exec "$1" "tar -xzvf /usr/local/share/Jackett.Binaries.Mono.tar.gz -C /usr/local/share"
|
||||||
|
iocage exec "$1" rm /usr/local/share/Jackett.Binaries.Mono.tar.gz
|
||||||
|
iocage exec "$1" "pw user add jackett -c jackett -u 818 -d /nonexistent -s /usr/bin/nologin"
|
||||||
|
iocage exec "$1" chown -R jackett:jackett /usr/local/share/Jackett /config
|
||||||
|
iocage exec "$1" mkdir /usr/local/etc/rc.d
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/jackett/includes/jackett.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/jackett
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/jackett
|
||||||
|
iocage exec "$1" sysrc "jackett_enable=YES"
|
||||||
|
iocage exec "$1" service jackett restart
|
0
jails/jackett/readme.md → blueprints/jackett/readme.md
Executable file → Normal file
0
jails/jackett/readme.md → blueprints/jackett/readme.md
Executable file → Normal file
10
blueprints/jackett/update.sh
Executable file
10
blueprints/jackett/update.sh
Executable file
@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for jackett
|
||||||
|
|
||||||
|
iocage exec "$1" service jackett stop
|
||||||
|
# TODO: insert code to update Jackett itself here
|
||||||
|
iocage exec "$1" chown -R jackett:jackett /usr/local/share/Jackett /config
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/jackett/includes/jackett.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/jackett
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/jackett
|
||||||
|
iocage exec "$1" service jackett restart
|
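Until that TODO is filled in, a hedged sketch of what it might look like is to re-run the same fetch/extract steps as install.sh; note the release URL below is the pinned v0.11.502 from install.sh, so this only re-installs that version rather than performing a true auto-update:

```
# Re-fetch and unpack the same pinned Jackett release used by install.sh.
iocage exec "$1" "fetch https://github.com/Jackett/Jackett/releases/download/v0.11.502/Jackett.Binaries.Mono.tar.gz -o /usr/local/share"
iocage exec "$1" "tar -xzvf /usr/local/share/Jackett.Binaries.Mono.tar.gz -C /usr/local/share"
iocage exec "$1" rm /usr/local/share/Jackett.Binaries.Mono.tar.gz
```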
3
blueprints/kms/config.yml
Normal file
3
blueprints/kms/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
kms:
|
||||||
|
pkgs: bash py37-tkinter py37-pip py37-sqlite3 git
|
0
jails/kms/includes/Activate_Office_2019_Pro.bat → blueprints/kms/includes/Activate_Office_2019_Pro.bat
Executable file → Normal file
0
jails/kms/includes/Activate_Office_2019_Pro.bat → blueprints/kms/includes/Activate_Office_2019_Pro.bat
Executable file → Normal file
0
jails/kms/includes/Activate_Windows_10_Pro.bat → blueprints/kms/includes/Activate_Windows_10_Pro.bat
Executable file → Normal file
0
jails/kms/includes/Activate_Windows_10_Pro.bat → blueprints/kms/includes/Activate_Windows_10_Pro.bat
Executable file → Normal file
0
jails/kms/includes/Activate_Windows_Server_2019_Standard.bat → blueprints/kms/includes/Activate_Windows_Server_2019_Standard.bat
Executable file → Normal file
0
jails/kms/includes/Activate_Windows_Server_2019_Standard.bat → blueprints/kms/includes/Activate_Windows_Server_2019_Standard.bat
Executable file → Normal file
0
jails/kms/includes/Office-2019-Pro-VLK-Config.xml → blueprints/kms/includes/Office-2019-Pro-VLK-Config.xml
Executable file → Normal file
0
jails/kms/includes/Office-2019-Pro-VLK-Config.xml → blueprints/kms/includes/Office-2019-Pro-VLK-Config.xml
Executable file → Normal file
0
jails/kms/includes/Readme.md → blueprints/kms/includes/Readme.md
Executable file → Normal file
0
jails/kms/includes/Readme.md → blueprints/kms/includes/Readme.md
Executable file → Normal file
0
jails/kms/includes/Setup_Office_2019_Pro.txt → blueprints/kms/includes/Setup_Office_2019_Pro.txt
Executable file → Normal file
0
jails/kms/includes/Setup_Office_2019_Pro.txt → blueprints/kms/includes/Setup_Office_2019_Pro.txt
Executable file → Normal file
12
blueprints/kms/install.sh
Executable file
12
blueprints/kms/install.sh
Executable file
@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for KMS
|
||||||
|
|
||||||
|
iocage exec "$1" svn checkout https://github.com/SystemRage/py-kms/trunk/py-kms /usr/local/share/py-kms
|
||||||
|
iocage exec "$1" "pw user add kms -c kms -u 666 -d /nonexistent -s /usr/bin/nologin"
|
||||||
|
iocage exec "$1" chown -R kms:kms /usr/local/share/py-kms /config
|
||||||
|
iocage exec "$1" mkdir /usr/local/etc/rc.d
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/kms/includes/py_kms.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/py_kms
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/py_kms
|
||||||
|
iocage exec "$1" sysrc "py_kms_enable=YES"
|
||||||
|
iocage exec "$1" service py_kms start
|
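As a quick sanity check after the service starts, the sketch below lists listening sockets inside the jail; the conventional KMS port 1688 is an assumption about how py-kms is configured here:

```
# Confirm py_kms is running and listening (1688 is the usual KMS port).
iocage exec "$1" service py_kms status
iocage exec "$1" sockstat -4 -l | grep 1688
```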
3
jails/kms/readme.md → blueprints/kms/readme.md
Executable file → Normal file
3
jails/kms/readme.md → blueprints/kms/readme.md
Executable file → Normal file
@ -1,5 +1,6 @@
|
|||||||
|
# Py-KMS
|
||||||
|
|
||||||
# Original README from the py-kms github:
|
## Original README from the py-kms github:
|
||||||
|
|
||||||
https://github.com/SystemRage/py-kms
|
https://github.com/SystemRage/py-kms
|
||||||
|
|
10
blueprints/kms/update.sh
Executable file
10
blueprints/kms/update.sh
Executable file
@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for KMS
|
||||||
|
|
||||||
|
iocage exec "$1" service py_kms stop
|
||||||
|
iocage exec "$1" svn checkout https://github.com/SystemRage/py-kms/trunk/py-kms /usr/local/share/py-kms
|
||||||
|
iocage exec "$1" chown -R kms:kms /usr/local/share/py-kms /config
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/kms/includes/py_kms.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/py_kms
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/py_kms
|
||||||
|
iocage exec "$1" service py_kms start
|
3
blueprints/lidarr/config.yml
Normal file
3
blueprints/lidarr/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
lidarr:
|
||||||
|
pkgs: mono mediainfo sqlite3
|
25
blueprints/lidarr/install.sh
Executable file
25
blueprints/lidarr/install.sh
Executable file
@ -0,0 +1,25 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for lidarr
|
||||||
|
|
||||||
|
# Check if the dataset for completed downloads and its parent dataset exist; create them if they do not.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
createmount "$1" "${global_dataset_downloads}"
|
||||||
|
createmount "$1" "${global_dataset_downloads}"/complete /mnt/fetched
|
||||||
|
|
||||||
|
# Check if the dataset for the media library and the dataset for music exist; create them if they do not.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
createmount "$1" "${global_dataset_media}"
|
||||||
|
createmount "$1" "${global_dataset_media}"/music /mnt/music
|
||||||
|
|
||||||
|
|
||||||
|
iocage exec "$1" "fetch https://github.com/lidarr/Lidarr/releases/download/v0.2.0.371/Lidarr.develop.0.2.0.371.linux.tar.gz -o /usr/local/share"
|
||||||
|
iocage exec "$1" "tar -xzvf /usr/local/share/Lidarr.develop.0.2.0.371.linux.tar.gz -C /usr/local/share"
|
||||||
|
iocage exec "$1" "rm /usr/local/share/Lidarr.develop.0.2.0.371.linux.tar.gz"
|
||||||
|
iocage exec "$1" "pw user add lidarr -c lidarr -u 353 -d /nonexistent -s /usr/bin/nologin"
|
||||||
|
iocage exec "$1" chown -R lidarr:lidarr /usr/local/share/Lidarr /config
|
||||||
|
iocage exec "$1" mkdir /usr/local/etc/rc.d
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/lidarr/includes/lidarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/lidarr
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/lidarr
|
||||||
|
iocage exec "$1" sysrc "lidarr_enable=YES"
|
||||||
|
iocage exec "$1" service lidarr start
|
4
jails/lidarr/readme.md → blueprints/lidarr/readme.md
Executable file → Normal file
4
jails/lidarr/readme.md → blueprints/lidarr/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
|
|||||||
# Original README from the lidarr github:
|
# Lidarr
|
||||||
|
|
||||||
|
## Original README from the lidarr github:
|
||||||
|
|
||||||
https://github.com/lidarr/Lidarr
|
https://github.com/lidarr/Lidarr
|
||||||
|
|
10
blueprints/lidarr/update.sh
Executable file
10
blueprints/lidarr/update.sh
Executable file
@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for lidarr
|
||||||
|
|
||||||
|
iocage exec "$1" service lidarr stop
|
||||||
|
#TODO insert code to update lidarr itself here
|
||||||
|
iocage exec "$1" chown -R lidarr:lidarr /usr/local/share/lidarr /config
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/lidarr/includes/lidarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/lidarr
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/lidarr
|
||||||
|
iocage exec "$1" service lidarr restart
|
3
blueprints/mariadb/config.yml
Normal file
3
blueprints/mariadb/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
mariadb:
|
||||||
|
pkgs: mariadb104-server git php74-session php74-xml php74-ctype php74-openssl php74-filter php74-gd php74-json php74-mysqli php74-mbstring php74-zlib php74-zip php74-bz2 php74-pdo_mysql phpMyAdmin5-php74-5.0.1
|
0
jails/mariadb/includes/Caddyfile → blueprints/mariadb/includes/Caddyfile
Executable file → Normal file
0
jails/mariadb/includes/Caddyfile → blueprints/mariadb/includes/Caddyfile
Executable file → Normal file
@ -1,4 +1,5 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
# shellcheck disable=1091,2223,2154,2034
|
||||||
#
|
#
|
||||||
# PROVIDE: caddy
|
# PROVIDE: caddy
|
||||||
# REQUIRE: networking
|
# REQUIRE: networking
|
0
jails/mariadb/includes/my-system.cnf → blueprints/mariadb/includes/my-system.cnf
Executable file → Normal file
0
jails/mariadb/includes/my-system.cnf → blueprints/mariadb/includes/my-system.cnf
Executable file → Normal file
0
jails/mariadb/includes/my.cnf → blueprints/mariadb/includes/my.cnf
Executable file → Normal file
0
jails/mariadb/includes/my.cnf → blueprints/mariadb/includes/my.cnf
Executable file → Normal file
117
blueprints/mariadb/install.sh
Executable file
117
blueprints/mariadb/install.sh
Executable file
@ -0,0 +1,117 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This script installs the current release of Mariadb and PhpMyAdmin into a created jail
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Init and Mounts
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
# Initialise defaults
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
JAIL_IP="jail_${1}_ip4_addr"
|
||||||
|
JAIL_IP="${!JAIL_IP%/*}"
|
||||||
|
INCLUDES_PATH="${SCRIPT_DIR}/blueprints/mariadb/includes"
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
CERT_EMAIL="jail_${1}_cert_email"
|
||||||
|
CERT_EMAIL="${!CERT_EMAIL:-placeholder@email.fake}"
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
DB_ROOT_PASSWORD="jail_${1}_db_root_password"
|
||||||
|
HOST_NAME="jail_${1}_host_name"
|
||||||
|
DL_FLAGS=""
|
||||||
|
DNS_ENV=""
|
||||||
|
|
||||||
|
# Check that necessary variables were set by nextcloud-config
|
||||||
|
if [ -z "${JAIL_IP}" ]; then
|
||||||
|
echo 'Configuration error: The mariadb jail does NOT accept DHCP'
|
||||||
|
echo 'Please reinstall using a fixed IP address'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If the database dataset already contains data, treat this run as a reinstall
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ "$(ls -A "/mnt/${global_dataset_config}/${1}/db")" ]; then
|
||||||
|
echo "Reinstall of mariadb detected... Continuing"
|
||||||
|
REINSTALL="true"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount database dataset and set zfs preferences
|
||||||
|
createmount "${1}" "${global_dataset_config}"/"${1}"/db /var/db/mysql
|
||||||
|
zfs set recordsize=16K "${global_dataset_config}"/"${1}"/db
|
||||||
|
zfs set primarycache=metadata "${global_dataset_config}"/"${1}"/db
|
||||||
|
|
||||||
|
iocage exec "${1}" chown -R 88:88 /var/db/mysql
|
||||||
|
|
||||||
|
# Install includes fstab
|
||||||
|
iocage exec "${1}" mkdir -p /mnt/includes
|
||||||
|
iocage fstab -a "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
iocage exec "${1}" mkdir -p /usr/local/www/phpmyadmin
|
||||||
|
iocage exec "${1}" chown -R www:www /usr/local/www/phpmyadmin
|
||||||
|
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Install mariadb, Caddy and PhpMyAdmin
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
fetch -o /tmp https://getcaddy.com
|
||||||
|
if ! iocage exec "${1}" bash -s personal "${DL_FLAGS}" < /tmp/getcaddy.com
|
||||||
|
then
|
||||||
|
echo "Failed to download/install Caddy"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
iocage exec "${1}" sysrc mysql_enable="YES"
|
||||||
|
|
||||||
|
# Copy and edit pre-written config files
|
||||||
|
echo "Copying Caddyfile for no SSL"
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/caddy.rc /usr/local/etc/rc.d/caddy
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/Caddyfile /usr/local/www/Caddyfile
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
iocage exec "${1}" sed -i '' "s/yourhostnamehere/${!HOST_NAME}/" /usr/local/www/Caddyfile
|
||||||
|
iocage exec "${1}" sed -i '' "s/JAIL-IP/${JAIL_IP}/" /usr/local/www/Caddyfile
|
||||||
|
|
||||||
|
iocage exec "${1}" sysrc caddy_enable="YES"
|
||||||
|
iocage exec "${1}" sysrc php_fpm_enable="YES"
|
||||||
|
iocage exec "${1}" sysrc caddy_cert_email="${CERT_EMAIL}"
|
||||||
|
iocage exec "${1}" sysrc caddy_env="${DNS_ENV}"
|
||||||
|
|
||||||
|
iocage restart "${1}"
|
||||||
|
sleep 10
|
||||||
|
|
||||||
|
if [ "${REINSTALL}" == "true" ]; then
|
||||||
|
echo "Reinstall detected, skipping generaion of new config and database"
|
||||||
|
else
|
||||||
|
|
||||||
|
# Secure database, set root password, create Nextcloud DB, user, and password
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/my-system.cnf /var/db/mysql/my.cnf
|
||||||
|
iocage exec "${1}" mysql -u root -e "DELETE FROM mysql.user WHERE User='';"
|
||||||
|
iocage exec "${1}" mysql -u root -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');"
|
||||||
|
iocage exec "${1}" mysql -u root -e "DROP DATABASE IF EXISTS test;"
|
||||||
|
iocage exec "${1}" mysql -u root -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';"
|
||||||
|
iocage exec "${1}" mysqladmin --user=root password "${!DB_ROOT_PASSWORD}"
|
||||||
|
iocage exec "${1}" mysqladmin reload
|
||||||
|
fi
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/my.cnf /root/.my.cnf
|
||||||
|
iocage exec "${1}" sed -i '' "s|mypassword|${!DB_ROOT_PASSWORD}|" /root/.my.cnf
|
||||||
|
|
||||||
|
# Save passwords for later reference
|
||||||
|
iocage exec "${1}" echo "MariaDB root password is ${!DB_ROOT_PASSWORD}" > /root/"${1}"_db_password.txt
|
||||||
|
|
||||||
|
|
||||||
|
# Don't need /mnt/includes any more, so unmount it
|
||||||
|
iocage fstab -r "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
# Done!
|
||||||
|
echo "Installation complete!"
|
||||||
|
echo "Using your web browser, go to http://${!HOST_NAME} to log in"
|
||||||
|
|
||||||
|
if [ "${REINSTALL}" == "true" ]; then
|
||||||
|
echo "You did a reinstall, please use your old database and account credentials"
|
||||||
|
else
|
||||||
|
echo "Database Information"
|
||||||
|
echo "--------------------"
|
||||||
|
echo "The MariaDB root password is ${!DB_ROOT_PASSWORD}"
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
echo "All passwords are saved in /root/${1}_db_password.txt"
|
4
jails/mariadb/readme.md → blueprints/mariadb/readme.md
Executable file → Normal file
4
jails/mariadb/readme.md → blueprints/mariadb/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
|
|||||||
# Original README from the mariadb github:
|
# MariaDB
|
||||||
|
|
||||||
|
## Original README from the mariadb github:
|
||||||
|
|
||||||
https://github.com/MariaDB/server/
|
https://github.com/MariaDB/server/
|
||||||
|
|
37
blueprints/mariadb/update.sh
Executable file
37
blueprints/mariadb/update.sh
Executable file
@ -0,0 +1,37 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for mariadb
|
||||||
|
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
JAIL_IP="jail_${1}_ip4_addr"
|
||||||
|
JAIL_IP="${!JAIL_IP%/*}"
|
||||||
|
HOST_NAME="jail_${1}_host_name"
|
||||||
|
INCLUDES_PATH="${SCRIPT_DIR}/blueprints/mariadb/includes"
|
||||||
|
|
||||||
|
# Install includes fstab
|
||||||
|
iocage exec "${1}" mkdir -p /mnt/includes
|
||||||
|
iocage fstab -a "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
|
||||||
|
iocage exec "${1}" service caddy stop
|
||||||
|
iocage exec "${1}" service php-fpm stop
|
||||||
|
|
||||||
|
fetch -o /tmp https://getcaddy.com
|
||||||
|
if ! iocage exec "${1}" bash -s personal "${DL_FLAGS}" < /tmp/getcaddy.com
|
||||||
|
then
|
||||||
|
echo "Failed to download/install Caddy"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy and edit pre-written config files
|
||||||
|
echo "Copying Caddyfile for no SSL"
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/caddy /usr/local/etc/rc.d/
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/Caddyfile /usr/local/www/Caddyfile
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
iocage exec "${1}" sed -i '' "s/yourhostnamehere/${HOST_NAME}/" /usr/local/www/Caddyfile
|
||||||
|
iocage exec "${1}" sed -i '' "s/JAIL-IP/${JAIL_IP}/" /usr/local/www/Caddyfile
|
||||||
|
|
||||||
|
# Don't need /mnt/includes any more, so unmount it
|
||||||
|
iocage fstab -r "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
iocage exec "${1}" service caddy start
|
||||||
|
iocage exec "${1}" service php-fpm start
|
0
jails/nextcloud/LICENSE → blueprints/nextcloud/LICENSE
Executable file → Normal file
0
jails/nextcloud/LICENSE → blueprints/nextcloud/LICENSE
Executable file → Normal file
4
blueprints/nextcloud/config.yml
Normal file
4
blueprints/nextcloud/config.yml
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
blueprint:
|
||||||
|
nextcloud:
|
||||||
|
pkgs: nano sudo redis php73-ctype gnupg php73-dom php73-gd php73-iconv php73-json php73-mbstring php73-posix php73-simplexml php73-xmlreader php73-xmlwriter php73-zip php73-zlib php73-hash php73-xml php73 php73-pecl-redis php73-session php73-wddx php73-xsl php73-filter php73-pecl-APCu php73-curl php73-fileinfo php73-bz2 php73-intl php73-openssl php73-ldap php73-ftp php73-imap php73-exif php73-gmp php73-pecl-memcache php73-pecl-imagick php73-pecl-smbclient perl5 p5-Locale-gettext help2man texinfo m4 autoconf
|
||||||
|
ports: true
|
0
jails/nextcloud/includes/Caddyfile → blueprints/nextcloud/includes/Caddyfile
Executable file → Normal file
0
jails/nextcloud/includes/Caddyfile → blueprints/nextcloud/includes/Caddyfile
Executable file → Normal file
0
jails/nextcloud/includes/Caddyfile-nossl → blueprints/nextcloud/includes/Caddyfile-nossl
Executable file → Normal file
0
jails/nextcloud/includes/Caddyfile-nossl → blueprints/nextcloud/includes/Caddyfile-nossl
Executable file → Normal file
0
jails/nextcloud/includes/Caddyfile-selfsigned → blueprints/nextcloud/includes/Caddyfile-selfsigned
Executable file → Normal file
0
jails/nextcloud/includes/Caddyfile-selfsigned → blueprints/nextcloud/includes/Caddyfile-selfsigned
Executable file → Normal file
@ -1,4 +1,5 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
# shellcheck disable=1091,2223,2154,2034
|
||||||
#
|
#
|
||||||
# PROVIDE: caddy
|
# PROVIDE: caddy
|
||||||
# REQUIRE: networking
|
# REQUIRE: networking
|
0
jails/nextcloud/includes/my-system.cnf → blueprints/nextcloud/includes/my-system.cnf
Executable file → Normal file
0
jails/nextcloud/includes/my-system.cnf → blueprints/nextcloud/includes/my-system.cnf
Executable file → Normal file
0
jails/nextcloud/includes/my.cnf → blueprints/nextcloud/includes/my.cnf
Executable file → Normal file
0
jails/nextcloud/includes/my.cnf → blueprints/nextcloud/includes/my.cnf
Executable file → Normal file
0
jails/nextcloud/includes/pgpass → blueprints/nextcloud/includes/pgpass
Executable file → Normal file
0
jails/nextcloud/includes/pgpass → blueprints/nextcloud/includes/pgpass
Executable file → Normal file
0
jails/nextcloud/includes/php.ini → blueprints/nextcloud/includes/php.ini
Executable file → Normal file
0
jails/nextcloud/includes/php.ini → blueprints/nextcloud/includes/php.ini
Executable file → Normal file
0
jails/nextcloud/includes/redis.conf → blueprints/nextcloud/includes/redis.conf
Executable file → Normal file
0
jails/nextcloud/includes/redis.conf → blueprints/nextcloud/includes/redis.conf
Executable file → Normal file
0
jails/nextcloud/includes/www-crontab → blueprints/nextcloud/includes/www-crontab
Executable file → Normal file
0
jails/nextcloud/includes/www-crontab → blueprints/nextcloud/includes/www-crontab
Executable file → Normal file
0
jails/nextcloud/includes/www.conf → blueprints/nextcloud/includes/www.conf
Executable file → Normal file
0
jails/nextcloud/includes/www.conf → blueprints/nextcloud/includes/www.conf
Executable file → Normal file
321
blueprints/nextcloud/install.sh
Executable file
321
blueprints/nextcloud/install.sh
Executable file
@ -0,0 +1,321 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This script installs the current release of Nextcloud into a created jail
|
||||||
|
# Based on the example by danb35: https://github.com/danb35/freenas-iocage-nextcloud
|
||||||
|
|
||||||
|
# Initialise defaults
|
||||||
|
# General Defaults
|
||||||
|
JAIL_IP="jail_${1}_ip4_addr"
|
||||||
|
JAIL_IP="${!JAIL_IP%/*}"
|
||||||
|
HOST_NAME="jail_${1}_host_name"
|
||||||
|
TIME_ZONE="jail_${1}_time_zone"
|
||||||
|
INCLUDES_PATH="${SCRIPT_DIR}/blueprints/nextcloud/includes"
|
||||||
|
|
||||||
|
# SSL/CERT Defaults
|
||||||
|
CERT_TYPE="jail_${1}_cert_type"
|
||||||
|
CERT_TYPE="${!CERT_TYPE:-SELFSIGNED_CERT}"
|
||||||
|
CERT_EMAIL="jail_${1}_cert_email"
|
||||||
|
CERT_EMAIL="${!CERT_EMAIL:-placeholder@email.fake}"
|
||||||
|
DNS_PLUGIN="jail_${1}_dns_plugin"
|
||||||
|
DNS_ENV="jail_${1}_dns_env"
|
||||||
|
|
||||||
|
# Database Defaults
|
||||||
|
DB_TYPE="jail_${1}_db_type"
|
||||||
|
DB_TYPE="${!DB_TYPE:-mariadb}"
|
||||||
|
DB_JAIL="jail_${1}_db_jail"
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
DB_HOST="jail_${!DB_JAIL}_ip4_addr"
|
||||||
|
DB_HOST="${!DB_HOST%/*}:3306"
|
||||||
|
|
||||||
|
DB_PASSWORD="jail_${1}_db_password"
|
||||||
|
|
||||||
|
DB_DATABASE="jail_${1}_db_database"
|
||||||
|
DB_DATABASE="${!DB_DATABASE:-$1}"
|
||||||
|
|
||||||
|
DB_USER="jail_${1}_db_user"
|
||||||
|
DB_USER="${!DB_USER:-$DB_DATABASE}"
|
||||||
|
|
||||||
|
ADMIN_PASSWORD="jail_${1}_admin_password"
|
||||||
|
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Input Sanity Check
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
|
||||||
|
# Check that necessary variables were set by nextcloud-config
|
||||||
|
if [ -z "${JAIL_IP}" ]; then
|
||||||
|
echo 'Configuration error: The Nextcloud jail does NOT accept DHCP'
|
||||||
|
echo 'Please reinstall using a fixed IP address'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "${ADMIN_PASSWORD}" ]; then
|
||||||
|
echo 'Configuration error: The Nextcloud jail requires an admin_password'
|
||||||
|
echo 'Please reinstall with a defined: admin_password'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "${!DB_PASSWORD}" ]; then
|
||||||
|
echo 'Configuration error: The Nextcloud Jail needs a database password'
|
||||||
|
echo 'Please reinstall with a defined: db_password'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ -z "${!TIME_ZONE}" ]; then
|
||||||
|
echo 'Configuration error: time_zone must be set'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "${!HOST_NAME}" ]; then
|
||||||
|
echo 'Configuration error: host_name must be set'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$CERT_TYPE" != "STANDALONE_CERT" ] && [ "$CERT_TYPE" != "DNS_CERT" ] && [ "$CERT_TYPE" != "NO_CERT" ] && [ "$CERT_TYPE" != "SELFSIGNED_CERT" ]; then
|
||||||
|
echo 'Configuration error, cert_type options: STANDALONE_CERT, DNS_CERT, NO_CERT or SELFSIGNED_CERT'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$CERT_TYPE" == "DNS_CERT" ]; then
|
||||||
|
if [ -z "${!DNS_PLUGIN}" ] ; then
|
||||||
|
echo "DNS_PLUGIN must be set to a supported DNS provider."
|
||||||
|
echo "See https://caddyserver.com/docs under the heading of \"DNS Providers\" for list."
|
||||||
|
echo "Be sure to omit the prefix of \"tls.dns.\"."
|
||||||
|
exit 1
|
||||||
|
elif [ -z "${!DNS_ENV}" ] ; then
|
||||||
|
echo "DNS_ENV must be set to a your DNS provider\'s authentication credentials."
|
||||||
|
echo "See https://caddyserver.com/docs under the heading of \"DNS Providers\" for more."
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
DL_FLAGS="tls.dns.${DNS_PLUGIN}"
|
||||||
|
DNS_SETTING="dns ${DNS_PLUGIN}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If the Nextcloud config dataset already contains data, treat this run as a reinstall
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ "$(ls -A "/mnt/${global_dataset_config}/${1}/config")" ]; then
|
||||||
|
echo "Reinstall of Nextcloud detected... "
|
||||||
|
REINSTALL="true"
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Fstab And Mounts
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
# Create and Mount Nextcloud, Config and Files
|
||||||
|
createmount "${1}" "${global_dataset_config}"/"${1}"/config /usr/local/www/nextcloud/config
|
||||||
|
createmount "${1}" "${global_dataset_config}"/"${1}"/themes /usr/local/www/nextcloud/themes
|
||||||
|
createmount "${1}" "${global_dataset_config}"/"${1}"/files /config/files
|
||||||
|
|
||||||
|
# Install includes fstab
|
||||||
|
iocage exec "${1}" mkdir -p /mnt/includes
|
||||||
|
iocage fstab -a "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
|
||||||
|
iocage exec "${1}" chown -R www:www /config/files
|
||||||
|
iocage exec "${1}" chmod -R 770 /config/files
|
||||||
|
|
||||||
|
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Basic dependency install
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
if [ "${DB_TYPE}" = "mariadb" ]; then
|
||||||
|
iocage exec "${1}" pkg install -qy mariadb103-client php73-pdo_mysql php73-mysqli
|
||||||
|
fi
|
||||||
|
|
||||||
|
fetch -o /tmp https://getcaddy.com
|
||||||
|
if ! iocage exec "${1}" bash -s personal "${DL_FLAGS}" < /tmp/getcaddy.com
|
||||||
|
then
|
||||||
|
echo "Failed to download/install Caddy"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
iocage exec "${1}" sysrc redis_enable="YES"
|
||||||
|
iocage exec "${1}" sysrc php_fpm_enable="YES"
|
||||||
|
iocage exec "${1}" sh -c "make -C /usr/ports/www/php73-opcache clean install BATCH=yes"
|
||||||
|
iocage exec "${1}" sh -c "make -C /usr/ports/devel/php73-pcntl clean install BATCH=yes"
|
||||||
|
|
||||||
|
|
||||||
|
#####
|
||||||
|
#
|
||||||
|
# Install Nextcloud
|
||||||
|
#
|
||||||
|
#####
|
||||||
|
|
||||||
|
FILE="latest-18.tar.bz2"
|
||||||
|
if ! iocage exec "${1}" fetch -o /tmp https://download.nextcloud.com/server/releases/"${FILE}" https://download.nextcloud.com/server/releases/"${FILE}".asc https://nextcloud.com/nextcloud.asc
|
||||||
|
then
|
||||||
|
echo "Failed to download Nextcloud"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
iocage exec "${1}" gpg --import /tmp/nextcloud.asc
|
||||||
|
if ! iocage exec "${1}" gpg --verify /tmp/"${FILE}".asc
|
||||||
|
then
|
||||||
|
echo "GPG Signature Verification Failed!"
|
||||||
|
echo "The Nextcloud download is corrupt."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
iocage exec "${1}" tar xjf /tmp/"${FILE}" -C /usr/local/www/
|
||||||
|
iocage exec "${1}" chown -R www:www /usr/local/www/nextcloud/
|
||||||
|
|
||||||
|
|
||||||
|
# Generate and install self-signed cert, if necessary
|
||||||
|
if [ "$CERT_TYPE" == "SELFSIGNED_CERT" ] && [ ! -f "/mnt/${global_dataset_config}/${1}/ssl/privkey.pem" ]; then
|
||||||
|
echo "No ssl certificate present, generating self signed certificate"
|
||||||
|
if [ ! -d "/mnt/${global_dataset_config}/${1}/ssl" ]; then
|
||||||
|
echo "cert folder not existing... creating..."
|
||||||
|
iocage exec "${1}" mkdir /config/ssl
|
||||||
|
fi
|
||||||
|
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=${!HOST_NAME}" -keyout "${INCLUDES_PATH}"/privkey.pem -out "${INCLUDES_PATH}"/fullchain.pem
|
||||||
|
iocage exec "${1}" cp /mnt/includes/privkey.pem /config/ssl/privkey.pem
|
||||||
|
iocage exec "${1}" cp /mnt/includes/fullchain.pem /config/ssl/fullchain.pem
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy and edit pre-written config files
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/php.ini /usr/local/etc/php.ini
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/redis.conf /usr/local/etc/redis.conf
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/www.conf /usr/local/etc/php-fpm.d/
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$CERT_TYPE" == "STANDALONE_CERT" ] && [ "$CERT_TYPE" == "DNS_CERT" ]; then
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/remove-staging.sh /root/
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$CERT_TYPE" == "NO_CERT" ]; then
|
||||||
|
echo "Copying Caddyfile for no SSL"
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/Caddyfile-nossl /usr/local/www/Caddyfile
|
||||||
|
elif [ "$CERT_TYPE" == "SELFSIGNED_CERT" ]; then
|
||||||
|
echo "Copying Caddyfile for self-signed cert"
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/Caddyfile-selfsigned /usr/local/www/Caddyfile
|
||||||
|
else
|
||||||
|
echo "Copying Caddyfile for Let's Encrypt cert"
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/Caddyfile /usr/local/www/
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
iocage exec "${1}" cp -f /mnt/includes/caddy.rc /usr/local/etc/rc.d/caddy
|
||||||
|
|
||||||
|
|
||||||
|
iocage exec "${1}" sed -i '' "s/yourhostnamehere/${!HOST_NAME}/" /usr/local/www/Caddyfile
|
||||||
|
iocage exec "${1}" sed -i '' "s/DNS-PLACEHOLDER/${DNS_SETTING}/" /usr/local/www/Caddyfile
|
||||||
|
iocage exec "${1}" sed -i '' "s/JAIL-IP/${JAIL_IP}/" /usr/local/www/Caddyfile
|
||||||
|
iocage exec "${1}" sed -i '' "s|mytimezone|${!TIME_ZONE}|" /usr/local/etc/php.ini
|
||||||
|
|
||||||
|
iocage exec "${1}" sysrc caddy_enable="YES"
|
||||||
|
iocage exec "${1}" sysrc caddy_cert_email="${CERT_EMAIL}"
|
||||||
|
iocage exec "${1}" sysrc caddy_SNI_default="${!HOST_NAME}"
|
||||||
|
iocage exec "${1}" sysrc caddy_env="${!DNS_ENV}"
|
||||||
|
|
||||||
|
iocage restart "${1}"
|
||||||
|
|
||||||
|
if [ "${REINSTALL}" == "true" ]; then
|
||||||
|
echo "Reinstall detected, skipping generaion of new config and database"
|
||||||
|
else
|
||||||
|
|
||||||
|
# Secure database, set root password, create Nextcloud DB, user, and password
|
||||||
|
if [ "${DB_TYPE}" = "mariadb" ]; then
|
||||||
|
iocage exec "mariadb" mysql -u root -e "CREATE DATABASE ${DB_DATABASE};"
|
||||||
|
iocage exec "mariadb" mysql -u root -e "GRANT ALL ON ${DB_DATABASE}.* TO ${DB_USER}@${JAIL_IP} IDENTIFIED BY '${!DB_PASSWORD}';"
|
||||||
|
iocage exec "mariadb" mysqladmin reload
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# Save passwords for later reference
|
||||||
|
iocage exec "${1}" echo "${DB_NAME} root password is ${DB_ROOT_PASSWORD}" > /root/"${1}"_db_password.txt
|
||||||
|
iocage exec "${1}" echo "Nextcloud database password is ${!DB_PASSWORD}" >> /root/"${1}"_db_password.txt
|
||||||
|
iocage exec "${1}" echo "Nextcloud Administrator password is ${ADMIN_PASSWORD}" >> /root/"${1}"_db_password.txt
|
||||||
|
|
||||||
|
# CLI installation and configuration of Nextcloud
|
||||||
|
if [ "${DB_TYPE}" = "mariadb" ]; then
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ maintenance:install --database=\"mysql\" --database-name=\"${DB_DATABASE}\" --database-user=\"${DB_USER}\" --database-pass=\"${!DB_PASSWORD}\" --database-host=\"${DB_HOST}\" --admin-user=\"admin\" --admin-pass=\"${!ADMIN_PASSWORD}\" --data-dir=\"/config/files\""
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set mysql.utf8mb4 --type boolean --value=\"true\""
|
||||||
|
fi
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ db:add-missing-indices"
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ db:convert-filecache-bigint --no-interaction"
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set logtimezone --value=\"${!TIME_ZONE}\""
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set log_type --value="file"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set logfile --value="/var/log/nextcloud.log"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set loglevel --value="2"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set logrotate_size --value="104847600"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set memcache.local --value="\OC\Memcache\APCu"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set redis host --value="/tmp/redis.sock"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set redis port --value=0 --type=integer'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set memcache.locking --value="\OC\Memcache\Redis"'
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set overwritehost --value=\"${!HOST_NAME}\""
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set overwriteprotocol --value=\"https\""
|
||||||
|
if [ "$CERT_TYPE" == "NO_CERT" ]; then
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set overwrite.cli.url --value=\"http://${!HOST_NAME}/\""
|
||||||
|
else
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set overwrite.cli.url --value=\"https://${!HOST_NAME}/\""
|
||||||
|
fi
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ config:system:set htaccess.RewriteBase --value="/"'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ maintenance:update:htaccess'
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set trusted_domains 1 --value=\"${!HOST_NAME}\""
|
||||||
|
iocage exec "${1}" su -m www -c "php /usr/local/www/nextcloud/occ config:system:set trusted_domains 2 --value=\"${JAIL_IP}\""
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ app:enable encryption'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ encryption:enable'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ encryption:disable'
|
||||||
|
iocage exec "${1}" su -m www -c 'php /usr/local/www/nextcloud/occ background:cron'
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
iocage exec "${1}" touch /var/log/nextcloud.log
|
||||||
|
iocage exec "${1}" chown www /var/log/nextcloud.log
|
||||||
|
iocage exec "${1}" su -m www -c 'php -f /usr/local/www/nextcloud/cron.php'
|
||||||
|
iocage exec "${1}" crontab -u www /mnt/includes/www-crontab
|
||||||
|
|
||||||
|
# Don't need /mnt/includes any more, so unmount it
|
||||||
|
iocage fstab -r "${1}" "${INCLUDES_PATH}" /mnt/includes nullfs rw 0 0
|
||||||
|
|
||||||
|
# Done!
|
||||||
|
echo "Installation complete!"
|
||||||
|
if [ "$CERT_TYPE" == "NO_CERT" ]; then
|
||||||
|
echo "Using your web browser, go to http://${!HOST_NAME} to log in"
|
||||||
|
else
|
||||||
|
echo "Using your web browser, go to https://${!HOST_NAME} to log in"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${REINSTALL}" == "true" ]; then
|
||||||
|
echo "You did a reinstall, please use your old database and account credentials"
|
||||||
|
else
|
||||||
|
|
||||||
|
echo "Default user is admin, password is ${ADMIN_PASSWORD}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
echo "Database Information"
|
||||||
|
echo "--------------------"
|
||||||
|
echo "Database user = ${DB_USER}"
|
||||||
|
echo "Database password = ${!DB_PASSWORD}"
|
||||||
|
echo ""
|
||||||
|
echo "All passwords are saved in /root/${1}_db_password.txt"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
if [ "$CERT_TYPE" == "STANDALONE_CERT" ] && [ "$CERT_TYPE" == "DNS_CERT" ]; then
|
||||||
|
echo "You have obtained your Let's Encrypt certificate using the staging server."
|
||||||
|
echo "This certificate will not be trusted by your browser and will cause SSL errors"
|
||||||
|
echo "when you connect. Once you've verified that everything else is working"
|
||||||
|
echo "correctly, you should issue a trusted certificate. To do this, run:"
|
||||||
|
echo "iocage exec ${1}/root/remove-staging.sh"
|
||||||
|
echo ""
|
||||||
|
elif [ "$CERT_TYPE" == "SELFSIGNED_CERT" ]; then
|
||||||
|
echo "You have chosen to create a self-signed TLS certificate for your Nextcloud"
|
||||||
|
echo "installation. This certificate will not be trusted by your browser and"
|
||||||
|
echo "will cause SSL errors when you connect. If you wish to replace this certificate"
|
||||||
|
echo "with one obtained elsewhere, the private key is located at:"
|
||||||
|
echo "/config/ssl/privkey.pem"
|
||||||
|
echo "The full chain (server + intermediate certificates together) is at:"
|
||||||
|
echo "/config/ssl/fullchain.pem"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
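Once the installer reports success, a small hedged check can confirm the instance answers; the jail name `nextcloud` and the `occ status` subcommand are assumptions, not taken from the script above:

```
# Ask Nextcloud for its status via the same occ CLI the installer uses.
iocage exec nextcloud su -m www -c 'php /usr/local/www/nextcloud/occ status'
```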
4
jails/nextcloud/README.md → blueprints/nextcloud/readme.md
Executable file → Normal file
4
jails/nextcloud/README.md → blueprints/nextcloud/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
|
|||||||
# Original README from the Upstream Nextcloud-iocage install script:
|
# Nextcloud
|
||||||
|
|
||||||
|
## Original README from the Upstream Nextcloud-iocage install script:
|
||||||
|
|
||||||
https://github.com/danb35/freenas-iocage-nextcloud
|
https://github.com/danb35/freenas-iocage-nextcloud
|
||||||
|
|
3
blueprints/organizr/config.yml
Normal file
3
blueprints/organizr/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
organizr:
|
||||||
|
pkgs: nginx php72 php72-filter php72-curl php72-hash php72-json php72-openssl php72-pdo php72-pdo_sqlite php72-session php72-simplexml php72-sqlite3 php72-zip git
|
0
jails/organizr/includes/custom/organizr.conf → blueprints/organizr/includes/custom/organizr.conf
Executable file → Normal file
0
jails/organizr/includes/custom/organizr.conf → blueprints/organizr/includes/custom/organizr.conf
Executable file → Normal file
0
jails/organizr/includes/custom/phpblock.conf → blueprints/organizr/includes/custom/phpblock.conf
Executable file → Normal file
0
jails/organizr/includes/custom/phpblock.conf → blueprints/organizr/includes/custom/phpblock.conf
Executable file → Normal file
0
jails/organizr/includes/nginx.conf → blueprints/organizr/includes/nginx.conf
Executable file → Normal file
0
jails/organizr/includes/nginx.conf → blueprints/organizr/includes/nginx.conf
Executable file → Normal file
34
blueprints/organizr/install.sh
Executable file
34
blueprints/organizr/install.sh
Executable file
@ -0,0 +1,34 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for Organizr
|
||||||
|
|
||||||
|
iocage exec "$1" sed -i '' -e 's?listen = 127.0.0.1:9000?listen = /var/run/php-fpm.sock?g' /usr/local/etc/php-fpm.d/www.conf
|
||||||
|
iocage exec "$1" sed -i '' -e 's/;listen.owner = www/listen.owner = www/g' /usr/local/etc/php-fpm.d/www.conf
|
||||||
|
iocage exec "$1" sed -i '' -e 's/;listen.group = www/listen.group = www/g' /usr/local/etc/php-fpm.d/www.conf
|
||||||
|
iocage exec "$1" sed -i '' -e 's/;listen.mode = 0660/listen.mode = 0600/g' /usr/local/etc/php-fpm.d/www.conf
|
||||||
|
iocage exec "$1" cp /usr/local/etc/php.ini-production /usr/local/etc/php.ini
|
||||||
|
iocage exec "$1" sed -i '' -e 's?;date.timezone =?date.timezone = "Universal"?g' /usr/local/etc/php.ini
|
||||||
|
iocage exec "$1" sed -i '' -e 's?;cgi.fix_pathinfo=1?cgi.fix_pathinfo=0?g' /usr/local/etc/php.ini
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
mv /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/nginx/nginx.conf /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/nginx/nginx.conf.bak
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/organizr/includes/nginx.conf /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/nginx/nginx.conf
|
||||||
|
cp -Rf "${SCRIPT_DIR}"/blueprints/organizr/includes/custom /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/nginx/custom
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ ! -d "/mnt/${global_dataset_config}/$1/ssl" ]; then
|
||||||
|
echo "cert folder doesn't exist... creating..."
|
||||||
|
iocage exec "$1" mkdir /config/ssl
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f "/mnt/${global_dataset_config}/$1/ssl/Organizr-Cert.crt" ]; then
|
||||||
|
echo "certificate exists... Skipping cert generation"
|
||||||
|
else
|
||||||
|
echo "No ssl certificate present, generating self signed certificate"
|
||||||
|
openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" -keyout /mnt/"${global_dataset_config}"/"$1"/ssl/Organizr-Cert.key -out /mnt/"${global_dataset_config}"/"$1"/ssl/Organizr-Cert.crt
|
||||||
|
fi
|
||||||
|
|
||||||
|
iocage exec "$1" git clone https://github.com/causefx/Organizr.git /usr/local/www/Organizr
|
||||||
|
iocage exec "$1" chown -R www:www /usr/local/www /config /usr/local/etc/nginx/nginx.conf /usr/local/etc/nginx/custom
|
||||||
|
iocage exec "$1" ln -s /config/config.php /usr/local/www/Organizr/api/config/config.php
|
||||||
|
iocage exec "$1" sysrc nginx_enable=YES
|
||||||
|
iocage exec "$1" sysrc php_fpm_enable=YES
|
||||||
|
iocage exec "$1" service nginx start
|
||||||
|
iocage exec "$1" service php-fpm start
|
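A minimal sketch for verifying the copied nginx configuration before relying on it (using the same `$1` jail-name convention as the script):

```
# Syntax-check nginx.conf plus the custom includes inside the jail.
iocage exec "$1" nginx -t
```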
4
jails/organizr/readme.md → blueprints/organizr/readme.md
Executable file → Normal file
4
jails/organizr/readme.md → blueprints/organizr/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
|
|||||||
# Original README from the Organizr github repo:
|
# Organizr
|
||||||
|
|
||||||
|
## Original README from the Organizr github repo:
|
||||||
|
|
||||||
https://github.com/causefx/Organizr
|
https://github.com/causefx/Organizr
|
||||||
|
|
12
blueprints/organizr/update.sh
Executable file
12
blueprints/organizr/update.sh
Executable file
@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for Organizr
|
||||||
|
|
||||||
|
iocage exec "$1" service nginx stop
|
||||||
|
iocage exec "$1" service php-fpm stop
|
||||||
|
# TODO setup cli update for Organizr here.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/organizr/includes/nginx.conf /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/nginx/nginx.conf
|
||||||
|
iocage exec "$1" "cd /usr/local/www/Organizr && git pull"
|
||||||
|
iocage exec "$1" chown -R www:www /usr/local/www /config /usr/local/etc/nginx/nginx.conf /usr/local/etc/nginx/custom
|
||||||
|
iocage exec "$1" service nginx start
|
||||||
|
iocage exec "$1" service php-fpm start
|
3
blueprints/plex/config.yml
Normal file
3
blueprints/plex/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
plex:
|
||||||
|
pkgs: plexmediaserver
|
0
jails/plex/includes/FreeBSD.conf → blueprints/plex/includes/FreeBSD.conf
Executable file → Normal file
0
jails/plex/includes/FreeBSD.conf → blueprints/plex/includes/FreeBSD.conf
Executable file → Normal file
51
blueprints/plex/install.sh
Executable file
51
blueprints/plex/install.sh
Executable file
@ -0,0 +1,51 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for plex
|
||||||
|
|
||||||
|
iocage exec plex mkdir -p /usr/local/etc/pkg/repos
|
||||||
|
|
||||||
|
# Switch to the more frequently updated FreeBSD pkg repo to keep Plex up to date.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/plex/includes/FreeBSD.conf /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/pkg/repos/FreeBSD.conf
|
||||||
|
|
||||||
|
|
||||||
|
# Check if datasets for the media libraries exist; create them if they do not.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
createmount "$1" "${global_dataset_media}" /mnt/media
|
||||||
|
createmount "$1" "${global_dataset_media}"/movies /mnt/media/movies
|
||||||
|
createmount "$1" "${global_dataset_media}"/music /mnt/media/music
|
||||||
|
createmount "$1" "${global_dataset_media}"/shows /mnt/media/shows
|
||||||
|
|
||||||
|
# Create plex ramdisk if specified
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ -z "${plex_ramdisk}" ]; then
|
||||||
|
echo "no ramdisk specified for plex, continuing without ramdisk"
|
||||||
|
else
|
||||||
|
iocage fstab -a "$1" tmpfs /tmp_transcode tmpfs rw,size="${plex_ramdisk}",mode=1777 0 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
iocage exec "$1" chown -R plex:plex /config
|
||||||
|
|
||||||
|
# Force update pkg to get latest plex version
|
||||||
|
iocage exec "$1" pkg update
|
||||||
|
iocage exec "$1" pkg upgrade -y
|
||||||
|
|
||||||
|
# Add plex user to video group for future hw-encoding support
|
||||||
|
iocage exec "$1" pw groupmod -n video -m plex
|
||||||
|
|
||||||
|
# Run different install procedures depending on Plex vs Plex Beta
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ "$plex_beta" == "true" ]; then
|
||||||
|
echo "beta enabled in config.yml... using plex beta for install"
|
||||||
|
iocage exec "$1" sysrc "plexmediaserver_plexpass_enable=YES"
|
||||||
|
iocage exec "$1" sysrc plexmediaserver_plexpass_support_path="/config"
|
||||||
|
iocage exec "$1" chown -R plex:plex /usr/local/share/plexmediaserver-plexpass/
|
||||||
|
iocage exec "$1" service plexmediaserver_plexpass restart
|
||||||
|
else
|
||||||
|
echo "beta disabled in config.yml... NOT using plex beta for install"
|
||||||
|
iocage exec "$1" sysrc "plexmediaserver_enable=YES"
|
||||||
|
iocage exec "$1" sysrc plexmediaserver_support_path="/config"
|
||||||
|
iocage exec "$1" chown -R plex:plex /usr/local/share/plexmediaserver/
|
||||||
|
iocage exec "$1" service plexmediaserver restart
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Finished installing plex"
|
14
blueprints/plex/readme.md
Normal file
14
blueprints/plex/readme.md
Normal file
@ -0,0 +1,14 @@
# Plex

### Config Parameters:

- beta: set to `true` if you want to run the Plex beta (previously known as "plexpass"). Please note: this is not required for plexpass features.
- ramdisk: specify the `size` parameter to create a transcoding ramdisk under /tmp_transcode. Requires manually setting it in Plex to be used for transcoding. (optional)

For more information about Plex, please see the Plex website.

# Original plex install script guide

https://www.ixsystems.com/community/resources/fn11-3-iocage-jails-plex-tautulli-sonarr-radarr-lidarr-jackett-transmission-organizr.58/
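To confirm the optional ramdisk took effect, a minimal sketch (assuming the jail is simply named `plex`) is:

```
# The tmpfs from the ramdisk parameter is mounted at /tmp_transcode in the jail;
# this checks that the mount exists and has the configured size.
iocage exec plex df -h /tmp_transcode
```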
23
blueprints/plex/update.sh
Executable file
23
blueprints/plex/update.sh
Executable file
@ -0,0 +1,23 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for Plex
|
||||||
|
|
||||||
|
# Run different update procedures depending on Plex vs Plex Beta
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
if [ "$plex_plexpass" == "true" ]; then
|
||||||
|
echo "beta enabled in config.yml... using plex beta for update..."
|
||||||
|
iocage exec "$1" service plexmediaserver_plexpass stop
|
||||||
|
# Plex is updated using PKG already, this is mostly a placeholder
|
||||||
|
iocage exec "$1" chown -R plex:plex /usr/local/share/plexmediaserver-plexpass/
|
||||||
|
iocage exec "$1" service plexmediaserver_plexpass restart
|
||||||
|
else
|
||||||
|
echo "beta disabled in config.yml... NOT using plex beta for update..."
|
||||||
|
iocage exec "$1" service plexmediaserver stop
|
||||||
|
# Plex is updated using PKG already, this is mostly a placeholder
|
||||||
|
iocage exec "$1" chown -R plex:plex /usr/local/share/plexmediaserver/
|
||||||
|
iocage exec "$1" service plexmediaserver restart
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
3
blueprints/radarr/config.yml
Normal file
3
blueprints/radarr/config.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
blueprint:
|
||||||
|
radarr:
|
||||||
|
pkgs: mono mediainfo sqlite3 libgdiplus
|
24
blueprints/radarr/install.sh
Executable file
24
blueprints/radarr/install.sh
Executable file
@ -0,0 +1,24 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the install script for radarr
|
||||||
|
|
||||||
|
# Check if the dataset for completed downloads and its parent dataset exist; create them if they do not.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
createmount "$1" "${global_dataset_downloads}"
|
||||||
|
createmount "$1" "${global_dataset_downloads}"/complete /mnt/fetched
|
||||||
|
|
||||||
|
# Check if dataset for media library and the dataset for movies exist, create if they do not.
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
createmount "$1" "${global_dataset_media}"
|
||||||
|
createmount "$1" "${global_dataset_media}"/movies /mnt/movies
|
||||||
|
|
||||||
|
iocage exec "$1" "fetch https://github.com/Radarr/Radarr/releases/download/v0.2.0.1480/Radarr.develop.0.2.0.1480.linux.tar.gz -o /usr/local/share"
|
||||||
|
iocage exec "$1" "tar -xzvf /usr/local/share/Radarr.develop.0.2.0.1480.linux.tar.gz -C /usr/local/share"
|
||||||
|
iocage exec "$1" rm /usr/local/share/Radarr.develop.0.2.0.1480.linux.tar.gz
|
||||||
|
iocage exec "$1" "pw user add radarr -c radarr -u 352 -d /nonexistent -s /usr/bin/nologin"
|
||||||
|
iocage exec "$1" chown -R radarr:radarr /usr/local/share/Radarr /config
|
||||||
|
iocage exec "$1" mkdir /usr/local/etc/rc.d
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/radarr/includes/radarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/radarr
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/radarr
|
||||||
|
iocage exec "$1" sysrc "radarr_enable=YES"
|
||||||
|
iocage exec "$1" service radarr restart
|
4
jails/radarr/readme.md → blueprints/radarr/readme.md
Executable file → Normal file
4
jails/radarr/readme.md → blueprints/radarr/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
|
|||||||
# Original README from the radarr github:
|
# Radarr
|
||||||
|
|
||||||
|
## Original README from the radarr github:
|
||||||
|
|
||||||
https://github.com/Radarr/Radarr
|
https://github.com/Radarr/Radarr
|
||||||
|
|
10
blueprints/radarr/update.sh
Executable file
10
blueprints/radarr/update.sh
Executable file
@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/local/bin/bash
|
||||||
|
# This file contains the update script for radarr
|
||||||
|
|
||||||
|
iocage exec "$1" service radarr stop
|
||||||
|
#TODO insert code to update radarr itself here
|
||||||
|
iocage exec "$1" chown -R radarr:radarr /usr/local/share/Radarr /config
|
||||||
|
# shellcheck disable=SC2154
|
||||||
|
cp "${SCRIPT_DIR}"/blueprints/radarr/includes/radarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/radarr
|
||||||
|
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/radarr
|
||||||
|
iocage exec "$1" service radarr restart
|
3
blueprints/sonarr/config.yml
Normal file
@ -0,0 +1,3 @@
blueprint:
  sonarr:
    pkgs: mono mediainfo sqlite3
24
blueprints/sonarr/install.sh
Executable file
@ -0,0 +1,24 @@
#!/usr/local/bin/bash
# This file contains the install script for sonarr

# Check if the dataset for completed downloads and its parent dataset exist; create them if they do not.
# shellcheck disable=SC2154
createmount "$1" "${global_dataset_downloads}"
createmount "$1" "${global_dataset_downloads}"/complete /mnt/fetched

# Check if the dataset for the media library and the dataset for TV shows exist; create them if they do not.
# shellcheck disable=SC2154
createmount "$1" "${global_dataset_media}"
createmount "$1" "${global_dataset_media}"/shows /mnt/shows

iocage exec "$1" "fetch http://download.sonarr.tv/v2/master/mono/NzbDrone.master.tar.gz -o /usr/local/share"
iocage exec "$1" "tar -xzvf /usr/local/share/NzbDrone.master.tar.gz -C /usr/local/share"
iocage exec "$1" rm /usr/local/share/NzbDrone.master.tar.gz
iocage exec "$1" "pw user add sonarr -c sonarr -u 351 -d /nonexistent -s /usr/bin/nologin"
iocage exec "$1" chown -R sonarr:sonarr /usr/local/share/NzbDrone /config
iocage exec "$1" mkdir /usr/local/etc/rc.d
# shellcheck disable=SC2154
cp "${SCRIPT_DIR}"/blueprints/sonarr/includes/sonarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/sonarr
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/sonarr
iocage exec "$1" sysrc "sonarr_enable=YES"
iocage exec "$1" service sonarr restart
4
jails/sonarr/readme.md → blueprints/sonarr/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
# Original README from the sonarr github:
# Sonarr

## Original README from the sonarr github:

https://github.com/Sonarr/Sonarr
10
blueprints/sonarr/update.sh
Executable file
@ -0,0 +1,10 @@
#!/usr/local/bin/bash
# This file contains the update script for sonarr

iocage exec "$1" service sonarr stop
# TODO: insert code to update sonarr itself here
iocage exec "$1" chown -R sonarr:sonarr /usr/local/share/NzbDrone /config
# shellcheck disable=SC2154
cp "${SCRIPT_DIR}"/blueprints/sonarr/includes/sonarr.rc /mnt/"${global_dataset_iocage}"/jails/"$1"/root/usr/local/etc/rc.d/sonarr
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/sonarr
iocage exec "$1" service sonarr restart
3
blueprints/tautulli/config.yml
Normal file
@ -0,0 +1,3 @@
blueprint:
  tautulli:
    pkgs: python2 py27-sqlite3 py27-openssl git
11
blueprints/tautulli/install.sh
Executable file
@ -0,0 +1,11 @@
#!/usr/local/bin/bash
# This file contains the install script for Tautulli

iocage exec "$1" git clone https://github.com/Tautulli/Tautulli.git /usr/local/share/Tautulli
iocage exec "$1" "pw user add tautulli -c tautulli -u 109 -d /nonexistent -s /usr/bin/nologin"
iocage exec "$1" chown -R tautulli:tautulli /usr/local/share/Tautulli /config
iocage exec "$1" cp /usr/local/share/Tautulli/init-scripts/init.freenas /usr/local/etc/rc.d/tautulli
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/tautulli
iocage exec "$1" sysrc "tautulli_enable=YES"
iocage exec "$1" sysrc "tautulli_flags=--datadir /config"
iocage exec "$1" service tautulli start
4
jails/tautulli/readme.md → blueprints/tautulli/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
# Original README from the tautulli github:
# Tautulli

## Original README from the tautulli github:

https://github.com/Tautulli/Tautulli
9
blueprints/tautulli/update.sh
Executable file
@ -0,0 +1,9 @@
#!/usr/local/bin/bash
# This file contains the update script for Tautulli

iocage exec "$1" service tautulli stop
# Tautulli is updated through pkg; this is mostly just a placeholder
iocage exec "$1" chown -R tautulli:tautulli /usr/local/share/Tautulli /config
iocage exec "$1" cp /usr/local/share/Tautulli/init-scripts/init.freenas /usr/local/etc/rc.d/tautulli
iocage exec "$1" chmod u+x /usr/local/etc/rc.d/tautulli
iocage exec "$1" service tautulli restart
3
blueprints/transmission/config.yml
Normal file
@ -0,0 +1,3 @@
blueprint:
  transmission:
    pkgs: bash unzip unrar transmission
19
blueprints/transmission/install.sh
Executable file
@ -0,0 +1,19 @@
#!/usr/local/bin/bash
# This file contains the install script for transmission

# Check if the Downloads dataset exists; create it if it does not.
# shellcheck disable=SC2154
createmount "$1" "${global_dataset_downloads}" /mnt/downloads

# Check if the Complete Downloads dataset exists; create it if it does not.
createmount "$1" "${global_dataset_downloads}"/complete /mnt/downloads/complete

# Check if the Incomplete Downloads dataset exists; create it if it does not.
createmount "$1" "${global_dataset_downloads}"/incomplete /mnt/downloads/incomplete

iocage exec "$1" chown -R transmission:transmission /config
iocage exec "$1" sysrc "transmission_enable=YES"
iocage exec "$1" sysrc "transmission_conf_dir=/config"
iocage exec "$1" sysrc "transmission_download_dir=/mnt/downloads/complete"
iocage exec "$1" service transmission restart
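Editor's note: after the sysrc calls above, the resulting rc.conf values can be read back from inside the jail to confirm the configuration took effect. A small verification example, assuming the jail is simply named transmission, could be:

# Read back the values written by the install script (sysrc -n prints the value only).
iocage exec transmission sysrc -n transmission_enable
iocage exec transmission sysrc -n transmission_conf_dir
iocage exec transmission sysrc -n transmission_download_dir
# Confirm the daemon is running.
iocage exec transmission service transmission status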
4
jails/transmission/readme.md → blueprints/transmission/readme.md
Executable file → Normal file
@ -1,4 +1,6 @@
# Original README from the transmission github:
# Transmission

## Original README from the transmission github:

https://github.com/transmission/transmission
7
blueprints/transmission/update.sh
Executable file
@ -0,0 +1,7 @@
#!/usr/local/bin/bash
# This file contains the update script for transmission

iocage exec "$1" service transmission stop
# Transmission is updated during pkg update; this file is mostly just a placeholder
iocage exec "$1" chown -R transmission:transmission /config
iocage exec "$1" service transmission restart
3
blueprints/unifi/config.yml
Normal file
@ -0,0 +1,3 @@
blueprint:
  unifi:
    pkgs: jq unifi5
45
blueprints/unifi/includes/mongodb.conf
Normal file
@ -0,0 +1,45 @@
# mongod.conf

# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/db/mongodb/mongod.log

# Where and how to store data.
storage:
  dbPath: /config/mongodb
  journal:
    enabled: true
# engine:
# mmapv1:
# wiredTiger:

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/db/mongodb/mongod.lock  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.


#security:

#operationProfiling:

#replication:

#sharding:

## Enterprise-Only Options

#auditLog:

#snmp:
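Editor's note: since bindIp above pins mongod to the loopback address inside the jail, a quick way to confirm the database is actually listening where the UniFi controller expects it is to list the jail's listening sockets. This is an illustrative check only, assuming the jail is named unifi:

# List IPv4 listening sockets inside the jail and look for mongod on port 27017.
iocage exec unifi sockstat -4 -l | grep 27017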
64
blueprints/unifi/includes/rc/mongod.rc
Executable file
@ -0,0 +1,64 @@
#!/bin/sh
# shellcheck disable=SC1091,SC2034,SC2223,SC2154,SC1090,SC2046,SC2086,SC2155,SC2181,SC2006

# PROVIDE: mongod
# REQUIRE: NETWORK ldconfig
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# mongod_enable (bool):  Set to "NO" by default.
#                        Set it to "YES" to enable mongod.
# mongod_limits (bool):  Set to "NO" by default.
#                        Set it to yes to run `limits -e -U mongodb`
#                        just before mongod starts.
# mongod_dbpath (str):   Default to "/var/db/mongodb"
#                        Base database directory.
# mongod_flags (str):    Custom additional arguments to be passed to mongod.
#                        Default to "--logpath ${mongod_dbpath}/mongod.log --logappend".
# mongod_config (str):   Default to "/usr/local/etc/mongodb.conf"
#                        Path to config file
#

. /etc/rc.subr

name="mongod"
rcvar=mongod_enable

load_rc_config $name

: ${mongod_enable="NO"}
: ${mongod_limits="NO"}
: ${mongod_dbpath="/config/mongodb"}
: ${mongod_flags="--logpath ${mongod_dbpath}/mongod.log --logappend --setParameter=disabledSecureAllocatorDomains=\*"}
: ${mongod_user="mongodb"}
: ${mongod_group="mongodb"}
: ${mongod_config="/usr/local/etc/mongodb.conf"}

pidfile="${mongod_dbpath}/mongod.lock"
command=/usr/local/bin/${name}
command_args="--config $mongod_config --dbpath $mongod_dbpath --fork >/dev/null 2>/dev/null"
start_precmd="${name}_prestart"

mongod_create_dbpath()
{
    mkdir "${mongod_dbpath}" >/dev/null 2>/dev/null
    [ $? -eq 0 ] && chown -R "${mongod_user}":"${mongod_group}" "${mongod_dbpath}"
}

mongod_prestart()
{
    if [ ! -d "${mongod_dbpath}" ]; then
        mongod_create_dbpath || return 1
    fi
    if checkyesno mongod_limits; then
        # TODO check this and clean this up
        # Shellcheck disable=SC2046,SC2006
        eval `/usr/bin/limits -e -U ${mongod_user}` 2>/dev/null
    else
        return 0
    fi
}

run_rc_command "$1"
87
blueprints/unifi/includes/rc/unifi.rc
Executable file
@ -0,0 +1,87 @@
#!/bin/sh
# shellcheck disable=SC1091,SC2034,SC2223,SC2154,SC1090,SC2046,SC2086,SC2155,SC2237
#
# Created by: Mark Felder <feld@FreeBSD.org>
# $FreeBSD: branches/2020Q2/net-mgmt/unifi5/files/unifi.in 512281 2019-09-18 17:37:59Z feld $
#

# PROVIDE: unifi
# REQUIRE: LOGIN
# KEYWORD: shutdown

#
# Add the following line to /etc/rc.conf to enable `unifi':
#
# unifi_enable="YES"
#
# Other configuration settings for unifi that can be set in /etc/rc.conf:
#
# unifi_user (str)
#   This is the user that unifi runs as
#   Set to unifi by default
#
# unifi_group (str)
#   This is the group that unifi runs as
#   Set to unifi by default
#
# unifi_chdir (str)
#   This is the directory that unifi chdirs into before starting
#   Set to /usr/local/share/java/unifi by default
#
# unifi_java_home (str)
#   The path to the base directory for the Java to use to run unifi
#   Defaults to /usr/local/openjdk8
#
# unifi_javaflags (str)
#   Flags passed to Java to run unifi
#   Set to "-Djava.awt.headless=true -Xmx1024M" by default
#

. /etc/rc.subr
name=unifi

rcvar=unifi_enable
load_rc_config ${name}

: ${unifi_enable:=NO}
: ${unifi_user:=unifi}
: ${unifi_group:=unifi}
: ${unifi_chdir=/config/controller/unifi}
: ${unifi_java_home=/usr/local/openjdk8}
: ${unifi_javaflags="-Djava.awt.headless=true -Xmx1024M"}

pidfile="/var/run/unifi/${name}.pid"
procname=${unifi_java_home}/bin/java
command=/usr/sbin/daemon
command_args="-f -p ${pidfile} ${unifi_java_home}/bin/java ${unifi_javaflags} com.ubnt.ace.Launcher start"
start_precmd=start_precmd
stop_precmd=stop_precmd
stop_postcmd=stop_postcmd

export CLASSPATH=$(echo ${unifi_chdir}/lib/*.jar | tr ' ' ':')

start_precmd()
{
    if [ ! -e /var/run/unifi ] ; then
        install -d -o unifi -g unifi /var/run/unifi;
    fi
}

stop_precmd()
{
    if [ -r ${pidfile} ]; then
        _UNIFIPID=$(check_pidfile ${pidfile} ${procname})
        export _UNIFI_CHILDREN=$(pgrep -P ${_UNIFIPID})
    fi
}

stop_postcmd()
{
    if ! [ -z ${_UNIFI_CHILDREN} ]; then
        echo "Cleaning up leftover child processes."
        kill $sig_stop ${_UNIFI_CHILDREN}
        wait_for_pids ${_UNIFI_CHILDREN}
    fi
}

run_rc_command "$1"
36
blueprints/unifi/includes/rc/unifi_poller.rc
Executable file
@ -0,0 +1,36 @@
#!/bin/sh
# shellcheck disable=SC1091,SC2034,SC2223,SC2154,SC1090,SC2046
#
# FreeBSD rc.d startup script for unifi-poller.
#
# PROVIDE: unifi-poller
# REQUIRE: networking syslog
# KEYWORD:

. /etc/rc.subr

name="unifi_poller"
real_name="unifi-poller"
rcvar="unifi_poller_enable"
unifi_poller_command="/usr/local/bin/${real_name}"
unifi_poller_user="nobody"
unifi_poller_config="/config/up.conf"
pidfile="/var/run/${real_name}/pid"

# This runs `daemon` as the `unifi_poller_user` user.
command="/usr/sbin/daemon"
command_args="-P ${pidfile} -r -t ${real_name} -T ${real_name} -l daemon ${unifi_poller_command} -c ${unifi_poller_config}"

load_rc_config ${name}
: ${unifi_poller_enable:=no}

# Make a place for the pid file.
mkdir -p $(dirname ${pidfile})
chown -R $unifi_poller_user $(dirname ${pidfile})

# Suck in optional exported override variables.
# ie. add something like the following to this file: export UP_POLLER_DEBUG=true
[ -f "/usr/local/etc/defaults/${real_name}" ] && . "/usr/local/etc/defaults/${real_name}"

# Go!
run_rc_command "$1"
106
blueprints/unifi/includes/up.conf
Normal file
@ -0,0 +1,106 @@
# UniFi Poller v2 primary configuration file. TOML FORMAT #
###########################################################

[poller]
  # Turns on line numbers, microsecond logging, and a per-device log.
  # The default is false, but I personally leave this on at home (four devices).
  # This may be noisy if you have a lot of devices. It adds one line per device.
  debug = false

  # Turns off per-interval logs. Only startup and error logs will be emitted.
  # Recommend enabling debug with this setting for better error logging.
  quiet = true

  # Load dynamic plugins. Advanced use; only sample mysql plugin provided by default.
  plugins = []

#### OUTPUTS

# If you don't use an output, you can disable it.

[prometheus]
  disable = true
  # This controls on which ip and port /metrics is exported when mode is "prometheus".
  # This has no effect in other modes. Must contain a colon and port.
  http_listen = "0.0.0.0:9130"
  report_errors = false

[influxdb]
  disable = false
  # InfluxDB does not require auth by default, so the user/password are probably unimportant.
  url = "dbip"
  user = "influxdbuser"
  pass = "influxdbpass"
  # Be sure to create this database.
  db = "unifidb"
  # If your InfluxDB uses a valid SSL cert, set this to true.
  verify_ssl = false
  # The UniFi Controller only updates traffic stats about every 30 seconds.
  # Setting this to something lower may lead to "zeros" in your data.
  # If you're getting zeros now, set this to "1m"
  interval = "30s"

#### INPUTS

[unifi]
  # Setting this to true and providing default credentials allows you to skip
  # configuring controllers in this config file. Instead you configure them in
  # your prometheus.yml config. Prometheus then sends the controller URL to
  # unifi-poller when it performs the scrape. This is useful if you have many,
  # or changing controllers. Most people can leave this off. See wiki for more.
  dynamic = false

# The following section contains the default credentials/configuration for any
# dynamic controller (see above section), or the primary controller if you do not
# provide one and dynamic is disabled. In other words, you can just add your
# controller here and delete the following section.
[unifi.defaults]
  #role = "main controller"
  url = "https://127.0.0.1:8443"
  user = "unifiuser"
  pass = "unifipassword"
  sites = ["all"]
  save_ids = false
  save_dpi = false
  save_sites = true
  verify_ssl = false

# The following is optional and used for configurations with multiple controllers.

# You may repeat the following section to poll multiple controllers.
#[[unifi.controller]]
  # Friendly name used in dashboards. Uses URL if left empty; which is fine.
  # Avoid changing this later because it will live forever in your database.
  # Multiple controllers may share a role. This allows grouping during scrapes.
  #role = ""
  #url = "https://127.0.0.1:8443"

  # Make a read-only user in the UniFi Admin Settings, allow it access to all sites.
  #user = "unifipoller"
  #pass = "4BB9345C-2341-48D7-99F5-E01B583FF77F"

  # If the controller has more than one site, specify which sites to poll here.
  # Set this to ["default"] to poll only the first site on the controller.
  # A setting of ["all"] will poll all sites; this works if you only have 1 site too.
  #sites = ["all"]

  # Enable collection of Intrusion Detection System Data (InfluxDB only).
  # Only useful if IDS or IPS are enabled on one of the sites.
  #save_ids = false

  # Enable collection of Deep Packet Inspection data. This data breaks down traffic
  # types for each client and site, it powers a dedicated DPI dashboard.
  # Enabling this adds roughly 150 data points per client. That's 6000 metrics for
  # 40 clients. This adds a little bit of poller run time per interval and causes
  # more API requests to your controller(s). Don't let these "cons" sway you:
  # it's cool data. Please provide feedback on your experience with this feature.
  #save_dpi = false

  # Enable collection of site data. This data powers the Network Sites dashboard.
  # It's not valuable to everyone and setting this to false will save resources.
  #save_sites = true

  # If your UniFi controller has a valid SSL certificate (like lets encrypt),
  # you can enable this option to validate it. Otherwise, any SSL certificate is
  # valid. If you don't know if you have a valid SSL cert, then you don't have one.
  #verify_ssl = false
119
blueprints/unifi/install.sh
Executable file
@ -0,0 +1,119 @@
#!/usr/local/bin/bash
# This file contains the install script for unifi-controller & unifi-poller

# Initialize variables
# shellcheck disable=SC2154
JAIL_IP="jail_${1}_ip4_addr"
JAIL_IP="${!JAIL_IP%/*}"

# shellcheck disable=SC2154
DB_JAIL="jail_${1}_db_jail"

POLLER="jail_${1}_unifi_poller"

# shellcheck disable=SC2154
DB_IP="jail_${!DB_JAIL}_ip4_addr"
DB_IP="${!DB_IP%/*}"

# shellcheck disable=SC2154
DB_NAME="jail_${1}_up_db_name"
DB_NAME="${!DB_NAME:-$1}"

# shellcheck disable=SC2154
DB_USER="jail_${1}_up_db_user"
DB_USER="${!DB_USER:-$DB_NAME}"

# shellcheck disable=SC2154
DB_PASS="jail_${1}_up_db_password"

# shellcheck disable=SC2154
UP_USER="jail_${1}_up_user"
UP_USER="${!UP_USER:-$1}"

# shellcheck disable=SC2154
UP_PASS="jail_${1}_up_password"
INCLUDES_PATH="${SCRIPT_DIR}/blueprints/unifi/includes"

if [ -z "${!DB_PASS}" ]; then
    echo "up_db_password can't be empty"
    exit 1
fi

if [ -z "${!DB_JAIL}" ]; then
    echo "db_jail can't be empty"
    exit 1
fi

if [ -z "${!UP_PASS}" ]; then
    echo "up_password can't be empty"
    exit 1
fi

# Enable persistent Unifi Controller data
iocage exec "${1}" mkdir -p /config/controller/mongodb
iocage exec "${1}" cp -Rp /usr/local/share/java/unifi /config/controller
iocage exec "${1}" chown -R mongodb:mongodb /config/controller/mongodb
# shellcheck disable=SC2154
cp "${INCLUDES_PATH}"/mongodb.conf /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc
# shellcheck disable=SC2154
cp "${INCLUDES_PATH}"/rc/mongod.rc /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/mongod
# shellcheck disable=SC2154
cp "${INCLUDES_PATH}"/rc/unifi.rc /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/unifi
iocage exec "${1}" sysrc unifi_enable=YES
iocage exec "${1}" service unifi start

# shellcheck disable=SC2154
if [[ ! "${!POLLER}" ]]; then
    echo "Installation complete!"
    echo "Unifi Controller is accessible at https://${JAIL_IP}:8443."
else
    # Check if the influxdb container exists; create the unifi database if it does, error out if it does not.
    echo "Checking if the database jail and database exist..."
    if [[ -d /mnt/"${global_dataset_iocage}"/jails/"${!DB_JAIL}" ]]; then
        DB_EXISTING=$(iocage exec "${!DB_JAIL}" curl -G http://"${DB_IP}":8086/query --data-urlencode 'q=SHOW DATABASES' | jq '.results [] | .series [] | .values []' | grep "$DB_NAME" | sed 's/"//g' | sed 's/^ *//g')
        if [[ "$DB_NAME" == "$DB_EXISTING" ]]; then
            echo "${!DB_JAIL} jail with database ${DB_NAME} already exists. Skipping database creation... "
        else
            echo "${!DB_JAIL} jail exists, but database ${DB_NAME} does not. Creating database ${DB_NAME}."
            if [[ -z "${DB_USER}" ]] || [[ -z "${!DB_PASS}" ]]; then
                echo "Database username and password not provided. Cannot create database without credentials. Exiting..."
                exit 1
            else
                # shellcheck disable=SC2027,2086
                iocage exec "${!DB_JAIL}" "curl -XPOST -u ${DB_USER}:${!DB_PASS} http://"${DB_IP}":8086/query --data-urlencode 'q=CREATE DATABASE ${DB_NAME}'"
                echo "Database ${DB_NAME} created with username ${DB_USER} with password ${!DB_PASS}."
            fi
        fi
    else
        echo "Influxdb jail does not exist. Unifi-Poller requires the Influxdb jail. Please install the Influxdb jail."
        exit 1
    fi

    # Download and install Unifi-Poller
    FILE_NAME=$(curl -s https://api.github.com/repos/unifi-poller/unifi-poller/releases/latest | jq -r ".assets[] | select(.name | contains(\"amd64.txz\")) | .name")
    DOWNLOAD=$(curl -s https://api.github.com/repos/unifi-poller/unifi-poller/releases/latest | jq -r ".assets[] | select(.name | contains(\"amd64.txz\")) | .browser_download_url")
    iocage exec "${1}" fetch -o /config "${DOWNLOAD}"

    # Install downloaded Unifi-Poller package, configure and enable
    iocage exec "${1}" pkg install -qy /config/"${FILE_NAME}"
    # shellcheck disable=SC2154
    cp "${INCLUDES_PATH}"/up.conf /mnt/"${global_dataset_config}"/"${1}"
    # shellcheck disable=SC2154
    cp "${INCLUDES_PATH}"/rc/unifi_poller.rc /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/unifi_poller
    chmod +x /mnt/"${global_dataset_iocage}"/jails/"${1}"/root/usr/local/etc/rc.d/unifi_poller
    iocage exec "${1}" sed -i '' "s|influxdbuser|${DB_USER}|" /config/up.conf
    iocage exec "${1}" sed -i '' "s|influxdbpass|${!DB_PASS}|" /config/up.conf
    iocage exec "${1}" sed -i '' "s|unifidb|${DB_NAME}|" /config/up.conf
    iocage exec "${1}" sed -i '' "s|unifiuser|${UP_USER}|" /config/up.conf
    iocage exec "${1}" sed -i '' "s|unifipassword|${!UP_PASS}|" /config/up.conf
    iocage exec "${1}" sed -i '' "s|dbip|http://${DB_IP}:8086|" /config/up.conf

    iocage exec "${1}" sysrc unifi_poller_enable=YES
    iocage exec "${1}" service unifi_poller start

    echo "Installation complete!"
    echo "Unifi Controller is accessible at https://${JAIL_IP}:8443."
    echo "Please login to the Unifi Controller and add ${UP_USER} as a read-only user."
    echo "In Grafana, add Unifi-Poller as a data source."
fi
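Editor's note: the variable lookups at the top of this install script resolve names such as jail_${1}_ip4_addr with bash indirect expansion (${!var}), on the assumption that the config.yml parser elsewhere in the repository exports one shell variable per jail setting. A minimal, self-contained illustration of that pattern, using made-up values, is:

#!/usr/local/bin/bash
# Standalone illustration of the ${!var} lookups used above -- hypothetical values only.
jail_unifi_ip4_addr="192.168.1.50/24"   # would normally be populated from config.yml
set -- unifi                            # the install script receives the jail name as $1

JAIL_IP="jail_${1}_ip4_addr"   # JAIL_IP now holds the *name* of the variable to read
JAIL_IP="${!JAIL_IP%/*}"       # indirect expansion, then strip the /24 suffix
echo "${JAIL_IP}"              # prints 192.168.1.50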
Some files were not shown because too many files have changed in this diff.