remove current content
This commit is contained in:
parent
5632bbfc99
commit
b506958986
25
.github/renovate-config.js
vendored
25
.github/renovate-config.js
vendored
@ -1,25 +0,0 @@
|
|||||||
module.exports = {
|
|
||||||
dryRun: false,
|
|
||||||
username: 'truecharts-admin',
|
|
||||||
gitAuthor: 'truecharts-admin <bot@truecharts.org>',
|
|
||||||
onboarding: false,
|
|
||||||
platform: 'github',
|
|
||||||
repositories: [
|
|
||||||
'truecharts/truetool',
|
|
||||||
],
|
|
||||||
packageRules: [
|
|
||||||
{
|
|
||||||
description: 'lockFileMaintenance',
|
|
||||||
matchUpdateTypes: [
|
|
||||||
'pin',
|
|
||||||
'digest',
|
|
||||||
'patch',
|
|
||||||
'minor',
|
|
||||||
'major',
|
|
||||||
'lockFileMaintenance',
|
|
||||||
],
|
|
||||||
dependencyDashboardApproval: false,
|
|
||||||
stabilityDays: 0,
|
|
||||||
},
|
|
||||||
],
|
|
||||||
};
|
|
23
.github/renovate.json5
vendored
23
.github/renovate.json5
vendored
@ -1,23 +0,0 @@
|
|||||||
{
|
|
||||||
"semanticCommits": "enabled",
|
|
||||||
"extends": ["helpers:pinGitHubActionDigests"],
|
|
||||||
"dependencyDashboard": true,
|
|
||||||
"dependencyDashboardTitle": "Renovate Dashboard 🤖",
|
|
||||||
"suppressNotifications": ["prIgnoreNotification"],
|
|
||||||
"commitMessageTopic": "{{depName}}",
|
|
||||||
"commitMessageExtra": "to {{newVersion}}",
|
|
||||||
"commitMessageSuffix": "",
|
|
||||||
"rebaseWhen": "conflicted",
|
|
||||||
"prConcurrentLimit": 100,
|
|
||||||
"pinDigests": true,
|
|
||||||
"automerge": true,
|
|
||||||
"gitAuthor": "TrueCharts-Admin <bot@truecharts.org>",
|
|
||||||
"packageRules": [
|
|
||||||
// Setup datasources for github actions
|
|
||||||
{
|
|
||||||
"matchManagers": ["github-actions"],
|
|
||||||
"commitMessageTopic": "github-action {{depName}} [skip ci]",
|
|
||||||
"automerge": true,
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
18
.github/workflows/renovate.yml
vendored
18
.github/workflows/renovate.yml
vendored
@ -1,18 +0,0 @@
|
|||||||
name: Renovate
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 */6 * * *"
|
|
||||||
jobs:
|
|
||||||
renovate:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.BOT_TOKEN }}
|
|
||||||
- name: Self-hosted Renovate
|
|
||||||
uses: renovatebot/github-action@d4496c2d9b06c4e43b227fc3f331a434e99eaef5 # v34.73.3
|
|
||||||
with:
|
|
||||||
configurationFile: .github/renovate-config.js
|
|
||||||
token: ${{ secrets.BOT_TOKEN }}
|
|
26
.github/workflows/shellcheck.yml
vendored
26
.github/workflows/shellcheck.yml
vendored
@ -1,26 +0,0 @@
|
|||||||
on:
|
|
||||||
push:
|
|
||||||
pull_request:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
name: 'Lint and Test'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
shellcheck:
|
|
||||||
name: Shellcheck
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3
|
|
||||||
- name: Run ShellCheck
|
|
||||||
uses: ludeeus/action-shellcheck@master
|
|
||||||
with:
|
|
||||||
check_together: 'yes'
|
|
||||||
env:
|
|
||||||
SHELLCHECK_OPTS: -e SC2154
|
|
||||||
|
|
||||||
pre-commit:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3
|
|
||||||
- uses: actions/setup-python@5ccb29d8773c3f3f653e1705f474dfaa8a06a912 # v4
|
|
||||||
- uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # tag=v3.0.0
|
|
@ -1,21 +0,0 @@
|
|||||||
# See https://pre-commit.com for more information
|
|
||||||
repos:
|
|
||||||
- repo: https://github.com/Lucas-C/pre-commit-hooks
|
|
||||||
rev: v1.1.10
|
|
||||||
hooks:
|
|
||||||
- id: remove-tabs
|
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v4.0.1
|
|
||||||
hooks:
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: end-of-file-fixer
|
|
||||||
- id: fix-byte-order-marker
|
|
||||||
- id: mixed-line-ending
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- id: check-case-conflict
|
|
||||||
- id: check-executables-have-shebangs
|
|
||||||
- id: check-docstring-first
|
|
||||||
- id: check-symlinks
|
|
||||||
- id: destroyed-symlinks
|
|
||||||
- id: fix-byte-order-marker
|
|
29
LICENSE
29
LICENSE
@ -1,29 +0,0 @@
|
|||||||
BSD 3-Clause License
|
|
||||||
|
|
||||||
Copyright (c) 2022, TrueCharts
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
1. Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer in the documentation
|
|
||||||
and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
3. Neither the name of the copyright holder nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
||||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
||||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
||||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
||||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
||||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
131
README.md
131
README.md
@ -1,131 +0,0 @@
|
|||||||
# truetool
|
|
||||||
|
|
||||||
An easy tool for frequently used TrueNAS SCALE CLI utilities.
|
|
||||||
|
|
||||||
Please before using this tool, [read this note](https://truecharts.org/manual/guides/Important-MUST-READ)
|
|
||||||
|
|
||||||
## Table of contents:
|
|
||||||
|
|
||||||
* [Synopsis](#synopsis)
|
|
||||||
* [Arguments](#arguments)
|
|
||||||
* [How to Install](#how-to-install)
|
|
||||||
* [How to Update](#how-to-update)
|
|
||||||
* [Creating a Cron Job](#creating-a-cron-job)
|
|
||||||
* [Additional Information](#additional-information)
|
|
||||||
|
|
||||||
<br>
|
|
||||||
|
|
||||||
## Synopsis
|
|
||||||
|
|
||||||
TrueTool is a command line tool, designed to enable some features of TrueNAS SCALE that are either not-enabled by default or not-available in the Web-GUI.
|
|
||||||
It also offers a few handy shortcuts for commonly required chores, like: Enabling Apt or Helm
|
|
||||||
|
|
||||||
## Arguments
|
|
||||||
|
|
||||||
| Flag | Example | Parameter | Description |
|
|
||||||
| --------------- | ---------------------- | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| --delete-backup | --delete-backup | None | Opens a menu to delete backups<br>_Useful if you need to delete old system backups or backups from other scripts_ |
|
|
||||||
| --restore | --restore | None | Restore TrueTool specific `ix-applications dataset` snapshot |
|
|
||||||
| --mount | --mount | None | Initiates mounting feature<br>Choose between unmounting and mounting PVC data |
|
|
||||||
| --dns | --dns | None | list all of your applications DNS names and their web ports |
|
|
||||||
| --list-backups | --list-backups | None | Prints a list of backups available |
|
|
||||||
| --helm-enable | --helm-enable | None | Enables Helm command access on SCALE |
|
|
||||||
| --kubeapi-enable | --kubeapi-enable | None | Enables external access to Kuberntes API port |
|
|
||||||
| --apt-enable | --apt-enable | None | Enables Apt command access on SCALE |
|
|
||||||
| --no-color | --no-color | None | Disables showing colors in terminal output, usefull for SCALE Email output |
|
|
||||||
| -U | -U | None | Update applications, ignoring major version changes |
|
|
||||||
| -u | -u | None | Update applications, do NOT update if there was a major version change |
|
|
||||||
| -b | -b 14 | Integer | Backup `ix-applications` dataset<br>_Creates backups up to the number you've chosen_ |
|
|
||||||
| -i | -i nextcloud -i sonarr | String | Applications listed will be ignored during updating<br>_List one application after another as shown in the example_ |
|
|
||||||
| -v | -v | None | Verbose Output<br> |
|
|
||||||
| -t | -t 150 | Integer | Set a custom timeout to be used with either:<br>`-m` <br>_Time the script will wait for application to be "STOPPED"_<br>or<br>`-(u\|U)` <br>_Time the script will wait for application to be either "STOPPED" or "ACTIVE"_ |
|
|
||||||
| -s | -s | None | Sync Catalogs prior to updating |
|
|
||||||
| -p | -p | None | Prune old/unused docker images |
|
|
||||||
|
|
||||||
|
|
||||||
<br>
|
|
||||||
<br>
|
|
||||||
|
|
||||||
## How to Install
|
|
||||||
|
|
||||||
### Choose a folder
|
|
||||||
|
|
||||||
It's important to save the script in a folder that is persistent across TrueNAS System Updates.
|
|
||||||
This saves you from reinstalling or experiencing an accidental lack-of-backups after an update.
|
|
||||||
|
|
||||||
##### New dataset
|
|
||||||
|
|
||||||
In this example we created a `scripts` dataset on the TrueNAS SCALE system, feel free to use another folder.
|
|
||||||
|
|
||||||
##### Root folder
|
|
||||||
|
|
||||||
The `/root` folder houses files for the root user.
|
|
||||||
It's also persistent across updates and hence can be safely used for storing the script.
|
|
||||||
|
|
||||||
### Open a Terminal
|
|
||||||
|
|
||||||
**Change Directory to your scripts folder**
|
|
||||||
|
|
||||||
```
|
|
||||||
cd /mnt/pool/scripts
|
|
||||||
```
|
|
||||||
|
|
||||||
**Git Clone truetool**
|
|
||||||
|
|
||||||
```
|
|
||||||
git clone https://github.com/truecharts/truetool.git
|
|
||||||
```
|
|
||||||
|
|
||||||
**Change Directory to truetool folder**
|
|
||||||
|
|
||||||
```
|
|
||||||
cd truetool
|
|
||||||
```
|
|
||||||
|
|
||||||
From here, you can just run truetool with `bash truetool.sh -ARGUMENTS`
|
|
||||||
|
|
||||||
<br>
|
|
||||||
|
|
||||||
## How to Update
|
|
||||||
|
|
||||||
TrueTool updates itself automatically.
|
|
||||||
|
|
||||||
<br >
|
|
||||||
|
|
||||||
|
|
||||||
## Creating a Cron Job
|
|
||||||
|
|
||||||
1. TrueNAS SCALE GUI
|
|
||||||
2. System Settings
|
|
||||||
3. Advanced
|
|
||||||
4. Cron Jobs
|
|
||||||
1. Click Add
|
|
||||||
|
|
||||||
| Name | Value | Reason |
|
|
||||||
| ---------------------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `Description` | TrueTool Update apps | This is up to you, put whatever you think is a good description in here |
|
|
||||||
| `Command` | `bash /PATH/TO/truetool_DIRECTORY/truetool.sh --no-color -b 14 -sup` | This is the command you will be running on your schedule, example: `bash /mnt/speed/scripts/truetool/truetool.sh -b 14 -sup` |
|
|
||||||
| `Run As User` | `root` | Running the script as `root` is REQUIRED. You cannot access all of the kubernetes functions without this user. |
|
|
||||||
| `Schedule` | Up to you, example: `0400` | Again up to you |
|
|
||||||
| `Hide Standard Output` | `False` or Un-ticked | It's best to keep an eye on updates and enable this to receive email reports |
|
|
||||||
| `Hide Standard Error` | `False` or Un-ticked | We definitely want to see what errors occurred during updating |
|
|
||||||
| `Enabled` | `True` or Ticked | This will Enable the script to run on your schedule |
|
|
||||||
|
|
||||||
<br >
|
|
||||||
<br >
|
|
||||||
|
|
||||||
### Additional Information
|
|
||||||
|
|
||||||
#### TrueTool vs HeavyScript
|
|
||||||
|
|
||||||
TrueTool and HeavyScript are based, in essence, based on the original (python based) TrueUpdate and TrueTool.
|
|
||||||
Then Support-Manager for TrueCharts, HeavyBullets8, ported this to Bash and started adding some additional logic and options for tasks we frequently needed our users to do, such as mounting PVC's.
|
|
||||||
|
|
||||||
After a month or so, the TrueCharts Team officially started refactoring this expanded bash-port. Due to personal reasons, HeavyBullets by then decided to separate from TrueCharts after merging the TrueCharts refactor into his own work. The beauty of OpenSource.
|
|
||||||
|
|
||||||
From this point onwards the HeavyScript and TrueTool diverged a bit.
|
|
||||||
We internally review changes within our staff team, to verify we somewhat stick to best-practices. This means, in some cases, we decided not to port certain features from HeavyScript and did decide to add features we think are useful and safe.
|
|
||||||
But this also means we can give guarantees TrueTool works optimally with our Catalog of TrueNAS SCALE Apps, as well as official Apps.
|
|
||||||
|
|
||||||
Users from HeavyScript can safely start using TrueTool, as we've made precautions to ensure the backups take over smoothly.
|
|
||||||
We, however, do _not_ advise using HeavyScript with TrueCharts Apps. Not because it's a bad App, but because we offer an alternative that is validated by our Staff.
|
|
@ -1,125 +0,0 @@
|
|||||||
diff --git plugins/chart_releases_linux/chart_release.py plugins/chart_releases_linux/chart_release.py
|
|
||||||
index 76e3825bc0f..f65cc0eac24 100644
|
|
||||||
--- plugins/chart_releases_linux/chart_release.py
|
|
||||||
+++ plugins/chart_releases_linux/chart_release.py
|
|
||||||
@@ -606,7 +606,7 @@ async def do_delete(self, job, release_name, options):
|
|
||||||
# If we had pre-install jobs, it's possible we have leftover pods which the job did not remove
|
|
||||||
# based on dev specified settings of cleaning it up - let's remove those
|
|
||||||
for pod in await self.middleware.call('k8s.pod.query', [['metadata.namespace', '=', namespace]]):
|
|
||||||
- owner_references = pod['metadata'].get('owner_references')
|
|
||||||
+ owner_references = pod['metadata'].get('ownerReferences')
|
|
||||||
if not isinstance(owner_references, list) or all(
|
|
||||||
owner_reference.get('name') not in pre_install_jobs for owner_reference in owner_references
|
|
||||||
):
|
|
||||||
@@ -658,7 +658,7 @@ async def remove_storage_class_and_dataset(self, release_name, job=None):
|
|
||||||
pvc_volume_ds = os.path.join(release_ds, 'volumes')
|
|
||||||
for pv in await self.middleware.call(
|
|
||||||
'k8s.pv.query', [
|
|
||||||
- ['spec.csi.volume_attributes.openebs\\.io/poolname', '=', pvc_volume_ds]
|
|
||||||
+ ['spec.csi.volumeAttributes.openebs\\.io/poolname', '=', pvc_volume_ds]
|
|
||||||
]
|
|
||||||
):
|
|
||||||
await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])
|
|
||||||
diff --git plugins/chart_releases_linux/resources.py plugins/chart_releases_linux/resources.py
|
|
||||||
index c7180147a5f..941de79da45 100644
|
|
||||||
--- plugins/chart_releases_linux/resources.py
|
|
||||||
+++ plugins/chart_releases_linux/resources.py
|
|
||||||
@@ -158,13 +158,13 @@ async def retrieve_pv_pvc_mapping_internal(self, chart_release):
|
|
||||||
}
|
|
||||||
|
|
||||||
for pv in chart_release['resources']['persistent_volumes']:
|
|
||||||
- claim_name = pv['spec'].get('claim_ref', {}).get('name')
|
|
||||||
+ claim_name = pv['spec'].get('claimRef', {}).get('name')
|
|
||||||
if claim_name:
|
|
||||||
csi_spec = pv['spec']['csi']
|
|
||||||
- volumes_ds = csi_spec['volume_attributes']['openebs.io/poolname']
|
|
||||||
+ volumes_ds = csi_spec['volumeAttributes']['openebs.io/poolname']
|
|
||||||
if (
|
|
||||||
os.path.join(chart_release['dataset'], 'volumes') != volumes_ds or
|
|
||||||
- csi_spec['volume_handle'] not in zfs_volumes
|
|
||||||
+ csi_spec['volumeHandle'] not in zfs_volumes
|
|
||||||
):
|
|
||||||
# We are only going to backup/restore pvc's which were consuming
|
|
||||||
# their respective storage class and we have related zfs volume present
|
|
||||||
@@ -174,8 +174,8 @@ async def retrieve_pv_pvc_mapping_internal(self, chart_release):
|
|
||||||
mapping[claim_name] = {
|
|
||||||
'name': pv_name,
|
|
||||||
'pv_details': pv,
|
|
||||||
- 'dataset': os.path.join(volumes_ds, csi_spec['volume_handle']),
|
|
||||||
- 'zv_details': zfs_volumes[csi_spec['volume_handle']],
|
|
||||||
+ 'dataset': os.path.join(volumes_ds, csi_spec['volumeHandle']),
|
|
||||||
+ 'zv_details': zfs_volumes[csi_spec['volumeHandle']],
|
|
||||||
}
|
|
||||||
return mapping
|
|
||||||
|
|
||||||
@@ -247,11 +247,11 @@ async def get_workload_storage_details(self):
|
|
||||||
# because of chart release reclaim policy being retain
|
|
||||||
for pv in await self.middleware.call(
|
|
||||||
'k8s.pv.query', [[
|
|
||||||
- 'spec.csi.volume_attributes.openebs\\.io/poolname', '^',
|
|
||||||
+ 'spec.csi.volumeAttributes.openebs\\.io/poolname', '^',
|
|
||||||
f'{os.path.join(k8s_config["dataset"], "releases")}/'
|
|
||||||
]]
|
|
||||||
):
|
|
||||||
- dataset = pv['spec']['csi']['volume_attributes']['openebs.io/poolname']
|
|
||||||
+ dataset = pv['spec']['csi']['volumeAttributes']['openebs.io/poolname']
|
|
||||||
rl = dataset.split('/', 4)
|
|
||||||
if len(rl) > 4:
|
|
||||||
mapping['persistent_volumes'][rl[3]].append(pv)
|
|
||||||
diff --git plugins/chart_releases_linux/scale_workload.py plugins/chart_releases_linux/scale_workload.py
|
|
||||||
index 117dab3a79c..e9525150278 100644
|
|
||||||
--- plugins/chart_releases_linux/scale_workload.py
|
|
||||||
+++ plugins/chart_releases_linux/scale_workload.py
|
|
||||||
@@ -246,10 +246,10 @@ async def get_workload_to_pod_mapping(self, namespace):
|
|
||||||
for r in await self.middleware.call(
|
|
||||||
f'k8s.{key}.query', [
|
|
||||||
['metadata.namespace', '=', namespace],
|
|
||||||
- ['metadata', 'rin', 'owner_references'],
|
|
||||||
+ ['metadata', 'rin', 'ownerReferences'],
|
|
||||||
], {'select': ['metadata']}
|
|
||||||
):
|
|
||||||
- for owner_reference in filter(lambda o: o.get('uid'), r['metadata']['owner_references'] or []):
|
|
||||||
+ for owner_reference in filter(lambda o: o.get('uid'), r['metadata']['ownerReferences'] or []):
|
|
||||||
mapping[key][owner_reference['uid']][r['metadata']['uid']] = r
|
|
||||||
|
|
||||||
pod_mapping = defaultdict(list)
|
|
||||||
diff --git plugins/kubernetes_linux/restore.py plugins/kubernetes_linux/restore.py
|
|
||||||
index 4897e3f8b7a..ec13a332b6e 100644
|
|
||||||
--- plugins/kubernetes_linux/restore.py
|
|
||||||
+++ plugins/kubernetes_linux/restore.py
|
|
||||||
@@ -218,7 +218,11 @@ def restore_backup(self, job, backup_name, options):
|
|
||||||
failed_pv_restores.append(f'Unable to create ZFS Volume for {pvc!r} PVC: {e}')
|
|
||||||
continue
|
|
||||||
|
|
||||||
+ # We need to safely access claim_ref vollume attribute keys as with k8s client api re-write
|
|
||||||
+ # camel casing which was done by kubernetes asyncio package is not happening anymore
|
|
||||||
pv_spec = pv['pv_details']['spec']
|
|
||||||
+ claim_ref = pv_spec.get('claim_ref') or pv_spec['claimRef']
|
|
||||||
+ pv_volume_attrs = pv_spec['csi'].get('volume_attributes') or pv_spec['csi']['volumeAttributes']
|
|
||||||
try:
|
|
||||||
self.middleware.call_sync('k8s.pv.create', {
|
|
||||||
'metadata': {
|
|
||||||
@@ -229,18 +233,18 @@ def restore_backup(self, job, backup_name, options):
|
|
||||||
'storage': pv_spec['capacity']['storage'],
|
|
||||||
},
|
|
||||||
'claimRef': {
|
|
||||||
- 'name': pv_spec['claim_ref']['name'],
|
|
||||||
- 'namespace': pv_spec['claim_ref']['namespace'],
|
|
||||||
+ 'name': claim_ref['name'],
|
|
||||||
+ 'namespace': claim_ref['namespace'],
|
|
||||||
},
|
|
||||||
'csi': {
|
|
||||||
'volumeAttributes': {
|
|
||||||
'openebs.io/poolname': RE_POOL.sub(
|
|
||||||
- f'{k8s_pool}\\1', pv_spec['csi']['volume_attributes']['openebs.io/poolname']
|
|
||||||
+ f'{k8s_pool}\\1', pv_volume_attrs['openebs.io/poolname']
|
|
||||||
)
|
|
||||||
},
|
|
||||||
- 'volumeHandle': pv_spec['csi']['volume_handle'],
|
|
||||||
+ 'volumeHandle': pv_spec['csi'].get('volume_handle') or pv_spec['csi']['volumeHandle'],
|
|
||||||
},
|
|
||||||
- 'storageClassName': pv_spec['storage_class_name'],
|
|
||||||
+ 'storageClassName': pv_spec.get('storage_class_name') or pv_spec['storageClassName'],
|
|
||||||
},
|
|
||||||
})
|
|
||||||
except Exception as e:
|
|
@ -1,14 +0,0 @@
|
|||||||
diff --git plugins/kubernetes_linux/backup.py plugins/kubernetes_linux/backup.py
|
|
||||||
index 365cd1718b4..1046a64c2a5 100644
|
|
||||||
index d8a48d45f89..365cd1718b4 100644
|
|
||||||
--- plugins/kubernetes_linux/backup.py
|
|
||||||
+++ plugins/kubernetes_linux/backup.py
|
|
||||||
@@ -61,7 +61,8 @@ def backup_chart_releases(self, job, backup_name):
|
|
||||||
['metadata.namespace', '=', chart_release['namespace']]
|
|
||||||
]
|
|
||||||
)
|
|
||||||
- for secret in sorted(secrets, key=lambda d: d['metadata']['name']):
|
|
||||||
+ # We ignore this keeping in line with helm behaviour where the secret malformed is ignored by helm
|
|
||||||
+ for secret in sorted(filter(lambda d: d.get('data'), secrets), key=lambda d: d['metadata']['name']):
|
|
||||||
with open(os.path.join(secrets_dir, secret['metadata']['name']), 'w') as f:
|
|
||||||
f.write(self.middleware.call_sync('k8s.secret.export_to_yaml_internal', secret))
|
|
@ -1,96 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
## Simple shortcut to just list the backups without promts and such
|
|
||||||
listBackups(){
|
|
||||||
echo -e "${BWhite}Backup Listing Tool${Color_Off}"
|
|
||||||
clear -x && echo "pulling all restore points.."
|
|
||||||
list_backups=$(cli -c 'app kubernetes list_backups' | grep -v system-update | sort -t '_' -Vr -k2,7 | tr -d " \t\r" | awk -F '|' '{print $2}' | nl | column -t)
|
|
||||||
[[ -z "$list_backups" ]] && echo -e "${IRed}No restore points available${Color_Off}" && exit || echo "Detected Backups:" && echo "$list_backups"
|
|
||||||
}
|
|
||||||
export -f listBackups
|
|
||||||
|
|
||||||
## Lists backups, except system-created backups, and promts which one to delete
|
|
||||||
deleteBackup(){
|
|
||||||
echo -e "${BWhite}Backup Deletion Tool${Color_Off}"
|
|
||||||
clear -x && echo "pulling all restore points.."
|
|
||||||
list_delete_backups=$(cli -c 'app kubernetes list_backups' | grep -v system-update | sort -t '_' -Vr -k2,7 | tr -d " \t\r" | awk -F '|' '{print $2}' | nl | column -t)
|
|
||||||
clear -x
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
[[ -z "$list_delete_backups" ]] && echo -e "${IRed}No restore points available${Color_Off}" && exit || { title; echo -e "Choose a restore point to delete\nThese may be out of order if they are not TrueTool backups" ; }
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
echo "$list_delete_backups" && read -rt 600 -p "Please type a number: " selection && restore_point=$(echo "$list_delete_backups" | grep ^"$selection " | awk '{print $2}')
|
|
||||||
[[ -z "$selection" ]] && echo "${IRed}Your selection cannot be empty${Color_Off}" && exit #Check for valid selection. If none, kill script
|
|
||||||
[[ -z "$restore_point" ]] && echo "Invalid Selection: $selection, was not an option" && exit #Check for valid selection. If none, kill script
|
|
||||||
echo -e "\nWARNING:\nYou CANNOT go back after deleting your restore point" || { echo "${IRed}FAILED${Color_Off}"; exit; }
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
echo -e "\n\nYou have chosen:\n$restore_point\n\nWould you like to continue?" && echo -e "1 Yes\n2 No" && read -rt 120 -p "Please type a number: " yesno || { echo "${IRed}FAILED${Color_Off}"; exit; }
|
|
||||||
if [[ $yesno == "1" ]]; then
|
|
||||||
echo -e "\nDeleting $restore_point" && cli -c 'app kubernetes delete_backup backup_name=''"'"$restore_point"'"' &>/dev/null && echo -e "${IGreen}Sucessfully deleted${Color_Off}" || echo -e "${IRed}Deletion FAILED${Color_Off}"
|
|
||||||
elif [[ $yesno == "2" ]]; then
|
|
||||||
echo "You've chosen NO, killing script."
|
|
||||||
else
|
|
||||||
echo -e "${IRed}Invalid Selection${Color_Off}"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
export -f deleteBackup
|
|
||||||
|
|
||||||
## Creates backups and deletes backups if a "backups to keep"-count is exceeded.
|
|
||||||
# backups-to-keep takes only heavyscript and truetool created backups into account, as other backups aren't guaranteed to be sorted correctly
|
|
||||||
backup(){
|
|
||||||
echo -e "${BWhite}Backup Tool${Color_Off}"
|
|
||||||
echo -e "\nNumber of backups was set to $number_of_backups"
|
|
||||||
date=$(date '+%Y_%m_%d_%H_%M_%S')
|
|
||||||
[[ "$verbose" == "true" ]] && cli -c 'app kubernetes backup_chart_releases backup_name=''"'TrueTool_"$date"'"'
|
|
||||||
[[ -z "$verbose" ]] && echo -e "\nNew Backup Name:" && cli -c 'app kubernetes backup_chart_releases backup_name=''"'TrueTool_"$date"'"' | tail -n 1
|
|
||||||
mapfile -t list_create_backups < <(cli -c 'app kubernetes list_backups' | grep 'HeavyScript\|TrueTool_' | sort -t '_' -Vr -k2,7 | awk -F '|' '{print $2}'| tr -d " \t\r")
|
|
||||||
# shellcheck disable=SC2309
|
|
||||||
if [[ ${#list_create_backups[@]} -gt "number_of_backups" ]]; then
|
|
||||||
echo -e "\nDeleting the oldest backup(s) for exceeding limit:"
|
|
||||||
overflow=$(( ${#list_create_backups[@]} - "$number_of_backups" ))
|
|
||||||
mapfile -t list_overflow < <(cli -c 'app kubernetes list_backups' | grep "TrueTool_" | sort -t '_' -V -k2,7 | awk -F '|' '{print $2}'| tr -d " \t\r" | head -n "$overflow")
|
|
||||||
for i in "${list_overflow[@]}"
|
|
||||||
do
|
|
||||||
cli -c 'app kubernetes delete_backup backup_name=''"'"$i"'"' &> /dev/null || echo "${IRed}FAILED${Color_Off} to delete $i"
|
|
||||||
echo "$i"
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
export -f backup
|
|
||||||
|
|
||||||
## Lists available backup and prompts the users to select a backup to restore
|
|
||||||
restore(){
|
|
||||||
echo -e "${BWhite}Backup Restoration Tool${Color_Off}"
|
|
||||||
clear -x && echo "pulling restore points.."
|
|
||||||
list_restore_backups=$(cli -c 'app kubernetes list_backups' | grep "TrueTool_" | sort -t '_' -Vr -k2,7 | tr -d " \t\r" | awk -F '|' '{print $2}' | nl | column -t)
|
|
||||||
clear -x
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
[[ -z "$list_restore_backups" ]] && echo "No TrueTool restore points available" && exit || { title; echo "Choose a restore point" ; }
|
|
||||||
echo "$list_restore_backups" && read -rt 600 -p "Please type a number: " selection && restore_point=$(echo "$list_restore_backups" | grep ^"$selection " | awk '{print $2}')
|
|
||||||
[[ -z "$selection" ]] && echo "Your selection cannot be empty" && exit #Check for valid selection. If none, kill script
|
|
||||||
[[ -z "$restore_point" ]] && echo "Invalid Selection: $selection, was not an option" && exit #Check for valid selection. If none, kill script
|
|
||||||
echo -e "\nWARNING:\nThis is NOT guranteed to work\nThis is ONLY supposed to be used as a LAST RESORT\nConsider rolling back your applications instead if possible" || { echo "${IRed}FAILED${Color_Off}"; exit; }
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
echo -e "\n\nYou have chosen:\n$restore_point\n\nWould you like to continue?" && echo -e "1 Yes\n2 No" && read -rt 120 -p "Please type a number: " yesno || { echo "${IRed}FAILED${Color_Off}"; exit; }
|
|
||||||
if [[ $yesno == "1" ]]; then
|
|
||||||
echo -e "\nStarting Restore, this will take a ${BWhite}LONG${Color_Off} time."
|
|
||||||
pool=$(cli -c 'app kubernetes config' | grep -E "pool\s\|" | awk -F '|' '{print $3}' | tr -d " \t\n\r")
|
|
||||||
echo "Correcting PVC mountpoints..."
|
|
||||||
for pvc in $(zfs list -t filesystem -r "$pool"/ix-applications -o name -H | grep "/ix-applications/" | grep "volumes/pvc")
|
|
||||||
do
|
|
||||||
zfs set mountpoint=legacy "${pvc}" || echo "Fixing PVC mountpoints Failed for ${pvc}... Continuing..."
|
|
||||||
done
|
|
||||||
# Ensure readonly is turned off
|
|
||||||
if ! zfs set readonly=off "$pool"/ix-applications;then
|
|
||||||
echo -e "Error: Failed to set ZFS ReadOnly to \"off\""
|
|
||||||
echo -e "After the restore, attempt to run the following command manually:"
|
|
||||||
echo "zfs set readonly=off $pool/ix-applications"
|
|
||||||
fi
|
|
||||||
echo "Triggering restore process..."
|
|
||||||
cli -c 'app kubernetes restore_backup backup_name=''"'"$restore_point"'"' || echo "Restore ${IRed}FAILED${Color_Off}"
|
|
||||||
elif [[ $yesno == "2" ]]; then
|
|
||||||
echo "You've chosen NO, killing script. Good luck."
|
|
||||||
else
|
|
||||||
echo -e "${IRed}Invalid Selection${Color_Off}"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
export -f restore
|
|
@ -1,42 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
helmEnable(){
|
|
||||||
echo -e "${BWhite}Enabling Helm${Color_Off}"
|
|
||||||
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml && echo -e "${IGreen}Helm Enabled${Color_Off}"|| echo -e "${IRed}Helm Enable FAILED${Color_Off}"
|
|
||||||
}
|
|
||||||
export -f helmEnable
|
|
||||||
|
|
||||||
aptEnable(){
|
|
||||||
echo -e "${BWhite}Enabling Apt-Commands${Color_Off}"
|
|
||||||
chmod +x /usr/bin/apt* && echo -e "${IGreen}APT enabled${Color_Off}"|| echo -e "${IRed}APT Enable FAILED${Color_Off}"
|
|
||||||
}
|
|
||||||
export -f aptEnable
|
|
||||||
|
|
||||||
kubeapiEnable(){
|
|
||||||
local -r comment='iX Custom Rule to drop connection requests to k8s cluster from external sources'
|
|
||||||
echo -e "${BWhite}Enabling Kubernetes API${Color_Off}"
|
|
||||||
if iptables -t filter -L INPUT 2> /dev/null | grep -q "${comment}" ; then
|
|
||||||
iptables -D INPUT -p tcp -m tcp --dport 6443 -m comment --comment "${comment}" -j DROP && echo -e "${IGreen}Kubernetes API enabled${Color_Off}"|| echo -e "${IRed}Kubernetes API Enable FAILED${Color_Off}"
|
|
||||||
else
|
|
||||||
echo -e "${IGreen}Kubernetes API already enabled${Color_Off}"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
export -f kubeapiEnable
|
|
||||||
|
|
||||||
# Prune unused docker images to prevent dataset/snapshot bloat related slowdowns on SCALE
|
|
||||||
prune(){
|
|
||||||
echo -e "${BWhite}Docker Prune${Color_Off}"
|
|
||||||
echo "Pruning Docker Images..."
|
|
||||||
docker image prune -af | grep "^Total" && echo -e "${IGreen}Docker Prune Successfull${Color_Off}" || echo "Docker Prune ${IRed}FAILED${Color_Off}"
|
|
||||||
|
|
||||||
# TODO Switch to middleware prune on next release
|
|
||||||
# midclt call container.prune '{"remove_unused_images": true, "remove_stopped_containers": true}' &> /dev/null && echo "Docker Prune completed"|| echo "Docker Prune ${IRed}FAILED${Color_Off}"
|
|
||||||
}
|
|
||||||
export -f prune
|
|
||||||
|
|
||||||
#
|
|
||||||
sync(){
|
|
||||||
echo -e "${BWhite}Starting Catalog Sync...${Color_Off}"
|
|
||||||
cli -c 'app catalog sync_all' &> /dev/null && echo -e "${IGreen}Catalog sync complete${Color_Off}" || echo -e "${IRed}Catalog Sync Failed${Color_Off}"
|
|
||||||
}
|
|
||||||
export -f sync
|
|
@ -1,113 +0,0 @@
|
|||||||
#!/bin/bash
# shellcheck disable=SC2034
# ANSI escape-sequence palette used for all script output.
# SC2034 is disabled because these variables are consumed by the other
# sourced includes, not within this file.

# Reset
Color_Off='\033[0m' # Text Reset

# Regular Colors
Black='\033[0;30m'  # Black
Red='\033[0;31m'    # Red
Green='\033[0;32m'  # Green
Yellow='\033[0;33m' # Yellow
Blue='\033[0;34m'   # Blue
Purple='\033[0;35m' # Purple
Cyan='\033[0;36m'   # Cyan
White='\033[0;37m'  # White

# Bold
BBlack='\033[1;30m'  # Black
BRed='\033[1;31m'    # Red
BGreen='\033[1;32m'  # Green
BYellow='\033[1;33m' # Yellow
BBlue='\033[1;34m'   # Blue
BPurple='\033[1;35m' # Purple
BCyan='\033[1;36m'   # Cyan
BWhite='\033[1;37m'  # White

# Underline
UBlack='\033[4;30m'  # Black
URed='\033[4;31m'    # Red
UGreen='\033[4;32m'  # Green
UYellow='\033[4;33m' # Yellow
UBlue='\033[4;34m'   # Blue
UPurple='\033[4;35m' # Purple
UCyan='\033[4;36m'   # Cyan
UWhite='\033[4;37m'  # White

# High Intensity
IBlack='\033[0;90m'  # Black
IRed='\033[0;91m'    # Red
IGreen='\033[0;92m'  # Green
IYellow='\033[0;93m' # Yellow
IBlue='\033[0;94m'   # Blue
IPurple='\033[0;95m' # Purple
ICyan='\033[0;96m'   # Cyan
IWhite='\033[0;97m'  # White


# Bold High Intensity
BIBlack='\033[1;90m'  # Black
BIRed='\033[1;91m'    # Red
BIGreen='\033[1;92m'  # Green
BIYellow='\033[1;93m' # Yellow
BIBlue='\033[1;94m'   # Blue
BIPurple='\033[1;95m' # Purple
BICyan='\033[1;96m'   # Cyan
BIWhite='\033[1;97m'  # White
|
|
||||||
|
|
||||||
# Disable colored output: blank every palette variable defined above so the
# ${...} expansions in echo calls expand to empty strings.
noColor(){
  local palette_var
  for palette_var in Color_Off \
      Black Red Green Yellow Blue Purple Cyan White \
      BBlack BRed BGreen BYellow BBlue BPurple BCyan BWhite \
      UBlack URed UGreen UYellow UBlue UPurple UCyan UWhite \
      IBlack IRed IGreen IYellow IBlue IPurple ICyan IWhite \
      BIBlack BIRed BIGreen BIYellow BIBlue BIPurple BICyan BIWhite; do
    # printf -v assigns the (global) variable named by $palette_var.
    printf -v "$palette_var" '%s' ''
  done
}
export -f noColor
|
|
@ -1,27 +0,0 @@
|
|||||||
#!/bin/bash

# Print a numbered table of in-cluster DNS names
# (<app>.<ix-namespace>.svc.cluster.local) and the first service port
# for every user application pod found via k3s kubectl.
dns(){
  echo -e "${BWhite}Service DNS Names Tool${Color_Off}"
  clear -x
  echo "Generating Internal Service DNS Names..."
  #ignored dependency pods, may need to add more in the future.
  dep_ignore="\-cronjob\-|^kube-system|\ssvclb|NAME|\-memcached\-.[^custom\-app]|\-postgresql\-.[^custom\-app]|\-redis\-.[^custom\-app]|\-mariadb\-.[^custom\-app]|\-promtail\-.[^custom\-app]"

  # Pulling pod names (all namespaces, dependency pods filtered out)
  mapfile -t main < <(k3s kubectl get pods -A | grep -Ev "$dep_ignore" | sort)

  # Pulling all ports (service listing queried once, grepped per app below)
  all_ports=$(k3s kubectl get service -A)

  clear -x
  count=0
  for i in "${main[@]}"
  do
    # Emit one blank separator line before the very first row only.
    [[ count -le 0 ]] && echo -e "\n" && ((count++))
    # Strip the replicaset/pod hash suffixes and a trailing "-0" to recover the app name.
    appName=$(echo "$i" | awk '{print $2}' | sed 's/-[^-]*-[^-]*$//' | sed 's/-0//')
    # Namespace column (e.g. "ix-myapp").
    ixName=$(echo "$i" | awk '{print $1}')
    # Leading digits of the matching service's port column; empty when no service matches.
    port=$(echo "$all_ports" | grep -E "\s$appName\s" | awk '{print $6}' | grep -Eo "^[[:digit:]]+{1}")
    [[ -n "$port" ]] && echo -e "$appName.$ixName.svc.cluster.local $port"
  done | uniq | nl -b t | sed 's/\s\s\s$/- -------- ----/' | column -t -R 1 -N "#,DNS_Name,Port" -L
}
export -f dns
|
|
@ -1,35 +0,0 @@
|
|||||||
#!/bin/bash

# Print usage information and exit the script.
# When reached via the interactive menu ($help set by no_args), clear the
# screen first so the help text starts at the top.
# Fix: "Kuberntes" -> "Kubernetes" in the --kubeapi-enable description.
help(){
  [[ $help == "true" ]] && clear -x
  echo ""
  echo -e "${BWhite}Basic Utilities${Color_Off}"
  echo "--mount | Initiates mounting feature, choose between unmounting and mounting PVC data"
  echo "--restore | Opens a menu to restore a \"truetool\" backup that was taken on your \"ix-applications\" dataset"
  echo "--delete-backup | Opens a menu to delete backups on your system"
  echo "--list-backups | Prints a list of backups available"
  echo "--helm-enable | Enables Helm command access on SCALE"
  echo "--apt-enable | Enables Apt command access on SCALE"
  echo "--kubeapi-enable | Enables external access to Kubernetes API port"
  echo "--dns | List all of your applications DNS names and their web ports"
  echo
  echo -e "${BWhite}Update Options${Color_Off}"
  echo "-U | Update all applications, ignores versions"
  echo "-u | Update all applications, does not update Major releases"
  echo "-b | Back-up your ix-applications dataset, specify a number after -b"
  echo "-i | Add application to ignore list, one by one, see example below."
  echo "-v | verbose output"
  echo "-t | Set a custom timeout in seconds when checking if either an App or Mountpoint correctly Started, Stopped or (un)Mounted. Defaults to 500 seconds"
  echo "-s | sync catalog"
  echo "-p | Prune unused/old docker images"
  echo
  echo -e "${BWhite}Examples${Color_Off}"
  echo "bash truetool.sh -b 14 -i portainer -i arch -i sonarr -i radarr -t 600 -vsUp"
  echo "bash /mnt/tank/scripts/truetool.sh -t 150 --mount"
  echo "bash /mnt/tank/scripts/truetool.sh --dns"
  echo "bash /mnt/tank/scripts/truetool.sh --restore"
  echo "bash /mnt/tank/scripts/truetool.sh --delete-backup"
  echo
  exit
}
export -f help
|
|
@ -1,57 +0,0 @@
|
|||||||
#!/bin/bash

# Interactive tool to mount a single application's PVC dataset under
# /mnt/truetool/<pvc-name> for direct access, or to unmount everything
# previously mounted there. Stops the app (scale to 0) before mounting.
mountPVC(){
  echo -e "${BWhite}PVC Mounting Tool${Color_Off}"
  clear -x
  title
  echo -e "1 Mount\n2 Unmount All" && read -rt 600 -p "Please type a number: " selection
  [[ -z "$selection" ]] && echo "Your selection cannot be empty" && exit #Check for valid selection. If none, kill script
  if [[ $selection == "1" ]]; then
    # Numbered PVC listing: index, namespace, PVC name, volume name.
    list=$(k3s kubectl get pvc -A | sort -u | awk '{print NR-1, "\t" $1 "\t" $2 "\t" $4}' | column -t | sed "s/^0/ /")
    echo "$list" && read -rt 120 -p "Please type a number: " selection
    [[ -z "$selection" ]] && echo "Your selection cannot be empty" && exit #Check for valid selection. If none, kill script
    # Namespace column minus the "ix-" prefix = chart release name.
    app=$(echo -e "$list" | grep ^"$selection " | awk '{print $2}' | cut -c 4- )
    [[ -z "$app" ]] && echo "Invalid Selection: $selection, was not an option" && exit #Check for valid selection. If none, kill script
    pvc=$(echo -e "$list" | grep ^"$selection ")
    status=$(cli -m csv -c 'app chart_release query name,status' | grep -E "^$app\b" | awk -F ',' '{print $2}'| tr -d " \t\n\r")
    if [[ "$status" != "STOPPED" ]]; then
      [[ -z $timeout ]] && echo -e "\nDefault Timeout: 500" && timeout=500 || echo -e "\nCustom Timeout: $timeout"
      # Scale the release to 0 replicas, then poll below until it reports STOPPED.
      SECONDS=0 && echo -e "\nScaling down $app" && midclt call chart.release.scale "$app" '{"replica_count": 0}' &> /dev/null
    else
      echo -e "\n$app is already stopped"
    fi
    # Poll release status until STOPPED or until $timeout seconds elapse.
    while [[ "$SECONDS" -le "$timeout" && "$status" != "STOPPED" ]]
    do
      status=$(cli -m csv -c 'app chart_release query name,status' | grep -E "^$app\b" | awk -F ',' '{print $2}'| tr -d " \t\n\r")
      echo -e "Waiting $((timeout-SECONDS)) more seconds for $app to be STOPPED" && sleep 5
    done
    data_name=$(echo "$pvc" | awk '{print $3}')
    volume_name=$(echo "$pvc" | awk '{print $4}')
    # Resolve the ZFS dataset backing this volume, then remount it for access.
    full_path=$(zfs list | grep "$volume_name" | awk '{print $1}')
    echo -e "\nMounting\n$full_path\nTo\n/mnt/truetool/$data_name" && zfs set mountpoint="/truetool/$data_name" "$full_path" && echo -e "Mounted, Use the Unmount All option to unmount\n"
    exit
  elif [[ $selection == "2" ]]; then
    # Everything under /mnt/truetool was mounted by option 1 above.
    mapfile -t unmount_array < <(basename -a /mnt/truetool/* | sed "s/*//")
    [[ -z ${unmount_array[*]} ]] && echo "Theres nothing to unmount" && exit
    for i in "${unmount_array[@]}"
    do
      main=$(k3s kubectl get pvc -A | grep -E "\s$i\s" | awk '{print $1, $2, $4}')
      app=$(echo "$main" | awk '{print $1}' | cut -c 4-)
      pvc=$(echo "$main" | awk '{print $3}')
      mapfile -t path < <(find /mnt/*/ix-applications/releases/"$app"/volumes/ -maxdepth 0 | cut -c 6-)
      if [[ "${#path[@]}" -gt 1 ]]; then #if there is another app with the same name on another pool, use the current pools application, since the other instance is probably old, or unused.
        echo "$i is a name used on more than one pool.. attempting to use your current kubernetes apps pool"
        # Pool name comes from the middleware's configured applications dataset.
        pool=$(cli -c 'app kubernetes config' | grep -E "dataset\s\|" | awk -F '|' '{print $3}' | awk -F '/' '{print $1}' | tr -d " \t\n\r")
        full_path=$(find /mnt/"$pool"/ix-applications/releases/"$app"/volumes/ -maxdepth 0 | cut -c 6-)
        # mountpoint=legacy returns the dataset to k8s-managed mounting.
        zfs set mountpoint=legacy "$full_path""$pvc" && echo "$i unmounted" && rmdir /mnt/truetool/"$i" || echo "${IRed}FAILED${Color_Off} to unmount $i"
      else
        # shellcheck disable=SC2128
        zfs set mountpoint=legacy "$path""$pvc" && echo "$i unmounted" && rmdir /mnt/truetool/"$i" || echo "${IRed}FAILED${Color_Off} to unmount $i"
      fi
    done
    rmdir /mnt/truetool
  else
    echo -e "${IRed}Invalid selection, \"$selection\" was not an option${Color_Off}"
  fi
}
export -f mountPVC
|
|
@ -1,72 +0,0 @@
|
|||||||
#!/bin/bash
# shellcheck disable=SC2034

# Interactive menu shown when the script is run without arguments.
# Sets the same flag variables the CLI option parser in truetool.sh sets,
# then returns so the main dispatch section can act on them.
# Fixes: "interger" -> "integer" (x2) and "Kuberntes" -> "Kubernetes"
# in user-facing messages.
no_args(){
  echo "0 Show Help"
  echo "1 List Internal Service DNS Names"
  echo "2 Mount and Unmount PVC storage for easy access"
  echo "3 List Backups"
  echo "4 Create a Backup"
  echo "5 Restore a Backup"
  echo "6 Delete a Backup"
  echo "7 Enable Helm Commands"
  echo "8 Enable Apt and Apt-Get Commands"
  echo "9 Update All Apps"
  echo "10 Enable external access to Kubernetes API port"
  read -rt 600 -p "Please select an option by number: " selection

  case $selection in
    0)
      help="true"
      ;;
    1)
      dns="true"
      ;;
    2)
      mountPVC="true"
      ;;
    3)
      listBackups="true"
      ;;
    4)
      # Backup retention count must be a positive integer (mirrors -b).
      read -rt 600 -p "Please type the max number of backups to keep: " backups
      re='^[0-9]+$'
      number_of_backups=$backups
      ! [[ $backups =~ $re ]] && echo -e "Error: -b needs to be assigned an integer\n\"""$number_of_backups""\" is not an integer" >&2 && exit
      [[ "$number_of_backups" -le 0 ]] && echo "Error: Number of backups is required to be at least 1" && exit
      ;;
    5)
      restore="true"
      ;;
    6)
      deleteBackup="true"
      ;;
    7)
      helmEnable="true"
      ;;
    8)
      aptEnable="true"
      ;;
    9)
      # Sub-menu: minor-only updates (-u) vs. everything including majors (-U).
      echo ""
      echo "1 Update Apps Excluding likely breaking major changes"
      echo "2 Update Apps Including likely breaking major changes"
      read -rt 600 -p "Please select an option by number: " updateType
      if [[ "$updateType" == "1" ]]; then
        update_apps="true"
      elif [[ "$updateType" == "2" ]]; then
        update_all_apps="true"
      else
        echo "INVALID ENTRY" && exit 1
      fi
      ;;
    10)
      kubeapiEnable="true"
      ;;
    *)
      echo "Unknown option" && exit 1
      ;;
  esac
  echo ""
}
export -f no_args
|
|
@ -1,18 +0,0 @@
|
|||||||
#!/bin/bash

# Apply the TrueCharts middleware hotpatches for TrueNAS SCALE 22.12.0.
# Each patch is downloaded to /tmp, applied to the installed middleware
# tree, followed by a middleware restart; failures are reported but
# treated as non-critical.
patchv22120(){
  local hp
  for hp in 1 2; do
    echo "Applying 22.12 HotPatch $hp"
    ( wget -q -P /tmp "https://github.com/truecharts/truetool/raw/main/hotpatch/2212/HP${hp}.patch" && echo "download completed" || echo "download failed" ) && ( patch -N -s -p0 -d /usr/lib/python3/dist-packages/middlewared/ < "/tmp/HP${hp}.patch" && service middlewared restart && echo "waiting 20 seconds for middleware restart..." && sleep 20 && echo "patch completed" || echo "patch failed or skipped, not critical" ) && rm -rf "/tmp/HP${hp}.patch"
  done
}
export -f patchv22120
|
|
||||||
|
|
||||||
|
|
||||||
# Dispatch version-specific hotpatches based on the running SCALE release
# (third dash-separated field of `system version`, e.g. "22.12.0").
hotpatch(){
  local scale_version
  echo "Starting hotpatcher..."
  scale_version=$(cli -m csv -c 'system version' | awk -F '-' '{print $3}')
  case "$scale_version" in
    22.12.0)
      patchv22120
      ;;
  esac
}
export -f hotpatch
|
|
@ -1,22 +0,0 @@
|
|||||||
#!/bin/bash

# Fancy ascii title.
# Prints the multi-color "TrueCharts TrueTool" banner once per run;
# the $titleShown guard makes every later call a no-op.
title(){
  if [[ -z $titleShown ]]; then
    echo -e "${IRed} _______ _____ _ _ ";
    echo " |__ __| / ____| | | | ";
    echo " | |_ __ _ _ ___| | | |__ __ _ _ __| |_ ___ ";
    echo -e "${IYellow} | | '__| | | |/ _ \ | | '_ \ / _\` | '__| __/ __|";
    echo " | | | | |_| | __/ |____| | | | (_| | | | |_\__ \\";
    echo -e "${IGreen} __|_|_| \__,_|\___|\_____|_| |_|\__,_|_| \__|___/";
    echo " |__ __| |__ __| | | ";
    echo -e "${IBlue} | |_ __ _ _ ___| | ___ ___ | | ";
    echo " | | '__| | | |/ _ \ |/ _ \ / _ \| | ";
    echo -e "${IPurple} | | | | |_| | __/ | (_) | (_) | | ";
    echo " |_|_| \__,_|\___|_|\___/ \___/|_| ";
    echo " ";
    echo -e "${Color_Off} ";
  fi
  # Mark the banner as shown for the remainder of this run.
  titleShown='true'
}
export -f title
|
|
@ -1,123 +0,0 @@
|
|||||||
#!/bin/bash

# Update every chart release that reports an available update, running each
# update as a background job and waiting for all of them to finish.
# Honors the global ignore list (-i), $timeout (-t) and $update_all_apps (-U).
update_apps(){
  echo -e "${BWhite}App Updater${Color_Off}"
  [[ -z $timeout ]] && echo -e "Default Timeout: 500" && timeout=500 || echo -e "\nCustom Timeout: $timeout"
  [[ "$timeout" -le 120 ]] && echo "Warning: Your timeout is set low and may lead to premature rollbacks or skips"

  echo ""
  echo "Creating list of Apps to update..."

  # Render a list of ignored applications, so users can verify their ignores got parsed correctly.
  if [[ -z ${ignore[*]} ]]; then
    echo "No apps added to ignore list, continuing..."
  else
    echo "ignored applications:"
    for ignored in "${ignore[@]}"
    do
      echo "${ignored}"
    done
  fi
  echo ""

  # One CSV row per release where some *update_available column is "true".
  mapfile -t array < <(cli -m csv -c 'app chart_release query name,update_available,human_version,human_latest_version,container_images_update_available,status' | grep -E ",true(,|$)" | sort)
  [[ -z ${array[*]} ]] && echo -e "\nThere are no updates available or middleware timed out" && return 0 || echo -e "\n${#array[@]} update(s) available:"
  PIDlist=()

  # Draft a list of app names, separate from actual execution.
  # This prevents outputs getting mixed together.
  for i in "${array[@]}"
  do
    app_name=$(echo "$i" | awk -F ',' '{print $1}') #first CSV column: release name
    echo "$app_name"
  done

  echo ""
  echo "Updating Apps..."

  # Create a background task for each update as async solution.
  for i in "${array[@]}"
  do
    executeUpdate "${i}" &
    PIDlist+=($!)
  done
  echo ""
  echo "Waiting for update results..."

  # Wait for all the async updates to complete; `||:` keeps a failed child
  # from aborting the loop under `set -e`-like conditions.
  for p in "${PIDlist[@]}"
  do
    wait "${p}" ||:
  done

}
export -f update_apps
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# This is a combination of stopping previously-stopped apps and apps stuck Deploying after update.
# Runs after executeUpdate in the same background job; relies on the caller's
# $app_name, $startstatus, $rollback_version, $timeout and $verbose globals.
# Polls release status every ~15s and: re-stops apps that were STOPPED before
# the update, rolls back releases still DEPLOYING past $timeout (once), and
# exits once a stable ACTIVE/STOPPED state is confirmed.
after_update_actions(){
  SECONDS=0
  count=0
  sleep 15

  # Keep this running and exit the endless-loop based on a timer, instead of a countered-while-loop
  # shellcheck disable=SC2050
  while [[ "0" != "1" ]]
  do
    (( count++ ))
    status=$(cli -m csv -c 'app chart_release query name,update_available,human_version,human_latest_version,status' | grep "^$app_name," | awk -F ',' '{print $2}')
    if [[ "$status" == "ACTIVE" && "$startstatus" == "STOPPED" ]]; then
      # App was STOPPED before the update: return it to that state.
      [[ "$verbose" == "true" ]] && echo "Returing to STOPPED state.."
      midclt call chart.release.scale "$app_name" '{"replica_count": 0}' &> /dev/null && echo "Stopped"|| echo "FAILED"
      break
    elif [[ "$SECONDS" -ge "$timeout" && "$status" == "DEPLOYING" && "$failed" != "true" ]]; then
      # Timed out while DEPLOYING: roll back to the pre-update chart version.
      echo -e "Error: Run Time($SECONDS) for $app_name has exceeded Timeout($timeout)\nIf this is a slow starting application, set a higher timeout with -t\nIf this applicaion is always DEPLOYING, you can disable all probes under the Healthcheck Probes Liveness section in the edit configuration\nReverting update.."
      midclt call chart.release.rollback "$app_name" "{\"item_version\": \"$rollback_version\"}" &> /dev/null
      [[ "$startstatus" == "STOPPED" ]] && failed="true" && after_update_actions && unset failed #run back after_update_actions function if the app was stopped prior to update
      break
    elif [[ "$SECONDS" -ge "$timeout" && "$status" == "DEPLOYING" && "$failed" == "true" ]]; then
      # Second timeout after a rollback: give up and leave it to the operator.
      echo -e "Error: Run Time($SECONDS) for $app_name has exceeded Timeout($timeout)\nThe application failed to be ACTIVE even after a rollback,\nManual intervention is required\nAbandoning"
      break
    elif [[ "$status" == "STOPPED" ]]; then
      [[ "$count" -le 1 && "$verbose" == "true" ]] && echo "Verifying Stopped.." && sleep 15 && continue #if reports stopped on FIRST time through loop, double check
      [[ "$count" -le 1 && -z "$verbose" ]] && sleep 15 && continue #if reports stopped on FIRST time through loop, double check
      echo "Stopped" && break #if reports stopped any time after the first loop, assume its extermal services.
    elif [[ "$status" == "ACTIVE" ]]; then
      [[ "$count" -le 1 && "$verbose" == "true" ]] && echo "Verifying Active.." && sleep 15 && continue #if reports active on FIRST time through loop, double check
      [[ "$count" -le 1 && -z "$verbose" ]] && sleep 15 && continue #if reports active on FIRST time through loop, double check
      echo "Active" && break #if reports active any time after the first loop, assume actually active.
    else
      [[ "$verbose" == "true" ]] && echo "Waiting $((timeout-SECONDS)) more seconds for $app_name to be ACTIVE"
      sleep 15
      continue
    fi
  done
}
export -f after_update_actions
|
|
||||||
|
|
||||||
# Determine what all the required information for the App to update, check it and execute the update using the SCALE API.
# $1 is one CSV row from update_apps:
#   name,update_available,human_version,human_latest_version,container_images_update_available,status
# Skips ignored apps and apps whose chart-major bump does not match the
# app-major bump (unless -U / $update_all_apps forces everything).
executeUpdate(){
  app_name=$(echo "$1" | awk -F ',' '{print $1}') #first CSV column: release name
  old_app_ver=$(echo "$1" | awk -F ',' '{print $4}' | awk -F '_' '{print $1}' | awk -F '.' '{print $1}') #previous/current Application MAJOR Version
  new_app_ver=$(echo "$1" | awk -F ',' '{print $5}' | awk -F '_' '{print $1}' | awk -F '.' '{print $1}') #new Application MAJOR Version
  old_chart_ver=$(echo "$1" | awk -F ',' '{print $4}' | awk -F '_' '{print $2}' | awk -F '.' '{print $1}') # Old Chart MAJOR version
  new_chart_ver=$(echo "$1" | awk -F ',' '{print $5}' | awk -F '_' '{print $2}' | awk -F '.' '{print $1}') # New Chart MAJOR version
  status=$(echo "$1" | awk -F ',' '{print $2}') #status of the app: STOPPED / DEPLOYING / ACTIVE
  startstatus=$status
  diff_app=$(diff <(echo "$old_app_ver") <(echo "$new_app_ver")) #calculating difference in major app versions (empty when equal)
  diff_chart=$(diff <(echo "$old_chart_ver") <(echo "$new_chart_ver")) #calculating difference in Chart versions (empty when equal)
  old_full_ver=$(echo "$1" | awk -F ',' '{print $4}') #Upgraded From
  new_full_ver=$(echo "$1" | awk -F ',' '{print $5}') #Upgraded To
  rollback_version=$(echo "$1" | awk -F ',' '{print $4}' | awk -F '_' '{print $2}') #chart version after the "_" separator; used by after_update_actions
  printf '%s\0' "${ignore[@]}" | grep -iFxqz "${app_name}" && echo -e "\n$app_name\nIgnored, skipping" && return #If application is on ignore list, skip
  if [[ "$diff_app" == "$diff_chart" || "$update_all_apps" == "true" ]]; then #continue to update
    [[ "$verbose" == "true" ]] && echo "Updating.."
    # shellcheck disable=SC2015
    cli -c 'app chart_release upgrade release_name=''"'"$app_name"'"' &> /dev/null && echo -e "Updated $app_name\n$old_full_ver\n$new_full_ver" && after_update_actions || { echo -e "$app_name: update ${IRed}FAILED${Color_Off}"; return; }
  else
    # Chart major bumped without a matching app major bump (or vice versa):
    # treat as a likely-breaking Major release and leave it to the operator.
    echo -e "\n$app_name\nMajor Release, update manually"
    return
  fi
}
export -f executeUpdate
|
|
@ -1,31 +0,0 @@
|
|||||||
#!/bin/bash

## AutoUpdate TrueTool using Git
# Fetches $targetRepo; when the local checkout is behind, hard-resets and
# pulls, then re-executes the updated script once with the original
# arguments. $CHANGED guards against an endless update/re-exec loop.
updater(){
  echo -e "${BWhite}Checking for updates...${Color_Off}"
  git remote set-url origin "${targetRepo}"
  BRANCH=$(git rev-parse --abbrev-ref HEAD)
  git fetch -q
  git update-index -q --refresh
  if [[ $(git status --branch --porcelain) == *"behind"* ]]; then
    echo -e "${IPurple}TrueTool requires update${Color_Off}"
    # NOTE: discards any local modifications to the checkout.
    git reset --hard -q
    git checkout -q "${BRANCH}"
    git pull -q
    echo "script updated"
    if [[ "$CHANGED" == "true" ]]; then
      # Already re-executed once and still "behind": bail out.
      echo "LOOP DETECTED, exiting"
      exit 1
    else
      echo "restarting script after update..."
      export CHANGED="true"
      # Source the new version in this same shell with the original arguments.
      . "${SCRIPT_DIR}/truetool.sh" "$@"
      exit
    fi
  else
    echo -e "${IGreen}script up-to-date${Color_Off}"
    export CHANGED="false"
  fi
  echo ""
}
export -f updater
|
|
179
truetool.sh
179
truetool.sh
@ -1,179 +0,0 @@
|
|||||||
#!/bin/bash

# Constants
# Absolute directory containing this script, resolved even when invoked
# through a relative path or a symlinked $0.
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )";
dir=$(basename "$SCRIPT_DIR")

# Change this if you want to fork the project
enableUpdate="true"
targetRepo="https://github.com/truecharts/truetool.git"

# CD to the folder containing the script to ensure consistent runs
cd "${SCRIPT_DIR}" || echo -e "ERROR: Something went wrong accessing the script directory"

# Includes
# shellcheck source=includes/backup.sh
source includes/backup.sh
# shellcheck source=includes/chores.sh
source includes/chores.sh
# shellcheck source=includes/colors.sh
source includes/colors.sh
# shellcheck source=includes/dns.sh
source includes/dns.sh
# shellcheck source=includes/help.sh
source includes/help.sh
# shellcheck source=includes/patch.sh
source includes/patch.sh
# shellcheck source=includes/mount.sh
source includes/mount.sh
# shellcheck source=includes/no_args.sh
source includes/no_args.sh
# shellcheck source=includes/title.sh
source includes/title.sh
# shellcheck source=includes/update.sh
source includes/update.sh
# shellcheck source=includes/update_self.sh
source includes/update_self.sh
|
|
||||||
|
|
||||||
#If no argument is passed, set flag to show menu
if [[ -z "$*" || "-" == "$*" || "--" == "$*" ]]; then
  no_args="true"
else

  # Parse script options. Short flags come from getopts; long options arrive
  # as "-" with the option word in $OPTARG (the trailing "-:" in the optstring).
  # NOTE: "S" is accepted by the optstring but has no case and falls through
  # to the invalid-option handler.
  # Fix: "interger" -> "integer" in the -b and -t error messages.
  while getopts ":si:b:t:uUpSv-:" opt
  do
    case $opt in
      -)
        case "${OPTARG}" in
          help)
            help="true"
            ;;
          dns)
            dns="true"
            ;;
          mount)
            mountPVC="true"
            ;;
          restore)
            restore="true"
            ;;
          delete-backup)
            deleteBackup="true"
            ;;
          list-backups)
            listBackups="true"
            ;;
          helm-enable)
            helmEnable="true"
            ;;
          apt-enable)
            aptEnable="true"
            ;;
          kubeapi-enable)
            kubeapiEnable="true"
            ;;
          no-color)
            noColor
            ;;
          *)
            echo -e "Invalid Option \"--$OPTARG\"\n" && help
            exit
            ;;
        esac
        ;;
      \?)
        echo -e "Invalid Option \"-$OPTARG\"\n" && help
        exit
        ;;
      :)
        echo -e "Option: \"-$OPTARG\" requires an argument\n" && help
        exit
        ;;
      b)
        # Backup retention count: must be a positive integer.
        re='^[0-9]+$'
        number_of_backups=$OPTARG
        ! [[ $OPTARG =~ $re ]] && echo -e "Error: -b needs to be assigned an integer\n\"""$number_of_backups""\" is not an integer" >&2 && exit
        [[ "$number_of_backups" -le 0 ]] && echo "Error: Number of backups is required to be at least 1" && exit
        ;;
      i)
        # Each -i adds one app name to the update ignore list.
        ignore+=("$OPTARG")
        ;;
      t)
        # Custom status-polling timeout in seconds.
        re='^[0-9]+$'
        timeout=$OPTARG
        ! [[ $timeout =~ $re ]] && echo -e "Error: -t needs to be assigned an integer\n\"""$timeout""\" is not an integer" >&2 && exit
        ;;
      s)
        sync="true"
        ;;
      U)
        update_all_apps="true"
        ;;
      u)
        update_apps="true"
        ;;
      p)
        prune="true"
        ;;
      v)
        verbose="true"
        ;;
      *)
        echo -e "Invalid Option \"--$OPTARG\"\n" && help
        exit
        ;;
    esac
  done
fi
|
|
||||||
|
|
||||||
title

# Self-update (unless disabled for a fork); updater may re-exec the new script.
[[ "$enableUpdate" == "true" ]] && updater "$@"

## Always check if a hotpatch needs to be applied
hotpatch

# Show menu if menu flag is set
if [[ "$no_args" == "true" ]]; then
  no_args
fi

## Exit if incompatible functions are called
[[ "$update_all_apps" == "true" && "$update_apps" == "true" ]] && echo -e "-U and -u cannot BOTH be called" && exit

## Exit if unsafe combinations are used
# Restore and update right after each other might cause super weird issues that are hard to bugtrace
[[ ( "$update_all_apps" == "true" || "$update_apps" == "true" ) && ( "$restore" == "true" ) ]] && echo -e "Update and Restore cannot both be done in the same run..." && exit

# Backup Deletion is generally considered to be a "once in a while" thing and not great to sync with automated updates for that reason
# Fix: this guard was duplicated (the first copy had a garbled message missing "and"); one corrected copy is kept.
[[ ( "$update_all_apps" == "true" || "$update_apps" == "true" ) && ( "$deleteBackup" == "true" ) ]] && echo -e "Update and Backup-Deletion cannot both be done in the same run..." && exit

# Backup listing is a printout, which would either clutter the output or be already outdated when combined with backup
[[ ( "$update_all_apps" == "true" || "$update_apps" == "true" ) && ( "$listBackups" == "true" ) ]] && echo -e "Update and Listing Backups cannot both be done in the same run..." && exit

# A backup taken right after a restore would be identical to the backup just restored
# Fix: added the missing space between ")" and "]]" (was a bash syntax error).
[[ ( "$restore" == "true" && "$number_of_backups" -ge 1 ) ]] && echo -e "Restoring a backup and making a backup cannot both be done in the same run..." && exit

# While technically possible, this is asking for user error... where a user by habit mistakes one prompt, for the other.
# Fix: added the missing space between ")" and "]]" (was a bash syntax error).
[[ ( "$restore" == "true" && "$deleteBackup" == "true" ) ]] && echo -e "restoring a backup and deleting a backup cannot both be done in the same run..." && exit


# Continue to call functions in specific order
[[ "$help" == "true" ]] && help
[[ "$helmEnable" == "true" ]] && helmEnable
[[ "$aptEnable" == "true" ]] && aptEnable
[[ "$kubeapiEnable" == "true" ]] && kubeapiEnable
[[ "$aptEnable" == "true" || "$helmEnable" == "true" || "$kubeapiEnable" == "true" ]] && exit
[[ "$listBackups" == "true" ]] && listBackups && exit
[[ "$deleteBackup" == "true" ]] && deleteBackup && exit
[[ "$dns" == "true" ]] && dns && exit
[[ "$restore" == "true" ]] && restore && exit
[[ "$mountPVC" == "true" ]] && mountPVC && exit
[[ "$number_of_backups" -ge 1 ]] && backup
[[ "$sync" == "true" ]] && sync
[[ "$update_all_apps" == "true" || "$update_apps" == "true" ]] && update_apps
[[ "$prune" == "true" ]] && prune

# Exit 0 explicitly: otherwise the script's exit status would be 1 whenever
# the final conditional above is false, even though nothing failed.
exit 0
|
|
Loading…
Reference in New Issue
Block a user