Merge pull request #251 from truecharts/hotpatch
Add first attempt at 22.12 hotpatch
commit 2b7988a8e6
hotpatch/2212/HP1.patch (new file)
@@ -0,0 +1,125 @@
diff --git plugins/chart_releases_linux/chart_release.py plugins/chart_releases_linux/chart_release.py
index 76e3825bc0f..f65cc0eac24 100644
--- plugins/chart_releases_linux/chart_release.py
+++ plugins/chart_releases_linux/chart_release.py
@@ -606,7 +606,7 @@ async def do_delete(self, job, release_name, options):
         # If we had pre-install jobs, it's possible we have leftover pods which the job did not remove
         # based on dev-specified settings of cleaning it up - let's remove those
         for pod in await self.middleware.call('k8s.pod.query', [['metadata.namespace', '=', namespace]]):
-            owner_references = pod['metadata'].get('owner_references')
+            owner_references = pod['metadata'].get('ownerReferences')
             if not isinstance(owner_references, list) or all(
                 owner_reference.get('name') not in pre_install_jobs for owner_reference in owner_references
             ):
@@ -658,7 +658,7 @@ async def remove_storage_class_and_dataset(self, release_name, job=None):
         pvc_volume_ds = os.path.join(release_ds, 'volumes')
         for pv in await self.middleware.call(
             'k8s.pv.query', [
-                ['spec.csi.volume_attributes.openebs\\.io/poolname', '=', pvc_volume_ds]
+                ['spec.csi.volumeAttributes.openebs\\.io/poolname', '=', pvc_volume_ds]
             ]
         ):
             await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])
diff --git plugins/chart_releases_linux/resources.py plugins/chart_releases_linux/resources.py
index c7180147a5f..941de79da45 100644
--- plugins/chart_releases_linux/resources.py
+++ plugins/chart_releases_linux/resources.py
@@ -158,13 +158,13 @@ async def retrieve_pv_pvc_mapping_internal(self, chart_release):
         }
 
         for pv in chart_release['resources']['persistent_volumes']:
-            claim_name = pv['spec'].get('claim_ref', {}).get('name')
+            claim_name = pv['spec'].get('claimRef', {}).get('name')
             if claim_name:
                 csi_spec = pv['spec']['csi']
-                volumes_ds = csi_spec['volume_attributes']['openebs.io/poolname']
+                volumes_ds = csi_spec['volumeAttributes']['openebs.io/poolname']
                 if (
                     os.path.join(chart_release['dataset'], 'volumes') != volumes_ds or
-                    csi_spec['volume_handle'] not in zfs_volumes
+                    csi_spec['volumeHandle'] not in zfs_volumes
                 ):
                     # We are only going to backup/restore pvc's which were consuming
                     # their respective storage class and we have related zfs volume present
@@ -174,8 +174,8 @@ async def retrieve_pv_pvc_mapping_internal(self, chart_release):
                 mapping[claim_name] = {
                     'name': pv_name,
                     'pv_details': pv,
-                    'dataset': os.path.join(volumes_ds, csi_spec['volume_handle']),
-                    'zv_details': zfs_volumes[csi_spec['volume_handle']],
+                    'dataset': os.path.join(volumes_ds, csi_spec['volumeHandle']),
+                    'zv_details': zfs_volumes[csi_spec['volumeHandle']],
                 }
         return mapping
 
@@ -247,11 +247,11 @@ async def get_workload_storage_details(self):
         # because of chart release reclaim policy being retain
         for pv in await self.middleware.call(
             'k8s.pv.query', [[
-                'spec.csi.volume_attributes.openebs\\.io/poolname', '^',
+                'spec.csi.volumeAttributes.openebs\\.io/poolname', '^',
                 f'{os.path.join(k8s_config["dataset"], "releases")}/'
             ]]
         ):
-            dataset = pv['spec']['csi']['volume_attributes']['openebs.io/poolname']
+            dataset = pv['spec']['csi']['volumeAttributes']['openebs.io/poolname']
             rl = dataset.split('/', 4)
             if len(rl) > 4:
                 mapping['persistent_volumes'][rl[3]].append(pv)
diff --git plugins/chart_releases_linux/scale_workload.py plugins/chart_releases_linux/scale_workload.py
index 117dab3a79c..e9525150278 100644
--- plugins/chart_releases_linux/scale_workload.py
+++ plugins/chart_releases_linux/scale_workload.py
@@ -246,10 +246,10 @@ async def get_workload_to_pod_mapping(self, namespace):
             for r in await self.middleware.call(
                 f'k8s.{key}.query', [
                     ['metadata.namespace', '=', namespace],
-                    ['metadata', 'rin', 'owner_references'],
+                    ['metadata', 'rin', 'ownerReferences'],
                 ], {'select': ['metadata']}
             ):
-                for owner_reference in filter(lambda o: o.get('uid'), r['metadata']['owner_references'] or []):
+                for owner_reference in filter(lambda o: o.get('uid'), r['metadata']['ownerReferences'] or []):
                     mapping[key][owner_reference['uid']][r['metadata']['uid']] = r
 
         pod_mapping = defaultdict(list)
diff --git plugins/kubernetes_linux/restore.py plugins/kubernetes_linux/restore.py
index 4897e3f8b7a..ec13a332b6e 100644
--- plugins/kubernetes_linux/restore.py
+++ plugins/kubernetes_linux/restore.py
@@ -218,7 +218,11 @@ def restore_backup(self, job, backup_name, options):
                 failed_pv_restores.append(f'Unable to create ZFS Volume for {pvc!r} PVC: {e}')
                 continue
 
+            # We need to safely access claim_ref / volume attribute keys as, with the k8s client API rewrite,
+            # the camel casing which was done by the kubernetes asyncio package is not happening anymore
             pv_spec = pv['pv_details']['spec']
+            claim_ref = pv_spec.get('claim_ref') or pv_spec['claimRef']
+            pv_volume_attrs = pv_spec['csi'].get('volume_attributes') or pv_spec['csi']['volumeAttributes']
             try:
                 self.middleware.call_sync('k8s.pv.create', {
                     'metadata': {
@@ -229,18 +233,18 @@ def restore_backup(self, job, backup_name, options):
                         'storage': pv_spec['capacity']['storage'],
                     },
                     'claimRef': {
-                        'name': pv_spec['claim_ref']['name'],
-                        'namespace': pv_spec['claim_ref']['namespace'],
+                        'name': claim_ref['name'],
+                        'namespace': claim_ref['namespace'],
                     },
                     'csi': {
                         'volumeAttributes': {
                             'openebs.io/poolname': RE_POOL.sub(
-                                f'{k8s_pool}\\1', pv_spec['csi']['volume_attributes']['openebs.io/poolname']
+                                f'{k8s_pool}\\1', pv_volume_attrs['openebs.io/poolname']
                             )
                         },
-                        'volumeHandle': pv_spec['csi']['volume_handle'],
+                        'volumeHandle': pv_spec['csi'].get('volume_handle') or pv_spec['csi']['volumeHandle'],
                     },
-                    'storageClassName': pv_spec['storage_class_name'],
+                    'storageClassName': pv_spec.get('storage_class_name') or pv_spec['storageClassName'],
                 },
             })
         except Exception as e:
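Every hunk in HP1.patch is the same class of fix: the rewritten middleware Kubernetes client no longer snake_cases the API's native camelCase keys (ownerReferences, claimRef, volumeAttributes, volumeHandle) the way the old kubernetes asyncio package did, so lookups must use the camelCase spelling or tolerate both. A minimal Python sketch of the dual-key pattern restore.py uses above — the helper name and sample data are hypothetical, not part of the patch:

# Hypothetical helper mirroring `pv_spec.get('claim_ref') or pv_spec['claimRef']`:
# prefer the legacy snake_case key, fall back to the camelCase spelling.
def get_compat(mapping, snake_key, camel_key, default=None):
    return mapping.get(snake_key) or mapping.get(camel_key, default)

# Sample PV spec as the new client would return it (camelCase only).
pv_spec = {'claimRef': {'name': 'data-pvc', 'namespace': 'ix-myapp'}}
claim_ref = get_compat(pv_spec, 'claim_ref', 'claimRef', {})
print(claim_ref['namespace'])  # -> ix-myapp

Using `or` rather than an explicit presence check means an empty snake_case value also falls through to the camelCase key, which is the behaviour the patched restore path relies on.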
includes/patch.sh (new file)
@@ -0,0 +1,16 @@
#!/bin/bash

patchv22120(){
  echo "Applying 22.12 HotPatch 1"
  ( wget -q -P /tmp https://github.com/truecharts/truetool/raw/master/hotpatch/2212/HP1.patch && echo "download completed" || echo "download failed" ) && ( patch -s -p0 -d /usr/lib/python3/dist-packages/middlewared/ < /tmp/HP1.patch && echo "patch completed" || echo "patch failed" ) && rm -rf /tmp/HP1.patch
}
export -f patchv22120


hotpatch(){
  echo "Starting hotpatcher..."
  if [ "$(cli -m csv -c 'system version' | awk -F '-' '{print $3}')" == "22.12.0" ]; then
    patchv22120
  fi
}
export -f hotpatch
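For comparison, a Python sketch of the version gate hotpatch() implements above; the 'TrueNAS-SCALE-22.12.0' string format is an assumption inferred from the awk -F '-' field split:

# Hypothetical restatement of the shell gate: apply HP1 only on SCALE 22.12.0.
def needs_hotpatch(version_string: str) -> bool:
    # Equivalent of: cli -m csv -c 'system version' | awk -F '-' '{print $3}'
    fields = version_string.split('-')
    return len(fields) >= 3 and fields[2] == '22.12.0'

assert needs_hotpatch('TrueNAS-SCALE-22.12.0')
assert not needs_hotpatch('TrueNAS-SCALE-22.02.4')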
@@ -22,6 +22,8 @@ source includes/colors.sh
 source includes/dns.sh
 # shellcheck source=includes/help.sh
 source includes/help.sh
+# shellcheck source=includes/patch.sh
+source includes/patch.sh
 # shellcheck source=includes/mount.sh
 source includes/mount.sh
 # shellcheck source=includes/no_args.sh
@@ -134,6 +136,9 @@ if [[ "$no_args" == "true" ]]; then
 no_args
 fi
 
+## Always check if a hotpatch needs to be applied
+hotpatch
+
 ## Exit if incompatible functions are called
 [[ "$update_all_apps" == "true" && "$update_apps" == "true" ]] && echo -e "-U and -u cannot BOTH be called" && exit
 