Mirror of https://gitlab.com/futo-org/fcast.git, synced 2025-06-24 21:25:23 +00:00
Initial commit of new updater

parent 698c10f356
commit 869ac1433f

16 changed files with 952 additions and 295 deletions
@@ -1,150 +0,0 @@
import os
import hashlib
import boto3
from botocore.client import Config
import shutil
from functools import cmp_to_key

ACCOUNT_ID = os.environ.get('R2_ACCOUNT_ID')
ACCESS_KEY_ID = os.environ.get('R2_ACCESS_KEY_ID')
SECRET_ACCESS_KEY = os.environ.get('R2_SECRET_ACCESS_KEY')
BUCKET_NAME = os.environ.get('R2_BUCKET_NAME')

DEPLOY_DIR = os.environ.get('FCAST_DO_RUNNER_DEPLOY_DIR')
TEMP_DIR = os.path.join(DEPLOY_DIR, 'temp')
LOCAL_CACHE_DIR = os.path.join(DEPLOY_DIR, 'cache')

# Customizable CI parameters
CACHE_VERSION_AMOUNT = int(os.environ.get('CACHE_VERSION_AMOUNT', default="-1"))
RELEASE_CANDIDATE = bool(os.environ.get('RELEASE_CANDIDATE', default=False))
RELEASE_CANDIDATE_VERSION = int(os.environ.get('RELEASE_CANDIDATE_VERSION', default="1"))

# Utility functions
def compare_versions(x, y):
    x_parts = x.split('.')
    y_parts = y.split('.')

    for i in range(len(x_parts)):
        if x_parts[i] < y_parts[i]:
            return -1
        elif x_parts[i] > y_parts[i]:
            return 1

    return 0

# Initial setup

# Note: Cloudflare R2 docs outdated, secret is not supposed to be hashed...

# Hash the secret access key using SHA-256
#hashed_secret_key = hashlib.sha256(SECRET_ACCESS_KEY.encode()).hexdigest()

# Configure the S3 client for Cloudflare R2
s3 = boto3.client('s3',
    endpoint_url=f'https://{ACCOUNT_ID}.r2.cloudflarestorage.com',
    aws_access_key_id=ACCESS_KEY_ID,
    # aws_secret_access_key=hashed_secret_key,
    aws_secret_access_key=SECRET_ACCESS_KEY,
    config=Config(
        signature_version='s3v4'
    )
)
list_response = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix='electron/')
bucket_files = list_response.get('Contents', [])
bucket_versions_full = sorted(set(map(lambda x: x['Key'].split('/')[1], bucket_files)), key=cmp_to_key(compare_versions), reverse=True)
bucket_versions = bucket_versions_full if CACHE_VERSION_AMOUNT < 0 else bucket_versions_full[:CACHE_VERSION_AMOUNT]
os.makedirs(TEMP_DIR, exist_ok=True)

# CI functions

def copy_artifacts_to_local_cache():
    if len(os.listdir('/artifacts')) == 0:
        print('No artifacts were built...')
        return None

    print('Copying artifacts to cache...')
    # All artifacts should have the same version in format: /artifacts/PKG/OS/ARCH/fcast-receiver-VERSION-OS-ARCH.PKG
    version = os.listdir('/artifacts/zip/linux/x64')[0].split('-')[2]
    dst = os.path.join(TEMP_DIR, version)
    print(f'Current app version: {version}')

    shutil.copytree('/artifacts', dst, dirs_exist_ok=True, ignore=shutil.ignore_patterns('*.w*'))
    for dir in os.listdir('/artifacts'):
        shutil.rmtree(os.path.join('/artifacts', dir))

    return version

def sync_local_cache():
    print('Syncing local cache with s3...')
    local_files = []
    for root, _, files in os.walk(LOCAL_CACHE_DIR):
        for filename in files:
            rel_path = os.path.relpath(os.path.join(root, filename), LOCAL_CACHE_DIR)
            version = os.path.relpath(rel_path, 'electron/').split('/')[0]

            if version in bucket_versions:
                local_files.append(rel_path)
            else:
                print(f'Purging file from local cache: {rel_path}')
                os.remove(os.path.join(root, filename))

    for obj in bucket_files:
        filename = obj['Key']
        save_path = os.path.join(LOCAL_CACHE_DIR, filename)

        if filename not in local_files:
            print(f'Downloading file: {filename}')
            get_response = s3.get_object(Bucket=BUCKET_NAME, Key=filename)

            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            with open(save_path, 'wb') as file:
                file.write(get_response['Body'].read())

def upload_local_cache(current_version):
    print('Uploading local cache to s3...')
    shutil.copytree(TEMP_DIR, os.path.join(LOCAL_CACHE_DIR, 'electron'), dirs_exist_ok=True)

    local_files = []
    for root, _, files in os.walk(LOCAL_CACHE_DIR):
        for filename in files:
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, LOCAL_CACHE_DIR)
            version = rel_path.split('/')[1]

            if RELEASE_CANDIDATE and version == current_version:
                rc_path = full_path[:full_path.rfind('.')] + f'-rc{RELEASE_CANDIDATE_VERSION}' + full_path[full_path.rfind('.'):]
                os.rename(full_path, rc_path)
                rel_path = os.path.relpath(rc_path, LOCAL_CACHE_DIR)

            local_files.append(rel_path)

    for file_path in local_files:
        if file_path not in map(lambda x: x['Key'], bucket_files):
            print(f'Uploading file: {file_path}')

            with open(os.path.join(LOCAL_CACHE_DIR, file_path), 'rb') as file:
                put_response = s3.put_object(
                    Body=file,
                    Bucket=BUCKET_NAME,
                    Key=file_path,
                )

def generate_delta_updates(current_version):
    pass

# generate html previous version browsing (based off of bucket + local if it does not have all files)
def generate_previous_releases_page():
    pass

def update_website():
    pass

# CI Operations
current_version = copy_artifacts_to_local_cache()
sync_local_cache()
# generate_delta_updates(current_version)
upload_local_cache(current_version)
# generate_previous_releases_page()
# update_website()

shutil.rmtree(TEMP_DIR)
1 receivers/electron/scripts/deploy/__init__.py Normal file
@@ -0,0 +1 @@
from .util import *
252 receivers/electron/scripts/deploy/deploy.py Normal file
@@ -0,0 +1,252 @@
import os
import hashlib
import json
import shutil
from functools import cmp_to_key
from util import BUCKET_NAME, S3Client, PackageFormat, ArtifactVersion, compare_versions, generate_update_tarball

DEPLOY_DIR = os.environ.get('FCAST_DO_RUNNER_DEPLOY_DIR')
TEMP_DIR = os.path.join(DEPLOY_DIR, 'temp')
LOCAL_CACHE_DIR = os.path.join(DEPLOY_DIR, 'cache')
BASE_DOWNLOAD_URL = BUCKET_NAME.replace('-', '.')
EXCLUDED_DELTA_VERSIONS = ["1.0.14"]

# Version tracking for migration support
RELEASES_JSON_VERSION = '1'

# Customizable CI parameters
CACHE_VERSION_AMOUNT = int(os.environ.get('CACHE_VERSION_AMOUNT', default="-1"))

s3 = S3Client(CACHE_VERSION_AMOUNT, EXCLUDED_DELTA_VERSIONS)

# CI functions
def ensure_files_exist(dirs, files):
    for d in dirs:
        os.makedirs(d, exist_ok=True)

    for f in files:
        if not os.path.exists(os.path.join(LOCAL_CACHE_DIR, f)):
            s3.download_file(os.path.join(LOCAL_CACHE_DIR, f), f)

def copy_artifacts_to_local_cache():
    version = None
    with open(os.path.join(LOCAL_CACHE_DIR, 'electron', 'releases.json'), 'r') as file:
        releases = json.load(file)
        version = ArtifactVersion(releases['currentVersion'], 'stable', None)

    if len(os.listdir('/artifacts')) == 0:
        print('No artifacts were built...')
        return version

    print('Copying artifacts to cache...')
    # Picking a random package that exists from the build pipeline
    artifact = PackageFormat(os.listdir('/artifacts/zip/linux/x64')[0])
    version = ArtifactVersion(artifact.version, artifact.channel, artifact.channel_version)
    dst = os.path.join(TEMP_DIR, version.version)

    shutil.copytree('/artifacts', dst, dirs_exist_ok=True, ignore=shutil.ignore_patterns('*.w*'))
    for dir in os.listdir('/artifacts'):
        shutil.rmtree(os.path.join('/artifacts', dir))

    print(f'Current app version: {version}')
    return version

def sync_local_cache():
    print('Syncing local cache with s3...')
    local_files = []
    for root, _, files in os.walk(LOCAL_CACHE_DIR):
        for filename in files:
            rel_path = os.path.relpath(os.path.join(root, filename), LOCAL_CACHE_DIR)
            version = os.path.relpath(rel_path, 'electron/').split('/')[0]

            if version in s3.get_versions() or filename == 'releases.json':
                local_files.append(rel_path)
            elif filename != 'releases.json':
                print(f'Purging file from local cache: {rel_path}')
                os.remove(os.path.join(root, filename))

    for obj in s3.get_bucket_files():
        filename = obj['Key']
        path = os.path.join(LOCAL_CACHE_DIR, filename)

        if filename not in local_files:
            s3.download_file(path, filename)

def upload_local_cache():
    print('Uploading local cache to s3...')
    shutil.copytree(TEMP_DIR, os.path.join(LOCAL_CACHE_DIR, 'electron'), dirs_exist_ok=True)

    local_files = []
    for root, _, files in os.walk(LOCAL_CACHE_DIR):
        for filename in files:
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, LOCAL_CACHE_DIR)
            local_files.append(rel_path)

    for file_path in local_files:
        if file_path not in map(lambda x: x['Key'], s3.get_bucket_files()) or os.path.basename(file_path) == 'releases.json':
            s3.upload_file(os.path.join(LOCAL_CACHE_DIR, file_path), file_path)

# TODO: WIP
def generate_delta_updates(artifact_version):
    delta_info = {}

    releases = None
    with open(os.path.join(LOCAL_CACHE_DIR, 'electron', 'releases.json'), 'r') as file:
        releases = json.load(file)

    # Get sha digest from base version for integrity validation
    print('Generating sha digests from previous updates...')
    for root, _, files in os.walk(LOCAL_CACHE_DIR):
        for filename in filter(lambda f: f.endswith('.zip'), files):
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, os.path.join(LOCAL_CACHE_DIR, 'electron'))
            package = PackageFormat(rel_path)

            if package.channel != artifact_version.channel or package.version in EXCLUDED_DELTA_VERSIONS:
                continue

            print(f'Generating sha digests from: {full_path}')
            artifact_name, digest = generate_update_tarball(full_path, rel_path, TEMP_DIR, package)
            print(f'DIGEST INFO: {artifact_name} {digest}')

            os_dict = delta_info.get(package.channel, {})
            arch_dict = os_dict.get(package.os, {})
            version_dict = arch_dict.get(package.arch, {})

            delta_entry = {
                'path': os.path.join(TEMP_DIR, os.path.dirname(rel_path), artifact_name),
                'digest': digest,
            }

            version_dict[package.version] = delta_entry
            arch_dict[package.arch] = version_dict
            os_dict[package.os] = arch_dict
            delta_info[package.channel] = os_dict

    # TODO: Add limit on amount of delta patches to create (either fixed number or by memory savings)
    # TODO: Parallelize bsdiff invocation since it's single-threaded, provided enough RAM is available
    print('Generating delta updates...')
    previous_versions = filter(lambda v: v not in EXCLUDED_DELTA_VERSIONS, releases['previousVersions'])
    for delta_version in previous_versions:
        # Create delta patches
        for root, _, files in os.walk(TEMP_DIR):
            for filename in filter(lambda f: f.endswith('.zip'), files):
                full_path = os.path.join(root, filename)
                rel_path = os.path.relpath(full_path, TEMP_DIR)
                package = PackageFormat(rel_path)

                if package.version in EXCLUDED_DELTA_VERSIONS:
                    continue

                artifact_name, digest = generate_update_tarball(full_path, rel_path, TEMP_DIR, package)
                base_file = delta_info[package.channel][package.os][package.arch][delta_version]['path']
                new_file = os.path.join(os.path.dirname(full_path), artifact_name)
                delta_file = os.path.join(os.path.dirname(full_path), f'{package.name}-{package.version}-{package.os_pretty}-{package.arch}-delta-{delta_version}.delta')
                command = f'bsdiff {base_file} {new_file} {delta_file}'

                print(f'temp skipping delta generation: {command}')
                # print(f'Generating delta update: {command}')
                # os.system(command)
                # os.remove(base_file)
                # os.remove(new_file)
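
                # Sketch of the receiving side (an assumption, not something this script runs):
                # the client would rebuild the new archive with bsdiff's companion tool, e.g.
                #   bspatch <base>.tar <new>.tar <package>.delta
                # and then check the result against the published sha256 digest before installing.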

    return delta_info

def generate_releases_json(artifact_version, delta_info):
    print('Generating releases.json...')
    releases = None
    with open(os.path.join(LOCAL_CACHE_DIR, 'electron', 'releases.json'), 'r') as file:
        releases = json.load(file)

    current_version = releases.get('currentVersion', '0.0.0')
    current_releases = releases.get('currentReleases', {})
    channel_current_versions = releases.get('channelCurrentVersions', {})

    all_versions = releases.get('allVersions', [])
    if current_version not in all_versions:
        all_versions.append(current_version)

    for root, _, files in os.walk(TEMP_DIR):
        # Only offer zip and delta updates. Other packages will update from zip packages
        for filename in filter(lambda f: f.endswith('.zip') or f.endswith('.delta'), files):
            full_path = os.path.join(root, filename)
            rel_path = os.path.relpath(full_path, TEMP_DIR)
            package = PackageFormat(rel_path)
            url = f'https://{BASE_DOWNLOAD_URL}/electron/{rel_path}'

            digest = ''
            with open(full_path, 'rb') as file:
                digest = hashlib.sha256(file.read()).hexdigest()

            os_dict = current_releases.get(package.channel, {})
            arch_dict = os_dict.get(package.os, {})
            entry_dict = arch_dict.get(package.arch, {})

            if package.is_delta:
                delta_dict = entry_dict.get('deltas', {})
                delta_entry = {
                    'deltaUrl': url,
                    'sha256Digest': digest,
                    'baseVersion': package.delta_base_version,
                    'baseSha256Digest': delta_info[package.channel][package.os][package.arch][package.delta_base_version]['digest'],
                }
                delta_dict[package.delta_base_version] = delta_entry
                entry_dict['deltas'] = delta_dict
            else:
                entry_dict['url'] = url
                entry_dict['sha256Digest'] = digest

            arch_dict[package.arch] = entry_dict
            os_dict[package.os] = arch_dict
            current_releases[package.channel] = os_dict

            if package.channel != 'stable':
                channel_current_versions[package.channel] = max(int(package.channel_version), channel_current_versions.get(package.channel, 0))

    if artifact_version.channel == 'stable' and max([artifact_version.version, current_version], key=cmp_to_key(compare_versions)) == artifact_version.version:
        releases['currentVersion'] = artifact_version.version
    else:
        releases['currentVersion'] = current_version

    releases['previousVersions'] = s3.get_versions(full=True)
    releases['fileVersion'] = RELEASES_JSON_VERSION
    releases['allVersions'] = all_versions
    releases['channelCurrentVersions'] = channel_current_versions
    releases['currentReleases'] = current_releases

    with open(os.path.join(LOCAL_CACHE_DIR, 'electron', 'releases.json'), 'w') as file:
        json.dump(releases, file, indent=4)
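
# For reference, a sketch of the releases.json document the function above assembles
# (field values are illustrative placeholders, not real release data):
#
# {
#     "fileVersion": "1",
#     "currentVersion": "X.Y.Z",
#     "previousVersions": ["..."],
#     "allVersions": ["..."],
#     "channelCurrentVersions": {"beta": 2},
#     "currentReleases": {
#         "<channel>": {
#             "<os>": {
#                 "<arch>": {
#                     "url": "https://<bucket-domain>/electron/<...>.zip",
#                     "sha256Digest": "<hex>",
#                     "deltas": {
#                         "<baseVersion>": {
#                             "deltaUrl": "https://<bucket-domain>/electron/<...>.delta",
#                             "sha256Digest": "<hex>",
#                             "baseVersion": "<baseVersion>",
#                             "baseSha256Digest": "<hex>"
#                         }
#                     }
#                 }
#             }
#         }
#     }
# }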

def generate_previous_releases_page():
    pass

def update_website():
    pass

# CI Operations
ensure_files_exist(dirs=[
        '/artifacts',
        DEPLOY_DIR,
        TEMP_DIR,
        LOCAL_CACHE_DIR,
        os.path.join(LOCAL_CACHE_DIR, 'electron')
    ],
    files=[
        os.path.join('electron', 'releases.json')
    ])
artifact_version = copy_artifacts_to_local_cache()
sync_local_cache()

# Disabling delta update generation for now...
# delta_info = generate_delta_updates(artifact_version)
delta_info = {}

generate_releases_json(artifact_version, delta_info)
upload_local_cache()
# generate_previous_releases_page()
# update_website()

print('Cleaning up...')
shutil.rmtree(TEMP_DIR)
211 receivers/electron/scripts/deploy/util.py Normal file
@@ -0,0 +1,211 @@
import boto3
import hashlib
import os
import requests
import shutil
from botocore.client import Config
from collections import namedtuple
from functools import cmp_to_key

CLOUDFLARE_CACHE_TOKEN = os.environ.get('CLOUDFLARE_CACHE_TOKEN')
ZONE_ID = os.environ.get('CLOUDFLARE_ZONE_ID')
ACCOUNT_ID = os.environ.get('R2_ACCOUNT_ID')
ACCESS_KEY_ID = os.environ.get('R2_ACCESS_KEY_ID')
SECRET_ACCESS_KEY = os.environ.get('R2_SECRET_ACCESS_KEY')
BUCKET_NAME = os.environ.get('R2_BUCKET_NAME')

class S3Client:
    def __init__(self, cache_version_amount, excluded_delta_versions):
        # Note: Cloudflare R2 docs outdated, secret is not supposed to be hashed...

        # Hash the secret access key using SHA-256
        #hashed_secret_key = hashlib.sha256(SECRET_ACCESS_KEY.encode()).hexdigest()

        # Configure the S3 client for Cloudflare R2
        self.s3 = boto3.client('s3',
            endpoint_url=f'https://{ACCOUNT_ID}.r2.cloudflarestorage.com',
            aws_access_key_id=ACCESS_KEY_ID,
            # aws_secret_access_key=hashed_secret_key,
            aws_secret_access_key=SECRET_ACCESS_KEY,
            config=Config(
                signature_version='s3v4'
            )
        )
        list_response = self.s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix='electron/')
        self.bucket_files = list_response.get('Contents', [])

        bucket_files_versions = filter(lambda x: x['Key'] != 'electron/releases.json', self.bucket_files)
        self.bucket_versions_full = sorted(set(map(lambda x: x['Key'].split('/')[1], bucket_files_versions)), key=cmp_to_key(compare_versions), reverse=True)
        self.bucket_versions = self.bucket_versions_full if cache_version_amount < 0 else self.bucket_versions_full[:cache_version_amount]
        self.bucket_delta_versions = [v for v in self.bucket_versions if v not in excluded_delta_versions]

    def get_bucket_files(self):
        return self.bucket_files

    def get_versions(self, full=False):
        return self.bucket_versions_full if full else self.bucket_versions

    def download_file(self, full_path, s3_path):
        print(f'Downloading file: {s3_path}')
        get_response = self.s3.get_object(Bucket=BUCKET_NAME, Key=s3_path)

        os.makedirs(os.path.dirname(full_path), exist_ok=True)
        with open(full_path, 'wb') as file:
            file.write(get_response['Body'].read())

    def upload_file(self, full_path, s3_path):
        print(f'Uploading file: {s3_path}')

        domain = BUCKET_NAME.replace('-', '.')
        purge_response = requests.post(
            f'https://api.cloudflare.com/client/v4/zones/{ZONE_ID}/purge_cache',
            headers={
                'Authorization': f'Bearer {CLOUDFLARE_CACHE_TOKEN}',
                'Content-Type': 'application/json',
            },
            json={
                'files': [f'https://{domain}/{s3_path}']
            }
        )

        if purge_response.status_code != 200:
            print(f'Error while purging cache: {purge_response}')

        with open(full_path, 'rb') as file:
            put_response = self.s3.put_object(
                Body=file,
                Bucket=BUCKET_NAME,
                Key=s3_path,
            )

# Utility types
class PackageFormat:
    """Parses an artifact path to extract package information

    Artifact format: ((VERSION)?/PKG/(OS/ARCH|ARCH)/)?fcast-receiver-VERSION-OS-ARCH(-setup|-delta-DELTA_BASE_VERSION)?(-CHANNEL-CHANNEL_VERSION)?.PKG
    """

    def __init__(self, path):
        self.version = None
        self.type = None
        self.os = None
        self.os_pretty = None
        self.arch = None
        self.name = None
        self.is_delta = False
        self.delta_base_version = None
        self.channel = None
        self.channel_version = None

        dirs = path.split('/')
        file = path.split('-')
        self.name = 'fcast-receiver'

        if len(dirs) > 1:
            parse_index = 0

            if dirs[parse_index].count('.') > 0:
                self.version = dirs[parse_index]
                self.type = dirs[parse_index + 1]
                parse_index += 2
            else:
                self.type = dirs[parse_index]
                parse_index += 1

            if self.type == 'zip':
                self.os = dirs[parse_index]
                self.os_pretty = 'windows' if self.os == 'win32' else 'macOS' if self.os == 'darwin' else 'linux'
                self.arch = dirs[parse_index + 1]
                parse_index += 2
            else:
                if self.type == 'wix':
                    self.os = 'win32'
                    self.os_pretty = 'windows'
                    self.arch = dirs[parse_index]
                elif self.type == 'dmg':
                    self.os = 'darwin'
                    self.os_pretty = 'macOS'
                    self.arch = dirs[parse_index]
                elif self.type == 'deb' or self.type == 'rpm':
                    self.os = 'linux'
                    self.os_pretty = 'linux'
                    self.arch = dirs[parse_index]
                parse_index += 1

            # Unsupported package format (e.g. 1.0.14)
            if self.version == '1.0.14':
                return

            file = dirs[parse_index].split('-')

        self.version = file[2]
        channel_index = 5
        if len(file) == channel_index:
            self.channel = 'stable'
            return

        if file[channel_index] == 'delta':
            self.is_delta = True
            self.delta_base_version = file[channel_index + 1].replace('.delta', '')
            channel_index += 2
        elif file[channel_index] == 'setup':
            channel_index += 1

        if len(file) > channel_index:
            self.channel = file[channel_index]
            version = file[channel_index + 1]
            self.channel_version = version[:version.rfind('.')]
        else:
            self.channel = 'stable'

    def packageNamePretty(self):
        if self.channel != 'stable':
            return f'{self.name}-{self.version}-{self.os_pretty}-{self.arch}-{self.channel}-{self.channel_version}'
        else:
            return f'{self.name}-{self.version}-{self.os_pretty}-{self.arch}'

    def __str__(self) -> str:
        return f'''PackageFormat(type={self.type}, version={self.version}, os={self.os}, arch={self.arch},
            is_delta={self.is_delta}, delta_base_version={self.delta_base_version}, channel={self.channel},
            channel_version={self.channel_version})'''

ArtifactVersion = namedtuple('ArtifactVersion', ['version', 'channel', 'channel_version'])

# Utility functions
def compare_versions(x, y):
    x_parts = x.split('.')
    y_parts = y.split('.')

    for i in range(min(len(x_parts), len(y_parts))):
        if int(x_parts[i]) < int(y_parts[i]):
            return -1
        elif int(x_parts[i]) > int(y_parts[i]):
            return 1

    return 0
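
# Illustrative check (assumes purely numeric version parts): with the parts compared
# numerically, compare_versions('1.0.10', '1.0.9') returns 1, so the
# sorted(..., key=cmp_to_key(compare_versions), reverse=True) call in S3Client.__init__
# lists the newest cached version first.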

def generate_update_tarball(full_path, rel_path, working_dir, package):
    if package.os == 'darwin':
        temp_working_dir = os.path.join(working_dir, os.path.dirname(rel_path), f'{package.name}-{package.os}-{package.arch}')
        extract_dir = temp_working_dir
    else:
        temp_working_dir = os.path.join(working_dir, os.path.dirname(rel_path))
        extract_dir = os.path.join(temp_working_dir, f'{package.name}-{package.os}-{package.arch}')

    shutil.unpack_archive(full_path, temp_working_dir)

    if package.os == 'darwin':
        shutil.make_archive(os.path.join(working_dir, os.path.dirname(rel_path), package.packageNamePretty()), 'tar', extract_dir)
        shutil.rmtree(temp_working_dir)

        temp_working_dir = os.path.join(working_dir, os.path.dirname(rel_path))
    else:
        shutil.make_archive(os.path.join(temp_working_dir, package.packageNamePretty()), 'tar', extract_dir)
        shutil.rmtree(extract_dir)

    digest = ''
    artifact_name = f'{package.packageNamePretty()}.tar'
    with open(os.path.join(temp_working_dir, artifact_name), 'rb') as file:
        digest = hashlib.sha256(file.read()).hexdigest()

    return artifact_name, digest