This commit is contained in:
2025-08-05 15:15:36 +02:00
parent 4bd960ed05
commit 7fabb4163a
192 changed files with 14901 additions and 0 deletions

0
_archive/web/__init__.py Normal file
View File

46
_archive/web/auth.py Normal file
View File

@@ -0,0 +1,46 @@
from datetime import datetime, timedelta, timezone

import jwt
from jwt.exceptions import ExpiredSignatureError, InvalidTokenError
class JWTHandler:
    """Issue and verify HS256-signed JWT access tokens.

    Defaults come from the ``JWT_SECRET_KEY`` environment variable and the
    class-level constants; each can be overridden per instance through the
    constructor.
    """

    import os

    # Default signing key, read once when the class is defined.  Validation
    # is deliberately deferred to __init__: raising here (at import time)
    # would make it impossible to ever pass an explicit secret_key.
    SECRET_KEY = os.getenv('JWT_SECRET_KEY')
    ALGORITHM = 'HS256'
    ACCESS_TOKEN_EXPIRE_MINUTES = 30

    def __init__(self, secret_key=None, algorithm=None, expire_minutes=None):
        """Override the class defaults where arguments are provided.

        Raises:
            EnvironmentError: if no secret key is available from either the
                ``secret_key`` argument or the JWT_SECRET_KEY env variable.
        """
        if secret_key:
            self.SECRET_KEY = secret_key
        if algorithm:
            self.ALGORITHM = algorithm
        if expire_minutes:
            self.ACCESS_TOKEN_EXPIRE_MINUTES = expire_minutes
        # Fail fast at construction time so a handler can never silently
        # sign tokens with SECRET_KEY = None.
        if not self.SECRET_KEY:
            raise EnvironmentError('JWT_SECRET_KEY environment variable is not set')

    def create_access_token(self, data: dict):
        """Return a signed JWT for *data* with an ``exp`` claim added."""
        to_encode = data.copy()
        # Timezone-aware "now": datetime.utcnow() is deprecated (Python 3.12+).
        expire = datetime.now(timezone.utc) + timedelta(
            minutes=self.ACCESS_TOKEN_EXPIRE_MINUTES
        )
        to_encode.update({'exp': expire})
        return jwt.encode(to_encode, self.SECRET_KEY, algorithm=self.ALGORITHM)

    def verify_access_token(self, token: str):
        """Decode *token* and return its ``sub`` claim.

        Raises:
            InvalidTokenError: if the token is expired, malformed, or lacks
                a ``sub`` claim.  (ExpiredSignatureError is a subclass of
                InvalidTokenError, so one handler covers both.)
        """
        try:
            payload = jwt.decode(
                token, self.SECRET_KEY, algorithms=[self.ALGORITHM]
            )
            email: str = payload.get('sub')
            if email is None:
                raise InvalidTokenError
            return email
        except InvalidTokenError as exc:
            # Re-raise uniformly but keep the original cause for debugging.
            raise InvalidTokenError from exc
def new(secret_key=None, algorithm=None, expire_minutes=None) -> JWTHandler:
    """Factory: build a :class:`JWTHandler` with the given overrides."""
    return JWTHandler(
        secret_key=secret_key,
        algorithm=algorithm,
        expire_minutes=expire_minutes,
    )

204
_archive/web/deduper.py Normal file
View File

@@ -0,0 +1,204 @@
import hashlib
from typing import Dict
import os
from pathlib import Path
import json
import shutil
class Deduper:
    """
    Tools to start from an existing directory to make sure we don't have duplicates in template.

    Acts as a content-addressed file pool: every file under *path* is tracked
    by its MD5 digest, and adding an already-known file returns the existing
    copy instead of storing it again.
    """

    def __init__(self, path: str):
        self.path = Path(path).expanduser().resolve()
        self.path.mkdir(parents=True, exist_ok=True)
        # key is the MD5 hex digest, value is the path of the stored object
        # relative to the pool root (self.path)
        self.hash_dict: Dict[str, str] = {}
        # Persisted index so re-opening the pool does not rehash every file.
        self.meta_file = self.path / ".meta.json"
        if self.meta_file.exists():
            self.import_dict()
        else:
            self.load_assets()

    def load_assets(self):
        """Load all the existing files and calculate their hashes.

        Rebuilds the index from disk and persists it; raises if two files in
        the pool have identical content.
        """
        self.hash_dict = {}
        for root, _, files in os.walk(self.path):
            for file in files:
                file_path = os.path.join(root, file)
                relative_path = os.path.relpath(file_path, self.path)
                print(f" - load deduped file {file_path}")
                # Hidden files (including the .meta.json index itself) are skipped.
                if not file.startswith('.'):
                    file_hash = self._calculate_md5(file_path)
                    if file_hash in self.hash_dict:
                        raise Exception(f"duplicate in dedupe pool: {file_path}")
                    self.hash_dict[file_hash] = relative_path
        self.export_dict()

    def _calculate_md5(self, file_path: str) -> str:
        """Return the MD5 hex digest of *file_path*, read in 4 KiB chunks."""
        hash_md5 = hashlib.md5()
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def export_dict(self):
        """Export the hash dictionary to a JSON file"""
        with open(self.meta_file, 'w') as f:
            json.dump(self.hash_dict, f, indent=2)

    def import_dict(self):
        """Import the hash dictionary from a JSON file"""
        with open(self.meta_file, 'r') as f:
            self.hash_dict = json.load(f)

    def path_check(self, path: str = "") -> str:
        """
        Calculate MD5 from the path to look up the file in the deduper.
        Will return empty string if not found.

        Note: a missing source file is reported (printed) but not raised.
        """
        file_path = Path(path)
        if not file_path.exists():
            print(f" **ERROR: File '{path}' does not exist.")
            return ""
            # raise FileNotFoundError(f"File '{path}' does not exist.")
        file_hash = self._calculate_md5(str(file_path))
        return self.path_find(file_hash=file_hash)

    def path_find(self, path: str = "", file_hash: str = "") -> str:
        """
        Return the relative path of the found object in relation to the dedupe store.
        Will return empty string if not found.

        Lookup is by *path* (case-insensitive filename match; when the query
        has directory parts the full relative path must match) or by
        *file_hash* (direct index lookup).  Exactly one of the two must be
        given; an ambiguous filename match raises.
        """
        res = []
        if path != "":
            input_path = Path(path)
            input_filename = input_path.name
            for _, stored_path in self.hash_dict.items():
                stored_path_path = Path(stored_path)
                if stored_path_path.name.lower() == input_filename.lower():
                    # Bare filename query: any filename match counts.
                    if len(input_path.parts) == 1:
                        res.append(stored_path)
                    # Query with directories: require the whole relative path.
                    elif input_path.as_posix().lower() == stored_path_path.as_posix().lower():
                        res.append(stored_path)
            if len(res) == 1:
                return res[0]
            elif len(res) == 0:
                return ""
            else:
                raise Exception(f"found more than 1: {path}")
        elif file_hash != "":
            if file_hash in self.hash_dict:
                return self.hash_dict[file_hash]
            return ""
        else:
            raise Exception("need to input path or file_hash")

    def add(self, source_path: str, dest_path_rel: str = "", dest_dir_rel="") -> str:
        """
        Add a file to the specified path in the dedupe pool if it doesn't exist.

        Args:
            source_path (str): Path to the source file to be copied.
            dest_path_rel (str): Path where the file should be copied to, relative to the dedupe pool.
            dest_dir_rel (str): Directory (relative to the pool) used to build the
                destination when dest_path_rel is not given.

        Returns:
            str: The path of the file in the dedupe pool if successful, empty string if failed.
        """
        source_path0 = Path(source_path)
        if not dest_path_rel:
            if dest_dir_rel:
                dest_dir_rel = dest_dir_rel.strip("/")
                dest_path_rel = f"{dest_dir_rel}/{source_path0.name}"
            else:
                dest_path_rel = source_path0.name
        # dest_path_rel is the relative path inside the pool
        # Check if the file already exists in the dedupe pool
        existing_path = self.path_check(source_path)
        if existing_path:
            print(f"File already exists in dedupe pool: {existing_path}")
            return existing_path
        # New content: pick a destination name that does not collide.
        dest_path_rel = self._relpath_find_new(dest_path_rel)
        dest_path = self.path / dest_path_rel
        dest_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            shutil.copy2(source_path, dest_path)
            print(f"File copied successfully to: {dest_path}")
        except IOError as e:
            raise Exception(f"Unable to add file {source_path} to {dest_path}.\n{e}")
        # Add the new file to the hash dictionary
        relative_path = dest_path.relative_to(self.path).as_posix()
        file_hash = self._calculate_md5(dest_path.as_posix())
        self.hash_dict[file_hash] = relative_path
        self.export_dict()
        return relative_path

    def path_find_full(self, path: str = "", file_hash: str = "") -> str:
        """Like :meth:`path_find` but returns the absolute path inside the pool."""
        mypath = self.path_find(path=path, file_hash=file_hash)
        return str(self.path / mypath)

    def _relpath_find_new(self, rel_path: str) -> str:
        """
        find full path which doesn't exist yet

        Returns *rel_path* unchanged when it is free; otherwise appends
        ``_2``, ``_3``, ... before the extension until an unused name is found.
        """
        if not rel_path:
            raise ValueError("rel_path cannot be empty")
        # Construct the full path
        full_path = self.path / rel_path
        # Check if the file exists
        if not full_path.exists():
            return rel_path
        rel_path_obj = Path(rel_path)
        rel_path_no_extension = str(rel_path_obj.with_suffix(''))
        rel_path_extension = rel_path_obj.suffix
        new_rel_path = f"{rel_path_no_extension}{rel_path_extension}"
        # Check if filename exists in the dedupe pool
        counter = 2
        new_full_path = self.path / new_rel_path
        while new_full_path.exists():
            # Update path for the next iteration
            new_rel_path = f"{rel_path_no_extension}_{counter}{rel_path_extension}"
            new_full_path = self.path / new_rel_path
            counter += 1
        return new_rel_path

    def svg_get(self, name: str) -> str:
        """
        Get the SVG content based on the name (in lowercase) and match on the SVG name only.

        Args:
            name (str): The name of the SVG file to retrieve.

        Returns:
            str: The content of the SVG file if found, empty string otherwise.

        Raises:
            Exception: if no matching SVG exists or the file cannot be read.
        """
        name = Path(name).name.lower()
        # NOTE(review): already lowercased above — this second lower() is redundant.
        name = name.lower()
        if not name.endswith('.svg'):
            name += '.svg'
        for _, stored_path in self.hash_dict.items():
            stored_path_obj = Path(stored_path)
            if stored_path_obj.name.lower() == name:
                full_path = self.path / stored_path
                try:
                    with open(full_path, 'r') as svg_file:
                        return svg_file.read()
                except IOError as e:
                    raise Exception(f"Error reading SVG file {full_path}: {e}")
        raise Exception(f"SVG file '{name}' not found in the dedupe pool.")

View File

@@ -0,0 +1,308 @@
import hashlib
import os
import shutil
from urllib.parse import urlparse
import redis
import requests
from bs4 import BeautifulSoup
# from typing import Dict
from colorama import Fore
# from herotools.extensions import check_and_add_extension
from web.deduper import Deduper
# File suffixes treated as static media assets when rewriting links.
# NOTE: despite the "image_movie" name this also includes '.mp3' (audio).
image_movie_extensions = (
    '.jpg',
    '.jpeg',
    '.png',
    '.gif',
    '.bmp',
    '.webp',
    '.mp3',
    '.mp4',
    '.avi',
    '.mov',
    '.wmv',
    '.flv',
    '.webm',
)
# import pudb; pudb.set_trace()
class HTMLTemplateConverter:
    """Convert a tree of static HTML files into Jinja templates.

    Referenced assets (css, js, images, inline svg, downloads) are copied or
    downloaded into a deduplicated static pool and the HTML references are
    rewritten to point at the pooled copies.
    """

    def __init__(
        self,
        src_dir: str,
        dest_dir: str,
        static_dir: str = '',
        reset: bool = False,
    ):
        self.src_dir = src_dir
        self.dest_dir = dest_dir
        if reset and os.path.exists(self.dest_dir):
            print(' - reset')
            shutil.rmtree(self.dest_dir)
        if static_dir == '':
            static_dir = f'{dest_dir}/static'
        self.static_dir = static_dir
        os.makedirs(self.dest_dir, exist_ok=True)
        os.makedirs(self.static_dir, exist_ok=True)
        # Create a symlink called 'static' in dest_dir pointing to
        # self.static_dir so templates can reference assets as 'static/...'.
        static_link_path = os.path.join(self.dest_dir, 'static')
        if not os.path.exists(static_link_path):
            os.symlink(self.static_dir, static_link_path)
        self.deduper_static = Deduper(static_dir)
        if reset:
            self.deduper_static.load_assets()
        # Redis caches URL -> local temp-file path for downloads.
        self.redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
        self.cache_expiration = 3600  # 1 hour

    def download_file(self, myurl: str, remove: bool = False) -> str:
        """Download *myurl* into /tmp/files (Redis-cached) and return the path.

        When *remove* is True the temp file and its cache entry are deleted
        after the path is resolved.

        Raises:
            Exception: when the HTTP response is not 200.
        """
        # Check if the file is already in Redis cache
        key = f'web.download.{myurl}'
        cached_path = self.redis_client.get(key)
        if cached_path:
            print(f' - download cached {myurl}')
            # NOTE(review): the cached temp file may have been cleaned from
            # /tmp since it was recorded — confirm staleness handling.
            temp_path = cached_path.decode('utf-8')
        else:
            print(f' - download {myurl}')
            response = requests.get(myurl)
            if response.status_code == 200:
                # URLs with query strings get an MD5-based local filename;
                # plain URLs keep their basename.
                if '?' in myurl:
                    local_filename = hashlib.md5(
                        myurl.encode('utf-8')
                    ).hexdigest()
                else:
                    url_path = urlparse(myurl).path
                    base_name, extension = os.path.splitext(
                        os.path.basename(url_path)
                    )
                    local_filename = base_name + extension
                # Download to temporary directory
                temp_dir = os.path.join('/tmp/files')
                os.makedirs(temp_dir, exist_ok=True)
                temp_path = os.path.join(temp_dir, local_filename)
                with open(temp_path, 'wb') as f:
                    f.write(response.content)
                # Update Redis cache
                self.redis_client.setex(key, self.cache_expiration, temp_path)
            else:
                raise Exception(f'ERROR: failed to download {myurl}')
        if remove:
            os.remove(temp_path)
            self.redis_client.delete(key)
        return temp_path

    def add_to_static(self, file_path: str, dest_dir_rel: str = '') -> str:
        """
        Add path to the static directory.
        Returns the path as it needs to be used in the template for the file link.
        """
        # Check if the file path exists; fall back to resolving against src_dir.
        if not os.path.exists(file_path):
            file_path2 = f'{self.src_dir}/{file_path}'
            if not os.path.exists(file_path2):
                print(
                    f"{Fore.RED}ERROR: File '{file_path}' or '{file_path2}' does not exist.{Fore.RESET}"
                )
                # Missing files are reported via a sentinel path, not raised.
                return f'error/{file_path2}'
            else:
                file_path = file_path2
        # Return the existing pooled copy when the content is already known.
        file_dedupe_location = self.deduper_static.path_check(file_path)
        if file_dedupe_location:
            return file_dedupe_location
        return self.deduper_static.add(
            source_path=file_path, dest_dir_rel=dest_dir_rel
        )

    def add_file(
        self,
        src_file_path: str,
        file_path: str,
        remove: bool = False,
        dest_dir_rel: str = '',
    ) -> str:
        """Resolve *file_path* (URL or local path, possibly relative to the
        referencing HTML file *src_file_path*) into the static pool and
        return the link to use in the template.

        NOTE(review): the *remove* parameter is accepted and propagated to
        the recursive call but never acted on in this method — confirm
        whether downloaded temp files should be deleted here.
        """
        print(
            f' - addfile {file_path} for dest_dir_rel:{dest_dir_rel}\n from out of file: {src_file_path}'
        )
        # Google font links are kept as external references.
        if 'fonts.googleapis.com' in file_path:
            return file_path
        if file_path.startswith('http://') or file_path.startswith('https://'):
            try:
                temp_path = self.download_file(file_path)
            except Exception:
                print(
                    f"{Fore.RED}ERROR DOWNLOAD: File '{file_path}'.{Fore.RESET}"
                )
                return f'/error/download/{file_path}'
            # Re-enter with the downloaded local file; no source HTML context.
            src_file_path = ''
            r = self.add_file(
                src_file_path, temp_path, remove=True, dest_dir_rel=dest_dir_rel
            )
            return f'{r}'
        else:
            if not os.path.exists(file_path):
                # now we need to go relative in relation to the src_file_path
                file_path2 = os.path.abspath(
                    os.path.join(os.path.dirname(src_file_path), file_path)
                )
                if os.path.exists(file_path2):
                    file_path = file_path2
                else:
                    print(
                        f"{Fore.RED}ERROR: File '{file_path}' or `{file_path2}` does not exist.{Fore.RESET}"
                    )
                    return f'/error/{file_path}'
            # Check if the file already exists in the dedupe pool.
            existing_path = self.deduper_static.path_check(file_path)
            if existing_path:
                return f'/static/{existing_path}'
            return self.add_to_static(file_path, dest_dir_rel=dest_dir_rel)

    def convert(self) -> None:
        """Walk src_dir and rewrite every .html file into dest_dir as a
        Jinja template with pooled, deduplicated asset links."""
        os.makedirs(self.dest_dir, exist_ok=True)
        for root, _, files in os.walk(self.src_dir):
            for file in files:
                if file.endswith('.html'):
                    src_file_path = os.path.abspath(os.path.join(root, file))
                    rel_path = os.path.relpath(src_file_path, self.src_dir)
                    dest_file_path = os.path.join(self.dest_dir, rel_path)
                    os.makedirs(os.path.dirname(dest_file_path), exist_ok=True)
                    with open(
                        src_file_path, 'r', encoding='utf-8'
                    ) as html_file:
                        html_content = html_file.read()
                    soup = BeautifulSoup(html_content, 'html.parser')
                    # Inline <svg> elements: pool each one as a file and
                    # replace the element with a Jinja include.
                    svg_elements = soup.find_all('svg')
                    for svg in svg_elements:
                        svg_file_path = '/tmp/my.svg'
                        with open(
                            svg_file_path, 'w', encoding='utf-8'
                        ) as svg_file:
                            svg_file.write(str(svg))
                        svg_path = self.add_file(
                            src_file_path,
                            file_path=svg_file_path,
                            dest_dir_rel='svg',
                        )
                        svg_file_path_in_out = os.path.join(
                            'static', 'svg', os.path.basename(svg_path)
                        )
                        svg.replace_with(
                            f"{{% include '{svg_file_path_in_out}' %}}"
                        )
                        os.remove(svg_file_path)
                    for link in soup.find_all('link', href=True):
                        href = link['href']
                        # Strip any query string for extension matching.
                        base_href = href.split('?')[0] if '?' in href else href
                        if base_href.endswith('.css'):
                            new_href = self.add_file(
                                src_file_path, base_href, dest_dir_rel='css'
                            )
                            link['href'] = new_href
                        else:
                            # Check if base_href is an image or movie file
                            if base_href.lower().endswith(
                                image_movie_extensions
                            ):
                                new_src = self.add_file(
                                    src_file_path, base_href, dest_dir_rel='img'
                                )
                            else:
                                # Other file types: keep the query string for
                                # remote URLs, use the stripped path otherwise.
                                if href.startswith(
                                    'http://'
                                ) or href.startswith('https://'):
                                    new_src = self.add_file(src_file_path, href)
                                else:
                                    new_src = self.add_file(
                                        src_file_path, base_href
                                    )
                            # BUGFIX: Tag.has_key() was removed in Beautiful
                            # Soup 4 — use has_attr() instead.
                            if link.has_attr('src'):
                                link['src'] = new_src
                            elif link.has_attr('href'):
                                link['href'] = new_src
                    for script in soup.find_all('script', src=True):
                        src = script['src']
                        src_href = src.split('?')[0] if '?' in src else src
                        if src_href.endswith('.js'):
                            new_src = self.add_file(
                                src_file_path, src_href, dest_dir_rel='js'
                            )
                            script['src'] = new_src
                    for img in soup.find_all('img', src=True):
                        src = img['src']
                        new_src = self.add_file(
                            src_file_path, src, dest_dir_rel='img'
                        )
                        img['src'] = new_src
                    jinja_template = str(soup.prettify())
                    with open(
                        dest_file_path, 'w', encoding='utf-8'
                    ) as dest_file:
                        dest_file.write(jinja_template)
# Example usage
#
# converter = HTMLTemplateConverter("source_directory", "destination_directory")
# converter.convert_html_to_jinja()
def new(
    src_dir: str, dest_dir: str, static_dir: str = '', reset: bool = False
) -> HTMLTemplateConverter:
    """Build a converter for *src_dir* -> *dest_dir*, run it, and return it."""
    converter = HTMLTemplateConverter(
        src_dir,
        dest_dir,
        static_dir=static_dir,
        reset=reset,
    )
    converter.convert()
    return converter