Compare commits
No commits in common. "d2952c3ac07f307a5fcb7c07649e2feb7b779451" and "4f9cac6f085b776f9280b151bf6d53d6e7126231" have entirely different histories.
d2952c3ac0...4f9cac6f08
analytics.py (68 changed lines)
@@ -114,13 +114,10 @@ def parse_timestamp(ts_str):
         # If it's some other ValueError, re-raise it.
         raise
 
-def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id, cached, method="GET"):
+def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id, cached):
     """Insert a file access record into the database and prune entries older than 10 minutes,
-    and track today’s files separately in folder_today. HTTP method is *not* persisted to the
-    database; it is kept only in the in-memory buffer to distinguish HEAD vs GET for the
-    recent-logs feed."""
+    and track today’s files separately in folder_today."""
     global file_access_temp, folder_today, folder_yesterday
-    http_method = (method or "GET").upper()
 
     # Create a timezone-aware timestamp
     now = datetime.now(timezone.utc).astimezone()
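Note on the removed `method` parameter: the left-hand side normalizes whatever the caller passes before keeping it in the in-memory row. A minimal sketch of that one-line normalization (`normalize_method` is a hypothetical name; the original inlines this as `http_method = (method or "GET").upper()`):

```python
def normalize_method(method=None):
    # mirrors the removed line: (method or "GET").upper()
    return (method or "GET").upper()

print(normalize_method())        # GET, the default when the caller omits it
print(normalize_method("get"))   # GET, case-normalized
print(normalize_method("HEAD"))  # HEAD, preserved so the feed can filter it out
```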
@@ -195,7 +192,6 @@ def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id,
 
     # Finally, insert the new access at the top of the temp log
     # Keep existing columns stable; append raw geo data for map use.
-    # Keep method only in memory for the 10-minute feed (DB remains untouched by method).
     file_access_temp.insert(0, [
         iso_ts,    # 0 timestamp
         rel_path,  # 1 path
@@ -208,8 +204,7 @@ def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id,
         city,      # 8 city
         country,   # 9 country
         lat,       # 10 latitude
-        lon,       # 11 longitude
-        http_method  # 12 http method (in-memory only)
+        lon        # 11 longitude
     ])
 
     return True
@@ -254,55 +249,20 @@ def return_folder_yesterday():
 
 
 def return_file_access():
-    """Return recent audio file access logs from memory (the last 10 minutes)."""
+    """Return recent file access logs from memory (the last 10 minutes)."""
     global file_access_temp
-
-    def is_audio(entry):
-        """Check whether a log entry references an audio file."""
-        mime_val = (entry[3] or "").lower()
-        if mime_val.startswith("audio/"):
-            return True
-        path_val = (entry[1] or "").lower()
-        return path_val.endswith((
-            ".mp3", ".wav", ".flac", ".m4a", ".aac",
-            ".ogg", ".wma", ".aiff", ".alac", ".opus"
-        ))
-
-    def is_get(entry):
-        """Allow only GET requests in the recent feed to avoid HEAD-prefetch noise."""
-        method_val = "GET"
-        if len(entry) > 12 and entry[12]:
-            method_val = str(entry[12]).upper()
-        return method_val != "HEAD"
-
-    def has_bytes(entry):
-        """Ignore zero-byte requests (e.g., Apple prefetches asking for 0 bytes)."""
-        try:
-            size_val = entry[2]
-            if size_val is None:
-                return False
-            # handle str or numeric
-            size_num = float(size_val)
-            return size_num > 0
-        except Exception:
-            return False
-
-    if not file_access_temp:
+    if file_access_temp:
+        # Create a timezone-aware cutoff time
+        cutoff_time = datetime.now(timezone.utc).astimezone() - timedelta(minutes=10)
+        # Only keep entries with timestamps greater than or equal to cutoff_time
+        file_access_temp[:] = [
+            entry for entry in file_access_temp
+            if datetime.fromisoformat(entry[0]) >= cutoff_time
+        ]
+        return file_access_temp
+    else:
         return []
-
-    # Create a timezone-aware cutoff time
-    cutoff_time = datetime.now(timezone.utc).astimezone() - timedelta(minutes=10)
-    # Only keep entries with timestamps greater than or equal to cutoff_time
-    file_access_temp[:] = [
-        entry for entry in file_access_temp
-        if datetime.fromisoformat(entry[0]) >= cutoff_time
-    ]
-    audio_entries = [
-        entry for entry in file_access_temp
-        if is_audio(entry) and is_get(entry) and has_bytes(entry)
-    ]
-    return audio_entries
 
 
 def return_file_access_with_geo():
     """Return recent file access logs with geographic coordinates from the database."""
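For reference, the three helpers removed above gate the recent feed on mime type or file extension, HTTP method, and payload size. A condensed, self-contained sanity check of that behavior, with a shortened extension list and synthetic entries padded to the 13-column layout documented in the earlier hunks:

```python
# Condensed versions of the removed helpers; entry layout per the diff:
# [0]=iso timestamp, [1]=path, [2]=size, [3]=mime, [12]=http method.
def is_audio(entry):
    mime_val = (entry[3] or "").lower()
    if mime_val.startswith("audio/"):
        return True
    return (entry[1] or "").lower().endswith((".mp3", ".wav", ".flac"))

def is_get(entry):
    method_val = str(entry[12]).upper() if len(entry) > 12 and entry[12] else "GET"
    return method_val != "HEAD"

def has_bytes(entry):
    try:
        return entry[2] is not None and float(entry[2]) > 0
    except Exception:
        return False

pad = [None] * 8  # indexes 4-11 are unused by the filters
entries = [
    ["2024-01-01T12:00:00+00:00", "a/track.mp3", 4200000, "audio/mpeg", *pad, "GET"],
    ["2024-01-01T12:00:01+00:00", "a/track.mp3", 4200000, "audio/mpeg", *pad, "HEAD"],
    ["2024-01-01T12:00:02+00:00", "a/track.mp3", 0, "audio/mpeg", *pad, "GET"],
    ["2024-01-01T12:00:03+00:00", "doc/readme.txt", 120, "text/plain", *pad, "GET"],
]
kept = [e for e in entries if is_audio(e) and is_get(e) and has_bytes(e)]
print(len(kept))  # 1: only the non-empty audio GET survives
```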
app.py (180 changed lines)
@@ -35,7 +35,6 @@ import helperfunctions as hf
 import search_db_analyzer as sdb
 import fnmatch
 import openpyxl
-from collections import OrderedDict
 
 app_config = auth.return_app_config()
 BASE_DIR = os.path.realpath(app_config['BASE_DIR'])
@@ -45,27 +44,6 @@ cache_image = diskcache.Cache('./filecache_image', size_limit= app_config['filec
 cache_video = diskcache.Cache('./filecache_video', size_limit= app_config['filecache_size_limit_video'] * 1024**3)
 cache_other = diskcache.Cache('./filecache_other', size_limit= app_config['filecache_size_limit_other'] * 1024**3)
 
-_logged_request_ids = OrderedDict()
-_logged_request_ids_lock = threading.Lock()
-_LOGGED_REQUEST_IDS_MAX = 2048
-
-
-def _is_duplicate_request(req_id: str) -> bool:
-    if not req_id:
-        return False
-    with _logged_request_ids_lock:
-        return req_id in _logged_request_ids
-
-
-def _mark_request_logged(req_id: str):
-    if not req_id:
-        return
-    with _logged_request_ids_lock:
-        _logged_request_ids[req_id] = None
-        _logged_request_ids.move_to_end(req_id)
-        if len(_logged_request_ids) > _LOGGED_REQUEST_IDS_MAX:
-            _logged_request_ids.popitem(last=False)
-
 app = Flask(__name__)
 app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
 
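The `_logged_request_ids` structure removed here is a bounded "seen set" with LRU eviction: `OrderedDict` keeps insertion order, `move_to_end` refreshes an id on a repeat sighting, and `popitem(last=False)` drops the oldest entry once the cap is hit. A standalone sketch of the idiom, with illustrative names and a tiny cap of 4 instead of the 2048 used above:

```python
from collections import OrderedDict
import threading

seen = OrderedDict()
lock = threading.Lock()
MAX_IDS = 4  # the removed code used 2048

def mark(req_id):
    with lock:
        seen[req_id] = None           # value unused; the dict acts as a set
        seen.move_to_end(req_id)      # refresh recency on repeat sightings
        if len(seen) > MAX_IDS:
            seen.popitem(last=False)  # evict the oldest id

for i in range(6):
    mark(f"req-{i}")
print(list(seen))  # ['req-2', 'req-3', 'req-4', 'req-5']: the oldest two were evicted
```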
@@ -636,14 +614,13 @@ def human_readable_size(num_bytes):
         num /= 1024
 
 @app.route('/icon/<string:size>.png')
-@app.route('/icons/<string:size>.png')  # legacy path
 def serve_resized_icon(size):
     cached_image_bytes = get_cached_image(size)
     response = send_file(
         io.BytesIO(cached_image_bytes),
         mimetype='image/png'
     )
-    response.headers['Cache-Control'] = 'public, max-age=86400, immutable'
+    response.headers['Cache-Control'] = 'public, max-age=86400'
     return response
 
 @app.route('/custom_logo/<string:filename>.png')
@@ -727,10 +704,7 @@ def api_browse(subpath):
 
 
     root, *relative_parts = subpath.split('/')
-    base_path = session.get('folders', {}).get(root)
-    if not base_path:
-        app.logger.warning(f"Requested root '{root}' not found in session folders")
-        return jsonify({'error': 'Directory not found'}), 404
+    base_path = session['folders'][root]
     directory = os.path.join(base_path, *relative_parts)
 
     playfile = None
@@ -792,9 +766,6 @@ def serve_file(subpath):
     if not os.path.isfile(full_path):
         app.logger.error(f"File not found: {full_path}")
         return "File not found", 404
 
-    filesize = os.path.getsize(full_path)
-    filename = os.path.basename(full_path)
-
     # 2) Prep request info
     mime, _ = mimetypes.guess_type(full_path)
@@ -803,37 +774,11 @@ def serve_file(subpath):
     is_audio_get = mime.startswith('audio/') and request.method == 'GET'
     ip_address = request.remote_addr
     user_agent = request.headers.get('User-Agent')
-    range_header = request.headers.get('Range', '')
-    req_id = request.args.get('req') or request.headers.get('X-Request-Id')
 
-    def is_range_prefetch(header, ua):
-        """
-        Detect tiny range requests (common Apple prefetch) so we can skip logging duplicates.
-        """
-        if not header:
-            return False
-        try:
-            if not header.lower().startswith('bytes='):
-                return False
-            range_spec = header.split('=', 1)[1]
-            start_str, end_str = range_spec.split('-', 1)
-            if not start_str.isdigit() or not end_str.isdigit():
-                return False
-            start = int(start_str)
-            end = int(end_str)
-            length = end - start + 1
-            if length <= 1024 and start == 0:
-                return True
-        except Exception:
-            return False
-        return False
-
-    # Logging: log every client GET (cached or not), but skip CDN prefetches (X-Cache-Request)
-    # and HEAD probes to avoid double-counting. Also skip tiny range-prefetches (e.g., Apple).
+    # skip logging on cache hits or on audio GETs (per your rules)
     do_log = (
         not is_cache_request  # skip if upstream CDN asked us to cache
-        and request.method != 'HEAD'
-        and not is_range_prefetch(range_header, user_agent)
+        and not is_audio_get  # skip audio GETs
     )
 
     # 3) Pick cache
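The removed `is_range_prefetch` helper only flags ranges that start at byte 0 and span at most 1 KiB; anything else is treated as a real read and logged. A condensed, runnable restatement with sample headers (the `ua` parameter was unused in the original and is dropped here):

```python
def is_range_prefetch(header):
    # condensed copy of the helper removed above
    if not header or not header.lower().startswith('bytes='):
        return False
    try:
        start_str, end_str = header.split('=', 1)[1].split('-', 1)
        if not (start_str.isdigit() and end_str.isdigit()):
            return False
        start, end = int(start_str), int(end_str)
        return start == 0 and (end - start + 1) <= 1024
    except Exception:
        return False

print(is_range_prefetch("bytes=0-1"))      # True: classic 2-byte probe
print(is_range_prefetch("bytes=0-1023"))   # True: exactly 1 KiB from offset 0
print(is_range_prefetch("bytes=0-1024"))   # False: larger than the 1 KiB cutoff
print(is_range_prefetch("bytes=500-999"))  # False: does not start at byte 0
print(is_range_prefetch("bytes=0-"))       # False: open-ended range
```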
@@ -846,16 +791,6 @@ def serve_file(subpath):
     else:
         cache = cache_other
 
-    # Plain HEAD requests (without X-Cache-Request) should not populate the cache.
-    # They are just probes and would otherwise turn the first real GET into a “cached hit”.
-    if request.method == 'HEAD' and not is_cache_request:
-        response = make_response('', 200)
-        response.headers['Content-Type'] = mime
-        response.headers['Content-Length'] = str(filesize)
-        response.headers['Accept-Ranges'] = 'bytes'
-        response.headers['Cache-Control'] = 'public, max-age=86400'
-        return response
-
     # 4) Image and thumbnail handling first
     if mime.startswith('image/'):
         small = request.args.get('thumbnail') == 'true'
@@ -909,18 +844,15 @@ def serve_file(subpath):
         response.headers['Cache-Control'] = 'public, max-age=86400'
 
         if do_log and not small:
-            if not _is_duplicate_request(req_id):
-                a.log_file_access(
-                    cache_key,
-                    os.path.getsize(file_path),
-                    mime,
-                    ip_address,
-                    user_agent,
-                    session['device_id'],
-                    cached_hit,
-                    request.method
-                )
-                _mark_request_logged(req_id)
+            a.log_file_access(
+                cache_key,
+                os.path.getsize(file_path),
+                mime,
+                ip_address,
+                user_agent,
+                session['device_id'],
+                cached_hit
+            )
         return response
 
     # 5) Non-image branch: check if cached, otherwise create partial cache file
@@ -939,36 +871,12 @@ def serve_file(subpath):
         cache_key = hashlib.md5(subpath.encode('utf-8')).hexdigest()
         cache_dir = os.path.join(cache.directory, cache_key[:2])
         os.makedirs(cache_dir, exist_ok=True)
-        fd, cache_file_path = tempfile.mkstemp(
-            prefix=f"{cache_key}_",
-            suffix=".tmp",
-            dir=cache_dir
-        )
-        os.close(fd)
-
-        # Write an initial chunk synchronously so the temp file exists with data
-        initial_bytes = 0
-        try:
-            with open(full_path, 'rb') as source, open(cache_file_path, 'wb') as dest:
-                chunk = source.read(1024 * 1024)  # 1MB
-                if chunk:
-                    dest.write(chunk)
-                    dest.flush()
-                    initial_bytes = len(chunk)
-        except Exception as e:
-            app.logger.error(f"Failed to prime cache file for {subpath}: {e}")
-            if os.path.exists(cache_file_path):
-                try:
-                    os.remove(cache_file_path)
-                except:
-                    pass
-            abort(503, description="Service temporarily unavailable - cache initialization failed")
-
+        cache_file_path = os.path.join(cache_dir, f"{cache_key}.tmp")
         # Start copying to our cache file in chunks
-        def copy_to_cache_chunked(start_offset):
+        def copy_to_cache_chunked():
             try:
-                with open(full_path, 'rb') as source, open(cache_file_path, 'ab') as dest:
-                    source.seek(start_offset)
+                with open(full_path, 'rb') as source, open(cache_file_path, 'wb') as dest:
                     while True:
                         chunk = source.read(1024 * 1024)  # 1MB chunks
                         if not chunk:
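One practical difference in this hunk: `tempfile.mkstemp` (left) yields a unique file name per call, so two concurrent requests caching the same key cannot clobber each other's partial file, while the fixed `f"{cache_key}.tmp"` name (right) can. A small demonstration of the mkstemp side; the prefix is a placeholder, and the file is created in the current directory and removed again:

```python
import os
import tempfile

fd, path = tempfile.mkstemp(prefix="abc123_", suffix=".tmp", dir=".")
os.close(fd)  # keep the unique name, release the descriptor, as the removed code does
print(os.path.basename(path))  # e.g. abc123_w8qz14xy.tmp, random middle part per call
os.remove(path)
```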
@@ -978,11 +886,6 @@ def serve_file(subpath):
 
             # Once complete, register with diskcache for proper management
             try:
-                if subpath in cache:
-                    if os.path.exists(cache_file_path):
-                        os.remove(cache_file_path)
-                    app.logger.info(f"Cache already populated for {subpath}, skipped duplicate registration")
-                    return
                 with open(cache_file_path, 'rb') as f:
                     cache.set(subpath, f, read=True)
                 # Remove our temp file since diskcache now has it
@@ -1001,11 +904,29 @@ def serve_file(subpath):
                     pass
 
         # Start the background copy
-        cache_thread = threading.Thread(target=copy_to_cache_chunked, args=(initial_bytes,), daemon=True)
+        cache_thread = threading.Thread(target=copy_to_cache_chunked, daemon=True)
         cache_thread.start()
-        file_path = cache_file_path
+        # Wait for initial data to be written
+        max_wait = 5.0        # Maximum 5 seconds
+        wait_interval = 0.05  # Check every 50ms
+        elapsed = 0
+
+        while elapsed < max_wait:
+            if os.path.exists(cache_file_path) and os.path.getsize(cache_file_path) > 0:
+                file_path = cache_file_path
+                break
+            time.sleep(wait_interval)
+            elapsed += wait_interval
+        else:
+            # Cache file not ready - abort
+            app.logger.error(f"Cache file not created in time for {subpath}")
+            abort(503, description="Service temporarily unavailable - cache initialization failed")
 
     # 6) Build response for non-image
+    filesize = os.path.getsize(full_path)
+    filename = os.path.basename(full_path)
+
     if as_attachment:
         download_name = filename
         mimetype = 'application/octet-stream'
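The added wait loop on the right relies on Python's `while`/`else`: the `else` block runs only when the loop condition goes false without a `break`, i.e. when the cache file never showed up. A minimal illustration of that control flow, using a hypothetical `wait_for` helper:

```python
import time

def wait_for(predicate, max_wait=0.3, interval=0.1):
    elapsed = 0
    while elapsed < max_wait:
        if predicate():
            return True   # mirrors the `break` once the file is ready
        time.sleep(interval)
        elapsed += interval
    else:
        return False      # mirrors the abort(503) timeout branch

print(wait_for(lambda: True))   # True: ready on the first check
print(wait_for(lambda: False))  # False: loop exhausted, else branch taken
```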
@@ -1031,10 +952,7 @@ def serve_file(subpath):
                 # No data available yet, wait a bit
                 time.sleep(0.1)
 
-    if request.method == 'HEAD':
-        response = make_response('', 200)
-    else:
-        response = make_response(generate())
+    response = make_response(generate())
     response.headers['Content-Type'] = mimetype
     response.headers['Content-Length'] = str(filesize)
     response.headers['Accept-Ranges'] = 'bytes'
@@ -1060,24 +978,18 @@ def serve_file(subpath):
         response.headers['Content-Disposition'] = 'inline'
 
     response.headers['Cache-Control'] = 'public, max-age=86400'
 
-    if request.method == 'HEAD':
-        response.set_data(b'')
-
     # 7) Logging
     if do_log:
-        if not _is_duplicate_request(req_id):
-            a.log_file_access(
-                subpath,
-                filesize,
-                mime,
-                ip_address,
-                user_agent,
-                session['device_id'],
-                cached_hit,
-                request.method
-            )
-            _mark_request_logged(req_id)
+        a.log_file_access(
+            subpath,
+            filesize,
+            mime,
+            ip_address,
+            user_agent,
+            session['device_id'],
+            cached_hit
+        )
     return response
auth.py (8 changed lines)
@@ -152,17 +152,17 @@ def require_secret(f):
         for token_in_session in session.get('valid_tokens', []):
             try:
                 token_item = decode_token(token_in_session)
-                # print(f"DEBUG: Decoded token: {token_item}")
+                print(f"DEBUG: Decoded token: {token_item}")
                 for folder_info in token_item.get('folders', []):
-                    # print(f"DEBUG: Adding folder '{folder_info['foldername']}' -> '{folder_info['folderpath']}'")
+                    print(f"DEBUG: Adding folder '{folder_info['foldername']}' -> '{folder_info['folderpath']}'")
                     session['folders'][folder_info['foldername']] = folder_info['folderpath']
             except Exception as e:
                 print(f"ERROR: Failed to process token: {e}")
 
         # Mark session as modified to ensure it's saved
         session.modified = True
-        # print(f"DEBUG: Final session['folders'] keys: {list(session['folders'].keys())}")
-        # print(f"DEBUG: session['valid_tokens']: {session.get('valid_tokens', [])}")
+        print(f"DEBUG: Final session['folders'] keys: {list(session['folders'].keys())}")
+        print(f"DEBUG: session['valid_tokens']: {session.get('valid_tokens', [])}")
 
         # 6) If we have folders, proceed; otherwise show index
         if session['folders']:
@@ -16,7 +16,8 @@ services:
         propagation: rshared
     environment:
       - FLASK_APP=app.py
-      - APP_ENV=${APP_ENV:-production}
+      - FLASK_ENV=development
+      - FLASK_DEBUG=1
     networks:
       - traefik
     labels:
@@ -37,15 +38,10 @@ services:
       # Internal port
       - "traefik.http.services.${CONTAINER_NAME}.loadbalancer.server.port=5000"
 
+    # Dev server with autoreload for live code changes
     command: >
       sh -c "pip install -r requirements.txt &&
-             if [ \"$APP_ENV\" = \"development\" ]; then
-               export FLASK_ENV=development FLASK_DEBUG=1 &&
-               flask run --host=0.0.0.0 --port=5000 --reload;
-             else
-               export FLASK_ENV=production FLASK_DEBUG=0 &&
-               gunicorn -w 1 -k eventlet -b 0.0.0.0:5000 app:app;
-             fi"
+             flask run --host=0.0.0.0 --port=5000 --reload"
 
 
 networks:
@@ -24,15 +24,6 @@ if ! command -v jq >/dev/null 2>&1; then
     exit 1
 fi
 
-# Ensure an NFS mount helper is available before doing any work.
-if ! command -v mount.nfs >/dev/null 2>&1 && \
-   ! command -v mount.nfs4 >/dev/null 2>&1 && \
-   [ ! -x /sbin/mount.nfs ] && [ ! -x /usr/sbin/mount.nfs ]; then
-    echo "[ERROR] NFS client utilities are missing (mount.nfs/mount.nfs4 not found)."
-    echo "[ERROR] Install the 'nfs-common' package on this host and re-run the script."
-    exit 1
-fi
-
# extract the server names at the top level:
SERVERS=($(jq -r 'keys[]' "$CONFIG_FILE"))
 
@@ -48,9 +39,7 @@ is_tunnel_active() {
 # Check if the given mount point is currently mounted
 is_nfs_mounted() {
     local mount_point=$1
-    local fstype
-    fstype=$(findmnt -rn -T "${mount_point}" -o FSTYPE 2>/dev/null)
-    [[ "${fstype}" == nfs* ]]
+    mount | grep -q "${mount_point}"
 }
 
 # Check if the mount point directory is accessible (i.e. can be listed)
@@ -59,33 +48,6 @@ is_mount_accessible() {
     ls -1 "${mount_point}" >/dev/null 2>&1
 }
 
-# Run the NFS mount command and verify it really succeeded.
-mount_nfs_share() {
-    local mount_point=$1
-    local nfs_share=$2
-    local local_port=$3
-    local mount_opts="ro,port=${local_port},nolock,soft,timeo=5,retrans=3"
-    local mount_output
-
-    if ! mount_output=$(sudo mount -t nfs -o "${mount_opts}" 127.0.0.1:"${nfs_share}" "${mount_point}" 2>&1); then
-        echo "[ERROR] Failed to mount ${nfs_share} at ${mount_point}: ${mount_output}"
-        return 1
-    fi
-
-    if ! is_nfs_mounted "${mount_point}"; then
-        echo "[ERROR] Mount command returned success but ${mount_point} is not an active NFS mount."
-        echo "[DEBUG] Mount output: ${mount_output}"
-        return 1
-    fi
-
-    if ! is_mount_accessible "${mount_point}"; then
-        echo "[ERROR] ${mount_point} is mounted but not accessible (check tunnel/NFS server)."
-        return 1
-    fi
-
-    echo "[SUCCESS] NFS share mounted successfully at ${mount_point}."
-}
-
 ###############################################################################
 # Main Loop: Process Each Server and Its Mount Points
 ###############################################################################
@@ -142,22 +104,25 @@ for server in "${SERVERS[@]}"; do
     if is_nfs_mounted "${MOUNT_POINT}"; then
       if ! is_mount_accessible "${MOUNT_POINT}"; then
         echo "[WARNING] Mount point ${MOUNT_POINT} is not accessible. Attempting to remount..."
-        if sudo umount "${MOUNT_POINT}"; then
+        sudo umount "${MOUNT_POINT}"
         sleep 2
-        if mount_nfs_share "${MOUNT_POINT}" "${NFS_SHARE}" "${LOCAL_PORT}"; then
-          echo "[SUCCESS] Remounted successfully and folder is now accessible."
-        else
-          echo "[ERROR] Remount failed, folder still not accessible."
-        fi
+        sudo mount -t nfs -o ro,port="${LOCAL_PORT}",nolock,soft,timeo=5,retrans=3 127.0.0.1:"${NFS_SHARE}" "${MOUNT_POINT}"
+        if is_mount_accessible "${MOUNT_POINT}"; then
+          echo "[SUCCESS] Remounted successfully and folder is now accessible."
         else
-          echo "[ERROR] Failed to unmount ${MOUNT_POINT} during remount attempt."
+          echo "[ERROR] Remount failed, folder still not accessible."
         fi
       else
         echo "[INFO] NFS share is mounted and accessible at ${MOUNT_POINT}."
       fi
     else
       echo "[INFO] NFS share is not mounted at ${MOUNT_POINT}. Attempting to mount..."
-      mount_nfs_share "${MOUNT_POINT}" "${NFS_SHARE}" "${LOCAL_PORT}"
+      sudo mount -t nfs -o ro,port="${LOCAL_PORT}",nolock,soft,timeo=5,retrans=3 127.0.0.1:"${NFS_SHARE}" "${MOUNT_POINT}"
+      if is_mount_accessible "${MOUNT_POINT}"; then
+        echo "[SUCCESS] NFS share mounted successfully at ${MOUNT_POINT}."
+      else
+        echo "[ERROR] Failed to mount NFS share ${NFS_SHARE} at ${MOUNT_POINT} or folder not accessible!"
+      fi
     fi
 
   done
@@ -263,9 +263,6 @@ function preload_audio() {
   }
 }
 
-const TRACK_CLICK_DEBOUNCE_MS = 3000;
-let lastTrackClick = { url: null, ts: 0 };
-
 // Attach event listeners for directory, breadcrumb, file, and transcript links.
 function attachEventListeners() {
   // Directory link clicks.
@@ -298,33 +295,20 @@ document.querySelectorAll('.play-file').forEach(link => {
     event.preventDefault();
 
     const { fileType, url: relUrl, index } = this.dataset;
-    const now = Date.now();
+    // Remove the class from all file items.
+    document.querySelectorAll('.file-item').forEach(item => {
+      item.classList.remove('currently-playing');
+    });
 
     if (fileType === 'music') {
-      // If this is the same track already loaded, ignore to avoid extra GETs and unselects.
-      if (player.currentRelUrl === relUrl) {
-        return;
-      }
-
-      // Remove the class from all file items.
-      document.querySelectorAll('.file-item').forEach(item => {
-        item.classList.remove('currently-playing');
-      });
-
-      // Debounce repeated clicks on the same track to avoid extra GETs.
-      if (lastTrackClick.url === relUrl && now - lastTrackClick.ts < TRACK_CLICK_DEBOUNCE_MS) {
-        return;
-      }
-      lastTrackClick = { url: relUrl, ts: now };
-
       // Update the current music index.
       currentMusicIndex = index !== undefined ? parseInt(index) : -1;
 
       // Mark the clicked item as currently playing.
       this.closest('.file-item').classList.add('currently-playing');
 
-      const reqId = crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2));
-      player.loadTrack(relUrl, reqId);
+      player.loadTrack(relUrl);
 
       // Delay preloading to avoid blocking playback.
       setTimeout(preload_audio, 1000);
@@ -334,9 +318,7 @@ document.querySelectorAll('.play-file').forEach(link => {
       openGalleryModal(relUrl);
     } else {
       // serve like a download
-      const reqId = crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2));
-      const urlWithReq = `/media/${relUrl}${relUrl.includes('?') ? '&' : '?'}req=${encodeURIComponent(reqId)}`;
-      window.location.href = urlWithReq;
+      window.location.href = `/media/${relUrl}`;
     }
   });
 });
@@ -168,11 +168,7 @@ class SimpleAudioPlayer {
     document.body.removeChild(a);
   }
 
-  async loadTrack(relUrl, reqId) {
-    this.currentRelUrl = relUrl;
-    const requestId = reqId || (crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2)));
-    const urlWithReq = `/media/${relUrl}${relUrl.includes('?') ? '&' : '?'}req=${encodeURIComponent(requestId)}`;
-
+  async loadTrack(relUrl) {
     this.audio.pause();
     this.container.style.display = 'block';
     this.nowInfo.textContent = 'Loading…';
@@ -181,13 +177,13 @@ class SimpleAudioPlayer {
     this.abortCtrl = new AbortController();
 
     try {
-      const head = await fetch(urlWithReq, {
+      const head = await fetch(`/media/${relUrl}`, {
         method: 'HEAD',
         signal: this.abortCtrl.signal
       });
       if (!head.ok) throw new Error(`Status ${head.status}`);
 
-      this.audio.src = urlWithReq;
+      this.audio.src = `/media/${relUrl}`;
       await this.audio.play();
 
       // Full breadcrumb
@@ -201,7 +197,7 @@ class SimpleAudioPlayer {
       navigator.mediaSession.metadata = new MediaMetadata({
         title : file.replace(/\.[^/.]+$/, ''),
         artist: parts.pop(),
-        artwork: [{ src:'/icon/logo-192x192.png', sizes:'192x192', type:'image/png' }]
+        artwork: [{ src:'/icons/logo-192x192.png', sizes:'192x192', type:'image/png' }]
       });
     }
   } catch (err) {
@@ -269,3 +265,5 @@ class SimpleAudioPlayer {
 
 // Initialize instance
 const player = new SimpleAudioPlayer();
+
+
@@ -101,8 +101,7 @@
 {% block content %}
 <div class="page-content">
   <div class="section-header">
-    <h2 style="margin: 0;">Verbindungen der letzten 10 Minuten (nur Audio)</h2>
-    <p class="text-muted" style="margin: 4px 0 0 0;">Diese Ansicht listet ausschließlich Zugriffe auf Audio-Dateien.</p>
+    <h2 style="margin: 0;">Verbindungen der letzten 10 Minuten</h2>
     <div class="stats">
       <div class="stat-item">
         <div class="stat-label">Last Connection</div>