Compare commits

...

11 Commits

SHA1 Message Date
d2952c3ac0 fix: logging 2026-01-04 15:22:06 +00:00
495f33aa68 exclude tiny range requests from logging 2025-12-25 22:17:50 +00:00
311fd89a54 only audio for recent 10 minute log 2025-12-25 20:59:59 +00:00
5015075694 fix: counting cache 2025-12-25 20:46:41 +00:00
d3743d2685 fix comment 2025-12-23 09:18:14 +00:00
ed7c142521 fix logging 2025-12-23 09:06:52 +00:00
08f9eef1f0 fix logging 2025-12-23 09:04:32 +00:00
cd2592a4b1 fix caching with gunicorn 2025-12-22 15:37:49 +00:00
55a0a2dce1 fix icon loading 2025-12-22 15:27:49 +00:00
036ab856eb fix caching 2025-12-22 15:23:08 +00:00
b8713fcc7e more stable nfs mount 2025-12-22 13:33:06 +00:00
8 changed files with 283 additions and 95 deletions

View File

@@ -114,10 +114,13 @@ def parse_timestamp(ts_str):
         # If it's some other ValueError, re-raise it.
         raise

-def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id, cached):
-    """Insert a file access record into the database and prune entries older than 10 minutes,
-    and track today's files separately in folder_today."""
+def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id, cached, method="GET"):
+    """Insert a file access record into the database and prune entries older than 10 minutes,
+    and track today's files separately in folder_today. HTTP method is *not* persisted to the
+    database; it is kept only in the in-memory buffer to distinguish HEAD vs GET for the
+    recent-logs feed."""
     global file_access_temp, folder_today, folder_yesterday
+    http_method = (method or "GET").upper()

     # Create a timezone-aware timestamp
     now = datetime.now(timezone.utc).astimezone()

@@ -192,6 +195,7 @@ def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id,
     # Finally, insert the new access at the top of the temp log
     # Keep existing columns stable; append raw geo data for map use.
+    # Keep method only in memory for the 10-minute feed (DB remains untouched by method).
     file_access_temp.insert(0, [
         iso_ts,       # 0 timestamp
         rel_path,     # 1 path

@@ -204,7 +208,8 @@ def log_file_access(rel_path, filesize, mime, ip_address, user_agent, device_id,
         city,         # 8 city
         country,      # 9 country
         lat,          # 10 latitude
-        lon           # 11 longitude
+        lon,          # 11 longitude
+        http_method   # 12 http method (in-memory only)
     ])

     return True
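
Read as data, the change means each in-memory row now carries a thirteenth column. A minimal sketch of one file_access_temp entry after this change; the values are invented, and columns 4-7 are left elided because this hunk does not show them:

# Hypothetical entry (values invented); column meanings taken from the
# comments above and the index-based filters in return_file_access below.
entry = [
    "2025-12-25T21:00:00+01:00",  # 0  timestamp (timezone-aware ISO string)
    "music/album/track01.mp3",    # 1  path
    7340032,                      # 2  filesize
    "audio/mpeg",                 # 3  mime
    ...,                          # 4-7 not shown in this hunk, left elided
    "Berlin",                     # 8  city
    "DE",                         # 9  country
    52.52,                        # 10 latitude
    13.405,                       # 11 longitude
    "GET",                        # 12 http method (in-memory only, never hits the DB)
]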
@@ -249,20 +254,55 @@ def return_folder_yesterday():

 def return_file_access():
-    """Return recent file access logs from memory (the last 10 minutes)."""
+    """Return recent audio file access logs from memory (the last 10 minutes)."""
     global file_access_temp
-    if file_access_temp:
-        # Create a timezone-aware cutoff time
-        cutoff_time = datetime.now(timezone.utc).astimezone() - timedelta(minutes=10)
-        # Only keep entries with timestamps greater than or equal to cutoff_time
-        file_access_temp[:] = [
-            entry for entry in file_access_temp
-            if datetime.fromisoformat(entry[0]) >= cutoff_time
-        ]
-        return file_access_temp
-    else:
-        return []
+
+    def is_audio(entry):
+        """Check whether a log entry references an audio file."""
+        mime_val = (entry[3] or "").lower()
+        if mime_val.startswith("audio/"):
+            return True
+        path_val = (entry[1] or "").lower()
+        return path_val.endswith((
+            ".mp3", ".wav", ".flac", ".m4a", ".aac",
+            ".ogg", ".wma", ".aiff", ".alac", ".opus"
+        ))
+
+    def is_get(entry):
+        """Allow only GET requests in the recent feed to avoid HEAD-prefetch noise."""
+        method_val = "GET"
+        if len(entry) > 12 and entry[12]:
+            method_val = str(entry[12]).upper()
+        return method_val != "HEAD"
+
+    def has_bytes(entry):
+        """Ignore zero-byte requests (e.g., Apple prefetches asking for 0 bytes)."""
+        try:
+            size_val = entry[2]
+            if size_val is None:
+                return False
+            # handle str or numeric
+            size_num = float(size_val)
+            return size_num > 0
+        except Exception:
+            return False
+
+    if not file_access_temp:
+        return []
+
+    # Create a timezone-aware cutoff time
+    cutoff_time = datetime.now(timezone.utc).astimezone() - timedelta(minutes=10)
+    # Only keep entries with timestamps greater than or equal to cutoff_time
+    file_access_temp[:] = [
+        entry for entry in file_access_temp
+        if datetime.fromisoformat(entry[0]) >= cutoff_time
+    ]
+
+    audio_entries = [
+        entry for entry in file_access_temp
+        if is_audio(entry) and is_get(entry) and has_bytes(entry)
+    ]
+    return audio_entries

 def return_file_access_with_geo():
     """Return recent file access logs with geographic coordinates from the database."""

app.py
View File

@@ -35,6 +35,7 @@ import helperfunctions as hf
 import search_db_analyzer as sdb
 import fnmatch
 import openpyxl
+from collections import OrderedDict

 app_config = auth.return_app_config()
 BASE_DIR = os.path.realpath(app_config['BASE_DIR'])

@@ -44,6 +45,27 @@ cache_image = diskcache.Cache('./filecache_image', size_limit= app_config['filecache_size_limit_image'] * 1024**3)
 cache_video = diskcache.Cache('./filecache_video', size_limit= app_config['filecache_size_limit_video'] * 1024**3)
 cache_other = diskcache.Cache('./filecache_other', size_limit= app_config['filecache_size_limit_other'] * 1024**3)

+_logged_request_ids = OrderedDict()
+_logged_request_ids_lock = threading.Lock()
+_LOGGED_REQUEST_IDS_MAX = 2048
+
+def _is_duplicate_request(req_id: str) -> bool:
+    if not req_id:
+        return False
+    with _logged_request_ids_lock:
+        return req_id in _logged_request_ids
+
+def _mark_request_logged(req_id: str):
+    if not req_id:
+        return
+    with _logged_request_ids_lock:
+        _logged_request_ids[req_id] = None
+        _logged_request_ids.move_to_end(req_id)
+        if len(_logged_request_ids) > _LOGGED_REQUEST_IDS_MAX:
+            _logged_request_ids.popitem(last=False)
+
 app = Flask(__name__)
 app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
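
The OrderedDict plus lock acts as a bounded, process-local "seen request ids" set with oldest-first eviction; note it deduplicates only within one gunicorn worker. A standalone sketch of the eviction behavior, with the cap shrunk for illustration:

import threading
from collections import OrderedDict

_seen = OrderedDict()
_lock = threading.Lock()
_MAX = 3  # shrunk from 2048 for illustration

def mark(req_id):
    with _lock:
        _seen[req_id] = None
        _seen.move_to_end(req_id)      # refresh recency on repeat sightings
        if len(_seen) > _MAX:
            _seen.popitem(last=False)  # evict the oldest id

for rid in ("a", "b", "c", "d"):
    mark(rid)
print(list(_seen))  # ['b', 'c', 'd']: 'a' was evicted, so a very stale replayed id could log twice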
@@ -614,13 +636,14 @@ def human_readable_size(num_bytes):
         num /= 1024

 @app.route('/icon/<string:size>.png')
+@app.route('/icons/<string:size>.png')  # legacy path
 def serve_resized_icon(size):
     cached_image_bytes = get_cached_image(size)
     response = send_file(
         io.BytesIO(cached_image_bytes),
         mimetype='image/png'
     )
-    response.headers['Cache-Control'] = 'public, max-age=86400'
+    response.headers['Cache-Control'] = 'public, max-age=86400, immutable'
     return response

 @app.route('/custom_logo/<string:filename>.png')
@@ -704,7 +727,10 @@ def api_browse(subpath):
     root, *relative_parts = subpath.split('/')

-    base_path = session['folders'][root]
+    base_path = session.get('folders', {}).get(root)
+    if not base_path:
+        app.logger.warning(f"Requested root '{root}' not found in session folders")
+        return jsonify({'error': 'Directory not found'}), 404
     directory = os.path.join(base_path, *relative_parts)

     playfile = None
@@ -767,6 +793,9 @@ def serve_file(subpath):
         app.logger.error(f"File not found: {full_path}")
         return "File not found", 404

+    filesize = os.path.getsize(full_path)
+    filename = os.path.basename(full_path)
+
     # 2) Prep request info
     mime, _ = mimetypes.guess_type(full_path)
     mime = mime or 'application/octet-stream'
@@ -774,11 +803,37 @@ def serve_file(subpath):
     is_audio_get = mime.startswith('audio/') and request.method == 'GET'
     ip_address = request.remote_addr
     user_agent = request.headers.get('User-Agent')
+    range_header = request.headers.get('Range', '')
+    req_id = request.args.get('req') or request.headers.get('X-Request-Id')

-    # skip logging on cache hits or on audio GETs (per your rules)
+    def is_range_prefetch(header, ua):
+        """
+        Detect tiny range requests (common Apple prefetch) so we can skip logging duplicates.
+        """
+        if not header:
+            return False
+        try:
+            if not header.lower().startswith('bytes='):
+                return False
+            range_spec = header.split('=', 1)[1]
+            start_str, end_str = range_spec.split('-', 1)
+            if not start_str.isdigit() or not end_str.isdigit():
+                return False
+            start = int(start_str)
+            end = int(end_str)
+            length = end - start + 1
+            if length <= 1024 and start == 0:
+                return True
+        except Exception:
+            return False
+        return False
+
+    # Logging: log every client GET (cached or not), but skip CDN prefetches (X-Cache-Request)
+    # and HEAD probes to avoid double-counting. Also skip tiny range-prefetches (e.g., Apple).
     do_log = (
         not is_cache_request   # skip if upstream CDN asked us to cache
-        and not is_audio_get   # skip audio GETs
+        and request.method != 'HEAD'
+        and not is_range_prefetch(range_header, user_agent)
     )

     # 3) Pick cache
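
Only closed ranges that start at byte 0 and span at most 1024 bytes are classified as prefetches; open-ended ranges, which real playback uses, fall through. Illustrative calls against the helper above (the ua argument is accepted but unused by the current logic):

is_range_prefetch("bytes=0-1", None)      # True:  two-byte Apple-style probe
is_range_prefetch("bytes=0-1023", None)   # True:  exactly 1024 bytes from offset 0
is_range_prefetch("bytes=0-1024", None)   # False: 1025 bytes, over the threshold
is_range_prefetch("bytes=100-200", None)  # False: does not start at byte 0
is_range_prefetch("bytes=0-", None)       # False: open-ended, treated as real playback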
@@ -791,6 +846,16 @@ def serve_file(subpath):
     else:
         cache = cache_other

+    # Plain HEAD requests (without X-Cache-Request) should not populate the cache.
+    # They are just probes and would otherwise turn the first real GET into a "cached hit".
+    if request.method == 'HEAD' and not is_cache_request:
+        response = make_response('', 200)
+        response.headers['Content-Type'] = mime
+        response.headers['Content-Length'] = str(filesize)
+        response.headers['Accept-Ranges'] = 'bytes'
+        response.headers['Cache-Control'] = 'public, max-age=86400'
+        return response
+
     # 4) Image and thumbnail handling first
     if mime.startswith('image/'):
         small = request.args.get('thumbnail') == 'true'
@@ -844,15 +909,18 @@ def serve_file(subpath):
         response.headers['Cache-Control'] = 'public, max-age=86400'
         if do_log and not small:
-            a.log_file_access(
-                cache_key,
-                os.path.getsize(file_path),
-                mime,
-                ip_address,
-                user_agent,
-                session['device_id'],
-                cached_hit
-            )
+            if not _is_duplicate_request(req_id):
+                a.log_file_access(
+                    cache_key,
+                    os.path.getsize(file_path),
+                    mime,
+                    ip_address,
+                    user_agent,
+                    session['device_id'],
+                    cached_hit,
+                    request.method
+                )
+                _mark_request_logged(req_id)
         return response

     # 5) Non-image branch: check if cached, otherwise create partial cache file
@@ -871,12 +939,36 @@ def serve_file(subpath):
         cache_key = hashlib.md5(subpath.encode('utf-8')).hexdigest()
         cache_dir = os.path.join(cache.directory, cache_key[:2])
         os.makedirs(cache_dir, exist_ok=True)
-        cache_file_path = os.path.join(cache_dir, f"{cache_key}.tmp")
+        fd, cache_file_path = tempfile.mkstemp(
+            prefix=f"{cache_key}_",
+            suffix=".tmp",
+            dir=cache_dir
+        )
+        os.close(fd)
+
+        # Write an initial chunk synchronously so the temp file exists with data
+        initial_bytes = 0
+        try:
+            with open(full_path, 'rb') as source, open(cache_file_path, 'wb') as dest:
+                chunk = source.read(1024 * 1024)  # 1MB
+                if chunk:
+                    dest.write(chunk)
+                    dest.flush()
+                    initial_bytes = len(chunk)
+        except Exception as e:
+            app.logger.error(f"Failed to prime cache file for {subpath}: {e}")
+            if os.path.exists(cache_file_path):
+                try:
+                    os.remove(cache_file_path)
+                except:
+                    pass
+            abort(503, description="Service temporarily unavailable - cache initialization failed")

         # Start copying to our cache file in chunks
-        def copy_to_cache_chunked():
+        def copy_to_cache_chunked(start_offset):
             try:
-                with open(full_path, 'rb') as source, open(cache_file_path, 'wb') as dest:
+                with open(full_path, 'rb') as source, open(cache_file_path, 'ab') as dest:
+                    source.seek(start_offset)
                     while True:
                         chunk = source.read(1024 * 1024)  # 1MB chunks
                         if not chunk:
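
Replacing the deterministic {cache_key}.tmp path with tempfile.mkstemp is what makes the priming safe when several gunicorn workers miss the cache for the same file at once. A small standalone sketch of the property being relied on (prefix and paths are illustrative):

import os
import tempfile

# mkstemp creates the file atomically (O_EXCL) with a unique random suffix,
# so two workers priming the same cache key can no longer clobber each other.
fd1, path1 = tempfile.mkstemp(prefix="d41d8cd9_", suffix=".tmp", dir=".")
fd2, path2 = tempfile.mkstemp(prefix="d41d8cd9_", suffix=".tmp", dir=".")
os.close(fd1)
os.close(fd2)
assert path1 != path2  # distinct temp files, no half-written collisions
os.remove(path1)
os.remove(path2)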
@@ -886,6 +978,11 @@ def serve_file(subpath):
                 # Once complete, register with diskcache for proper management
                 try:
+                    if subpath in cache:
+                        if os.path.exists(cache_file_path):
+                            os.remove(cache_file_path)
+                        app.logger.info(f"Cache already populated for {subpath}, skipped duplicate registration")
+                        return
                     with open(cache_file_path, 'rb') as f:
                         cache.set(subpath, f, read=True)
                     # Remove our temp file since diskcache now has it
@@ -904,29 +1001,11 @@ def serve_file(subpath):
                     pass

         # Start the background copy
-        cache_thread = threading.Thread(target=copy_to_cache_chunked, daemon=True)
+        cache_thread = threading.Thread(target=copy_to_cache_chunked, args=(initial_bytes,), daemon=True)
         cache_thread.start()
-
-        # Wait for initial data to be written
-        max_wait = 5.0  # Maximum 5 seconds
-        wait_interval = 0.05  # Check every 50ms
-        elapsed = 0
-        while elapsed < max_wait:
-            if os.path.exists(cache_file_path) and os.path.getsize(cache_file_path) > 0:
-                file_path = cache_file_path
-                break
-            time.sleep(wait_interval)
-            elapsed += wait_interval
-        else:
-            # Cache file not ready - abort
-            app.logger.error(f"Cache file not created in time for {subpath}")
-            abort(503, description="Service temporarily unavailable - cache initialization failed")
+        file_path = cache_file_path

     # 6) Build response for non-image
-    filesize = os.path.getsize(full_path)
-    filename = os.path.basename(full_path)
     if as_attachment:
         download_name = filename
         mimetype = 'application/octet-stream'
@@ -952,7 +1031,10 @@ def serve_file(subpath):
                     # No data available yet, wait a bit
                     time.sleep(0.1)

-    response = make_response(generate())
+    if request.method == 'HEAD':
+        response = make_response('', 200)
+    else:
+        response = make_response(generate())
     response.headers['Content-Type'] = mimetype
     response.headers['Content-Length'] = str(filesize)
     response.headers['Accept-Ranges'] = 'bytes'
@@ -979,17 +1061,23 @@ def serve_file(subpath):
         response.headers['Cache-Control'] = 'public, max-age=86400'

+    if request.method == 'HEAD':
+        response.set_data(b'')
+
     # 7) Logging
     if do_log:
-        a.log_file_access(
-            subpath,
-            filesize,
-            mime,
-            ip_address,
-            user_agent,
-            session['device_id'],
-            cached_hit
-        )
+        if not _is_duplicate_request(req_id):
+            a.log_file_access(
+                subpath,
+                filesize,
+                mime,
+                ip_address,
+                user_agent,
+                session['device_id'],
+                cached_hit,
+                request.method
+            )
+            _mark_request_logged(req_id)

     return response
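
Taken together, the contract is: HEAD probes are never logged and never prime the cache, and a GET is logged at most once per req id. A hypothetical client-side walkthrough using the requests library (host and path are invented):

import uuid
import requests

req_id = str(uuid.uuid4())
url = f"https://files.example.invalid/media/music/track01.mp3?req={req_id}"

requests.head(url)  # answered from file metadata; not logged, cache untouched
requests.get(url)   # logged once, with method "GET" kept in-memory only
requests.get(url)   # same req id: _is_duplicate_request() suppresses a second log entry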

View File

@@ -152,17 +152,17 @@ def require_secret(f):
         for token_in_session in session.get('valid_tokens', []):
             try:
                 token_item = decode_token(token_in_session)
-                print(f"DEBUG: Decoded token: {token_item}")
+                # print(f"DEBUG: Decoded token: {token_item}")
                 for folder_info in token_item.get('folders', []):
-                    print(f"DEBUG: Adding folder '{folder_info['foldername']}' -> '{folder_info['folderpath']}'")
+                    # print(f"DEBUG: Adding folder '{folder_info['foldername']}' -> '{folder_info['folderpath']}'")
                     session['folders'][folder_info['foldername']] = folder_info['folderpath']
             except Exception as e:
                 print(f"ERROR: Failed to process token: {e}")

         # Mark session as modified to ensure it's saved
         session.modified = True
-        print(f"DEBUG: Final session['folders'] keys: {list(session['folders'].keys())}")
-        print(f"DEBUG: session['valid_tokens']: {session.get('valid_tokens', [])}")
+        # print(f"DEBUG: Final session['folders'] keys: {list(session['folders'].keys())}")
+        # print(f"DEBUG: session['valid_tokens']: {session.get('valid_tokens', [])}")

         # 6) If we have folders, proceed; otherwise show index
         if session['folders']:

View File

@@ -16,8 +16,7 @@ services:
           propagation: rshared
     environment:
       - FLASK_APP=app.py
-      - FLASK_ENV=development
-      - FLASK_DEBUG=1
+      - APP_ENV=${APP_ENV:-production}
     networks:
       - traefik
     labels:

@@ -38,10 +37,15 @@ services:
       # Internal port
       - "traefik.http.services.${CONTAINER_NAME}.loadbalancer.server.port=5000"

-    # Dev server with autoreload for live code changes
     command: >
       sh -c "pip install -r requirements.txt &&
-             flask run --host=0.0.0.0 --port=5000 --reload"
+             if [ \"$APP_ENV\" = \"development\" ]; then
+               export FLASK_ENV=development FLASK_DEBUG=1 &&
+               flask run --host=0.0.0.0 --port=5000 --reload;
+             else
+               export FLASK_ENV=production FLASK_DEBUG=0 &&
+               gunicorn -w 1 -k eventlet -b 0.0.0.0:5000 app:app;
+             fi"

     networks:
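
Presumably the switch is driven from the host shell at compose time, e.g. APP_ENV=development docker compose up for the autoreloading dev server; when the variable is unset, the ${APP_ENV:-production} default selects the single-worker eventlet gunicorn path.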

View File

@@ -24,6 +24,15 @@ if ! command -v jq >/dev/null 2>&1; then
     exit 1
 fi

+# Ensure an NFS mount helper is available before doing any work.
+if ! command -v mount.nfs >/dev/null 2>&1 && \
+   ! command -v mount.nfs4 >/dev/null 2>&1 && \
+   [ ! -x /sbin/mount.nfs ] && [ ! -x /usr/sbin/mount.nfs ]; then
+    echo "[ERROR] NFS client utilities are missing (mount.nfs/mount.nfs4 not found)."
+    echo "[ERROR] Install the 'nfs-common' package on this host and re-run the script."
+    exit 1
+fi
+
 # extract the server names at the top level:
 SERVERS=($(jq -r 'keys[]' "$CONFIG_FILE"))

@@ -39,7 +48,9 @@ is_tunnel_active() {
 # Check if the given mount point is currently mounted
 is_nfs_mounted() {
     local mount_point=$1
-    mount | grep -q "${mount_point}"
+    local fstype
+    fstype=$(findmnt -rn -T "${mount_point}" -o FSTYPE 2>/dev/null)
+    [[ "${fstype}" == nfs* ]]
 }

 # Check if the mount point directory is accessible (i.e. can be listed)
@@ -48,6 +59,33 @@ is_mount_accessible() {
     ls -1 "${mount_point}" >/dev/null 2>&1
 }

+# Run the NFS mount command and verify it really succeeded.
+mount_nfs_share() {
+    local mount_point=$1
+    local nfs_share=$2
+    local local_port=$3
+    local mount_opts="ro,port=${local_port},nolock,soft,timeo=5,retrans=3"
+    local mount_output
+
+    if ! mount_output=$(sudo mount -t nfs -o "${mount_opts}" 127.0.0.1:"${nfs_share}" "${mount_point}" 2>&1); then
+        echo "[ERROR] Failed to mount ${nfs_share} at ${mount_point}: ${mount_output}"
+        return 1
+    fi
+
+    if ! is_nfs_mounted "${mount_point}"; then
+        echo "[ERROR] Mount command returned success but ${mount_point} is not an active NFS mount."
+        echo "[DEBUG] Mount output: ${mount_output}"
+        return 1
+    fi
+
+    if ! is_mount_accessible "${mount_point}"; then
+        echo "[ERROR] ${mount_point} is mounted but not accessible (check tunnel/NFS server)."
+        return 1
+    fi
+
+    echo "[SUCCESS] NFS share mounted successfully at ${mount_point}."
+}
+
 ###############################################################################
 # Main Loop: Process Each Server and Its Mount Points
 ###############################################################################
@@ -104,25 +142,22 @@ for server in "${SERVERS[@]}"; do
         if is_nfs_mounted "${MOUNT_POINT}"; then
             if ! is_mount_accessible "${MOUNT_POINT}"; then
                 echo "[WARNING] Mount point ${MOUNT_POINT} is not accessible. Attempting to remount..."
-                sudo umount "${MOUNT_POINT}"
-                sleep 2
-                sudo mount -t nfs -o ro,port="${LOCAL_PORT}",nolock,soft,timeo=5,retrans=3 127.0.0.1:"${NFS_SHARE}" "${MOUNT_POINT}"
-                if is_mount_accessible "${MOUNT_POINT}"; then
-                    echo "[SUCCESS] Remounted successfully and folder is now accessible."
+                if sudo umount "${MOUNT_POINT}"; then
+                    sleep 2
+                    if mount_nfs_share "${MOUNT_POINT}" "${NFS_SHARE}" "${LOCAL_PORT}"; then
+                        echo "[SUCCESS] Remounted successfully and folder is now accessible."
+                    else
+                        echo "[ERROR] Remount failed, folder still not accessible."
+                    fi
                 else
-                    echo "[ERROR] Remount failed, folder still not accessible."
+                    echo "[ERROR] Failed to unmount ${MOUNT_POINT} during remount attempt."
                 fi
             else
                 echo "[INFO] NFS share is mounted and accessible at ${MOUNT_POINT}."
             fi
         else
             echo "[INFO] NFS share is not mounted at ${MOUNT_POINT}. Attempting to mount..."
-            sudo mount -t nfs -o ro,port="${LOCAL_PORT}",nolock,soft,timeo=5,retrans=3 127.0.0.1:"${NFS_SHARE}" "${MOUNT_POINT}"
-            if is_mount_accessible "${MOUNT_POINT}"; then
-                echo "[SUCCESS] NFS share mounted successfully at ${MOUNT_POINT}."
-            else
-                echo "[ERROR] Failed to mount NFS share ${NFS_SHARE} at ${MOUNT_POINT} or folder not accessible!"
-            fi
+            mount_nfs_share "${MOUNT_POINT}" "${NFS_SHARE}" "${LOCAL_PORT}"
         fi
     done

View File

@@ -263,6 +263,9 @@ function preload_audio() {
     }
 }

+const TRACK_CLICK_DEBOUNCE_MS = 3000;
+let lastTrackClick = { url: null, ts: 0 };
+
 // Attach event listeners for directory, breadcrumb, file, and transcript links.
 function attachEventListeners() {
     // Directory link clicks.
@@ -295,20 +298,33 @@ document.querySelectorAll('.play-file').forEach(link => {
         event.preventDefault();
         const { fileType, url: relUrl, index } = this.dataset;
+        const now = Date.now();

-        // Remove the class from all file items.
-        document.querySelectorAll('.file-item').forEach(item => {
-            item.classList.remove('currently-playing');
-        });

         if (fileType === 'music') {
+            // If this is the same track already loaded, ignore to avoid extra GETs and unselects.
+            if (player.currentRelUrl === relUrl) {
+                return;
+            }
+
+            // Remove the class from all file items.
+            document.querySelectorAll('.file-item').forEach(item => {
+                item.classList.remove('currently-playing');
+            });
+
+            // Debounce repeated clicks on the same track to avoid extra GETs.
+            if (lastTrackClick.url === relUrl && now - lastTrackClick.ts < TRACK_CLICK_DEBOUNCE_MS) {
+                return;
+            }
+            lastTrackClick = { url: relUrl, ts: now };
+
             // Update the current music index.
             currentMusicIndex = index !== undefined ? parseInt(index) : -1;

             // Mark the clicked item as currently playing.
             this.closest('.file-item').classList.add('currently-playing');

-            player.loadTrack(relUrl);
+            const reqId = crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2));
+            player.loadTrack(relUrl, reqId);

             // Delay preloading to avoid blocking playback.
             setTimeout(preload_audio, 1000);
@@ -318,7 +334,9 @@ document.querySelectorAll('.play-file').forEach(link => {
             openGalleryModal(relUrl);
         } else {
             // serve like a download
-            window.location.href = `/media/${relUrl}`;
+            const reqId = crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2));
+            const urlWithReq = `/media/${relUrl}${relUrl.includes('?') ? '&' : '?'}req=${encodeURIComponent(reqId)}`;
+            window.location.href = urlWithReq;
         }
     });
 });

View File

@@ -168,7 +168,11 @@ class SimpleAudioPlayer {
         document.body.removeChild(a);
     }

-    async loadTrack(relUrl) {
+    async loadTrack(relUrl, reqId) {
+        this.currentRelUrl = relUrl;
+        const requestId = reqId || (crypto.randomUUID ? crypto.randomUUID() : (Date.now().toString(36) + Math.random().toString(36).slice(2)));
+        const urlWithReq = `/media/${relUrl}${relUrl.includes('?') ? '&' : '?'}req=${encodeURIComponent(requestId)}`;
         this.audio.pause();
         this.container.style.display = 'block';
         this.nowInfo.textContent = 'Loading…';

@@ -177,13 +181,13 @@ class SimpleAudioPlayer {
         this.abortCtrl = new AbortController();

         try {
-            const head = await fetch(`/media/${relUrl}`, {
+            const head = await fetch(urlWithReq, {
                 method: 'HEAD',
                 signal: this.abortCtrl.signal
             });
             if (!head.ok) throw new Error(`Status ${head.status}`);

-            this.audio.src = `/media/${relUrl}`;
+            this.audio.src = urlWithReq;
             await this.audio.play();

             // Full breadcrumb

@@ -197,7 +201,7 @@ class SimpleAudioPlayer {
             navigator.mediaSession.metadata = new MediaMetadata({
                 title : file.replace(/\.[^/.]+$/, ''),
                 artist: parts.pop(),
-                artwork: [{ src:'/icons/logo-192x192.png', sizes:'192x192', type:'image/png' }]
+                artwork: [{ src:'/icon/logo-192x192.png', sizes:'192x192', type:'image/png' }]
             });
         }
     } catch (err) {

@@ -265,5 +269,3 @@ class SimpleAudioPlayer {

 // Initialize instance
 const player = new SimpleAudioPlayer();

View File

@@ -101,7 +101,8 @@
 {% block content %}
 <div class="page-content">
     <div class="section-header">
-        <h2 style="margin: 0;">Verbindungen der letzten 10 Minuten</h2>
+        <h2 style="margin: 0;">Verbindungen der letzten 10 Minuten (nur Audio)</h2>
+        <p class="text-muted" style="margin: 4px 0 0 0;">Diese Ansicht listet ausschließlich Zugriffe auf Audio-Dateien.</p>
         <div class="stats">
             <div class="stat-item">
                 <div class="stat-label">Last Connection</div>