From 889209fe3d8d7a1fc60e813cceae1622ff228f78 Mon Sep 17 00:00:00 2001 From: Mike Date: Mon, 19 Jan 2026 08:17:05 +0200 Subject: [PATCH] Added Services thanks to guilara --- ADN/__init__.py | 1015 ++++++++++++++++++++++ ADN/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 27580 bytes ADN/config.yaml | 29 + 3 files changed, 1044 insertions(+) create mode 100644 ADN/__init__.py create mode 100644 ADN/__pycache__/__init__.cpython-310.pyc create mode 100644 ADN/config.yaml diff --git a/ADN/__init__.py b/ADN/__init__.py new file mode 100644 index 0000000..d356934 --- /dev/null +++ b/ADN/__init__.py @@ -0,0 +1,1015 @@ +import base64 +import json +import re +import time +import uuid +import subprocess +import tempfile +import shutil +from typing import Generator, Optional, Union, List, Any +from pathlib import Path +from functools import partial + +import click +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import padding +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +from langcodes import Language + +from unshackle.core import binaries +from unshackle.core.config import config +from unshackle.core.manifests import HLS +from unshackle.core.search_result import SearchResult +from unshackle.core.service import Service +from unshackle.core.session import session +from unshackle.core.titles import Episode, Series +from unshackle.core.tracks import Tracks, Chapters, Chapter +from unshackle.core.tracks.audio import Audio +from unshackle.core.tracks.video import Video +from unshackle.core.tracks.audio import Audio +from unshackle.core.tracks.subtitle import Subtitle + + +class VideoNoAudio(Video): + """ + Video track qui enlève automatiquement l'audio après téléchargement. + Nécessaire car ADN fournit des streams HLS avec audio muxé. 
+ """ + + def download(self, session, prepare_drm, max_workers=None, progress=None, *, cdm=None): + """Override : télécharge puis demuxe pour enlever l'audio.""" + import logging + log = logging.getLogger('ADN.VideoNoAudio') + + # Téléchargement normal + super().download(session, prepare_drm, max_workers, progress, cdm=cdm) + + # Si pas de path, échec du téléchargement + if not self.path or not self.path.exists(): + return + + # Vérifier FFmpeg disponible + if not binaries.FFMPEG: + log.warning("FFmpeg not found, cannot remove audio from video") + return + + # Demuxer : enlever l'audio + if progress: + progress(downloaded="Removing audio") + + original_path = self.path + noaudio_path = original_path.with_stem(f"{original_path.stem}_noaudio") + + try: + log.debug(f"Removing audio from {original_path.name}") + + result = subprocess.run( + [ + binaries.FFMPEG, + '-i', str(original_path), + '-vcodec', 'copy', # Copie vidéo sans réencodage + '-an', # Enlève l'audio + '-y', + str(noaudio_path) + ], + capture_output=True, + text=True, + timeout=120 + ) + + if result.returncode != 0: + log.error(f"FFmpeg demux failed: {result.stderr}") + noaudio_path.unlink(missing_ok=True) + return + + if not noaudio_path.exists() or noaudio_path.stat().st_size < 1000: + log.error("Demuxed video is empty or too small") + noaudio_path.unlink(missing_ok=True) + return + + # Remplacer le fichier original + log.debug(f"Video demuxed successfully: {noaudio_path.stat().st_size} bytes") + original_path.unlink() + noaudio_path.rename(original_path) + + if progress: + progress(downloaded="Downloaded") + + except subprocess.TimeoutExpired: + log.error("FFmpeg demux timeout") + noaudio_path.unlink(missing_ok=True) + except Exception as e: + log.error(f"Failed to demux video: {e}") + noaudio_path.unlink(missing_ok=True) + + +class AudioExtracted(Audio): + """ + Audio track déjà extrait d'un flux HLS muxé. + Override download() pour copier le fichier au lieu de télécharger. 
+ """ + + def __init__(self, *args, extracted_path: Path, **kwargs): + # URL vide pour éviter que curl essaie de télécharger + super().__init__(*args, url="", **kwargs) + self.extracted_path = extracted_path + + def download(self, session, prepare_drm, max_workers=None, progress=None, *, cdm=None): + """Override : copie le fichier extrait au lieu de télécharger.""" + if not self.extracted_path or not self.extracted_path.exists(): + if progress: + progress(downloaded="[red]FAILED") + raise ValueError(f"Extracted audio file not found: {self.extracted_path}") + + # Créer le path de destination (même logique que Track.download) + track_type = self.__class__.__name__ + save_path = config.directories.temp / f"{track_type}_{self.id}.m4a" + + if progress: + progress(downloaded="Copying", total=100, completed=0) + + # Copier le fichier extrait vers le path final + config.directories.temp.mkdir(parents=True, exist_ok=True) + shutil.copy2(self.extracted_path, save_path) + + self.path = save_path + + if progress: + progress(downloaded="Downloaded", completed=100) + + +class SubtitleEmbedded(Subtitle): + """ + Subtitle avec contenu embarqué (data URI). + Override download() pour écrire le contenu directement. 
+ """ + + def __init__(self, *args, embedded_content: str, **kwargs): + # URL vide pour éviter que curl essaie de télécharger + super().__init__(*args, url="", **kwargs) + self.embedded_content = embedded_content + + def download(self, session, prepare_drm, max_workers=None, progress=None, *, cdm=None): + """Override : écrit le contenu embarqué au lieu de télécharger.""" + if not self.embedded_content: + if progress: + progress(downloaded="[red]FAILED") + raise ValueError("No embedded content in subtitle") + + # Créer le path de destination + track_type = "Subtitle" + save_path = config.directories.temp / f"{track_type}_{self.id}.{self.codec.extension}" + + if progress: + progress(downloaded="Writing", total=100, completed=0) + + # Écrire le contenu + config.directories.temp.mkdir(parents=True, exist_ok=True) + save_path.write_text(self.embedded_content, encoding='utf-8') + + self.path = save_path + + if progress: + progress(downloaded="Downloaded", completed=100) + + +class ADN(Service): + """ + Service code for Animation Digital Network (ADN). 
+ + \b + Version: 3.2.1 (FINAL - Full multi-audio/subtitle support with custom Track classes) + Authorization: Credentials + Robustness: + Video: Clear HLS (Highest Quality) + Audio: Pre-extracted from muxed streams with AudioExtracted class + Subs: AES-128 Encrypted JSON -> ASS format with SubtitleEmbedded class + + Technical Solution: + - ADN provides HLS streams with muxed video+audio (not separable) + - AudioExtracted: Extracts audio in get_tracks(), copies during download() + - SubtitleEmbedded: Decrypts and converts to ASS, writes during download() + - Result: MKV with 1 video + multiple audio tracks + subtitles + + Custom Track Classes: + - AudioExtracted: Bypasses curl file:// limitation with direct file copy + - SubtitleEmbedded: Bypasses requests data: limitation with direct write + Made by: guilara_tv + """ + + + + RSA_PUBLIC_KEY = """-----BEGIN PUBLIC KEY----- +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCbQrCJBRmaXM4gJidDmcpWDssg +numHinCLHAgS4buMtdH7dEGGEUfBofLzoEdt1jqcrCDT6YNhM0aFCqbLOPFtx9cg +/X2G/G5bPVu8cuFM0L+ehp8s6izK1kjx3OOPH/kWzvstM5tkqgJkNyNEvHdeJl6 +KhS+IFEqwvZqgbBpKuwIDAQAB +-----END PUBLIC KEY-----""" + + TITLE_RE = r"^(?:https?://(?:www\.)?animationdigitalnetwork\.com/video/[^/]+/)?(?P\d+)" + + @staticmethod + def get_session(): + return session("okhttp4") + + @staticmethod + @click.command( + name="ADN", + short_help="Téléchargement depuis Animation Digital Network", + help=( + "Télécharge des séries ou films depuis ADN.\n\n" + "TITLE : L'URL de la série ou son ID (ex: 1125).\n\n" + "SYSTÈME DE SÉLECTION :\n" + " - Simple : '-e 1-5' (épisodes 1 à 5)\n" + " - Saisons : '-e S2' ou '-e S02' (toute la saison 2) ou '-e S2E1-12'\n" + " - Mixte : '-e 1,3,S2E5' ou '-e 1,3,S02E05'\n" + " - Bonus : '-e NC1,OAV1'" + ) + ) + @click.argument("title", type=str, required=True) + @click.option( + "-e", "--episode", "select", type=str, + help="Sélection : numéros, plages (5-10), saisons (S1, S2) ou combiné (S1E5)." 
+ ) + @click.option( + "--but", is_flag=True, + help="Inverse la sélection : télécharge tout SAUF les épisodes spécifiés avec -e." + ) + @click.option( + "--all", "all_eps", is_flag=True, + help="Ignore toutes les restrictions et télécharge l'intégralité de la série." + ) + @click.pass_context + def cli(ctx, **kwargs) -> "ADN": + return ADN(ctx, **kwargs) + + def __init__(self, ctx, title: str, select: Optional[str] = None, but: bool = False, all_eps: bool = False): + self.title = title + self.select_str = select + self.but = but + self.all_eps = all_eps + self.access_token: Optional[str] = None + self.refresh_token: Optional[str] = None + self.token_expiration: Optional[int] = None + + super().__init__(ctx) + + self.locale = self.config.get("params", {}).get("locale", "fr") + self.session.headers.update(self.config.get("headers", {})) + self.session.headers["x-target-distribution"] = self.locale + + + @staticmethod + def _timecode_to_ms(tc: str) -> int: + """Convert HH:MM:SS timecode to milliseconds.""" + parts = tc.split(':') + hours = int(parts[0]) + minutes = int(parts[1]) + seconds = int(parts[2]) + return (hours * 3600 + minutes * 60 + seconds) * 1000 + + @property + def auth_header(self) -> dict: + return { + "Authorization": f"Bearer {self.access_token}", + "X-Access-Token": self.access_token + } + + def ensure_authenticated(self) -> None: + """Vérifie le token et rafraîchit si nécessaire.""" + current_time = int(time.time()) + + if self.access_token and self.token_expiration and current_time < (self.token_expiration - 60): + return + + cache_key = f"adn_auth_{self.credential.sha1 if self.credential else 'default'}" + cached = self.cache.get(cache_key) + + if cached and not cached.expired: + self.access_token = cached.data["access_token"] + self.refresh_token = cached.data["refresh_token"] + self.token_expiration = cached.data["token_expiration"] + self.session.headers.update(self.auth_header) + self.log.debug("Loaded authentication from cache") + else: + 
self.authenticate(credential=self.credential) + + def authenticate(self, cookies=None, credential=None) -> None: + super().authenticate(cookies, credential) + + if self.refresh_token: + try: + self._do_refresh() + return + except Exception: + self.log.warning("Refresh failed, proceeding to full login") + + if not credential: + raise ValueError("Credentials required for ADN") + + response = self.session.post( + url=self.config["endpoints"]["login"], + json={ + "username": credential.username, + "password": credential.password, + "source": "Web" + } + ) + + if response.status_code != 200: + self.log.error(f"Login failed: {response.status_code} - {response.text}") + response.raise_for_status() + + self._save_tokens(response.json()) + + def _do_refresh(self): + response = self.session.post( + url=self.config["endpoints"]["refresh"], + json={"refreshToken": self.refresh_token}, + headers=self.auth_header + ) + if response.status_code != 200: + raise ValueError("Token refresh failed") + self._save_tokens(response.json()) + + def _save_tokens(self, data: dict): + self.access_token = data["accessToken"] + self.refresh_token = data["refreshToken"] + expires_in = data.get("expires_in", 3600) + self.token_expiration = int(time.time()) + expires_in + self.session.headers.update(self.auth_header) + + def _parse_select(self, ep_id: str, short_number: str, season_num: int) -> bool: + """Retourne True si l'épisode doit être inclus.""" + if self.all_eps or not self.select_str: + return True + + # Préparation des identifiants possibles pour cet épisode + # On teste : "30353" (id), "1" (numéro), "S02E01" (format complet), "S02" (saison entière) + candidates = [ + str(ep_id), + str(short_number).lstrip("0"), + f"S{season_num:02d}E{int(short_number):02d}" if str(short_number).isdigit() else "", + f"S{season_num:02d}" + ] + + parts = re.split(r'[ ,]+', self.select_str.strip().upper()) + selection: set[str] = set() + + for part in parts: + if '-' in part: + start_p, end_p = 
part.split('-', 1) + # Gestion des plages S02E01-S02E04 + m_start = re.match(r'^S(\d+)E(\d+)$', start_p) + m_end = re.match(r'^S(\d+)E(\d+)$', end_p) + + if m_start and m_end: + s_start, e_start = map(int, m_start.groups()) + s_end, e_end = map(int, m_end.groups()) + if s_start == s_end: # Même saison + for i in range(e_start, e_end + 1): + selection.add(f"S{s_start:02d}E{i:02d}") + continue + + # Plages classiques (1-10) + nums = re.findall(r'\d+', part) + if len(nums) >= 2: + for i in range(int(nums[0]), int(nums[1]) + 1): + selection.add(str(i)) + else: + selection.add(part.lstrip("0")) + + included = any(c in selection for c in candidates if c) + return not included if self.but else included + + def get_titles(self) -> Series: + """Récupère les épisodes avec le titre réel de la série.""" + show_id = self.parse_show_id(self.title) + + # 1. Récupérer d'abord les infos globales du show pour avoir le titre propre + show_url = self.config["endpoints"]["show"].format(show_id=show_id) + show_res = self.session.get(show_url).json() + + # On extrait le titre de la série (ex: "Demon Slave") + # C'est ce titre qui servira de nom au dossier unique + series_title = show_res["videos"][0]["show"]["title"] if show_res.get("videos") else "ADN Show" + + # 2. 
Récupérer ensuite la structure par saisons + url_seasons = self.config["endpoints"].get("seasons") + if not url_seasons: + url_seasons = "https://gw.api.animationdigitalnetwork.com/video/show/{show_id}/seasons?maxAgeCategory=18&order=asc" + + res = self.session.get(url_seasons.format(show_id=show_id)).json() + + if not res.get("seasons"): + self.log.error(f"Aucune saison trouvée pour l'ID {show_id}") + return Series([]) + + episodes = [] + for season_data in res["seasons"]: + s_val = str(season_data.get("season", "1")) + season_num = int(s_val) if s_val.isdigit() else 1 + + for vid in season_data.get("videos", []): + video_id = str(vid["id"]) + + # Nettoyage du numéro d'épisode (on ne garde que les chiffres) + num_match = re.search(r'\d+', str(vid.get("number", "0"))) + short_number = num_match.group() if num_match else "0" + + # Logique de sélection (SxxEyy) + if not self._parse_select(video_id, short_number, season_num): + continue + + # Création de l'épisode + episodes.append(Episode( + id_=video_id, + service=self.__class__, + title=series_title, # Dossier : "Demon Slave" + season=season_num, # Saison : 2 + number=int(short_number), + name=vid.get("name") or "", # Nom : "La grande réunion..." + data=vid + )) + + episodes.sort(key=lambda x: (x.season, x.number)) + return Series(episodes) + + def get_tracks(self, title: Episode) -> Tracks: + """ + Récupère les pistes en pré-extrayant les audios. + Les audios sont extraits maintenant et seront copiés pendant download(). 
+ """ + self.ensure_authenticated() + vid_id = title.id + + # Configuration du lecteur + config_url = self.config["endpoints"]["player_config"].format(video_id=vid_id) + config_res = self.session.get(config_url).json() + + player_opts = config_res["player"]["options"] + if not player_opts["user"]["hasAccess"]: + raise PermissionError("No access to this video (Premium required?)") + + # Token du lecteur + refresh_url = player_opts["user"].get("refreshTokenUrl") or self.config["endpoints"]["player_refresh"] + token_res = self.session.post( + refresh_url, + headers={"X-Player-Refresh-Token": player_opts["user"]["refreshToken"]} + ).json() + + player_token = token_res["token"] + links_url = player_opts["video"].get("url") or self.config["endpoints"]["player_links"].format(video_id=vid_id) + + # Chiffrement RSA + rand_key = uuid.uuid4().hex[:16] + payload = json.dumps({"k": rand_key, "t": player_token}).encode('utf-8') + + public_key = serialization.load_pem_public_key( + self.RSA_PUBLIC_KEY.encode('utf-8'), + backend=default_backend() + ) + + encrypted = public_key.encrypt(payload, padding.PKCS1v15()) + auth_header_val = base64.b64encode(encrypted).decode('utf-8') + + # Récupération des liens + links_res = self.session.get( + links_url, + params={"freeWithAds": "true", "adaptive": "true", "withMetadata": "true", "source": "Web"}, + headers={"X-Player-Token": auth_header_val} + ).json() + + tracks = Tracks() + streaming_links = links_res.get("links", {}).get("streaming", {}) + + # Map des langues + lang_map = { + "vf": "fr", + "vostf": "ja", + "vde": "de", + "vostde": "ja", + } + + # Priorité: VOSTF (original) pour la vidéo principale + priority_order = ["vostf", "vf", "vde", "vostde"] + available_streams = {k: v for k, v in streaming_links.items() if k in lang_map} + + sorted_streams = sorted( + available_streams.keys(), + key=lambda x: priority_order.index(x) if x in priority_order else 999 + ) + + if not sorted_streams: + raise ValueError("No supported streams 
found") + + # Vidéo principale (VOSTF ou premier disponible) + primary_stream = sorted_streams[0] + primary_lang = lang_map[primary_stream] + + self.log.info(f"Primary video stream: {primary_stream} ({primary_lang})") + + video_track = self._get_video_track( + streaming_links[primary_stream], + primary_stream, + primary_lang, + is_original=(primary_stream in ["vostf", "vostde"]) + ) + + if video_track: + tracks.add(video_track) + self.log.info(f"Video track added: {video_track.width}x{video_track.height}") + + # Extraire audios pour toutes les langues disponibles + for stream_type in sorted_streams: + audio_lang = lang_map[stream_type] + is_original = stream_type in ["vostf", "vostde"] + + self.log.info(f"Processing audio for: {stream_type} ({audio_lang})") + + audio_track = self._extract_audio_track( + streaming_links[stream_type], + stream_type, + audio_lang, + is_original, + title + ) + + if audio_track: + tracks.add(audio_track, warn_only=True) + self.log.info(f"Audio track added: {audio_lang}") + + # Stocker les données de chapitres pour get_chapters() + if "video" in links_res: + title.data["chapter_data"] = links_res["video"] + self.log.debug(f"Stored chapter data: intro={links_res['video'].get('tcIntroStart')}, ending={links_res['video'].get('tcEndingStart')}") + + # Sous-titres + self._process_subtitles(links_res, rand_key, title, tracks) + + if not tracks.videos: + raise ValueError("No video tracks were successfully added") + + return tracks + + def _get_video_track(self, stream_data: dict, stream_type: str, lang: str, is_original: bool): + """Récupère la piste vidéo principale (sans audio).""" + try: + m3u8_url = self._resolve_stream_url(stream_data, stream_type) + if not m3u8_url: + return None + + hls_manifest = HLS.from_url(url=m3u8_url, session=self.session) + hls_tracks = hls_manifest.to_tracks(language=lang) + + if not hls_tracks.videos: + self.log.warning(f"No video tracks found for {stream_type}") + return None + + # Meilleure qualité + 
best_video = max( + hls_tracks.videos, + key=lambda v: (v.height or 0, v.width or 0, v.bitrate or 0) + ) + + # Convertir en VideoNoAudio pour demuxer automatiquement + video_no_audio = VideoNoAudio( + id_=best_video.id, + url=best_video.url, + codec=best_video.codec, + language=Language.get(lang), + is_original_lang=is_original, + bitrate=best_video.bitrate, + descriptor=best_video.descriptor, + width=best_video.width, + height=best_video.height, + fps=best_video.fps, + range_=best_video.range, + data=best_video.data, + ) + + video_no_audio.data["stream_type"] = stream_type + + return video_no_audio + + except Exception as e: + self.log.error(f"Failed to get video track for {stream_type}: {e}") + return None + + def _extract_audio_track(self, stream_data: dict, stream_type: str, lang: str, is_original: bool, title: Episode): + """ + Extrait l'audio et retourne un AudioExtracted. + L'audio est extrait MAINTENANT et sera copié pendant download(). + """ + if not binaries.FFMPEG: + self.log.warning("FFmpeg not found, cannot extract audio") + return None + + try: + m3u8_url = self._resolve_stream_url(stream_data, stream_type) + if not m3u8_url: + return None + + # Créer un répertoire temp pour ADN dans le temp d'Unshackle + adn_temp = config.directories.temp / "adn_audio_extracts" + adn_temp.mkdir(parents=True, exist_ok=True) + + # Nom de fichier unique basé sur video_id + langue + audio_filename = f"audio_{title.id}_{stream_type}.m4a" + audio_path = adn_temp / audio_filename + + # Si déjà extrait, réutiliser + if audio_path.exists() and audio_path.stat().st_size > 1000: + self.log.debug(f"Reusing existing extracted audio: {audio_path}") + else: + + # Extraire avec FFmpeg + result = subprocess.run( + [ + binaries.FFMPEG, + '-i', m3u8_url, + '-vn', + '-acodec', 'copy', + '-y', + str(audio_path) + ], + capture_output=True, + text=True, + timeout=300 + ) + + if result.returncode != 0: + self.log.error(f"FFmpeg failed for {stream_type}: {result.stderr}") + 
audio_path.unlink(missing_ok=True) + return None + + if not audio_path.exists() or audio_path.stat().st_size < 1000: + self.log.error(f"Extracted audio is invalid for {stream_type}") + audio_path.unlink(missing_ok=True) + return None + + # Créer AudioExtracted avec le fichier pré-extrait + audio_track = AudioExtracted( + id_=f"audio-{stream_type}-{lang}", + extracted_path=audio_path, + codec=Audio.Codec.AAC, + language=Language.get(lang), + is_original_lang=is_original, + bitrate=128000, + channels=2.0, + ) + + return audio_track + + except subprocess.TimeoutExpired: + self.log.error(f"FFmpeg timeout for {stream_type}") + return None + except Exception as e: + self.log.error(f"Failed to extract audio for {stream_type}: {e}") + return None + + def _resolve_stream_url(self, stream_data: dict, stream_type: str) -> Optional[str]: + """Résout l'URL du stream.""" + preferred_keys = ["fhd", "hd", "auto", "sd", "mobile"] + + m3u8_url = None + for key in preferred_keys: + if key in stream_data and stream_data[key]: + m3u8_url = stream_data[key] + break + + if not m3u8_url: + return None + + try: + resp = self.session.get(m3u8_url, timeout=12) + if resp.status_code != 200: + return None + + content_type = resp.headers.get("Content-Type", "") + resp_text = resp.text.strip() + + if "application/json" in content_type or resp_text.startswith("{"): + try: + json_data = resp.json() + real_location = json_data.get("location") + if real_location: + return real_location + except json.JSONDecodeError: + pass + + return m3u8_url + + except Exception as e: + self.log.error(f"Failed to resolve URL for {stream_type}: {e}") + return None + + def _process_subtitles(self, links_res: dict, rand_key: str, title: Episode, tracks: Tracks): + """Traite les sous-titres.""" + subs_root = links_res.get("links", {}).get("subtitles", {}) + if "all" not in subs_root: + self.log.debug("No subtitles available") + return + + aes_key_bytes = bytes.fromhex(rand_key + '7fac1178830cfe0c') + + try: + 
sub_loc_res = self.session.get(subs_root["all"]).json() + encrypted_sub_res = self.session.get(sub_loc_res["location"]).text + + self.log.debug(f"Encrypted subtitle length: {len(encrypted_sub_res)}") + + iv_b64 = encrypted_sub_res[:24] + payload_b64 = encrypted_sub_res[24:] + + iv = base64.b64decode(iv_b64) + ciphertext = base64.b64decode(payload_b64) + + self.log.debug(f"IV length: {len(iv)}, Ciphertext length: {len(ciphertext)}") + + cipher = Cipher(algorithms.AES(aes_key_bytes), modes.CBC(iv), backend=default_backend()) + decryptor = cipher.decryptor() + decrypted_padded = decryptor.update(ciphertext) + decryptor.finalize() + + # TOUJOURS retirer le padding PKCS7 (Python ne le fait pas automatiquement) + pad_len = decrypted_padded[-1] + if not (1 <= pad_len <= 16): + self.log.error(f"Invalid PKCS7 padding length: {pad_len}") + return + + # Vérifier que tous les bytes de padding ont la même valeur + padding = decrypted_padded[-pad_len:] + if not all(b == pad_len for b in padding): + self.log.error(f"Invalid PKCS7 padding bytes") + return + + decrypted_json = decrypted_padded[:-pad_len].decode('utf-8') + self.log.debug(f"Decrypted JSON length: {len(decrypted_json)}") + + + subs_data = json.loads(decrypted_json) + + + if not isinstance(subs_data, dict): + self.log.error(f"subs_data is not a dict! Type: {type(subs_data)}") + return + + if len(subs_data) == 0: + self.log.warning("subs_data is empty!") + return + + # Debug chaque clé + for key in subs_data.keys(): + value = subs_data[key] + if isinstance(value, list) and len(value) > 0: + self.log.debug(f" First item type: {type(value[0])}") + self.log.debug(f" First item keys: {value[0].keys() if isinstance(value[0], dict) else 'NOT A DICT'}") + self.log.debug(f" First item sample: {str(value[0])[:200]}") + processed_langs = set() + + for sub_lang_key, cues in subs_data.items(): + + if not isinstance(cues, list): + self.log.warning(f"Cues for {sub_lang_key} is not a list! 
Type: {type(cues)}") + continue + + if len(cues) == 0: + self.log.debug(f"No subtitles for {sub_lang_key} (normal for dubbed versions)") + continue + + self.log.debug(f" Cues count: {len(cues)}") + self.log.debug(f" First cue: {cues[0]}") + + if "vf" in sub_lang_key.lower() or "vostf" in sub_lang_key.lower(): + target_lang = "fr" + elif "vde" in sub_lang_key.lower() or "vostde" in sub_lang_key.lower(): + target_lang = "de" + else: + self.log.debug(f"Skipping subtitle language: {sub_lang_key}") + continue + + if target_lang in processed_langs: + self.log.debug(f"Already processed {target_lang}, skipping") + continue + + processed_langs.add(target_lang) + + # Convertir en ASS + ass_content = self._json_to_ass(cues, title.title, title.number) + + # Vérifier si le fichier ASS a du contenu + event_count = ass_content.count("Dialogue:") + self.log.debug(f"Generated ASS with {event_count} dialogue events") + + if event_count == 0: + self.log.warning(f"ASS file has no dialogue events!") + self.log.warning(f"First cue was: {cues[0] if cues else 'EMPTY LIST'}") + + # Créer SubtitleEmbedded avec le contenu ASS directement + subtitle = SubtitleEmbedded( + id_=f"sub-{target_lang}-{sub_lang_key}", + embedded_content=ass_content, # Contenu ASS directement + codec=Subtitle.Codec.SubStationAlphav4, + language=Language.get(target_lang), + forced=False, + sdh=False, + ) + + tracks.add(subtitle, warn_only=True) + self.log.info(f"Subtitle added: {target_lang} ({event_count} events)") + + except json.JSONDecodeError as e: + self.log.error(f"Failed to decode JSON: {e}") + self.log.error(f"Decrypted data (first 500 chars): {decrypted_json[:500] if 'decrypted_json' in locals() else 'NOT DECRYPTED'}") + except Exception as e: + self.log.error(f"Failed to process subtitles: {e}") + import traceback + self.log.debug(traceback.format_exc()) + + def get_chapters(self, title: Episode) -> Chapters: + """ + Crée les chapitres à partir des timecodes ADN. 
+ - Si tcIntroStart existe: + - Si tcIntroStart != "00:00:00": ajouter "Prologue" à 00:00:00 + - Ajouter "Opening" à tcIntroStart + - Ajouter "Episode" à tcIntroEnd + - Sinon: ajouter "Episode" à 00:00:00 + - Si tcEndingStart existe: + - Ajouter "Ending Start" à tcEndingStart + - Ajouter "Ending End" à tcEndingEnd + """ + chapters = Chapters() + + # Récupérer les données de chapitres stockées dans get_tracks() + chapter_data = title.data.get("chapter_data", {}) + if not chapter_data: + self.log.debug("No chapter data available") + return chapters + + tc_intro_start = chapter_data.get("tcIntroStart") + tc_intro_end = chapter_data.get("tcIntroEnd") + tc_ending_start = chapter_data.get("tcEndingStart") + tc_ending_end = chapter_data.get("tcEndingEnd") + + self.log.debug(f"Chapter timecodes: intro={tc_intro_start}->{tc_intro_end}, ending={tc_ending_start}->{tc_ending_end}") + + try: + if tc_intro_start: + # Si l'intro ne commence pas à 00:00:00, ajouter un prologue + if tc_intro_start != "00:00:00": + chapters.add(Chapter( + timestamp=0, + name="Prologue" + )) + self.log.debug("Added Prologue chapter at 00:00:00") + + # Opening + chapters.add(Chapter( + timestamp=self._timecode_to_ms(tc_intro_start), + name="Opening" + )) + self.log.debug(f"Added Opening chapter at {tc_intro_start}") + + # Episode (après l'intro) + if tc_intro_end: + chapters.add(Chapter( + timestamp=self._timecode_to_ms(tc_intro_end), + name="Episode" + )) + self.log.debug(f"Added Episode chapter at {tc_intro_end}") + else: + # Pas d'intro, épisode commence à 00:00:00 + chapters.add(Chapter( + timestamp=0, + name="Episode" + )) + self.log.debug("Added Episode chapter at 00:00:00 (no intro)") + + # Ending + if tc_ending_start: + chapters.add(Chapter( + timestamp=self._timecode_to_ms(tc_ending_start), + name="Ending Start" + )) + self.log.debug(f"Added Ending Start chapter at {tc_ending_start}") + + if tc_ending_end: + chapters.add(Chapter( + timestamp=self._timecode_to_ms(tc_ending_end), + name="Ending 
End" + )) + self.log.debug(f"Added Ending End chapter at {tc_ending_end}") + + self.log.info(f"✓ Created {len(chapters)} chapters") + + except Exception as e: + self.log.error(f"Failed to create chapters: {e}") + import traceback + self.log.debug(traceback.format_exc()) + + return chapters + + def search(self) -> Generator[SearchResult, None, None]: + res = self.session.get( + self.config["endpoints"]["search"], + params={"search": self.title, "limit": 20, "offset": 0} + ).json() + + for show in res.get("shows", []): + yield SearchResult( + id_=str(show["id"]), + title=show["title"], + label=show["type"], + description=show.get("summary", "")[:300], + url=f"https://animationdigitalnetwork.com/video/{show['id']}", + image=show.get("image") + ) + + def parse_show_id(self, input_str: str) -> str: + if input_str.isdigit(): + return input_str + match = re.match(self.TITLE_RE, input_str) + if match: + return match.group("id") + raise ValueError(f"Invalid ADN Show ID/URL: {input_str}") + + def _json_to_ass(self, cues: List[dict], title: str, ep_num: Union[int, str]) -> str: + """Convertit les sous-titres JSON en ASS.""" + header = """[Script Info] +ScriptType: v4.00+ +WrapStyle: 0 +PlayResX: 1280 +PlayResY: 720 +ScaledBorderAndShadow: yes + +[V4+ Styles] +Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding +Style: Default,Arial,50,&H00FFFFFF,&H00FFFFFF,&H00000000,&H00000000,-1,0,0,0,100,100,0,0,1,1.95,0,2,0,0,70,0 + +[Events] +Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text +""" + events = [] + pos_align_map = {"start": 1, "end": 3} + line_align_map = {"middle": 8, "end": 4} + + def format_time(seconds: float) -> str: + """Format exact d'adn : HH:MM:SS.CC (centisecondes sur 2 chiffres)""" + secs = int(seconds) + centiseconds = round((seconds - secs) * 100) + 
+ hours = secs // 3600 + minutes = (secs % 3600) // 60 + remaining_seconds = secs % 60 + + # Padding sur 2 chiffres pour TOUT (hours inclus) + return f"{hours:02d}:{minutes:02d}:{remaining_seconds:02d}.{centiseconds:02d}" + + for cue in cues: + start_time = cue.get("startTime", 0) + end_time = cue.get("endTime", 0) + text = cue.get("text", "") + + # Skip si texte vide + if not text or not text.strip(): + continue + + # Nettoyage EXACT du code adn + text = text.replace(' \\N', '\\N') # remove space before \\N at end + if text.endswith('\\N'): + text = text[:-2] # remove \\N at end + text = text.replace('\r', '') + text = text.replace('\n', '\\N') + text = re.sub(r'\\N +', r'\\N', text) # \\N followed by spaces + text = re.sub(r' +\\N', r'\\N', text) # spaces followed by \\N + text = re.sub(r'(\\N)+', r'\\N', text) # multiple \\N + text = re.sub(r']*>([^<]*)', r'{\\b1}\1{\\b0}', text) + text = re.sub(r']*>([^<]*)', r'{\\i1}\1{\\i0}', text) + text = re.sub(r']*>([^<]*)', r'{\\u1}\1{\\u0}', text) + text = text.replace('<', '<').replace('>', '>').replace('&', '&') + text = re.sub(r'<[^>]>', '', text) # remove any remaining single tags + if text.endswith('\\N'): + text = text[:-2] + text = text.rstrip() # remove trailing spaces + + # Skip après nettoyage si vide + if not text.strip(): + continue + + p_align = pos_align_map.get(cue.get("positionAlign"), 2) + l_align = line_align_map.get(cue.get("lineAlign", ""), 0) + align_val = p_align + l_align + + start = format_time(start_time) + end = format_time(end_time) + + style_mod = f"{{\\a{align_val}}}" if align_val != 2 else "" + events.append(f"Dialogue: 0,{start},{end},Default,,0,0,0,,{style_mod}{text}") + + self.log.debug(f"Converted {len(events)} subtitle events from {len(cues)} cues") + + if not events: + self.log.warning(f"No subtitle events generated - all cues were empty or invalid (total cues: {len(cues)})") + + return header + "\n".join(events) \ No newline at end of file diff --git 
a/ADN/__pycache__/__init__.cpython-310.pyc b/ADN/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7177bda12cdeca69d604517a4b781aaa560c117 GIT binary patch literal 27580 zcma*Qd2}2{njctMcNGeS0*I3YsT3tqC=vj{OHxDB0fL~&770-VCACUa*AyxfK!J5J zGK(at3;i^vJl#DrZ1>KL*JHQZ276|`&3+#5j-Q|Xe2#5CAG`J*_VYa3Y5RG${l_l7 z_VY3AW7s3fzuy;Gg#t*m4XPp|n#?m+KRMxSXl<=laEOlm~JHh>w)_R0eZ{5*95FRfcoJ;*XU_ zDx_QqTxf9~wQ$ATal{@uV_+a^k%8R)djldfLYseb@IA9Iir|yJuFX4T}8pZpl zeFE=a!uy!D7w>!R7x6xe_kGql-pB0;yuWN2OM&eEhx~Sn*@%~T!>-z@;MSBEf2;16 zYSlv7i@Z~X#|vL9Ij$F)t8QcsFMPS+uHhN07nEBnl;Q1lYz1GbSztaLXIZ7{D!hG` zy;5kD-TZQ)c-^jA*^n2VFV)v<Epch-HmmC!BMG?)C zQ2kYfrZ`@Feyvb|i)=?{Hg+4L95m+&-uod~ZCl~%kARNd=Fyt{WB}FgRq^icRAal~hpM;t&8)x2V zluWx?e(-~vwpnPnwF*Y|M#HYyRo5&(Utk-}LR~%hfn&N4?w23jFRm5Ts>DxAVT%aD zAQVcw3>0lW{i%V{On_aNmS*8iSvQ-W<)o2|sw;Jv{5kTrNcpngGe4zcDZ zam0-HTXpgANd_qKYT+#lxH?D={R$6EVt8Mgx2eT^^Si@tT!RtnSJoZT_2 z%zC5bVAf!A*k&D*h*Q%>+V)hOR-7A)%?xJQbob;|Cc>LD=gw8?_NrN}xtxPlYZ6nk z$~R?KYBIw)_f}M`V&3GcYo@O-0t7@)OU%TiRl8L!*9w+x<>L8jO{QyeaA!JQ#G~L; zsTrHPS;Vq0df{TNzR?U#6{-l|xccxmT690;rHfem4Q1zR4Y%HKy|8QFcDE^o}5&8WG&;o6RuJlpAGb8trk+RYj{CoMtA`g7);Rk_@IE^|U@%Gqx;W}uy7qxGLo;xaoq41*T&Rx2^@P-$=RRD2d z{#&5PdB?RYURd0SWiK~Yy>PWqvAv|zSgxxYo9~5Gqw0k~NxY=8F_=}3xEH}3!X^@R zTnj!g+NhRG)$1&)fB|;gyi;m|94lMOOJCI^zHqw^Qe}CGh1*42wx4VYdF=9v8fG(l z>dMB3W9KbZ@p>wS+xc5Hb)CdJj^?Z?v`0OSgldMVLl_LN7kdjvq)^VY*SVBGY2=Od zbKmd`?iY!CzF00ePCkD>@aV0xv)39`XANYyY;XIfj!z$(p84YUYuE$1{W#aK2(IPx zB~aRYe!9M)PM}5HA)KFq6Avbh;ZP{Ef14arV!^q3$Xee$VZ!lv-KTjk7fRQ#b#$F@-QSh_oQ1|+qZh!gv!NtXF)Qjiy z9QAzOOXl;~aT{g&Q~CUjMxpG-#PWHoR?O$6-CnOhG)22yM%Wc}K|7b}TcYz6oP+o| zQ8@8H*cgvU*kk)8SwsH)?h9~s?3Y11yb_e%GGv86PJt0jSP?7wiDAX8_{X7KRCdo8 zcF&}jmThz4HaAMwwi(O1W`c_Pef4u0!$K z12vJ=6b@9QL;`1D|&xOI>V{Ot2y;BLDbg$NB5S?{`f& zZUB2Ec1Z4?C8HJkb>rLI9gJ2W58ikQ1ZvR>HB{LPWJBsWGVsgk8Kth_H5*pv@esck z#?J3}(d$^itB#WIBA-b9=%LsrLmjg zm+fo3f@XHU>Z~c>vHLT&r^FY2&PP2TfDnCnZtlXxg|mtiyJ>c2o6jb#U_aR2&Sp)| 
z$Uh2CS56jkv3YDaAfpd0WRz|3en>q7zZY?9E?DYBu~w;!W zRn$==z8{o4>N@<3*J&>!zFR0a>;;lo^)>?3J9PRP)nkEz7Qw)S&E=o>Lha9kLy`P0;?|2`7)h1>5!IsNnu9w?nd4A5)Rlznc7`M zK&IFUB)y5BGY%&c41+dhjCg1-C{)@Q2&F;aNUOr1KQ{H+i%wld&TL%w}Sm=>@pS~wip$`zj;rd-pT2i5b$Y?|{^b6^b${`jMCDZ2c7b?rP1rDR35Fs?B z9iw?YBJE7dF`&X=?U0zMP3~9rd^8uTwOf7 zmqEeOfV@uQejyc6AEJfot8Bj}5PFDO4De|K{c4#G2jEF`fs%*fJZ+A(``I3W{}xps zsTZoiGPpXOG}dIYixxl{APum%{)40gvi>7T8^QXI;t84nN)hqtz+vPCu?OK#3LTI< z>v3@L;N!W*Te0;7Ya&mt-$4mbqdJeAeaM*+O3)8VfM0f?@j9B;`5v>Td;v&L+aF;2 zGkzg4P&NwDMBu5#8b7>zAO9P=W~mBhPK$@?0F$o3@r>zQ>^%s!paW`+q1*&i3C>kj zhWjYea95^Y%0jrb$uuz?m%XaNS(`L(+f3V}!?(wlX%7PV=L`mSvkO3sLb-??72!H?$H>L(s2#_yftdwKDhPklirbJ7 zIh*j7($XM8(pKNyh=gPi3)#<)-(wBpeF*Pi#P(Wm37QhVV|4$^rLD_SZm->E_lyBi zu`|}7omq(s^D=Jj|F}2Tk9HkEO~dxUO4xeJGCxKsxjptELWWj?*3;HO2^qHDww|$` z{WzQ(v94HeO9{_ehfu<(^}IFlaUeHlWhKX9>j+ZqwI)&O6n@k80LntG)=_H)G5hc~ zj*^dA$MLowEj)mbr%?L|>m))<>y$lYy?~rA;`fr|{E{_`n5TEgFz-szdf7US*n^$e zVcp-AsP&5VDq^0oUPFnmqcqkvhq~g{8EYOf&-%4LhuY6t3kW%cwmff70F4@335^BN z&U4lq2+vyQk$>3kLCOo(ml1N{N<9z?n7Y7M7-t9e>GO zs20@*;4#bm^3q$2=G3d^+|m*U6$7e!ynD~+Do)D0Y8Th4r6LA>sa6K>fw7hFDFLG( zwNcjB2-J>7>UJ}-c0@+poFG=aS$vr(6LOhr|O*iyDY866sK$;kH8B|@;b9$RG3iL-AnoSA$3ow>P_Z!FH8oj+50`^__} zZ_h8kt>(Xc=1QgTl}jgAzg)7;R*Loa&N|L&vf8MeFIDF+o}XJ?I=S4q)? 
zm)~u?RBW8PG;{HYy;gt8d7;#N^Vs$E+b7<7>+<=d*WYX2blgj)-0L@1zkGdhV{zf; zdCUHC`Gw@0YfDEioLjhY>t^o8>hhWTn~hr+&d$9(cP1%4Tv$B2yVK1}U!8bucFlF` z&TIH8@ZY+1>)LeowL+UhS(-pqHGR4^4Kd-UtbwB+eD&yuM~-G+n|ST=>5}#8HS0+B zvAq&Iq~^!S{klM9?-1P&F6Lsj>#X&p8bU-DGazgMIRdYTtHcM)|2_g7mK6^+`v~TB z#WkOi=A-pr&}KF{8&of|(4+&B=jPY!avg#X(etYSI6D}}5($N(gBm%X=!P-T0_k!kYJO*V`Kx{e;j4X+7H^omge3P3af7=W>uHPju%ph2Ocdt3AIJNY?8luN(OB3NFgry1i{L)hXuFN8rT zl2LE}(a#^Y%YZ{16cBwN9{@}r$Qyv_0Y0?Apyxvtnv#c74lY!3aHDSQPGBqEG6aAQ ztS7dT7Mgl5h$n>pdn|`SdTPCQD~%`P!;JUS`*Bv}G}LyOn9)TqN@@hLsJC-Rr78Q zY9_B&*(-qf*8JCugywCcj6w_rRnd$pS!^$a%G58jFp-A;FvFsaI!K6Vv zP~W1{srgBw)sK+EVM$Tob=&_^!O`F-@VaJ-pQ(+!ctN+=k@S#9Fc*HLdHkGna9V*a ztZ8UI4A*D@Z5ad4yBQQeI4bx<;tz{I3|uKPh|g+82LmI}sEh=x*dT{J-aI_7w=whl z`PoaCX0f9aS{8l@`&y+`E|(mO7r}Q0o)KAto&cJ)N~zi)whfh>6x&6Dg!=U5+YP{%%2%BKfHE9vn3ACs zz|NF0YBcxAVC_t|W4zeH&Ug*t#Ul13IFv|?!B3#=$c7f1u`^((faRg-y>pE&wyc@{ z%G8|9pQ)>yKUjHx1m)my9*^CP?tw~f4aAL;^(TlIp4#fgGwrX_zO9TUYAI301=jmn8-O`OE9bJw z#&I1xO>{jG~TIa4+QW(O}9*;>Uj({2K^1M>+4Gl(on? z)c)g~;;zpy=g-0+xV!~2fjL3ggtO}DCScR85LJ&*E`k(5Wf^vkbY;a5f{fuC*veX>)SCFVZ z5o5(#?K*@SJ>S7tIrUlvZHBJ)lp7Q&8k5Hs*_@TjVbewvJ6 zs0>Z^yXV4hB2|$?)c)i8vx#6ua_l8yw z+c;hI> z99*{O@q&5@2KMsGgl~CVkCDk|F6zWcF_2T0(N6%(TLXw@3 z-pzEYr`3BssD92(w^A(-%n_tTt$mw)-wbT^p{+f-7p*?s1Ag!uH*dU*`Z88eD~kFu zZhtF-9;U8G)Sq(Gs54{rk_QxDg1%0<30*Ems_%}F`WxTB@iJ>d-((D>{FmJU!6AW? 
zejIOw&!PmdCS(KlsK4FnPhsn55na!Skaop};G?QGP_Kr~J-^%$)qt;n4S4WFpicnF zA%r>8S3P6KGl1F69Jf4ULH!KAZ!_|NIr-rc&zMqwoUiGxE=^FtT@d%#M)r&nPKce}1$UFY29%nW|CH_&`mxKMDnAkI49^l6b@oL8qiS+av7jHv&S)e#Ga zd_w`IPLWbXe9f-}!G(UG(GeXR0hEX(5prx73cG?^T=PPeLfwn5s#>FtjZDER&h|nD z%kp9?rK&}NG*pJ56Dm|UL>2)nB+K=GW7YpVoxjTpl0~3bCCV|5`gI1t21L0z7-Ha& zuLDJ|LSqd^ox6=9t$v2dZdl&=e=nI6J_$clxt?}f5( zd#uiIEXh%&u+2mxDdQ06MfmrJV^Jdxqlb)mOglA`0qIH@Tx(f*kp|D3I^Sl+k7a$w61$(u7 zhMFJ^!TtpnTJ|xp&!8Z&P3o_u9HaxIycualoENMFp1Pz+yKKKi+Qmsq=PFWwbc1Rb zWZb#h+{7x@*HU1WnsCsXu>jWpJD@H}*oj4`vA75NwV41V=>cUcyD3_-p&h-TG)xg* z1)0sF$XH3RvaiYb^fg*ZfL0kNO3@1{4S+S>V?@!t#K1Q(bgm7=P#T!!COk3JmZ)U zH&NcHzmKe5RBwUm@8LxNGa-gB4Gop7Jo_T1L>g@Pw@mZzI|+Xiz8%vZ6FopagNx2d z!NKQ~X}EXPjHp8uoNUQt~Q_|5OM(DIbFog zPmH2M5PGshUa!B>^>@8}xh%~hzfK?JAEA@#AJO+~bpA0M89P|_ zdJ!Rj>R(A%0YtKDsS&1nn;~HbC?jAYA$g4Th91l7xcXUgC?iuiO0reFBAjlfc%gUR0u{x~xnH=604 zUt|Y&xeryu@e`)$$M}SVj6~Kl)-|E)CV${DLH-0$0QrMys*t}h6zM+EBVi+EU;!&| z4nr1%K`jh=Bn;#L+-Lno%rjkPa*PJRAb{&&m%y6noT;%u38d<7P|A+AVkA#6=8af= zV*wH-l<{GwjQ%ki4eNF#T;PW=wC%A5tUZ4aobhH7Oi!u>qGt_$LSo%(!SEJDFTLJ} z5;FST?Y{#>A*9h%E44My>T&nbT$hB;9fG-SFY=8@J|dLBP+AG7c=>PC8v7({?QNye z-hFpN(%wFI^iE(i)9OP@#|7z(So`_h?6(e3sf*v!pM<{!;%y!LzVR(>>rHiTyw$%o zb`PW+qKk$qlJ4~dz{~P$E^t&)jd`geH{E|pf!jm5up8B z2du-bLCDWXT0?Lrzb6LLh9Ip*g%)*6v|N4x9YdIt3(b!((G+8P56 z9NFC4+KW2RwleExYp-?OIw3|Gy6q=h$gMZI?-(~=c-`u|89?eaFthmj5CWgIPMy}R ze9n47hhn5(yob%vda3jNCBFOEL161ps}FO)Xl3q&(ZkI8^WQO?+Ze@J8O03P(w7^5 z1ZzOFP$vF%?LdI$!3CLi70AMa`&wPPQJ_UVLPgW)Xe%D(z1RseLEqiBDWPLl3V;@1 zyvi_YB^6_-TYPZeF-e~ov#nj+E>U*1TyMQx*sv8Xd{%gzK^dSBxu_2HVl}aSh5Z(> zid9T3B`U#lP99RAAG zWogIM6)i7o*i(c@v`OG^7_1RVk4QBla_3ThF|_D)79Ua)20v|F_Y7Bk9WU4tV*WV~ zKynZY)L&qdaIObJx&0pWy#A6O<}i!CwyE;J(_S_>hwS0g|gYO7x7IS=jWH!K-6; zsCNk!$)qjSBvd10>G!J$tI?V>U_t;MI9j!!Gq43g5itMcbwK)0)GV6-+SHhCf;Lmzy;o1^2K9FGVfZ=FydJ3C+D6;W zJ@3*2z_;gx+(QF=^&eR&$`k{10)yH>0BpvrdG$UlxoO3ouT{$%&>F~z7SXPoetsag z`pl2)@XdWou$Dx-{Xk#AjpeOsuXri9c!9npibanSlb{=fB46=(-Qt3HC8qg2hCr`G 
zUy*;y23Ol{;;(DZPN+YJ)@v@1+>v@jhhI^^X#t?TzRR|fBL=mqHuC&yhzB)j(2|qx zNiVc!-}WL_qXM}wD)wWxB3|O{;rWPs-LB;8jpcHwD3Eme%F_uT}Mu2jRDI+j2qt1?O7g2Pue~yOG zh@jbR!u4x!tUL4=v^1_#$mc8x4>X5U06*GG`lR|t`5^!!rDT}Jh^$n|xoR$h~_ z{=!Y1=AcCjBy2g2l}qb^-G0Y<L|{A!)|Dp|!k>o2!QYQY2Mt&gA`h6_-;c+KampbIu|N7*7@%n!xHA+V z2$6WCh{lo-{2AUI!`SdoqH)*~8{veG7#j|)_3+>t#dPxC5VhmrW8>=?{QB`5z;6$JgZK?u z@w+KN06`ow8?qAMnG)}5{mt-314q7+IP$eeS}+1=0WARkje~NUXL=9%9Lv~DejVC+ z?hN$Faj#V41iIYK9u-XCj!4NK+^>N_0<2T2LoshoIJAq9`IDVq^c5vSwm`9Euj6S= z*$HGmYKraXlU|~fdEyo(;_5hBfyM{wI75J^Q3t+@XRw~KjFk`=y5OR0BiLk5aiX5& z7mY0gH=thLP=gHYY{mLG3U{&z2tcyOjt82DxX11$NsmjjvY%yB>OUa6)-QZS-vVpQ zbnQ!dnNQ~!oP_xrth(#iM5JZXMlZBNx-58IUI{(pJZv7=KDq_+*u7)O7(EK&6xXP? zhZLz+hNZo~oI!`w02<=OsRAWglA+Ime)*tk>}hYh;nG=NZqQsIzp0s>CZN8ZQYsO|6w3usqe$3M|EP zNxu1HFS(2my&L4xnqXCHn&zvwSlzDAcLJr4@bjHTWRQ9(&=ZQsJS7EM5#oFFvtX2B zbQqL^!gBY&=#P_q(O?>O7HNGVYVb)DV25E0QA6&8^Y}S`84Yl$YoO6e5ZJh&bPwv(#Pu-VSZ6PmGID7Fs(O)PE?Y#@U!Q3Ox|i`!wX&}5Q?0OPhHmYqlmg*d zY8}Ms);(>K05UR&97DwGbnE?g{A^b{0_vCj5{9>l+T%IhA!|fx7;S}Eekdn;GQ_Q1%Lfj5CexBbGGMxO0g(!3p?PJrlRcPxia4KwWhZX6zw06-y zWsTX}mi^32a~Bq`E-cP1UiC%4f-m^}RJpHt=nK!k`Ye#9u8X+=Z6Cp%+K+Mcfq`69 z@85E~K&_&CVu8~Qdmx z2EY;h_j)#_3F|eyYidNos@|X@v@{MLyjr!(PIHgn23mB`OixN4-=>~B#!OQHs?-e>!-Kf{-yiRA14pBTgP^7qR zd69XJB`nbA=2aJIWg-$!oZ9*#x~T!ux6nL_!d0BusMK?5{e@|^OB02h&%*KOs#;{B zbL`HODCP@@m?y zW`LGM2o(FNqcm&rjIV)TgS91XDVuw@SCr3km}~-9n=E;-Lc=UxQ2W15A3-58QBnT{ zKB(_07&}C8e9@H$1z0|2=!lBb{I{n zLs-|H5}78KV3?e4@)GQh2I*nvi38s4UOjPrTtu8f*;9>fNBvk ziIIjJGa!~RlxHyeKaWPzpJF8hgP%r2UHrs|@Doqq(@wMfFVT4g&JJ;dh!R%|FvZN1*eM+%(iMW;)G4Wd2yVGQj+xW5db`!(eDJC)X+sBtRTl2 z!hC>Thry)alf282;qV5DJ5(1T|NAbbjzKF~I9^<#~tTWNstDTp~~-hD9= z*aR3%D}`_2#O^n-*M|bv4fTsw-*vQE0&~q6ZpTX>G+D^gwv(nx{YfpDBE5dxtwKZ%FqF1zLV6TSIm;UW>fr??I)sOfAceFL& zj@=15|BqkZFk$@dl%O~KDshHcBe3E4camH*S6c0%Kw7%zrsr>MRI(55#KlB+W9nhP0sP$}XcbUj%c9`=gt%>b7b>GS&1-W&8)x&H@NA++P>WU#zJ^W2S{s>hQP^id#EYi-Qt%EK3 za^UXKE*LC6i{EqiLQsi}Z5^_vFemoHGiRD3S9wo^HWa{y=uC-ifHSRd(N_Rx&)X0n%w2Gu%CMl$`q(X? 
zi8hS3n9IDlQYaof_Toz~otP=E*fT}-736R3TWBM7oikR@`BYckwOR8ax8Y{bg?Bq4 z&7nzC-%!P)w4Go@2Ik@C_|w|_7)w5I!RLPop}uI=fisf}Zx_A;`M?$=^qkCod6e9R+W$6+`Ul+F!tew?0&bs`&FL}dvkxH(` zdAd17q3m2qL98N}Z1PAD?!eg{Oojl#$;G#>nsesa3-ec-BfD}s1zx0p%Kj=VOU&bt znidv@w;P4y44p1{W?*kHw?PZ9+N^-$ZICDq5dF}`%YRIhV7w5KmjT=d;$C~9D?4A#Zk&{obk*Jq)( zlcwY6X(rC%glKIQ^)&bRSF<4r&tS?izGemkt`nHJiV4QfFxcP$P4I^%w1)U7yAwR! z?9=0p#+$baGQ10yE?<4$ym(>hYBP*hYSHZ+PP_;V)?%+dq9ZUEtvQN-{X>qmc2$AA zfXQ6)SEkl+U`)ms9QWIlZSJ_Sh^(O;Cq@4cZqF@(wdTZ%%!yMoGgQYYCyQG((h#up z{L7Q$EB%;d_i{>{U6{Y}{^hF+XPf)C`B;C}cZM51%pTVatZ<<;O7bH5uB8~^G$>Lu zlPgv!huP5|(Vn3`sZutqOfxaO$@ksZu)wP@oU0#=QjUDGn_F{TT1)@l*-pR(qoer70^)bFr{ zlne(C%)pFYOOyYe;Srgqa++S>MsS5W`|*vSsv_4vSFW!WZl2Wg>TyJ>98>5sSdPf8 zUV;FEO)ajM#0Q6!3*@|F_JHQzw|QJDV1&_yK92?V8FH%GV&_u(|_4)GxVg}bA?Sa@Tcty7p#&Upi2*M22Rt$zcHPlYN@R940<%L&} z056WLVh^PyjawW-0WeAFHU9h}eE$n+^8bp44MNzUK%R!!F$e|5aFD8$Bt#7}IF2~j z06~R9U^9&;#qYu3u#pTU@dg`n!k(~$ggONOreO^k{7Asi;JB0tp≶8{<%-j3ea$ z1pFlGp*w&w2IWMFxl5#vK1wGJ7}5W0;KYFO5+G8XHG#NufMDsT&EY5SA3JP~IO6;X zT>2w)mnTlBB^L%vgc_PsI)h*UAiB^%8*C9kI+$0&!UyaWgn#(L@WJ{BK3KOvvqB$4 z2?*~xAFc-ivjvMGxCyyF1fo2c6Bu5CwfQ!#{@4T@kOTy9xfR@s;6Q;~`FB^>r_0bM zn9Qm>+J!43cBUR+dwgiNuT$^tR3d`3qUtYpm4?3I zl8K$A4Y6N--LW01VPW3Fe&`p)t*r0=&==YI5V>fklH^LX#|7 z2;jut7Oq~>bz!9Tm8n;fQ34DCQ+1B2uxR@><6n=n6ia*ps*X4Q4nA}TRGo(bjpo*< zJEt8d&c1Ge|G|}@*hlf!KCb789bqTql(|$gyEW??y0o|NKHx6@c&evgF%Qnn%*uZU zXU)PoP!k;FJ9t^uWS2b1>e~rAwdu0v+KJzSc@FktN!#6qFG}y<9on59n*UB}wzA3_ zBRZ|=&hS`c{7!Y*4tzo1JMGm;O|diZ8_|`!(~e!e^mE~{BM;s?ITY@;#jQW!hq_H4 zwmE6_6ykG9e~ge*J*Pozp6~u5N{8HFod~y`)k2y zD8UJWU9eIY16Y{CkecHj3;jJi4a02{?Lu-f{{uG1bYlOtJFOUpw&VTJ(H-BM)LEYp z)vkstq3%hxnL8IxKjPFQ`knWTXauh%Mzh{F%>|=4?Fd+U}DjdZQh$CyC4TQrmL1_E27sA|t z;i(@h1i75{UJ%z6^-}1z(mEZf zv&8m+_Gj)!MI_uO=Og>|kvKVS{BRKMk`)EL_)p*mXUSVWyl8D@1t&;=4F3{BVZaQ7 zY)7j*gj@Rt)5e@eg*GLE606Hxria@$^Z#vQd6)oIcsa8#A;|QJz=*{zmn}}2gNP&(#ACAB4kZ(T zi8PoWus;aH&6tmH(_E5l42-#HLY^p9alN~g?erbx$^Y}Sb9TAd+LN?kGs}BGd z_{y+!iv3D>{{O}LElV_wB< 
z0Mh(hqBsds6^+pWXbiqBE&d+yvwmx6$e%&mi_y6^e>7T!;zT4+RdVzUL2>< zA&1!~8J~2kx0MzMJl;yeJ{%T51l9pb_gPu+vHu6`y>VKa@Fak^%qDovt%Nnz${=jI z)erY*3l~+ls07!?aA>tn#nTK9>qLYPJO&>Q?IG=P-)cPKd@iYatrMNNlRkurQHB)~ z_}mdFzOtv9hoDH}Jx5fSgNy6J;`BXGFrEg?FkQ%E>U^*y)-UD-Sp9vN)Gr$Uxp{JW zX68upJyobLxf=v&W|Fk(y<$6G!Tl}AU+TQRKWn~td2k)Ldf(;nOI3Fg@iA0Kx5IWLH^BfdJ)Rt-=FBYBz2O%bN^WY;IYSLVS2oIZV zwb2fEtKq`_&ThY+fuP&**UGq?`U36=D-|crcVIcIm=uNJ2;X%ZIpJkXzv4gN$Fp82 zVlSM;U1O_dB-MS8=1!V^T}UMzLZULRh2bskljbD|oTci;w&zOQ^X???-jYiJlm6hH z)mP|G&heK0$x}0vhtAL7Qf>b2_Rh$kg!~^fsFbG}ybTALQ@RTsLXnb<3=1E~*G8}8Zrso+LbXch31|q2G zaVdKH(v<1>d2^!3>nZfDCR`}E75z9EwbBZYpJ(;8k6UP>x<}{#r1L#Gn{Z%6_iK3I zR3~pk$M(SP^0wxY#;P96x`B~#kjwLy42K3YS@`Ujnj0@}?u zV1&4FFw}fbgnLn*YV2qm7VgSR`+9uvt0+dZ?XVWMImtFL}|_|=IIzIyt@!`ahEmtSqB zzjp1~^0Ccp$M~GtM55BJL?xZ5agqQ*)vYVvr5ma&B!50 z8ZSRG4mG2vng3NB#HcyAqLZK))}$OV7UN%A-kYJXT`PFTW;4~nVa=JzN5*8cf3m$C zW$DaJPBxQ1(Ig{3+<_l`3{+pnhXEnZ)O@oGgKj4vB2R-x3vw^i8*ImY0_T%T_;3fnDvc-yxJBTdxF_Hni#jbPR z!<8Oj&vC*!j{Tv*twK0t@b+>t$V>6+5)Dqme*QbGfP~cxFV|{iRp2{mi->@7H$Ln^ z+znM{)jwp!pQH0<==>I)2Xy`w9V)_9km*L~qrtyA!&jPQDq0ySno=kNoLUI|S^B^T z;5-c-iXw_^s)B(-K#{J?XZhkwo9`gWZ{jB(jh8luhr_WTx2b!Zx1DWZ#{x+ut&DWQO z2m}5bY}PYu`DysPIIXX!HBk+|Mxl=ajn%#4!0K*Dhq(Rl#%_15Tw0ckslCKXqlzoH zYh_1qV^a4~Ztj36E$}}jDAdEhVCd7d+y}Y2iDURYtdYz9^hDDIXQP7Yp-M%4 zNMrm->50;Fb!$b$p(iETu6J7F=+3q}Lu)b;#Js8Ja6~WQr91!k1Kj9`dkQk__R!8i z#PIlcR5`6%gf^P2dNco@ph*d^jC(u(4bt`feCMHJ9Rq(Tb0PE0;4}Xf Dd~gCr literal 0 HcmV?d00001 diff --git a/ADN/config.yaml b/ADN/config.yaml new file mode 100644 index 0000000..3e77996 --- /dev/null +++ b/ADN/config.yaml @@ -0,0 +1,29 @@ +# Animation Digital Network API Configuration + +# Endpoints API +endpoints: + # Authentification + login: "https://gw.api.animationdigitalnetwork.com/authentication/login" + refresh: "https://gw.api.animationdigitalnetwork.com/authentication/refresh" + +# Catalogue + search: "https://gw.api.animationdigitalnetwork.com/show/catalog" + show: 
"https://gw.api.animationdigitalnetwork.com/video/show/{show_id}?maxAgeCategory=18&limit=-1&order=asc"
  seasons: "https://gw.api.animationdigitalnetwork.com/video/show/{show_id}/seasons?maxAgeCategory=18&order=asc"

# Player & playback
  player_config: "https://gw.api.animationdigitalnetwork.com/player/video/{video_id}/configuration"
  player_refresh: "https://gw.api.animationdigitalnetwork.com/player/refresh/token"
  player_links: "https://gw.api.animationdigitalnetwork.com/player/video/{video_id}/link"

# Default headers
headers:
  User-Agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
  Origin: "https://animationdigitalnetwork.com"
  Referer: "https://animationdigitalnetwork.com/"
  Content-Type: "application/json"
  X-Target-Distribution: "fr"

# Parameters
params:
  locale: "fr"