340 lines
12 KiB
Python
340 lines
12 KiB
Python
from __future__ import annotations
|
|
|
|
import click
|
|
import json
|
|
import re
|
|
import time
|
|
|
|
from pathlib import Path
|
|
from typing import Any, Optional
|
|
from http.cookiejar import CookieJar
|
|
|
|
from devine.core.config import config
|
|
from devine.core.utils.collections import as_list
|
|
|
|
|
|
from devine.core.credential import Credential
|
|
from devine.core.manifests import DASH
|
|
from devine.core.service import Service
|
|
from devine.core.titles import Episode, Movies, Series, Title_T, Titles_T
|
|
from devine.core.tracks import Chapter, Tracks
|
|
|
|
|
|
class UNEXT(Service):
    """
    Service code for the Unext streaming service (https://video.unext.jp/).

    \b
    Authorization: Cookies

    \b
    Note: - No jp proxies needed - amazing!
    """

    # Names this service can be selected by on the command line.
    ALIASES = ["UNXT", "UNEXT", "U-NEXT", "unxt", "unext", "u-next"]
    # No geofence entries: reachable without a JP proxy (see docstring note).
    GEOFENCE = []
    # Title/episode URL matcher. Named groups: category ("title/", "?td=" or
    # "episode/"), id (the title code), and an optional epsid ("E..." episode code).
    TITLE_RE = [
        r"^https?://(?:www\.)?video\.unext\.jp/(?P<category>title/|\?td=|episode/)?(?P<id>[a-zA-Z0-9-]+)(?:/)?(?P<epsid>E[\d\w]+)?"
    ]
|
|
|
|
@staticmethod
@click.command(name="UNEXT", short_help="https://video.unext.jp/")
@click.argument("title", type=str)
@click.option("-l", "--lang", default="ja", help="Specify language")
@click.option("-ft", "--force-title", required=False, help="Force using this title as titlename.")
@click.option("-nt", "--notitle", is_flag=True, default=False, help="Dont grab episode title.")
@click.pass_context
def cli(ctx: click.Context, **kwargs: Any) -> "UNEXT":
    # Click entry point: forwards all parsed CLI options straight into the
    # service constructor. (Comment, not docstring, so click help is unchanged.)
    return UNEXT(ctx, **kwargs)
|
|
|
|
def __init__(self, ctx, title: str, lang: str, force_title: str, notitle: bool):
    """Parse the title argument and store the per-download options."""
    super().__init__(ctx)
    # parse_title() may return None or a dict of regex groups; normalise to a dict
    # so the lookups below are uniform.
    parsed = self.parse_title(ctx, title) or {}
    self.category = parsed.get("category") or "regular"
    self.epsid = parsed.get("epsid") or None
    self.lang = lang
    self.forcetitle = force_title
    self.notitle = notitle
    self.profile = ctx.obj.profile

    # Endroll start position: filled by get_tracks(), consumed by get_chapters().
    self.endroll = None
|
|
|
|
def get_titles(self):
    """
    Fetch title metadata and the episode list from the UNEXT GraphQL API.

    Returns a list containing either a Movies (VOD_SINGLE) or a Series
    (VOD_MULTI) object, or None when an API call fails.
    """
    # First API call for title stage data
    resp = self.session.post(
        url=self.config["endpoints"]["cc"],
        headers={"Content-Type": "application/json"},
        json={
            "operationName": "cosmo_getVideoTitle",
            "variables": {"code": self.title},
            # queries are stored with literal "\n" sequences; unescape them here
            "query": self.config["queries"]["vidtitle"].replace(r"\n", "\n"),
        },
    )
    try:
        stage_data = resp.json()["data"]["webfront_title_stage"]
    except Exception:
        # "Token expired" means the cached auth is stale; give up quietly.
        if "Token expired" in resp.text:
            return None
        self.log.error(f" - Got an error!, resp: {resp.text}")
        return None

    if stage_data.get("hasSubtitle"):
        self.log.warn(" - This title probably has hardcoded subs if no subs available in the manifest")

    # Second API call for episodes
    res2 = self.session.post(
        url=self.config["endpoints"]["cc"],
        headers={"Content-Type": "application/json"},
        json={
            "operationName": "cosmo_getTitle",
            "variables": {
                "id": self.title,
                "episodeCode": stage_data["currentEpisode"]["id"],
                "episodePageSize": 1000,  # oversized page: fetch every episode in one request
                "episodePage": 1,
            },
            "query": self.config["queries"]["title"].replace(r"\n", "\n"),
        },
    )
    try:
        data2 = res2.json()["data"]
        self.log.debug(data2)
    except Exception:
        self.log.error(f" - Got an error!, resp: {res2.text}")
        return None

    title_stage = data2["webfront_title_stage"]
    episodes = data2["webfront_title_titleEpisodes"]["episodes"]
    title_ = stage_data["titleName"].strip()
    publish_style = title_stage.get("publishStyleCode", "")
    titles = []

    if publish_style == "VOD_SINGLE":
        # Single-video title (movie/special): exactly one playable entry.
        ep = episodes[0]
        # "予告編" is Japanese for "trailer"; trailers get number 0.
        if ep.get("displayNo", "") == "予告編":
            eps_num = 0
        else:
            num_match = re.search(r"\d+", ep.get("displayNo", "")) or re.search(r"\d+", ep.get("episodeName", ""))
            eps_num = int(num_match.group(0)) if num_match else 0
        # "最終話" ("final episode") has no number of its own; bump past the parsed one.
        if ep.get("displayNo", "") == "最終話":
            eps_num += 1

        eps_name = (
            None
            if self.notitle
            else ("Trailer" if ep.get("displayNo", "") == "予告編" else ep.get("episodeName", "").strip())
        )
        # NOTE(review): Movies is constructed here with Movie-style kwargs
        # directly — confirm this matches the devine Movies/Movie API in use.
        titles.append(
            Movies(
                id_=ep["id"],
                title=self.forcetitle if self.forcetitle else title_,
                year=stage_data.get("productionYear", 0),
                number=eps_num,
                name=eps_name,
                language=self.lang,
                service=self.__class__,
                data=ep,
            )
        )
    elif publish_style == "VOD_MULTI":
        # Multi-episode title (series): build one Episode per entry.
        episode_objs = []
        last_eps = []  # episode numbers seen so far, used to place the final episode
        for ep in episodes:
            # Compute eps_num from displayNo or episodeName
            if ep.get("displayNo", "") == "予告編":
                eps_num = 0
            else:
                num_match = re.search(r"\d+", ep.get("displayNo", "")) or re.search(
                    r"\d+", ep.get("episodeName", "")
                )
                eps_num = int(num_match.group(0)) if num_match else 0
            last_eps.append(eps_num)
            # The unnumbered "最終話" (final episode) slots in after the highest
            # number seen so far.
            if ep.get("displayNo", "") == "最終話":
                eps_num = max(last_eps) + 1

            # Extract and clean episode name: an embedded "-...-" span is
            # treated as an episode class/edition tag and stripped from the name.
            ep_name = ep.get("episodeName", "")
            match = re.search(r"-(.*?)-", ep_name)
            ep_class = match.group(1) if match else ""
            clean_name = re.sub(r"\s*-\s*.*?\s*-\s*", "", ep_name)

            episode_id = ep["id"]
            # NOTE(review): short IDs are padded with a magic "5349" suffix —
            # the reason is not visible in this file; confirm against the
            # service's ID scheme before changing.
            if len(episode_id) < 4:
                episode_id += "5349"

            episode_obj = Episode(
                id_=episode_id,
                title=title_,
                season="1",
                number=eps_num,
                name=clean_name,
                language=self.lang,
                service=self.__class__,
                data=ep,
            )
            # Stash the parsed class/edition tag on the object for later use.
            episode_obj.episode_class = ep_class

            episode_objs.append(episode_obj)
        titles.append(Series(episode_objs))
    else:
        raise NotImplementedError("The current title category is not implemented yet!")

    return titles
|
|
|
|
def get_tracks(self, title: Title_T) -> Tracks:
    """
    Request the playlist for *title* and return the DASH tracks.

    Side effects: stores the play token (needed by get_widevine_license) and
    the endroll start position (consumed by get_chapters) on the instance.
    """
    tracks = Tracks()
    res = self.session.post(
        url=self.config["endpoints"]["cc"],
        headers={"Content-Type": "application/json"},
        json={
            "operationName": "cosmo_getPlaylistUrl",
            "variables": {
                "code": title.id,
                "playMode": "caption",
                "bitrateLow": 192,
                "bitrateHigh": None,
                "validationOnly": False,
            },
            "query": self.config["queries"]["playlist"].replace(r"\n", "\n"),
        },
    )
    try:
        data = res.json()["data"]["webfront_playlistUrl"]
        self.log.debug(data)
    except Exception:
        # Fix: previously execution fell through here and crashed below with
        # an UnboundLocalError on `data`; bail out with no tracks instead.
        self.log.error(f" - Got an error!, resp: {res.text}")
        return tracks

    # Known error statuses are logged but, as before, processing continues.
    # NOTE(review): for 476 ("needs rent") a playToken may not be present —
    # confirm whether these should return early.
    if data["resultStatus"] == 476:
        self.log.error(" - This title need a rent! No playback returned.")
    elif data["resultStatus"] == 462:
        self.log.error(
            " - Playing on another device. Playback on multiple devices at the same time is not possible. (462)"
        )

    self.play_token = data["playToken"]
    # Pick the DASH profile out of the returned URL info and attach the play token.
    manifest_url = next(x["playlistUrl"] for x in data["urlInfo"][0]["movieProfile"] if x["type"] == "DASH")
    manifest_url += f"&play_token={self.play_token}"
    self.log.debug(manifest_url)
    self.log.info(f"MPD: {manifest_url}")

    # Endroll position, used later by get_chapters().
    self.endroll = data["urlInfo"][0].get("endrollStartPosition")

    tracks.add(
        DASH.from_url(url=manifest_url, session=self.session, source=self.ALIASES[0]).to_tracks(title.language)
    )

    # Strip the play token from the track URLs; it is only valid for the manifest.
    for track in tracks:
        track.needs_proxy = False
        if isinstance(track.url, list):
            track.url = [re.sub(r"(\?|\&)play_token=.*", "", x) for x in track.url]
        else:
            track.url = re.sub(r"(\?|\&)play_token=.*", "", track.url)
    return tracks
|
|
|
|
def get_chapters(self, title: Title_T) -> list[Chapter]:
    """
    Build chapters from the endroll position captured by get_tracks().

    Returns a "Start" chapter plus "The End" at the endroll start position,
    or an empty list when no endroll position is known.
    """
    # No endroll info (or get_tracks not run yet): no chapters to offer.
    if not self.endroll:
        return []

    def to_hms(seconds) -> str:
        # Convert a seconds value to a zero-padded HH:MM:SS timecode.
        total = int(seconds)
        h, rem = divmod(total, 3600)
        m, s = divmod(rem, 60)
        return "{:02}:{:02}:{:02}".format(h, m, s)

    return [
        Chapter(number=1, title="Start", timecode="00:00:00.000"),
        Chapter(number=2, title="The End", timecode=to_hms(self.endroll)),
    ]
|
|
|
|
def get_widevine_service_certificate(self, **_: Any) -> None:
    # UNEXT does not use a Widevine service certificate; license challenges
    # are sent unprivacy-protected (see get_widevine_license).
    return None
|
|
|
|
def get_widevine_license(self, *, challenge: bytes) -> bytes:
    """
    Forward the Widevine *challenge* to the UNEXT license endpoint and return
    the raw license response body.

    Requires self.play_token, which is set by get_tracks().
    """
    return self.session.post(
        url=self.config["endpoints"]["license"], params={"play_token": self.play_token}, data=challenge
    ).content
|
|
|
|
# Service specific functions
|
|
|
|
def authenticate(
    self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None
) -> Optional[str]:
    """
    Prepare the session for authenticated requests.

    UNEXT authorization is cookie-based (see class docstring); *credential* is
    accepted for interface compatibility but unused.

    Raises:
        EnvironmentError: if no cookies were provided.
    """
    if not cookies:
        # Fix: cookies=None previously crashed inside session.cookies.update()
        # with an opaque TypeError; fail early with a clear message instead.
        raise EnvironmentError("Service requires Cookies for Authentication.")
    self.session.headers.update(
        {
            "User-Agent": "ATVE/6.2.0 Android/10 build/6A226 maker/Google model/Chromecast FW/QTS2.200918.0337115981",
            "X-Forwarded-For": "103.140.112.106",
            "Origin": "https://video.unext.jp",
            "Referer": "https://video.unext.jp/",
        }
    )
    self.session.cookies.update(cookies)
|
|
|
|
def get_token(self) -> str:
    """
    Return a valid `_at` auth token.

    Uses the on-disk cache while it is fresh; otherwise refreshes the token
    from the site and re-caches it via save_token().
    """
    # config.directories.cache is a Path, so plain `/` composition suffices
    # (the previous Path(...) wrapper was redundant).
    token_cache_path = config.directories.cache / self.__class__.__name__ / "token.json"
    if token_cache_path.is_file():
        token = json.loads(token_cache_path.read_text(encoding="utf-8"))
        if token.get("exp", 0) > int(time.time()):
            # not expired, lets use
            self.log.info(" + Using cached token...")
            return token["_at"]
        # expired, refresh
        self.log.info(" + Refreshing and using cached auth tokens...")
    else:
        self.log.info(" + Caching new token...")
    return self.save_token(self.refresh(), token_cache_path)
|
|
|
|
@staticmethod
|
|
def save_token(token: str, to: Path) -> str:
|
|
data = {}
|
|
data["_at"] = token
|
|
data["exp"] = int(time.time()) + 10800 # add 3 hours from now to be safe
|
|
# lets cache the token
|
|
to.parent.mkdir(parents=True, exist_ok=True)
|
|
to.write_text(json.dumps(data), encoding="utf8")
|
|
# finally return the token
|
|
return token
|
|
|
|
def refresh(self) -> Optional[str]:
    """
    Fetch the home page and extract a fresh `_at` auth token from the
    Set-Cookie response header.

    Returns the token string, or None (after logging an error) when no token
    could be extracted.
    """
    res = self.session.get(
        url=self.config["endpoints"]["home"],
        headers={
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,"
            "image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        },
    )
    # Fix: headers.get("Set-Cookie") may be None when the header is absent,
    # which used to crash re.search(); default to an empty string instead.
    m = re.search(r"(?<=_at=)([\w\d\.\-]+)", res.headers.get("Set-Cookie") or "")
    if m:
        return m.group(1)
    self.log.error(f" - Failed to refresh token: {res.text}")
    return None
|
|
|
|
def parse_title(self, ctx, title):
    """
    Resolve the title argument (or the parent command's) against TITLE_RE.

    Sets self.title to the extracted ID (or the raw input when no pattern
    matches) and returns the regex group dict, or {} when nothing was parsed.
    """
    title = title or ctx.parent.params.get("title")
    if not title:
        # Fix: previously execution fell through after this error and crashed
        # in re.search(regex, None); return early instead.
        self.log.error(" - No title ID specified")
        return {}
    # Default added so a service without TITLE_RE doesn't raise AttributeError.
    if not getattr(self, "TITLE_RE", None):
        self.title = title
        return {}
    for regex in as_list(self.TITLE_RE):
        m = re.search(regex, title)
        if m:
            self.title = m.group("id")
            return m.groupdict()
    self.log.warning(f" - Unable to parse title ID {title!r}, using as-is")
    self.title = title
    # Consistent falsy return (was an implicit None); callers check `if m and ...`.
    return {}
|