]> git.gir.st - subscriptionfeed.git/blob - app/common/common.py
increase usefulness of logging of deleted videos
[subscriptionfeed.git] / app / common / common.py
1 import os
2 import re
3 import json
4 import base64
5 import sqlite3
6 import requests
7 import hmac, hashlib
8 import requests_cache
9 import dateutil.parser
10 from xml.etree import ElementTree
11 from configparser import ConfigParser
12 from datetime import datetime, timezone
13 from urllib.parse import parse_qs, urlparse
14
15 cf = ConfigParser()
16 config_filename = os.environ.get('YT_CONFIG', '/etc/yt/config.ini')
17 cf.read(config_filename)
18 if not 'global' in cf: # todo: full config check
19 raise Exception("Configuration file not found or empty")
20
21 # Note: currently expiring after 10 minutes. googlevideo-urls are valid for 5h59m, but this makes reddit very stale and premiere videos won't start. TODO: exipre when video is livestream/premiere/etc
22 requests_cache.install_cache(backend='memory', expire_after=10*60, allowable_codes=(200,), allowable_methods=('GET', 'HEAD', 'POST'))
23
24 # Note: requests-cache doesn't use redis expiry, so we need this in all backends:
25 # https://github.com/reclosedev/requests-cache/issues/58#issuecomment-164537971
26 # TODO: only run for long-running processes, i.e. the frontend
27 from threading import Timer
28 def purge_cache(sec):
29 requests_cache.remove_expired_responses()
30 t = Timer(sec, purge_cache, args=(sec,))
31 t.setDaemon(True)
32 t.start()
33 purge_cache(10*60)
34
35 # for debugging purposes, monkey patch requests session to store each requests-request in a flask-request's g object (url and response). we can then use a flask error_handler to include the request data in the error log.
36 # since we also call config from outside the flask appcontext, it is wrapped in a try-catch block.
37 from flask import g
38 import requests
39 from requests import Session as OriginalSession
40 class _NSASession(OriginalSession):
41 def request(self, method, url, params=None, data=None, json=None, **kwargs):
42 response = super(_NSASession, self).request(
43 method, url, params=params, data=data, json=json, **kwargs
44 )
45 try:
46 if 'api_requests' not in g:
47 g.api_requests = []
48 g.api_requests.append((url, params, json, response.text))
49 except RuntimeError: pass # not within flask (e.g. utils.py)
50 return response
51 requests.Session = requests.sessions.Session = _NSASession
52
53 class G:
54 """
55 null-coalescing version of dict.get() that also works on lists.
56
57 the | operator is overloaded to achieve similar looking code to jq(1) filters.
58 the first found key is used: dict(foo=1)|G('bar','foo') returns 1.
59 """
60 def __init__(self, *keys):
61 self.keys = keys
62 def __ror__(self, other):
63 for key in self.keys:
64 try: return other[key]
65 except: continue
66 return None
67 class _Text:
68 """ parses youtube's .runs[].text and .simpleText variants """
69 def __ror__(self, other): # Note: only returning runs[0], not concat'ing all!
70 return other|G('simpleText') or other|G('runs')|G(0)|G('text')
71 text = _Text()
72
73 def fetch_xml(feed_type, feed_id):
74 # TODO: handle requests.exceptions.ConnectionError
75 r = requests.get("https://www.youtube.com/feeds/videos.xml", {
76 feed_type: feed_id,
77 })
78 if not r.ok:
79 return None
80
81 return r.content
82
83 def parse_xml(xmldata):
84 ns = {
85 'atom':"http://www.w3.org/2005/Atom",
86 'yt': "http://www.youtube.com/xml/schemas/2015",
87 'media':"http://search.yahoo.com/mrss/",
88 'at': "http://purl.org/atompub/tombstones/1.0",
89 }
90
91 feed = ElementTree.fromstring(xmldata)
92
93 if feed.find('at:deleted-entry',ns):
94 del_entry = feed.find('at:deleted-entry',ns)
95 del_author = del_entry.find('at:by',ns)
96 _, _, vid = del_entry.get('ref').rpartition(':')
97 _, _, channel_id = del_author.find('atom:uri',ns).text.rpartition('/')
98 author = del_author.find('atom:name',ns).text
99 entry = [{
100 'deleted': True,
101 'video_id': vid,
102 'channel_id': channel_id,
103 'author': author,
104 }]
105 return None, None, entry, None, None
106
107 title = feed.find('atom:title',ns).text
108 author = feed.find('atom:author/atom:name',ns).text \
109 if feed.find('atom:author',ns) else None
110 # for /user/<> endpoint: find out UC-id:
111 # for playlists: this is who created the playlist:
112 try: channel_id = feed.find('yt:channelId',ns).text
113 except:channel_id = None
114 # for pullsub: if this exists, we're looking at a playlist:
115 try: playlist_id = feed.find('yt:playlistId',ns).text
116 except:playlist_id = None
117 videos = []
118 for entry in feed.findall('atom:entry',ns):
119 videos.append({
120 'video_id': entry.find('yt:videoId',ns).text,
121 'title': entry.find('atom:title',ns).text,
122 'published': entry.find('atom:published',ns).text,
123 'channel_id': entry.find('yt:channelId',ns).text,
124 'author': entry.find('atom:author',ns).find('atom:name',ns).text,
125 # extra fields for pull_subs/webhook:
126 'updated': entry.find('atom:updated',ns).text,
127 })
128
129 return title, author, videos, channel_id, playlist_id
130
131 def update_channel(db, xmldata, from_webhook=False):
132 if not xmldata: return False
133
134 # Note: websub does not return global author, hence taking from first video
135 title, author, videos, channel, playlist = parse_xml(xmldata)
136
137 c = db.cursor()
138 for i, video in enumerate(videos):
139 if video.get('deleted'):
140 # Note: Deletion events are not just fired for actual deletions,
141 # but also for unlisting videos and livestreams that just ended
142 # (even postLiveDVR ones). Hence, we don't follow it.
143 flask_logger(f"ignoring deleted/unlisted video or ended livestream {video['video_id']} by {video['channel_id']} ({video['author']})")
144 break
145
146 c.execute("SELECT 1 FROM videos WHERE id=?",(video['video_id'],))
147 new_video = len(c.fetchall()) < 1
148 if new_video:
149 # TODO: call store_video_metadata(video_id) here instead and pass video-fallback-metadata to it
150 _, _, meta, _, _ = get_video_info(video['video_id'], metaOnly=True)
151 # The 'published' timestamp sent in websub POSTs are often wrong (e.g.:
152 # video gets uploaded as unlisted on day A and set to public on day B;
153 # the webhook is sent on day B, but 'published' says A. The video
154 # therefore looks like it's just an update to an older video).
155 # g_v_i gives is the date the video was published to viewers, so we
156 # prefer that. But since g_v_i only returns the date without time,
157 # we still use xmlfeed's date if it's the same date.
158 published = dateutil.parser.parse(video['published'])
159 length = None
160 livestream = None
161 premiere = None
162 shorts = None
163 if meta:
164 meta = video_metadata(meta)
165 published2 = dateutil.parser.parse(meta['published'])
166 if published < published2: # g_v_i date is more accurate:
167 published = published2
168 length = meta['length']
169 livestream = meta['livestream']
170 premiere = meta['premiere']
171 shorts = meta['shorts']
172
173 now = datetime.now(timezone.utc)
174
175 # we pretend that all videos uploaded this week were uploaded just
176 # now, so the user sees it at the top of the feed, and it doesn't
177 # get inserted somewhere further down.
178 if (now - published).days < 7:
179 timestamp = now
180 else:#, it's just an update to an older video.
181 timestamp = published
182
183 c.execute("""
184 INSERT OR IGNORE INTO videos
185 (id, channel_id, title, length, livestream, premiere, shorts, published, crawled)
186 VALUES (?, ?, ?, ?, ?, ?, ?, datetime(?), datetime(?))
187 """, (
188 video['video_id'],
189 video['channel_id'],
190 video['title'],
191 length,
192 livestream,
193 premiere,
194 shorts,
195 published,
196 timestamp
197 ))
198 else:
199 # update video title (everything else can't change)
200 c.execute("""
201 UPDATE OR IGNORE videos
202 SET title = ?
203 WHERE id = ?
204 """, (
205 video['title'],
206 video['video_id'],
207 ))
208
209 # for channels, this is obviously always the same, but playlists can
210 # consist of videos from different channels:
211 if i == 0 or playlist:
212 c.execute("""
213 INSERT OR REPLACE INTO channels (id, name)
214 VALUES (?, ?)
215 """, (video['channel_id'], video['author']))
216
217 # keep track of which videos are in a playlist, so we can show the user
218 # why a video is in their feed:
219 if playlist:
220 c.execute("""
221 INSERT OR IGNORE INTO playlist_videos (video_id, playlist_id)
222 VALUES (?, ?)
223 """, (video['video_id'], playlist))
224
225 if playlist and not from_webhook: # Note: playlists can't get updated via websub
226 c.execute("""
227 INSERT OR REPLACE INTO playlists (id, name, author)
228 VALUES (?, ?, ?)
229 """, (playlist, title, channel))
230 c.execute("""
231 INSERT OR REPLACE INTO channels (id, name)
232 VALUES (?, ?)
233 """, (channel, author))
234
235 db.commit()
236
237 return True
238
239 def is_agegated(metadata):
240 playabilityStatus = metadata['playabilityStatus']
241 return bool(
242 playabilityStatus.get("status") == "CONTENT_CHECK_REQUIRED"
243 or playabilityStatus.get("desktopLegacyAgeGateReason")
244 )
245
246 def get_video_info(video_id, *, metaOnly=False, _agegate_bypass=False):
247 """
248 returns: best-quality muxed video stream, stream map, player_response, error-type/mesage
249 error types: player, malformed, livestream, geolocked, agegated, no-url, exhausted
250 """
251 player_error, metadata = None, None # for 'exhausted'
252 with sqlite3.connect(cf['global']['database']) as conn:
253 c = conn.cursor()
254 c.execute("SELECT * FROM captcha_cookies")
255 cookies = dict(c.fetchall())
256 today = datetime.now(timezone.utc).strftime("%Y%m%d")
257 # XXX: anticaptcha hasn't been adapted
258 key = "AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8"
259 # ANDROID returns streams that are not throttled or cipher-scambled, but less metadata than WEB.
260 # TVHTML5* returns throttled and possibly ciphered streams, but bypasses age-gate. atm, we don't decipher them.
261 # TODO: unscramble TVHTML5* streams (especially &n= throttling)
262 client = {
263 (False, False): { 'clientName': 'ANDROID', 'clientVersion': '17.31.35', 'androidSdkVersion': 30},
264 (False, True): { 'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER', 'clientVersion': '2.0' },
265 (True, False): { 'clientName': 'WEB', 'clientVersion':f'2.{today}.01.01' },
266 }[(metaOnly, _agegate_bypass)]
267 r = requests.post("https://www.youtube-nocookie.com/youtubei/v1/player", params={'key': key}, json={
268 'videoId': video_id,
269 'context': {
270 'client': {
271 'gl': 'US',
272 'hl': 'en',
273 **client,
274 },
275 'thirdParty': {'embedUrl': 'https://www.youtube.com/'}
276 },
277 "racyCheckOk": True, # seems to do nothing, cargo-culted
278 "contentCheckOk": True, # fix "This video may be inappropriate for some users."
279 }, cookies=cookies, headers={"User-Agent": "com.google.android.youtube/17.31.35 (Linux; U; Android 11) gzip"})
280
281 if not r or r.status_code == 429:
282 return None, None, None, 'banned', 'possible IP ban'
283
284 metadata = r.json()
285 if "error" in metadata:
286 return None, None, metadata, "malformed", metadata.get("error",{}).get("message","")
287 playabilityStatus = metadata['playabilityStatus']['status']
288 if playabilityStatus != "OK":
289 playabilityReason = metadata['playabilityStatus'].get('reason',
290 '//'.join(metadata['playabilityStatus'].get('messages',[])))
291 player_error = f"{playabilityStatus}: {playabilityReason}"
292 if (is_agegated(metadata)
293 and not metaOnly # only need metadata (e.g. called from pubsubhubbub)
294 and not _agegate_bypass
295 ):
296 _, _, metadata_embed, error_embed, errormsg_embed = get_video_info(video_id, _agegate_bypass=True)
297 if error_embed == "player": # agegate bypass failed?
298 return None, None, metadata, 'agegated', player_error
299 elif not error_embed or error_embed in ('livestream','geolocked','scrambled'):
300 metadata = metadata_embed
301 else:
302 return None, None, metadata, error_embed, errormsg_embed
303 else:
304 # without videoDetails, there's only the error message
305 maybe_metadata = metadata if 'videoDetails' in metadata else None
306 return None, None, maybe_metadata, 'player', player_error
307
308 # livestreams have no adaptive/muxed formats:
309 is_live = metadata['videoDetails'].get('isLive', False)
310
311 if not 'formats' in metadata['streamingData'] and not is_live:
312 return None, None, metadata, 'no-url', player_error
313
314 formats = metadata['streamingData'].get('formats',[])
315 adaptive = metadata['streamingData'].get('adaptiveFormats',[])
316 stream_map = {
317 'adaptive_video': [a for a in adaptive if a['mimeType'].startswith('video/')],
318 'adaptive_audio': [a for a in adaptive if a['mimeType'].startswith('audio/')],
319 'muxed': formats,
320 'hlsManifestUrl': metadata['streamingData'].get('hlsManifestUrl'),
321 }
322
323 try:
324 url = sorted(formats, key=lambda k: k['height'], reverse=True)[0]['url']
325
326 # ip-locked videos can be recovered if the proxy module is loaded:
327 is_geolocked = 'gcr' in parse_qs(urlparse(url).query)
328 except:
329 url = None
330 is_geolocked = False
331
332 is_drm = formats and 'signatureCipher' in formats[0]
333
334 nonfatal = 'livestream' if is_live \
335 else 'geolocked' if is_geolocked \
336 else 'scrambled' if is_drm \
337 else None
338
339 return url, stream_map, metadata, nonfatal, None
340
341 def video_metadata(metadata):
342 if not metadata:
343 return {}
344
345 meta1 = metadata['videoDetails']
346 # With ANDROID player API, we don't get microformat => no publishDate!
347 meta2 = metadata.get('microformat',{}).get('playerMicroformatRenderer',{})
348
349 # sometimes, we receive the notification so early that the length is not
350 # yet populated. Nothing we can do about it. meta1 and meta2 use a
351 # different rounding strategy, meta2 is sometimes (incorrectly) 1s longer.
352 length = int(meta1.get('lengthSeconds',0)) or int(meta2.get('lengthSeconds',0)) or None
353
354 scheduled_time = metadata.get('playabilityStatus',{}) \
355 .get('liveStreamability',{}).get('liveStreamabilityRenderer',{}) \
356 .get('offlineSlate',{}).get('liveStreamOfflineSlateRenderer',{}) \
357 .get('scheduledStartTime')
358 if scheduled_time:
359 scheduled_time = datetime.fromtimestamp(int(scheduled_time)) \
360 .strftime("%Y-%m-%dT%H:%M:%SZ")
361 published_at = (
362 meta2.get('liveBroadcastDetails',{}) .get('startTimestamp') or
363 scheduled_time or
364 f"{meta2.get('publishDate','1970-01-01')}T00:00:00Z"
365 )
366
367 # the actual video streams have exact information:
368 # Note that we use x:1 (cinema style) aspect ratios, omitting the ':1' part.
369 try:
370 sd = metadata['streamingData']
371 some_stream = (sd.get('adaptiveFormats',[]) + sd.get('formats',[]))[0]
372 aspect_ratio = some_stream['width'] / some_stream['height']
373 # if that's unavailable (e.g. on livestreams), fall back to 16:9 (later)
374 except:
375 aspect_ratio = None
376
377 # shorts are <= 60 seconds and vertical or square. if we were unable to
378 # determine it, we set it to None.
379 is_short = (
380 True if (length or 61) <= 60 and (aspect_ratio or 2) <= 1 else
381 False if (length or 0) > 60 or (aspect_ratio or 0) > 1 else
382 None
383 )
384
385 # Note: 'premiere' videos have livestream=False and published= will be the
386 # start of the premiere.
387 return {
388 'title': meta1['title'],
389 'author': meta1['author'],
390 'channel_id': meta1['channelId'],
391 'published': published_at,
392 'views': int(meta1['viewCount']),
393 'length': length,
394 'aspect': aspect_ratio or 16/9,
395 'livestream': meta1['isLiveContent'],
396 'premiere': meta1.get('isUpcoming') and not meta1['isLiveContent'],
397 'shorts': is_short,
398 }
399
400 def mkthumbs(thumbs):
401 output = {str(e['height']): e['url'] for e in thumbs}
402 largest=next(iter(sorted(output.keys(),reverse=True,key=int)),None)
403 return {**output, 'largest': largest}
404
405 def store_video_metadata(video_id):
406 # check if we know about it, and if not, fetch and store video metadata
407 with sqlite3.connect(cf['global']['database']) as conn:
408 c = conn.cursor()
409 c.execute("SELECT 1 from videos where id = ?", (video_id,))
410 new_video = len(c.fetchall()) < 1
411 if new_video:
412 _, _, meta, _, _ = get_video_info(video_id, metaOnly=True)
413 if meta:
414 meta = video_metadata(meta)
415 c.execute("""
416 INSERT OR IGNORE INTO videos (id, channel_id, title, length, livestream, premiere, shorts, published, crawled)
417 VALUES (?, ?, ?, ?, ?, ?, ?, datetime(?), datetime(?))
418 """, (
419 video_id,
420 meta['channel_id'],
421 meta['title'],
422 meta['length'],
423 meta['livestream'],
424 meta['premiere'],
425 meta['shorts'],
426 meta['published'],
427 meta['published'],
428 ))
429 c.execute("""
430 INSERT OR REPLACE INTO channels (id, name)
431 VALUES (?, ?)
432 """, (meta['channel_id'], meta['author']))
433
434 def fetch_video_flags(token, video_ids):
435 with sqlite3.connect(cf['global']['database']) as conn:
436 c = conn.cursor()
437 c.execute("""
438 SELECT video_id,display
439 FROM flags
440 WHERE user = ?
441 AND display IS NOT NULL
442 AND video_id IN ({})
443 -- AND display = 'pinned'
444 """.format(",".join(["?"]*len(video_ids))), (token,*video_ids))
445 flags = c.fetchall()
446 pinned = [video for video,disp in flags if disp == 'pinned']
447 hidden = [video for video,disp in flags if disp == 'hidden']
448
449 return pinned, hidden
450
451 def apply_video_flags(token, rows):
452 video_ids = [card['content']['video_id'] for card in rows if 'video_id' in card['content']]
453 pinned, hidden = fetch_video_flags(token, video_ids)
454 return sorted([
455 {'type':v['type'], 'content':{**v['content'], 'pinned': v['content']['video_id'] in pinned if 'video_id' in v['content'] else False}}
456 for v in rows
457 if 'video_id' not in v['content'] or v['content']['video_id'] not in hidden
458 ], key=lambda v:v['content']['pinned'], reverse=True)
459
460 from werkzeug.exceptions import NotFound
461 class NoFallbackException(NotFound): pass
462 def fallback_route(*args, **kwargs): # TODO: worthy as a flask-extension?
463 """
464 finds the next route that matches the current url rule, and executes it.
465 args, kwargs: pass all arguments of the current route
466 """
467 from flask import current_app, request, g
468
469 # build a list of endpoints that match the current request's url rule:
470 matching = [
471 rule.endpoint
472 for rule in current_app.url_map.iter_rules()
473 if rule.rule == request.url_rule.rule
474 ]
475 current = matching.index(request.endpoint)
476
477 # since we can't change request.endpoint, we always get the original
478 # endpoint back. so for repeated fall throughs, we use the g object to
479 # increment how often we want to fall through.
480 if not '_fallback_next' in g:
481 g._fallback_next = 0
482 g._fallback_next += 1
483
484 next_ep = current + g._fallback_next
485
486 if next_ep < len(matching):
487 return current_app.view_functions[matching[next_ep]](*args, **kwargs)
488 else:
489 raise NoFallbackException
490
491 def websub_url_hmac(key, feed_id, timestamp, nonce):
492 """ generate sha1 hmac, as required by websub/pubsubhubbub """
493 sig_input = f"{feed_id}:{timestamp}:{nonce}".encode('ascii')
494 return hmac.new(key.encode('ascii'), sig_input, hashlib.sha1).hexdigest()
495
496 def websub_body_hmac(key, body):
497 return hmac.new(key.encode('ascii'), body, hashlib.sha1).hexdigest()
498
499 def flask_logger(msg, level="warning"):
500 level = dict(
501 CRITICAL=50,
502 ERROR=40,
503 WARNING=30,
504 INFO=20,
505 DEBUG=10,
506 NOTSET=0,
507 ).get(level.upper(), 0)
508 try:
509 from flask import current_app
510 current_app.logger.log(level, msg)
511 except:
512 pass
513
514 def log_unknown_card(data):
515 import json
516 try:
517 from flask import request
518 source = request.url
519 except: source = "unknown"
520 with open("/tmp/innertube.err", "a", encoding="utf-8", errors="backslashreplace") as f:
521 f.write(f"\n/***** {source} *****/\n")
522 json.dump(data, f, indent=2)
Imprint / Impressum