import os
import re
import json
import html
import base64
import sqlite3
import requests
import hmac, hashlib
import requests_cache
import dateutil.parser
from xml.etree import ElementTree
from configparser import ConfigParser
from datetime import datetime, timezone
from urllib.parse import parse_qs, urlparse

cf = ConfigParser()
config_filename = os.environ.get('YT_CONFIG', '/etc/yt/config.ini')
cf.read(config_filename)
if 'global' not in cf: # todo: full config check
    raise Exception("Configuration file not found or empty")

# Note: currently expiring after 10 minutes. googlevideo-urls are valid for 5h59m, but caching that long makes reddit very stale and premiere videos won't start. TODO: expire when video is livestream/premiere/etc.
requests_cache.install_cache(backend='memory', expire_after=10*60, allowable_codes=(200,))

# Note: this should only be required for the 'memory' backed cache.
# TODO: only run for long-running processes, i.e. the frontend
from threading import Timer
def purge_cache(sec):
    requests_cache.remove_expired_responses()
    t = Timer(sec, purge_cache, args=(sec,))
    t.setDaemon(True)
    t.start()
purge_cache(10*60)

# for debugging purposes, monkey patch the requests session to store each
# requests-request (url, params and response) in the flask request's g object.
# we can then use a flask error_handler to include the request data in the error log.
# since this module is also used outside the flask appcontext, the g access is
# wrapped in a try-except block.
from flask import g
import requests
from requests import Session as OriginalSession
class _NSASession(OriginalSession):
    def request(self, method, url, params=None, data=None, **kwargs):
        response = super(_NSASession, self).request(
            method, url, params, data, **kwargs
        )
        try:
            if 'api_requests' not in g:
                g.api_requests = []
            g.api_requests.append((url, params, response.text))
        except RuntimeError: pass # not within flask (e.g. utils.py)
        return response
requests.Session = requests.sessions.Session = _NSASession
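# a minimal sketch of such an error_handler (hypothetical: `app` is the flask
# application object, which is not available in this module, so the snippet is
# left commented out):
#
#   @app.errorhandler(Exception)
#   def log_api_requests(e):
#       for url, params, response_text in g.get('api_requests', []):
#           app.logger.error(f"api request: {url} {params} -> {response_text[:200]}")
#       return "internal server error", 500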

def fetch_xml(feed_type, feed_id):
    # TODO: handle requests.exceptions.ConnectionError
    r = requests.get("https://www.youtube.com/feeds/videos.xml", {
        feed_type: feed_id,
    })
    if not r.ok:
        return None

    return r.content

def parse_xml(xmldata):
    ns = {
        'atom': "http://www.w3.org/2005/Atom",
        'yt':   "http://www.youtube.com/xml/schemas/2015",
        'media':"http://search.yahoo.com/mrss/",
        'at':   "http://purl.org/atompub/tombstones/1.0",
    }

    feed = ElementTree.fromstring(xmldata)

    if feed.find('at:deleted-entry',ns) is not None:
        (_,_,vid) = feed.find('at:deleted-entry',ns).get('ref').rpartition(':')
        return None, None, [{'deleted': True, 'video_id': vid}]

    title = feed.find('atom:title',ns).text
    author = feed.find('atom:author/atom:name',ns).text \
        if feed.find('atom:author',ns) is not None else None
    videos = []
    for entry in feed.findall('atom:entry',ns):
        videos.append({
            'video_id': entry.find('yt:videoId',ns).text,
            'title': entry.find('atom:title',ns).text,
            'published': entry.find('atom:published',ns).text,
            'channel_id': entry.find('yt:channelId',ns).text,
            'author': entry.find('atom:author',ns).find('atom:name',ns).text,
            # extra fields for pull_subs/webhook:
            'updated': entry.find('atom:updated',ns).text,
        })

    return title, author, videos
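# for reference: parse_xml() returns a (title, author, videos) tuple, where
# videos is a list of dicts with exactly the keys read above, e.g. (values are
# placeholders): {'video_id': 'VIDEO_ID', 'title': '...', 'published': '...',
# 'channel_id': 'CHANNEL_ID', 'author': '...', 'updated': '...'}; websub
# delete notifications yield (None, None, [{'deleted': True, 'video_id': ...}]).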

def update_channel(db, xmldata, from_webhook=False):
    if not xmldata: return False

    # Note: websub does not return the global author, hence we take it from the first video
    _, _, videos = parse_xml(xmldata)

    c = db.cursor()
    from flask import current_app # XXX: remove
    for i, video in enumerate(videos):
        if video.get('deleted'):
            if from_webhook: current_app.logger.warning(f"ignoring deleted video {video['video_id']}") # XXX: remove
            # TODO: enable once we enforce hmac validation:
            #c.execute("DELETE FROM videos WHERE id = ?", (video['video_id'],))
            break

        now = datetime.now(timezone.utc)
        updated = dateutil.parser.parse(video['updated'])
        published = dateutil.parser.parse(video['published'])
        # if updated and published time are near-identical, we assume it's new.
        # checking if it was posted this week is necessary during xmlfeed pulling.
        if (updated - published).seconds < 60 and (now - published).days < 7:
            timestamp = now
            if from_webhook: current_app.logger.warning(f"fresh video {video['video_id']}") # XXX: remove
        else: # otherwise, it might just be an update to an older video, or a previously unlisted one.
            # first, assume it's an older video (correct when pulling xmlfeeds)
            timestamp = published
            # then, check if we don't know about it yet and if so, look up the real date.

            # The 'published' timestamp sent in websub POSTs is often wrong (e.g.:
            # a video gets uploaded as unlisted on day A and set to public on day B;
            # the webhook is sent on day B, but 'published' says A. The video
            # therefore looks like it's just an update to an older video). If
            # that's the case, we call get_video_info and double-check.
            # We only need to do this for videos that are not yet in the database.
            c.execute("SELECT 1 from videos where id = ?", (video['video_id'],))
            new_video = len(c.fetchall()) < 1
            if from_webhook: current_app.logger.warning(f"video {video['video_id']}") # XXX: remove
            if from_webhook and new_video:
                if from_webhook: current_app.logger.warning(f" is webhook and new") # XXX: remove
                _, meta, _, _ = get_video_info(video['video_id'])
                if meta:
                    meta = prepare_metadata(meta)
                    published = dateutil.parser.parse(meta['published'])
                    if from_webhook: current_app.logger.warning(f" uploaded {published}") # XXX: remove
                    if (now - published).days < 7:
                        timestamp = now
                    else: # it's just an update to an older video.
                        timestamp = published

        c.execute("""
            INSERT OR IGNORE INTO videos (id, channel_id, title, published, crawled)
            VALUES (?, ?, ?, datetime(?), datetime(?))
        """, (
            video['video_id'],
            video['channel_id'],
            video['title'],
            video['published'],
            timestamp
        ))

        if i == 0: # only required once per feed
            c.execute("""
                INSERT OR REPLACE INTO channels (id, name)
                VALUES (?, ?)
            """, (video['channel_id'], video['author']))
        db.commit()

    return True
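# worked example of the freshness heuristic above (dates are illustrative): a
# feed entry published 2020-06-01T12:00:00+00:00 and updated 30 seconds later,
# processed within 7 days of publishing, is stored with crawled=now and thus
# shows up as new; otherwise the 'published' date is used, unless the webhook
# double-check above finds a recent real upload date.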

def get_video_info(video_id, sts=0, algo=""):
    """
    returns: best-quality muxed video stream url, player_response, error type, error message
    error types: player, malformed, livestream, geolocked, exhausted
    """
    player_error = None # for 'exhausted'
    for el in ['embedded', 'detailpage']: # sometimes, only one or the other works
        r = requests.get("https://www.youtube.com/get_video_info", {
            "video_id": video_id,
            "eurl": f"https://youtube.googleapis.com/v/{video_id}",
            "el": el,
            "sts": sts,
            "hl": "en_US",
        })
        params = parse_qs(r.text)
        if 'errorcode' in params: # status=fail
            return None, None, 'malformed', params['reason'][0]

        metadata = json.loads(params.get('player_response')[0])
        playabilityStatus = metadata['playabilityStatus']['status']
        if playabilityStatus != "OK":
            playabilityReason = metadata['playabilityStatus'].get('reason',
                '//'.join(metadata['playabilityStatus'].get('messages',[])))
            player_error = f"{playabilityStatus}: {playabilityReason}"
            if playabilityStatus == "UNPLAYABLE":
                continue # try again with next el value (or fail as exhausted)
            # without videoDetails, there's only the error message
            maybe_metadata = metadata if 'videoDetails' in metadata else None
            return None, maybe_metadata, 'player', player_error
        if metadata['videoDetails']['isLiveContent'] and \
                (metadata['videoDetails'].get('isLive', False) or \
                 metadata['videoDetails'].get('isPostLiveDvr', False)):
            return None, metadata, 'livestream', None

        if 'formats' not in metadata['streamingData']:
            continue # no urls

        formats = metadata['streamingData']['formats']
        for (i, v) in enumerate(formats):
            if not ('cipher' in v or 'signatureCipher' in v): continue
            cipher = parse_qs(v.get('cipher') or v.get('signatureCipher'))
            formats[i]['url'] = unscramble(cipher, algo)

        # todo: check if we have urls or try again
        url = sorted(formats, key=lambda k: k['height'], reverse=True)[0]['url']

        if 'gcr' in parse_qs(url):
            return None, metadata, 'geolocked', None

        return url, metadata, None, None
    else:
        return None, metadata, 'exhausted', player_error
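# usage sketch (illustrative; "UxxajLWwzqY" is the test video id mentioned
# below, error handling abbreviated):
#
#   url, metadata, error, errdetail = get_video_info("UxxajLWwzqY")
#   if error is None:
#       ...  # url is the best-quality muxed stream
#   elif error in ('livestream', 'geolocked'):
#       ...  # no url, but metadata is still usable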

def unscramble(cipher, algo): # test video id: UxxajLWwzqY
    signature = list(cipher['s'][0])
    for c in algo.split():
        op, ix = re.match(r"([rsw])(\d+)?", c).groups()
        ix = int(ix) % len(signature) if ix else 0
        if not op: continue
        if op == 'r': signature = list(reversed(signature))
        if op == 's': signature = signature[ix:]
        if op == 'w': signature[0], signature[ix] = signature[ix], signature[0]
    sp = cipher.get('sp', ['signature'])[0]
    sig = cipher.get('sig', [''.join(signature)])[0]
    return f"{cipher['url'][0]}&{sp}={sig}"
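# worked example (hypothetical algo string, not a real one): with
# cipher['s'] == ['abcdef'] and algo == "r s2 w2", the signature becomes
# "fedcba" (r: reverse), then "dcba" (s2: drop the first 2 chars), then "bcda"
# (w2: swap positions 0 and 2), and is appended to the url as "&signature=bcda"
# (or under whatever parameter name 'sp' specifies).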

def prepare_metadata(metadata):
    meta1 = metadata['videoDetails']
    meta2 = metadata['microformat']['playerMicroformatRenderer']
    cards = metadata['cards']['cardCollectionRenderer']['cards'] \
        if 'cards' in metadata else []
    endsc = metadata['endscreen']['endscreenRenderer']['elements'] \
        if 'endscreen' in metadata else []

    # the actual video streams have exact information:
    try:
        sd = metadata['streamingData']
        some_stream = (sd.get('adaptiveFormats',[]) + sd.get('formats',[]))[0]
        aspect_ratio = some_stream['width'] / some_stream['height']
    # if that's unavailable (e.g. on livestreams), fall back to
    # thumbnails (only either 4:3 or 16:9).
    except:
        some_img = meta2['thumbnail']['thumbnails'][0]
        aspect_ratio = some_img['width'] / some_img['height']

    # Note: we could get subtitles in multiple formats directly by querying
    # https://video.google.com/timedtext?hl=en&type=list&v=<VIDEO_ID> followed by
    # https://www.youtube.com/api/timedtext?lang=<LANG_CODE>&v=<VIDEO_ID>&fmt={srv1|srv2|srv3|ttml|vtt},
    # but that won't give us autogenerated subtitles (and is an extra request).
    # we can still add &fmt= to the extracted URLs below (first one takes precedence).
    subtitles = sorted([
        {'url':cc['baseUrl'],
         'code':cc['languageCode'],
         'autogenerated':cc.get('kind')=="asr",
         'name':cc['name']['simpleText'],
         'query':"fmt=vtt&"+urlparse(cc['baseUrl']).query} # for our internal proxy
        for cc in metadata.get('captions',{})
            .get('playerCaptionsTracklistRenderer',{})
            .get('captionTracks',[])
    ], key=lambda cc: cc['autogenerated'])

    def clean_url(url):
        # external URLs are redirected through youtube.com/redirect, but we
        # may encounter internal URLs, too
        return parse_qs(urlparse(url).query).get('q',[url])[0]
    # Remove left-/rightmost word from string:
    delL = lambda s: s.partition(' ')[2]
    delR = lambda s: s.rpartition(' ')[0]
    # Thousands separator aware int():
    intT = lambda s: int(s.replace(',', ''))
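    # e.g. (inputs are illustrative): delL("by SomeChannel") == "SomeChannel",
    # delR("1,234 views") == "1,234", intT("1,234") == 1234.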

    def parse_infocard(card):
        card = card['cardRenderer']
        ctype = list(card['content'].keys())[0]
        content = card['content'][ctype]
        if ctype == "pollRenderer":
            ctype = "POLL"
            content = {
                'question': content['question']['simpleText'],
                'answers': [(a['text']['simpleText'],a['numVotes']) \
                    for a in content['choices']],
            }
        elif ctype == "videoInfoCardContentRenderer":
            ctype = "VIDEO"
            # if the card references a live stream, it has no length, but a "LIVE NOW" badge.
            # TODO: this is ugly; cleanup.
            is_live = content.get('badge',{}).get('liveBadgeRenderer',{})
            length = is_live.get('label',{}).get('simpleText') or content['lengthString']['simpleText'] # '23:03'
            content = {
                'video_id': content['action']['watchEndpoint']['videoId'],
                'title': content['videoTitle']['simpleText'],
                'author': delL(content['channelName']['simpleText']),
                'length': length,
                'views': intT(delR(content['viewCountText']['simpleText'])),
            }
        elif ctype == "playlistInfoCardContentRenderer":
            ctype = "PLAYLIST"
            content = {
                'playlist_id': content['action']['watchEndpoint']['playlistId'],
                'video_id': content['action']['watchEndpoint']['videoId'],
                'title': content['playlistTitle']['simpleText'],
                'author': delL(content['channelName']['simpleText']),
                'n_videos': intT(content['playlistVideoCount']['simpleText']),
            }
        elif ctype == "simpleCardContentRenderer" and 'urlEndpoint' in content['command']:
            ctype = "WEBSITE"
            content = {
                'url': clean_url(content['command']['urlEndpoint']['url']),
                'domain': content['displayDomain']['simpleText'],
                'title': content['title']['simpleText'],
                # XXX: no thumbnails for infocards
            }
        elif ctype == "collaboratorInfoCardContentRenderer":
            ctype = "CHANNEL"
            content = {
                'channel_id': content['endpoint']['browseEndpoint']['browseId'],
                'title': content['channelName']['simpleText'],
                'icons': mkthumbs(content['channelAvatar']['thumbnails']),
                'subscribers': content.get('subscriberCountText',{}).get('simpleText',''), # "545K subscribers"
            }
        else:
            import pprint
            content = {'error': f"{ctype} is not implemented; <pre>{pprint.pformat(card)}</pre>"}

        return {'type': ctype, 'content': content}

    def mkthumbs(thumbs):
        return {e['height']: e['url'] for e in thumbs}
    def parse_endcard(card):
        card = card.get('endscreenElementRenderer', card) # only sometimes nested
        ctype = card['style']
        if ctype == "CHANNEL":
            content = {
                'channel_id': card['endpoint']['browseEndpoint']['browseId'],
                'title': card['title']['simpleText'],
                'icons': mkthumbs(card['image']['thumbnails']),
            }
        elif ctype == "VIDEO":
            content = {
                'video_id': card['endpoint']['watchEndpoint']['videoId'], # XXX: KeyError 'endpoint' exception (no idea which youtube video this was on)
                'title': card['title']['simpleText'],
                'length': card['videoDuration']['simpleText'], # '12:21'
                'views': delR(card['metadata']['simpleText']),
                # XXX: no channel name
            }
        elif ctype == "PLAYLIST":
            content = {
                'playlist_id': card['endpoint']['watchEndpoint']['playlistId'],
                'video_id': card['endpoint']['watchEndpoint']['videoId'],
                'title': card['title']['simpleText'],
                'author': delL(card['metadata']['simpleText']),
                'n_videos': intT(delR(card['playlistLength']['simpleText'])),
            }
        elif ctype == "WEBSITE" or ctype == "CREATOR_MERCHANDISE":
            ctype = "WEBSITE"
            url = clean_url(card['endpoint']['urlEndpoint']['url'])
            content = {
                'url': url,
                'domain': urlparse(url).netloc,
                'title': card['title']['simpleText'],
                'icons': mkthumbs(card['image']['thumbnails']),
            }
        else:
            import pprint
            content = {'error': f"{ctype} is not implemented; <pre>{pprint.pformat(card)}</pre>"}

        return {'type': ctype, 'content': content}

    infocards = [parse_infocard(card) for card in cards]
    endcards = [parse_endcard(card) for card in endsc]
    # combine cards to weed out duplicates. for videos and playlists prefer
    # infocards, for channels and websites prefer endcards, as those contain
    # more information than their counterparts.
    # if the card type is not in ident, we use the whole card for comparison
    # (otherwise they'd all replace each other)
    ident = { # ctype -> ident
        'VIDEO': 'video_id',
        'PLAYLIST': 'playlist_id',
        'CHANNEL': 'channel_id',
        'WEBSITE': 'url',
        'POLL': 'question',
    }
    getident = lambda c: c['content'].get(ident.get(c['type']), c)
    mkexclude = lambda cards, types: [getident(c) for c in cards if c['type'] in types]
    exclude = lambda cards, without: [c for c in cards if getident(c) not in without]

    allcards = exclude(infocards, mkexclude(endcards, ['CHANNEL','WEBSITE'])) + \
        exclude(endcards, mkexclude(infocards, ['VIDEO','PLAYLIST']))

    all_countries = """AD AE AF AG AI AL AM AO AQ AR AS AT AU AW AX AZ BA BB BD
        BE BF BG BH BI BJ BL BM BN BO BQ BR BS BT BV BW BY BZ CA CC CD CF CG CH
        CI CK CL CM CN CO CR CU CV CW CX CY CZ DE DJ DK DM DO DZ EC EE EG EH ER
        ES ET FI FJ FK FM FO FR GA GB GD GE GF GG GH GI GL GM GN GP GQ GR GS GT
        GU GW GY HK HM HN HR HT HU ID IE IL IM IN IO IQ IR IS IT JE JM JO JP KE
        KG KH KI KM KN KP KR KW KY KZ LA LB LC LI LK LR LS LT LU LV LY MA MC MD
        ME MF MG MH MK ML MM MN MO MP MQ MR MS MT MU MV MW MX MY MZ NA NC NE NF
        NG NI NL NO NP NR NU NZ OM PA PE PF PG PH PK PL PM PN PR PS PT PW PY QA
        RE RO RS RU RW SA SB SC SD SE SG SH SI SJ SK SL SM SN SO SR SS ST SV SX
        SY SZ TC TD TF TG TH TJ TK TL TM TN TO TR TT TV TW TZ UA UG UM US UY UZ
        VA VC VE VG VI VN VU WF WS YE YT ZA ZM ZW""".split()
    whitelisted = sorted(meta2.get('availableCountries',[]))
    blacklisted = sorted(set(all_countries) - set(whitelisted))

    published_at = f"{meta2['publishDate']}T00:00:00Z" # yyyy-mm-dd
    # 'premiere' videos (and livestreams?) have an ISO 8601 date available:
    if 'liveBroadcastDetails' in meta2 and 'startTimestamp' in meta2['liveBroadcastDetails']: # TODO: tighten up
        published_at = meta2['liveBroadcastDetails']['startTimestamp']

    return {
        'title': meta1['title'],
        'author': meta1['author'],
        'channel_id': meta1['channelId'],
        'description': meta1['shortDescription'],
        'published': published_at,
        'views': meta1['viewCount'],
        'length': int(meta1['lengthSeconds']),
        'rating': meta1['averageRating'],
        'category': meta2['category'],
        'aspectr': aspect_ratio,
        'unlisted': meta2['isUnlisted'],
        'whitelisted': whitelisted,
        'blacklisted': blacklisted,
        'poster': meta2['thumbnail']['thumbnails'][0]['url'],
        'infocards': infocards,
        'endcards': endcards,
        'all_cards': allcards,
        'subtitles': subtitles,
    }

def store_video_metadata(video_id):
    # check if we know about it, and if not, fetch and store video metadata
    with sqlite3.connect(cf['global']['database']) as conn:
        c = conn.cursor()
        c.execute("SELECT 1 from videos where id = ?", (video_id,))
        new_video = len(c.fetchall()) < 1
        if new_video:
            _, meta, _, _ = get_video_info(video_id)
            if meta:
                meta = prepare_metadata(meta)
                c.execute("""
                    INSERT OR IGNORE INTO videos (id, channel_id, title, published, crawled)
                    VALUES (?, ?, ?, datetime(?), datetime(?))
                """, (
                    video_id,
                    meta['channel_id'],
                    meta['title'],
                    meta['published'],
                    meta['published'],
                ))
                c.execute("""
                    INSERT OR REPLACE INTO channels (id, name)
                    VALUES (?, ?)
                """, (meta['channel_id'], meta['author']))

class RedditException(Exception): pass
def fetch_reddit(subreddits, sorted_by="hot", time=None, *, limit=36,
        count=None, before=None, after=None):
    """
    fetches data from a subreddit (or a multireddit like gif+gifs) and
    filters/sorts results.
    sorted_by values: hot, new, rising, controversial, top
    time values: hour, day, week, month, year, all (for top and controversial)
    """

    if not subreddits:
        return None

    query = {k:v for k,v in {
        'count':count,
        'before':before,
        'after':after,
        'limit':limit, # 1..100 (default 25)
        't': time,     # hour,day,week,month,year,all
    }.items() if v}
    multireddit = '+'.join(subreddits)
    r = requests.get(f"https://old.reddit.com/r/{multireddit}/{sorted_by}.json",
        query, headers={'User-Agent':'Mozilla/5.0'})
    if not r.ok or 'data' not in r.json():
        raise RedditException(r.text)

    return r.json()
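# usage sketch (subreddit names and values are illustrative):
#   data = fetch_reddit(['videos', 'youtubehaiku'], sorted_by='top', time='week', limit=50)
#   videos = parse_reddit_videos(data)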

def fetch_reddit_post(post_id):
    # Note: /api/info.json?id=t3_h7mjes == /by_id/t3_h7mjes.json
    r = requests.get(f"https://old.reddit.com/by_id/t3_{post_id}.json",
        headers={'User-Agent':'Mozilla/5.0'})
    if not r.ok or 'data' not in r.json():
        raise RedditException(r.text)

    return r.json()

def parse_reddit_videos(data):
    videos = []
    entries = sorted(data['data']['children'],
        key=lambda e: e['data']['score'] > 1,
        reverse=True)
    for entry in entries:
        e = entry['data']
        if e['domain'] not in ['youtube.com', 'youtu.be', 'invidio.us']:
            continue
        try:
            # Note: youtube.com/<video_id> is not valid (404s), but seen in the wild.
            video_id = re.match(r'^https?://(?:www\.|m\.)?(?:youtube\.com/watch\?(?:.*&amp;)?v=|youtu\.be/|youtube\.com/embed/|youtube\.com/)([-_0-9A-Za-z]+)', e['url']).group(1)
        except:
            continue # XXX: should we log that?
        if not video_id: continue
        videos.append({
            'video_id': video_id,
            'title': html.unescape(e['title']), # Note: we unescape and re-escape in the template
            'url': e['permalink'],
            'n_comments': e['num_comments'],
            'n_karma': e['score'],
            'subreddit': e['subreddit'],
            'post_id': e['id'],
        })

    return videos

class NoFallbackException(Exception): pass
def fallback_route(*args, **kwargs): # TODO: worthy as a flask-extension?
    """
    finds the next route that matches the current url rule, and executes it.
    args, kwargs: pass all arguments of the current route
    """
    from flask import current_app, request, g
    from werkzeug.exceptions import NotFound

    # build a list of endpoints that match the current request's url rule:
    matching = [
        rule.endpoint
        for rule in current_app.url_map.iter_rules()
        if rule.rule == request.url_rule.rule
    ]
    current = matching.index(request.endpoint)

    # since we can't change request.endpoint, we always get the original
    # endpoint back. so for repeated fall throughs, we use the g object to
    # increment how often we want to fall through.
    if '_fallback_next' not in g:
        g._fallback_next = 0
    g._fallback_next += 1

    next_ep = current + g._fallback_next

    if next_ep < len(matching):
        return current_app.view_functions[matching[next_ep]](*args, **kwargs)
    else:
        raise NoFallbackException
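# usage sketch (hypothetical routes; have_local_copy() is a made-up check,
# only meant to illustrate how a view falls through to the next matching one):
#
#   @app.route('/watch')
#   def watch_local(**kwargs):
#       if not have_local_copy(): return fallback_route(**kwargs)
#       ...
#   @app.route('/watch', endpoint='watch_remote')
#   def watch_remote(**kwargs):
#       ...  # fallback_route() above dispatches here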

def websub_url_hmac(key, feed_id, timestamp, nonce):
    """ generate sha1 hmac, as required by websub/pubsubhubbub """
    sig_input = f"{feed_id}:{timestamp}:{nonce}".encode('ascii')
    return hmac.new(key.encode('ascii'), sig_input, hashlib.sha1).hexdigest()

def websub_body_hmac(key, body):
    return hmac.new(key.encode('ascii'), body, hashlib.sha1).hexdigest()
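# verification sketch for the body hmac (the websub spec sends it as an
# "X-Hub-Signature: sha1=<hexdigest>" header; `request`/`abort` are flask
# imports, shown commented out since this module has no request context):
#
#   sent = request.headers.get('X-Hub-Signature', '').replace('sha1=', '', 1)
#   if not hmac.compare_digest(sent, websub_body_hmac(key, request.data)):
#       abort(403)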

def pp(*args):
    from pprint import pprint
    import sys, codecs
    pprint(args, stream=codecs.getwriter("utf-8")(sys.stderr.buffer))