# git.gir.st - subscriptionfeed.git/blob - app/common.py
import os
import re
import json
import requests
import requests_cache
import dateutil.parser
from xml.etree import ElementTree
from configparser import ConfigParser
from datetime import datetime, timezone
from urllib.parse import parse_qs, urlparse
# Path of the INI config file; overridable via the YT_CONFIG env variable.
config_filename = os.environ.get('YT_CONFIG', '/etc/yt/config.ini')
# NOTE(review): 'cf' was used but never created in the extracted source --
# presumably the 'cf = ConfigParser()' line was lost; restored here so the
# module does not raise NameError on import. ConfigParser.read() silently
# ignores a missing file, so a bad path yields an empty config, not an error.
cf = ConfigParser()
cf.read(config_filename)
# Note: currently expiring after 10 minutes. googlevideo-urls are valid for
# 5h59m, but this makes reddit very stale and premiere videos won't start.
# TODO: expire when video is livestream/premiere/etc
requests_cache.install_cache(backend='memory', expire_after=10*60, allowable_codes=(200,))

# Note: this should only be required for the 'memory' backed cache.
# TODO: only run for long-running processes, i.e. the frontend
from threading import Timer
# NOTE(review): the two statements below reference 'sec' and 'purge_cache',
# neither of which is defined at module level -- they look like the body of a
# self-rescheduling 'def purge_cache(sec):' whose def line (and probably a
# trailing 't.start()') was lost in extraction. Confirm against the upstream
# file before running; as reproduced this raises NameError at import time.
requests_cache.remove_expired_responses()
t = Timer(sec, purge_cache, args=(sec,))
def fetch_xml(feed_type, feed_id):
    """Fetch a YouTube Atom feed and return its body as text.

    feed_type: the query-parameter name (presumably 'channel_id' or
               'playlist_id' -- TODO confirm against callers).
    feed_id:   the corresponding id value.
    Returns the XML text, or None on a non-success HTTP status.
    """
    r = requests.get(f"https://www.youtube.com/feeds/videos.xml?{feed_type}={feed_id}")
    # NOTE(review): the extracted source dropped this function's tail; as
    # scraped, the response was fetched and discarded. Restored the obvious
    # contract (None on failure, body text on success) that parse_xml()/
    # update_channel() downstream expect.
    if not r.ok:
        return None
    return r.text
def parse_xml(xmldata):
    """Parse a YouTube Atom feed into (title, author, videos).

    xmldata: raw Atom XML string, as returned by fetch_xml().
    Returns a 3-tuple:
      title  -- the feed-level <title> text
      author -- the feed-level author name, or None if the feed has no
                populated <author> element (websub pushes lack it)
      videos -- list of dicts with video_id/title/published/channel_id/
                author/updated, one per <entry>
    Raises xml.etree.ElementTree.ParseError on malformed XML.

    NOTE(review): the extraction dropped the 'ns = {' and 'videos = []' /
    'videos.append({' structural lines; they are restored here from the
    surrounding tokens.
    """
    ns = {
        'atom': "http://www.w3.org/2005/Atom",
        'yt': "http://www.youtube.com/xml/schemas/2015",
        'media': "http://search.yahoo.com/mrss/",
    }
    feed = ElementTree.fromstring(xmldata)
    title = feed.find('atom:title', ns).text
    # Element truth-testing means "has children", which holds for a populated
    # <author><name>...</name></author>; absent author yields None.
    author = feed.find('atom:author/atom:name', ns).text \
        if feed.find('atom:author', ns) else None
    videos = []
    for entry in feed.findall('atom:entry', ns):
        videos.append({
            'video_id': entry.find('yt:videoId', ns).text,
            'title': entry.find('atom:title', ns).text,
            'published': entry.find('atom:published', ns).text,
            'channel_id': entry.find('yt:channelId', ns).text,
            'author': entry.find('atom:author', ns).find('atom:name', ns).text,
            # extra fields for pull_subs/webhook:
            'updated': entry.find('atom:updated', ns).text,
        })
    return title, author, videos
def update_channel(db, xmldata):
    """Upsert one feed's videos (and its channel row) into the database.

    db:      presumably an sqlite3 connection ('INSERT OR IGNORE' syntax) --
             TODO confirm; the cursor/execute plumbing was lost in extraction.
    xmldata: raw Atom XML, or falsy on a failed fetch.
    Returns False for empty input; the success return value was lost in
    extraction.
    """
    if not xmldata: return False
    # Note: websub does not return global author, hence taking from first video
    title, _, videos = parse_xml(xmldata)
    for i, video in enumerate(videos):
        now = datetime.now(timezone.utc)
        updated = dateutil.parser.parse(video['updated'])
        published = dateutil.parser.parse(video['published'])
        # if update and published time are near-identical, we assume it's new.
        # NOTE(review): .seconds ignores the .days component of the timedelta,
        # so an update exactly N days + <60s after publish also passes this
        # test -- possibly .total_seconds() was intended; confirm upstream.
        if (updated - published).seconds < 60 and (now - published).days < 7:
        else: #, it's just an update to an older video.
        # NOTE(review): the bodies of the if/else above and the cursor/
        # db.execute("""...""", (...)) wrappers around the SQL below were lost
        # in extraction -- this fragment is NOT runnable as-is; restore the
        # missing lines from the upstream file.
        INSERT OR IGNORE INTO videos (id, channel_id, title, published, crawled)
        VALUES (?, ?, ?, datetime(?), datetime(?))
        if i == 0: # only required once per feed
            INSERT OR REPLACE INTO channels (id, name)
            """, (video['channel_id'], video['author']))
def get_video_info(video_id, sts=0, algo=""):
    """Query get_video_info and pick the best muxed stream URL.

    returns: best-quality muxed video stream, player_response, error-type/message
    error types: player, malformed, livestream, geolocked, exhausted

    video_id: the 11-char YouTube video id.
    sts/algo: signature-timestamp and descrambling algorithm for unscramble();
              the query parts that used 'sts' (original lines ~108-109) were
              lost in extraction.
    """
    player_error = None # for 'exhausted'
    for el in ['embedded', 'detailpage']: #sometimes, only one or the other works
        r = requests.get(f"https://www.youtube.com/get_video_info" +
                         f"?video_id={video_id}" +
                         f"&eurl=https://youtube.googleapis.com/v/{video_id}" +
                         # NOTE(review): query parts using 'el' and 'sts'
                         # (original lines 108-109) were lost in extraction.
                         f"&hl=en_US") #"&hl=en&gl=US"
        params = parse_qs(r.text)
        if 'errorcode' in params: # status=fail
            return None, None, 'malformed', params['reason'][0]
        metadata = json.loads(params.get('player_response')[0])
        playabilityStatus = metadata['playabilityStatus']['status']
        if playabilityStatus != "OK":
            playabilityReason = metadata['playabilityStatus']['reason']
            player_error = f"{playabilityStatus}: {playabilityReason}"
            if playabilityStatus == "UNPLAYABLE":
                continue # try again with next el value (or fail as exhausted)
            # without videoDetails, there's only the error messge (playabilityStatus,responseContext,trackingParams)
            maybe_metadata = metadata if 'videoDetails' in metadata else None
            return None, maybe_metadata, 'player', player_error
        if 'liveStreamability' in metadata['playabilityStatus']:
            # can also check .microformat.liveBroadcastDetails.isLiveNow
            return None, metadata, 'livestream', None
        if not 'formats' in metadata['streamingData']:
            #TODO: hls only video with those params (kAZCrtJJaAo):
            # "isLiveDefaultBroadcast": true,
            # "isLowLatencyLiveStream": true,
            # "isLiveContent": true,
            # "isPostLiveDvr": true
            # NOTE(review): this branch's statement body (original lines
            # ~136-137) was lost in extraction; as reproduced, the 'if' above
            # has no body and the function is not runnable as-is.
        formats = metadata['streamingData']['formats']
        for (i, v) in enumerate(formats):
            # only scrambled streams carry a cipher; plain ones already have 'url'
            if not ('cipher' in v or 'signatureCipher' in v): continue
            cipher = parse_qs(v.get('cipher') or v.get('signatureCipher'))
            formats[i]['url'] = unscramble(cipher, algo)
        # todo: check if we have urls or try again
        url = sorted(formats, key=lambda k: k['height'], reverse=True)[0]['url']
        # NOTE(review): 'gcr' in the url's query presumably marks a
        # geo-restricted stream -- confirm.
        if 'gcr' in parse_qs(url):
            return None, metadata, 'geolocked', None
        return url, metadata, None, None
    return None, metadata, 'exhausted', player_error
def unscramble(cipher, algo): # test video id: UxxajLWwzqY
    """Descramble a stream signature and append it to the stream url.

    cipher: parse_qs()-style dict of lists; 's' holds the scrambled
            signature, 'url' the bare stream url; optional 'sp' names the
            signature query parameter (default 'signature') and optional
            'sig' is an already-descrambled signature that wins over 's'.
    algo:   space-separated ops: 'r' reverse, 'sN' drop the first N chars,
            'wN' swap positions 0 and N (N taken modulo current length).
    Returns the url with '&<sp>=<sig>' appended.

    NOTE(review): reconstructed from an extraction that fused line numbers
    into the code; one original line (~159) is unaccounted for -- verify
    against the upstream file.
    """
    signature = list(cipher['s'][0])
    for c in algo.split():
        # ops without a digit (plain 'r') get ix == 0
        op, ix = re.match(r"([rsw])(\d+)?", c).groups()
        ix = int(ix) % len(signature) if ix else 0
        if op == 'r': signature = list(reversed(signature))
        if op == 's': signature = signature[ix:]
        if op == 'w': signature[0], signature[ix] = signature[ix], signature[0]
    sp = cipher.get('sp', ['signature'])[0]
    sig = cipher.get('sig', [''.join(signature)])[0]
    return f"{cipher['url'][0]}&{sp}={sig}"
def prepare_metadata(metadata):
    """Flatten a player_response object into a template-friendly dict.

    metadata: the parsed player_response, as produced by get_video_info().
    NOTE(review): large parts of this function's structural lines were lost
    in extraction (see notes inline); not runnable as reproduced.
    """
    # the two main sub-objects of a player_response
    meta1 = metadata['videoDetails']
    meta2 = metadata['microformat']['playerMicroformatRenderer']
    # infocards and endscreen elements are optional in a player_response
    cards = metadata['cards']['cardCollectionRenderer']['cards'] \
        if 'cards' in metadata else []
    endsc = metadata['endscreen']['endscreenRenderer']['elements'] \
        if 'endscreen' in metadata else []

    # TODO: wrong on non-4:3 and non-16:9 videos! (e.g. l06PlYNShpQ)
    #aspect_ratio = meta2['embed']['width'] / meta2['embed']['height'], # sometimes absent
    aspect_ratio = meta2['thumbnail']['thumbnails'][0]['width'] / meta2['thumbnail']['thumbnails'][0]['height']

    # NOTE(review): the 'subtitles = sorted([' opener (original lines
    # ~178-179) was lost in extraction; the expression below maps each
    # caption track, sorted so human-made tracks come before auto-generated.
        {'url': cc['baseUrl'],
         'code': cc['languageCode'],
         'autogenerated': cc.get('kind') == "asr",
         'name': cc['name']['simpleText']}
        for cc in metadata['captions']['playerCaptionsTracklistRenderer']['captionTracks']
    ], key=lambda cc: cc['autogenerated']) if 'captions' in metadata and 'captionTracks' in metadata['captions']['playerCaptionsTracklistRenderer'] else [] # TODO<,^: cleanup
    # externals URLs are redirected through youtube.com/redirect, but we
    # may encounter internal URLs, too
    # NOTE(review): this looks like the body of a nested 'def clean_url(url):'
    # whose def line (original line ~187) was lost in extraction --
    # parse_infocard/parse_endcard below call clean_url(); it extracts the
    # real target from the 'q' query parameter, falling back to the url
    # itself for non-redirect urls.
    url = parse_qs(urlparse(url).query).get('q',[url])[0]
# Remove left-/rightmost word from string:
def delL(s):
    """Drop the first space-separated word of *s* ('by Chan' -> 'Chan'); '' if no space."""
    return s.partition(' ')[2]
def delR(s):
    """Drop the last space-separated word of *s* ('1,234 views' -> '1,234'); '' if no space."""
    return s.rpartition(' ')[0]
# Thousands separator aware int():
def intT(s):
    """int() that tolerates ',' thousands separators: '1,234' -> 1234."""
    return int(s.replace(',', ''))
def parse_infocard(card):
    """Normalize one infocard into {'type': ctype, 'content': {...}}.

    NOTE(review): the 'content = {'/'}' openers and closers of the
    individual branches (original lines ~202-203, 209-210, 218-219,
    227-228, 233-235) were lost in extraction -- not runnable as-is;
    restore from the upstream file.
    """
    card = card['cardRenderer']
    # the single key of card['content'] names the card type
    ctype = list(card['content'].keys())[0]
    content = card['content'][ctype]
    if ctype == "pollRenderer":
        # poll: question text plus (answer, vote-count) pairs
            'question': content['question']['simpleText'],
            'answers': [(a['text']['simpleText'], a['numVotes']) \
                for a in content['choices']],
    elif ctype == "videoInfoCardContentRenderer":
            'video_id': content['action']['watchEndpoint']['videoId'],
            'title': content['videoTitle']['simpleText'],
            'author': delL(content['channelName']['simpleText']),
            'length': content['lengthString']['simpleText'], # '23:03'
            'views': intT(delR(content['viewCountText']['simpleText'])),
    elif ctype == "playlistInfoCardContentRenderer":
            'playlist_id': content['action']['watchEndpoint']['playlistId'],
            'video_id': content['action']['watchEndpoint']['videoId'],
            'title': content['playlistTitle']['simpleText'],
            'author': delL(content['channelName']['simpleText']),
            'n_videos': intT(content['playlistVideoCount']['simpleText']),
    elif ctype == "simpleCardContentRenderer" and 'urlEndpoint' in content.get('command',{}).keys(): # <TODO: cleanup
            'url': clean_url(content['command']['urlEndpoint']['url']),
            'domain': content['displayDomain']['simpleText'],
            'title': content['title']['simpleText'],
            # XXX: no thumbnails for infocards
    # fallback for unknown card types: embed a pretty-printed dump
    content = {'error': f"{ctype} is not implemented; <pre>{pprint.pformat(card)}</pre>"}
    return {'type': ctype, 'content': content}
def mkthumbs(thumbs):
    """Map thumbnail height -> url for a list of {'height','url',...} dicts."""
    return {e['height']: e['url'] for e in thumbs}
def parse_endcard(card):
    """Normalize one endscreen element into {'type': ctype, 'content': {...}}.

    NOTE(review): the 'content = {'/'}' openers and closers of the
    individual branches (original lines ~246, 250-252, 258-260, 266-269,
    274-276) were lost in extraction -- not runnable as-is; restore from
    the upstream file.
    """
    card = card.get('endscreenElementRenderer', card) #only sometimes nested
    ctype = card['style']
    if ctype == "CHANNEL":
            'channel_id': card['endpoint']['browseEndpoint']['browseId'],
            'title': card['title']['simpleText'],
            'icons': mkthumbs(card['image']['thumbnails']),
    elif ctype == "VIDEO":
            'video_id': card['endpoint']['watchEndpoint']['videoId'],
            'title': card['title']['simpleText'],
            'length': card['videoDuration']['simpleText'], # '12:21'
            'views': delR(card['metadata']['simpleText']),
            # XXX: no channel name
    elif ctype == "PLAYLIST":
            'playlist_id': card['endpoint']['watchEndpoint']['playlistId'],
            'video_id': card['endpoint']['watchEndpoint']['videoId'],
            'title': card['title']['simpleText'],
            'author': delL(card['metadata']['simpleText']),
            'n_videos': intT(delR(card['playlistLength']['simpleText'])),
    elif ctype == "WEBSITE" or ctype == "CREATOR_MERCHANDISE":
            'url': clean_url(card['endpoint']['urlEndpoint']['url']),
            # NOTE(review): 'url' here presumably refers to a local cleaned
            # via clean_url on a line lost in extraction -- confirm.
            'domain': urlparse(url).netloc, # TODO: remove .domain
            'title': card['title']['simpleText'],
            'icons': mkthumbs(card['image']['thumbnails']),
    # fallback for unknown endcard styles: embed a pretty-printed dump
    content = {'error': f"{ctype} is not implemented; <pre>{pprint.pformat(card)}</pre>"}
    return {'type': ctype, 'content': content}
# Full ISO 3166-1 alpha-2 country-code universe, used to compute the
# complement ("blacklisted") of a video's availableCountries whitelist.
all_countries = """AD AE AF AG AI AL AM AO AQ AR AS AT AU AW AX AZ BA BB BD
BE BF BG BH BI BJ BL BM BN BO BQ BR BS BT BV BW BY BZ CA CC CD CF CG CH
CI CK CL CM CN CO CR CU CV CW CX CY CZ DE DJ DK DM DO DZ EC EE EG EH ER
ES ET FI FJ FK FM FO FR GA GB GD GE GF GG GH GI GL GM GN GP GQ GR GS GT
GU GW GY HK HM HN HR HT HU ID IE IL IM IN IO IQ IR IS IT JE JM JO JP KE
KG KH KI KM KN KP KR KW KY KZ LA LB LC LI LK LR LS LT LU LV LY MA MC MD
ME MF MG MH MK ML MM MN MO MP MQ MR MS MT MU MV MW MX MY MZ NA NC NE NF
NG NI NL NO NP NR NU NZ OM PA PE PF PG PH PK PL PM PN PR PS PT PW PY QA
RE RO RS RU RW SA SB SC SD SE SG SH SI SJ SK SL SM SN SO SR SS ST SV SX
SY SZ TC TD TF TG TH TJ TK TL TM TN TO TR TT TV TW TZ UA UG UM US UY UZ
VA VC VE VG VI VN VU WF WS YE YT ZA ZM ZW""".split()
# NOTE(review): 'meta2' comes from the enclosing prepare_metadata(); a video
# without region restrictions may lack 'availableCountries' entirely, which
# would raise KeyError here -- TODO confirm upstream.
whitelisted = sorted(meta2['availableCountries'])
blacklisted = sorted(set(all_countries) - set(whitelisted))
# NOTE(review): the 'return {' opener (original lines ~294-295) and the
# closing brace of prepare_metadata's result dict were lost in extraction;
# the pairs below are the flattened, template-facing view of the metadata.
    'title': meta1['title'],
    'author': meta1['author'],
    'channel_id': meta1['channelId'],
    'description': meta1['shortDescription'],
    'published': meta2['publishDate'],
    'views': meta1['viewCount'],
    'length': int(meta1['lengthSeconds']),
    'rating': meta1['averageRating'],
    'category': meta2['category'],
    'aspectr': aspect_ratio,
    'unlisted': meta2['isUnlisted'],
    'countries': whitelisted,
    'blacklisted': blacklisted,
    'poster': meta2['thumbnail']['thumbnails'][0]['url'],
    'infocards': [parse_infocard(card) for card in cards],
    'endcards': [parse_endcard(card) for card in endsc],
    'subtitles': subtitles,
# NOTE(review): this fragment references 'args', 'codecs' and 'sys' -- it is
# presumably the body of a small debug helper (e.g. 'def pp(*args):') whose
# def line and companion imports were lost in extraction; it pretty-prints
# its arguments to stderr with explicit UTF-8 encoding.
from pprint import pprint
pprint(args, stream=codecs.getwriter("utf-8")(sys.stderr.buffer))