Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-23 19:33:59 +01:00)

Commit 7f36e39676: Merge remote-tracking branch 'FiloSottille/supports'

Conflicts:
    youtube-dl
youtube-dl.exe (BIN): binary file not shown.
@@ -13,6 +13,8 @@
 import urllib2
 import email.utils
 import xml.etree.ElementTree
+import random
+import math
 from urlparse import parse_qs
 
 try:
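The two imports added in this hunk exist to support the new YoukuIE extractor introduced later in the diff: `random` feeds the session-id generator `_gen_sid`, and `math` drives the seed-based alphabet shuffle in `_get_file_ID_mix_string`. As a rough standalone sketch (not part of the patch), the generated sid is simply a millisecond timestamp with two random numeric suffixes:

import random
import time

def gen_sid():
    # mirrors YoukuIE._gen_sid below: epoch milliseconds plus two random blocks
    now_ms = int(time.time() * 1000)
    return '%d%d%d' % (now_ms, random.randint(1000, 1998), random.randint(1000, 9999))

print(gen_sid())  # value varies per call; roughly a 21-digit string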
@@ -95,7 +97,7 @@ def _real_extract(self, url):
 class YoutubeIE(InfoExtractor):
     """Information extractor for youtube.com."""
 
-    _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|tube.majestyc.net/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
+    _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|tube\.majestyc\.net/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
     _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
     _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
     _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
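The only change in this hunk is that the two dots in `tube.majestyc.net` are now escaped inside `_VALID_URL`. An unescaped `.` in a regular expression matches any character, so the old pattern would also have accepted lookalike host names. A small illustrative check (not part of the patch; the URL is hypothetical):

import re

OLD = r'tube.majestyc.net/'
NEW = r'tube\.majestyc\.net/'

url = 'http://tubexmajestycxnet/watch'  # hypothetical lookalike host
print(bool(re.search(OLD, url)))  # True  -- '.' matches the 'x' characters too
print(bool(re.search(NEW, url)))  # False -- only literal dots are accepted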
@@ -2956,6 +2958,129 @@ def _real_extract(self, url):
         return [info]
 
 
+
+class YoukuIE(InfoExtractor):
+
+    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+    IE_NAME = u'Youku'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, file_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+
+    def _gen_sid(self):
+        nowTime = int(time.time() * 1000)
+        random1 = random.randint(1000, 1998)
+        random2 = random.randint(1000, 9999)
+
+        return "%d%d%d" % (nowTime, random1, random2)
+
+    def _get_file_ID_mix_string(self, seed):
+        mixed = []
+        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+        seed = float(seed)
+        for i in range(len(source)):
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
+            mixed.append(source[int(index)])
+            source.remove(source[int(index)])
+        #return ''.join(mixed)
+        return mixed
+
+    def _get_file_id(self, fileId, seed):
+        mixed = self._get_file_ID_mix_string(seed)
+        ids = fileId.split('*')
+        realId = []
+        for ch in ids:
+            if ch:
+                realId.append(mixed[int(ch)])
+        return ''.join(realId)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group('ID')
+
+        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+        request = urllib2.Request(info_url, None, std_headers)
+        try:
+            self.report_download_webpage(video_id)
+            jsondata = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+            return
+
+        self.report_extraction(video_id)
+        try:
+            config = json.loads(jsondata)
+
+            video_title = config['data'][0]['title']
+            seed = config['data'][0]['seed']
+
+            format = self._downloader.params.get('format', None)
+            supported_format = config['data'][0]['streamfileids'].keys()
+
+            if format is None or format == 'best':
+                if 'hd2' in supported_format:
+                    format = 'hd2'
+                else:
+                    format = 'flv'
+                ext = u'flv'
+            elif format == 'worst':
+                format = 'mp4'
+                ext = u'mp4'
+            else:
+                format = 'flv'
+                ext = u'flv'
+
+            fileid = config['data'][0]['streamfileids'][format]
+            seg_number = len(config['data'][0]['segs'][format])
+
+            keys = []
+            for i in xrange(seg_number):
+                keys.append(config['data'][0]['segs'][format][i]['k'])
+
+            #TODO check error
+            #youku only could be viewed from mainland china
+        except:
+            self._downloader.trouble(u'ERROR: unable to extract info section')
+            return
+
+        files_info = []
+        sid = self._gen_sid()
+        fileid = self._get_file_id(fileid, seed)
+
+        #column 8,9 of fileid represent the segment number
+        #fileid[7:9] should be changed
+        for index, key in enumerate(keys):
+
+            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+            info = {
+                'id': '%s_part%02d' % (video_id, index),
+                'url': download_url,
+                'uploader': None,
+                'title': video_title,
+                'ext': ext,
+                'format': u'NA'
+            }
+            files_info.append(info)
+
+        return files_info
+
+
 class XNXXIE(InfoExtractor):
     """Information extractor for xnxx.com"""
 
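The core of the new extractor is the file-id descrambling. The getPlayList response carries a numeric `seed` together with `streamfileids`, a '*'-separated list of indices; `_get_file_ID_mix_string` uses the seed in a small linear-congruential-style recurrence to shuffle a fixed 68-character alphabet, and `_get_file_id` maps each index into that shuffled alphabet. A standalone sketch of the same transformation (function names here are my own, not part of the patch):

import math

ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890"

def shuffled_alphabet(seed):
    # same recurrence as _get_file_ID_mix_string: seed = (seed * 211 + 30031) % 65536
    source, mixed = list(ALPHABET), []
    seed = float(seed)
    while source:
        seed = (seed * 211 + 30031) % 65536
        index = int(math.floor(seed / 65536 * len(source)))
        mixed.append(source.pop(index))
    return mixed

def decode_fileid(scrambled, seed):
    # scrambled looks like '12*0*67*...'; empty fields are skipped, as in _get_file_id
    mixed = shuffled_alphabet(seed)
    return ''.join(mixed[int(ch)] for ch in scrambled.split('*') if ch)

As the in-code comment notes, two characters of the decoded id carry the segment number, which is why `_real_extract` splices a hex segment index into the id (`fileid[0:8] + '%02X' + fileid[10:]`) when building each getFlvPath URL.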
@@ -2973,30 +3098,6 @@ def report_extraction(self, video_id):
         """Report information extraction"""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 
-    def extract_video_url(self, webpage):
-        "Extract the url for the video from the webpage"
-
-        result = re.search(self.VIDEO_URL_RE, webpage)
-        if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video url')
-        return urllib.unquote(result.group(1).decode('utf-8'))
-
-    def extract_video_title(self, webpage):
-        "Extract the title for the video from the webpage"
-
-        result = re.search(self.VIDEO_TITLE_RE, webpage)
-        if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video title')
-        return result.group(1).decode('utf-8')
-
-    def extract_video_thumbnail(self, webpage):
-        "Extract the thumbnail for the video from the webpage"
-
-        result = re.search(self.VIDEO_THUMB_RE, webpage)
-        if result is None:
-            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-        return result.group(1).decode('utf-8')
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3013,14 +3114,32 @@ def _real_extract(self, url):
             self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
             return
 
+        result = re.search(self.VIDEO_URL_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = urllib.unquote(result.group(1).decode('utf-8'))
+
+        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = result.group(1).decode('utf-8')
+
+        result = re.search(self.VIDEO_THUMB_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = result.group(1).decode('utf-8')
+
         info = {'id': video_id,
-                'url': self.extract_video_url(webpage),
+                'url': video_url,
                 'uploader': None,
                 'upload_date': None,
-                'title': self.extract_video_title(webpage),
+                'title': video_title,
                 'ext': 'flv',
                 'format': 'flv',
-                'thumbnail': self.extract_video_thumbnail(webpage),
+                'thumbnail': video_thumbnail,
                 'description': None,
                 'player_url': None}
 
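Together with the removal above, this hunk inlines the former `extract_video_url`, `extract_video_title` and `extract_video_thumbnail` helpers into `_real_extract`. The behavioural difference is the error path: the old helpers reported the failure via `trouble()` but then still called `.group(1)` on a match object that could be `None`, whereas the inlined version returns early after each failed search. Reduced to a sketch (names are illustrative, not from the patch):

import re

def extract_or_none(pattern, webpage, what):
    # early-return style used by the refactored _real_extract
    result = re.search(pattern, webpage)
    if result is None:
        print('ERROR: unable to extract %s' % what)
        return None
    return result.group(1)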
@@ -351,6 +351,7 @@ def gen_extractors():
         MixcloudIE(),
         StanfordOpenClassroomIE(),
         MTVIE(),
+        YoukuIE(),
         XNXXIE(),
 
         GenericIE()
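Adding `YoukuIE()` to `gen_extractors()` is what makes the new extractor reachable: the downloader walks this list in order and hands the URL to the first extractor that accepts it, which is also why the catch-all `GenericIE()` stays last. A hedged sketch of that dispatch; the `suitable()` check is an assumption about the surrounding code, which is not shown in this diff:

def pick_extractor(url, extractors):
    # assumed dispatch loop: first matching extractor wins
    for ie in extractors:
        if ie.suitable(url):  # suitable() assumed to test the URL against _VALID_URL
            return ie
    return None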