2014-09-24 14:16:56 +02:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
import re
|
2016-01-10 20:09:53 +01:00
|
|
|
import binascii
|
|
|
|
# Optional dependency probe: AES-128 encrypted HLS streams can only be
# decrypted natively when PyCrypto/pycryptodome is installed. The flag is
# consulted by HlsFD.can_download() (and callers may delegate to ffmpeg
# when it is False).
try:
    from Crypto.Cipher import AES
    can_decrypt_frag = True
except ImportError:
    can_decrypt_frag = False
|
2013-09-23 17:59:27 +02:00
|
|
|
|
2021-02-08 17:46:01 +01:00
|
|
|
from ..downloader import _get_real_downloader
|
2015-07-28 22:28:30 +02:00
|
|
|
from .fragment import FragmentFD
|
2016-05-01 09:56:51 +02:00
|
|
|
from .external import FFmpegFD
|
2015-07-28 22:28:30 +02:00
|
|
|
|
2016-01-10 20:09:53 +01:00
|
|
|
from ..compat import (
|
2016-08-26 23:55:55 +02:00
|
|
|
compat_urllib_error,
|
2016-01-10 20:09:53 +01:00
|
|
|
compat_urlparse,
|
|
|
|
compat_struct_pack,
|
|
|
|
)
|
2014-12-13 12:24:42 +01:00
|
|
|
from ..utils import (
|
2016-01-10 20:09:53 +01:00
|
|
|
parse_m3u8_attributes,
|
2016-08-13 23:53:07 +02:00
|
|
|
update_url_query,
|
2013-09-23 17:59:27 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2016-02-19 19:29:24 +01:00
|
|
|
class HlsFD(FragmentFD):
    """A limited native HLS (m3u8) downloader that does not require ffmpeg.

    Downloads media playlists fragment by fragment, optionally decrypting
    AES-128 segments (when pycryptodome is available), skipping ad segments
    and delegating to FFmpegFD when the manifest uses unsupported features.
    """

    FD_NAME = 'hlsnative'

    @staticmethod
    def can_download(manifest, info_dict, allow_unplayable_formats=False, with_crypto=can_decrypt_frag):
        """Return True if this downloader can handle the given m3u8 manifest.

        manifest -- the m3u8 playlist text
        info_dict -- the format's info dict (only 'is_live' is consulted)
        allow_unplayable_formats -- when true, do not reject streams whose
            encryption method we cannot decrypt
        with_crypto -- whether AES-128 decryption support is available
        """
        UNSUPPORTED_FEATURES = [
            # r'#EXT-X-BYTERANGE',  # playlists composed of byte ranges of media files [2]

            # Live streams heuristic does not always work (e.g. geo restricted to Germany
            # http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
            # r'#EXT-X-MEDIA-SEQUENCE:(?!0$)',  # live streams [3]

            # This heuristic also is not correct since segments may not be appended as well.
            # Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite
            # no segments will definitely be appended to the end of the playlist.
            # r'#EXT-X-PLAYLIST-TYPE:EVENT',  # media segments may be appended to the end of
            #                                 # event media playlists [4]
            r'#EXT-X-MAP:',  # media initialization [5]

            # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
            # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
            # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
            # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
            # 5. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.5
        ]
        if not allow_unplayable_formats:
            UNSUPPORTED_FEATURES += [
                r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)',  # encrypted streams [1]
            ]
        check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
        is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
        # AES-128 streams are downloadable only with crypto support, and even
        # then not in combination with byte ranges.
        check_results.append(with_crypto or not is_aes128_enc)
        check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))
        check_results.append(not info_dict.get('is_live'))
        return all(check_results)

    def real_download(self, filename, info_dict):
        """Download the stream described by info_dict to filename.

        Returns True on success, False on failure. Falls back to FFmpegFD
        when the manifest uses features hlsnative cannot handle, or hands
        the collected fragment URLs to an external downloader when one is
        configured.
        """
        man_url = info_dict['url']
        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)

        urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
        # Follow redirects so relative fragment URLs resolve against the
        # final manifest location.
        man_url = urlh.geturl()
        s = urlh.read().decode('utf-8', 'ignore')

        if not self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')):
            if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
                # ffmpeg cannot forward these extractor-specific parameters,
                # so delegation is not an option here.
                self.report_error('pycryptodome not found. Please install it.')
                return False
            if self.can_download(s, info_dict, with_crypto=True):
                self.report_warning('pycryptodome is needed to download this file with hlsnative')
            self.report_warning(
                'hlsnative has detected features it does not support, '
                'extraction will be delegated to ffmpeg')
            fd = FFmpegFD(self.ydl, self.params)
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            return fd.real_download(filename, info_dict)

        real_downloader = _get_real_downloader(info_dict, 'frag_urls', self.params, None)

        def is_ad_fragment_start(s):
            # Ad-break markers used by Anvato and Uplynk playlists.
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))

        def is_ad_fragment_end(s):
            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))

        fragment_urls = []

        # First pass: count media vs. ad fragments so progress reporting
        # knows the real total before any download starts.
        media_frags = 0
        ad_frags = 0
        ad_frag_next = False
        for line in s.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('#'):
                if is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False
                continue
            if ad_frag_next:
                ad_frags += 1
                continue
            media_frags += 1

        ctx = {
            'filename': filename,
            'total_frags': media_frags,
            'ad_frags': ad_frags,
        }

        if real_downloader:
            self._prepare_external_frag_download(ctx)
        else:
            self._prepare_and_start_frag_download(ctx)

        fragment_retries = self.params.get('fragment_retries', 0)
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
        test = self.params.get('test', False)

        extra_query = None
        extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
        if extra_param_to_segment_url:
            extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
        i = 0
        media_sequence = 0
        decrypt_info = {'METHOD': 'NONE'}
        key_list = []
        byte_range = {}
        frag_index = 0
        ad_frag_next = False
        # Second pass: actually download (or collect) the fragments while
        # tracking decryption keys, byte ranges and ad-break state.
        for line in s.splitlines():
            line = line.strip()
            if line:
                if not line.startswith('#'):
                    if ad_frag_next:
                        continue
                    frag_index += 1
                    # Skip fragments already downloaded in a previous
                    # (resumed) run.
                    if frag_index <= ctx['fragment_index']:
                        continue
                    frag_url = (
                        line
                        if re.match(r'^https?://', line)
                        else compat_urlparse.urljoin(man_url, line))
                    if extra_query:
                        frag_url = update_url_query(frag_url, extra_query)

                    if real_downloader:
                        # Just collect URLs; the external downloader fetches them.
                        fragment_urls.append(frag_url)
                        continue

                    count = 0
                    headers = info_dict.get('http_headers', {})
                    if byte_range:
                        # Work on a copy: mutating info_dict['http_headers']
                        # in place would leak the Range header into every
                        # subsequent fragment request (and back into the
                        # caller's info_dict).
                        headers = headers.copy()
                        headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
                    while count <= fragment_retries:
                        try:
                            success, frag_content = self._download_fragment(
                                ctx, frag_url, info_dict, headers)
                            if not success:
                                return False
                            break
                        except compat_urllib_error.HTTPError as err:
                            # Unavailable (possibly temporary) fragments may be served.
                            # First we try to retry then either skip or abort.
                            # See https://github.com/ytdl-org/youtube-dl/issues/10165,
                            # https://github.com/ytdl-org/youtube-dl/issues/10448).
                            count += 1
                            if count <= fragment_retries:
                                self.report_retry_fragment(err, frag_index, count, fragment_retries)
                    if count > fragment_retries:
                        if skip_unavailable_fragments:
                            i += 1
                            media_sequence += 1
                            self.report_skip_fragment(frag_index)
                            continue
                        self.report_error(
                            'giving up after %s fragment retries' % fragment_retries)
                        return False

                    if decrypt_info['METHOD'] == 'AES-128':
                        # Per the HLS spec, a missing IV defaults to the
                        # media sequence number as a 128-bit big-endian int.
                        iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
                        decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
                            self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
                        # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
                        # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
                        # not what it decrypts to.
                        if not test:
                            frag_content = AES.new(
                                decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
                    self._append_fragment(ctx, frag_content)
                    # We only download the first fragment during the test
                    if test:
                        break
                    i += 1
                    media_sequence += 1
                elif line.startswith('#EXT-X-KEY'):
                    decrypt_url = decrypt_info.get('URI')
                    decrypt_info = parse_m3u8_attributes(line[11:])
                    if decrypt_info['METHOD'] == 'AES-128':
                        if 'IV' in decrypt_info:
                            # IV attribute is a 0x-prefixed hex string;
                            # left-pad to 16 bytes.
                            decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
                        if not re.match(r'^https?://', decrypt_info['URI']):
                            decrypt_info['URI'] = compat_urlparse.urljoin(
                                man_url, decrypt_info['URI'])
                        if extra_query:
                            decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
                        if decrypt_url != decrypt_info['URI']:
                            # Key URI changed; force re-fetch of the key.
                            decrypt_info['KEY'] = None
                    # Record the key change (with the fragment index it
                    # applies from) for external downloaders.
                    key_data = decrypt_info.copy()
                    key_data['INDEX'] = frag_index
                    key_list.append(key_data)

                elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
                    media_sequence = int(line[22:])
                elif line.startswith('#EXT-X-BYTERANGE'):
                    # Format: <length>[@<start>]; a missing start continues
                    # from the previous range's end.
                    splitted_byte_range = line[17:].split('@')
                    sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
                    byte_range = {
                        'start': sub_range_start,
                        'end': sub_range_start + int(splitted_byte_range[0]),
                    }
                elif is_ad_fragment_start(line):
                    ad_frag_next = True
                elif is_ad_fragment_end(line):
                    ad_frag_next = False

        if real_downloader:
            info_copy = info_dict.copy()
            info_copy['url_list'] = fragment_urls
            info_copy['key_list'] = key_list
            fd = real_downloader(self.ydl, self.params)
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            success = fd.real_download(filename, info_copy)
            if not success:
                return False
        else:
            self._finish_frag_download(ctx)
        return True
|