mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-30 07:28:19 +01:00
Compare commits
5 Commits
9b751a206c
...
01f8b2faf2
Author | SHA1 | Date | |
---|---|---|---|
|
01f8b2faf2 | ||
|
4b5eec0aaa | ||
|
440238ebc9 | ||
|
5ea1d902d3 | ||
|
47fb5ba647 |
|
@ -59,16 +59,15 @@ def _extract_from_api(self, video_id, tld):
|
|||
'Accept': 'application/json',
|
||||
}, fatal=False, impersonate=True) or {}
|
||||
|
||||
status = response.get('room_status')
|
||||
if status != 'public':
|
||||
if error := self._ERROR_MAP.get(status):
|
||||
raise ExtractorError(error, expected=True)
|
||||
self.report_warning('Falling back to webpage extraction')
|
||||
return None
|
||||
|
||||
m3u8_url = response.get('url')
|
||||
if not m3u8_url:
|
||||
status = response.get('room_status')
|
||||
if error := self._ERROR_MAP.get(status):
|
||||
raise ExtractorError(error, expected=True)
|
||||
if status == 'public':
|
||||
self.raise_geo_restricted()
|
||||
self.report_warning(f'Got status "{status}" from API; falling back to webpage extraction')
|
||||
return None
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
|
|
@ -78,7 +78,7 @@ def _parse_stream(self, stream, url):
|
|||
|
||||
return info
|
||||
|
||||
def _fetch_page(self, display_id, url, params, page):
|
||||
def _fetch_page(self, display_id, url, params, metapage_nr, page):
|
||||
page += 1
|
||||
page_params = {
|
||||
'no_totals': True,
|
||||
|
@ -86,8 +86,12 @@ def _fetch_page(self, display_id, url, params, page):
|
|||
'page_size': self._PAGE_SIZE,
|
||||
**params,
|
||||
}
|
||||
if metapage_nr == 0:
|
||||
resource = f'page {page}'
|
||||
else:
|
||||
resource = f'page {metapage_nr+1}_{page}'
|
||||
result = self._call_api_proxy(
|
||||
'claim_search', display_id, page_params, f'page {page}')
|
||||
'claim_search', display_id, page_params, resource)
|
||||
for item in traverse_obj(result, ('items', lambda _, v: v['name'] and v['claim_id'])):
|
||||
yield {
|
||||
**self._parse_stream(item, url),
|
||||
|
@ -96,6 +100,32 @@ def _fetch_page(self, display_id, url, params, page):
|
|||
'url': self._permanent_url(url, item['name'], item['claim_id']),
|
||||
}
|
||||
|
||||
def _metapage_entries(self, display_id, url, params):
    """Yield all matching videos sorted by newest, paging past the API cap.

    The claim_search API limits how many pages a single query can return.
    To go deeper, each "metapage" re-issues the query anchored at the
    release_time of the last item already yielded, then filters out items
    that overlap with the previous metapage.

    ``params`` must request ordering by release_time and must not set
    release_time itself — this function owns that key.

    Raises ExtractorError if ``params`` violates either precondition.
    """
    if 'release_time' in params:
        # Fixed typo in message: "_metapage_entires" -> "_metapage_entries"
        raise ExtractorError("release_time isn't allowed because _metapage_entries needs to specify it.")
    if params.get('order_by') != ['release_time']:
        raise ExtractorError('videos must be sorted by release_time for _metapage_entries to work.')

    last_metapage = []
    metapage = OnDemandPagedList(
        functools.partial(self._fetch_page, display_id, url, params, 0),
        self._PAGE_SIZE).getslice()

    metapage_nr = 1
    while metapage:
        yield from metapage

        # Anchor the next query at the oldest timestamp seen so far.
        # '<=' (not '<') avoids dropping items that share that timestamp,
        # at the cost of re-fetching boundary items (deduplicated below).
        next_metapage_params = {
            **params,
            'release_time': f'<={metapage[-1]["release_timestamp"]}',
        }
        last_metapage = metapage
        metapage = OnDemandPagedList(
            functools.partial(self._fetch_page, display_id, url, next_metapage_params, metapage_nr),
            self._PAGE_SIZE).getslice()
        # Drop entries already yielded from the previous metapage.
        metapage = [x for x in metapage if x not in last_metapage]
        metapage_nr += 1
|
||||
|
||||
def _playlist_entries(self, url, display_id, claim_param, metadata):
|
||||
qs = parse_qs(url)
|
||||
content = qs.get('content', [None])[0]
|
||||
|
@ -123,8 +153,12 @@ def _playlist_entries(self, url, display_id, claim_param, metadata):
|
|||
languages.append('none')
|
||||
params['any_languages'] = languages
|
||||
|
||||
if qs.get('order', ['new'])[0] == 'new':
|
||||
entries = self._metapage_entries(display_id, url, params)
|
||||
else:
|
||||
self.report_warning('Extraction is limited to 1000 Videos when not sorting by newest.')
|
||||
entries = OnDemandPagedList(
|
||||
functools.partial(self._fetch_page, display_id, url, params),
|
||||
functools.partial(self._fetch_page, display_id, url, params, 0),
|
||||
self._PAGE_SIZE)
|
||||
|
||||
return self.playlist_result(
|
||||
|
|
Loading…
Reference in New Issue
Block a user