Compare commits

5 Commits
Author       SHA1        Message                                                       Date
Synarp       01f8b2faf2  Merge 440238ebc9 into 4b5eec0aaa                              2024-11-25 14:32:27 +05:30
Jakob Kruse  4b5eec0aaa  [ie/chaturbate] Fix support for non-public streams (#11624)  2024-11-24 22:20:30 +00:00
                         Fix bug in 720b3dc453
                         Closes #11623
                         Authored by: jkruse
Synarp       440238ebc9  fix displayed page number                                     2024-06-12 02:45:36 +02:00
Synarp       5ea1d902d3  fix style issues                                              2024-06-12 02:34:23 +02:00
Synarp       47fb5ba647  workaround for the 1000 item API-limit                        2024-06-12 01:46:57 +02:00
2 changed files with 46 additions and 13 deletions

yt_dlp/extractor/chaturbate.py

@@ -59,16 +59,15 @@ def _extract_from_api(self, video_id, tld):
                 'Accept': 'application/json',
             }, fatal=False, impersonate=True) or {}
 
-        status = response.get('room_status')
-        if status != 'public':
-            if error := self._ERROR_MAP.get(status):
-                raise ExtractorError(error, expected=True)
-            self.report_warning('Falling back to webpage extraction')
-            return None
-
         m3u8_url = response.get('url')
         if not m3u8_url:
-            self.raise_geo_restricted()
+            status = response.get('room_status')
+            if error := self._ERROR_MAP.get(status):
+                raise ExtractorError(error, expected=True)
+            if status == 'public':
+                self.raise_geo_restricted()
+            self.report_warning(f'Got status "{status}" from API; falling back to webpage extraction')
+            return None
 
         return {
             'id': video_id,

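The fix keys the fallback on whether the API returned an HLS URL rather than on room_status alone: a status with a known error mapping still raises, a "public" status without a URL is now treated as geo-restriction, and any other status produces a warning and webpage fallback. A minimal standalone sketch of the new decision flow (the names and ERROR_MAP entries are illustrative placeholders, not yt-dlp's actual _ERROR_MAP):

# Standalone sketch; ERROR_MAP values are placeholders.
ERROR_MAP = {
    'offline': 'Room is currently offline',
    'private': 'Room is currently in a private show',
}

def decide(response):
    """Classify an API response as 'play', 'error', 'geo' or 'fallback'."""
    if response.get('url'):
        return 'play'      # any room that returns an HLS URL is playable
    status = response.get('room_status')
    if status in ERROR_MAP:
        return 'error'     # known terminal state: raise ExtractorError(expected=True)
    if status == 'public':
        return 'geo'       # public room without a URL: raise_geo_restricted()
    return 'fallback'      # anything else: warn and fall back to the webpage

assert decide({'url': 'https://example.invalid/live.m3u8'}) == 'play'
assert decide({'room_status': 'offline'}) == 'error'
assert decide({'room_status': 'public'}) == 'geo'
assert decide({}) == 'fallback'
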
yt_dlp/extractor/lbry.py

@@ -78,7 +78,7 @@ def _parse_stream(self, stream, url):
 
         return info
 
-    def _fetch_page(self, display_id, url, params, page):
+    def _fetch_page(self, display_id, url, params, metapage_nr, page):
         page += 1
         page_params = {
             'no_totals': True,
@@ -86,8 +86,12 @@ def _fetch_page(self, display_id, url, params, page):
             'page_size': self._PAGE_SIZE,
             **params,
         }
+        if metapage_nr == 0:
+            resource = f'page {page}'
+        else:
+            resource = f'page {metapage_nr+1}_{page}'
         result = self._call_api_proxy(
-            'claim_search', display_id, page_params, f'page {page}')
+            'claim_search', display_id, page_params, resource)
         for item in traverse_obj(result, ('items', lambda _, v: v['name'] and v['claim_id'])):
             yield {
                 **self._parse_stream(item, url),
@@ -96,6 +100,32 @@ def _fetch_page(self, display_id, url, params, page):
                 'url': self._permanent_url(url, item['name'], item['claim_id']),
             }
 
+    def _metapage_entries(self, display_id, url, params):
+        if 'release_time' in params:
+            raise ExtractorError('release_time isn\'t allowed because _metapage_entries needs to specify it.')
+        if not ('order_by' in params and params['order_by'] == ['release_time']):
+            raise ExtractorError('videos must be sorted by release_time for _metapage_entries to work.')
+
+        last_metapage = []
+        metapage = OnDemandPagedList(
+            functools.partial(self._fetch_page, display_id, url, params, 0),
+            self._PAGE_SIZE).getslice()
+        metapage_nr = 1
+
+        while len(metapage) > 0:
+            yield from metapage
+
+            next_metapage_params = {
+                **params,
+                'release_time': '<={}'.format(metapage[-1]['release_timestamp']),
+            }
+
+            last_metapage = metapage
+            metapage = OnDemandPagedList(
+                functools.partial(self._fetch_page, display_id, url, next_metapage_params, metapage_nr),
+                self._PAGE_SIZE).getslice()
+            metapage = [x for x in metapage if x not in last_metapage]
+            metapage_nr += 1
+
     def _playlist_entries(self, url, display_id, claim_param, metadata):
         qs = parse_qs(url)
         content = qs.get('content', [None])[0]
@@ -123,8 +153,12 @@ def _playlist_entries(self, url, display_id, claim_param, metadata):
             languages.append('none')
         params['any_languages'] = languages
 
-        entries = OnDemandPagedList(
-            functools.partial(self._fetch_page, display_id, url, params),
-            self._PAGE_SIZE)
+        if qs.get('order', ['new'])[0] == 'new':
+            entries = self._metapage_entries(display_id, url, params)
+        else:
+            self.report_warning('Extraction is limited to 1000 Videos when not sorting by newest.')
+            entries = OnDemandPagedList(
+                functools.partial(self._fetch_page, display_id, url, params, 0),
+                self._PAGE_SIZE)
 
         return self.playlist_result(
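
The lbry.py hunks work around claim_search refusing to serve results past the first 1000 for a given query: once a query is exhausted, _metapage_entries re-issues it anchored at release_time <= the last timestamp seen and filters out items already yielded from the previous metapage. That is why it insists on order_by == ['release_time'] and on release_time not being preset. A minimal standalone sketch of the same re-anchoring loop (fetch() is a stub in place of the _call_api_proxy/claim_search machinery; MAX_ITEMS and PAGE_SIZE are illustrative values):

# Generic sketch of the metapage workaround: keyset-style pagination
# over an API whose offset pagination is capped at MAX_ITEMS per query.
MAX_ITEMS = 1000
PAGE_SIZE = 50

def fetch(params, page):
    """Stand-in for one claim_search call; returns a list of dicts,
    sorted by descending 'release_timestamp'."""
    raise NotImplementedError

def capped_query(params):
    """Everything a single anchored query can reach (at most MAX_ITEMS)."""
    items = []
    for page in range(1, MAX_ITEMS // PAGE_SIZE + 1):
        batch = fetch(params, page)
        if not batch:
            break
        items += batch
    return items

def all_entries(params):
    metapage = capped_query(params)
    while metapage:
        yield from metapage
        # Re-anchor at or below the oldest timestamp served so far. Items
        # sharing that exact timestamp come back again, so filter them out.
        anchored = {**params, 'release_time': f'<={metapage[-1]["release_timestamp"]}'}
        previous = metapage
        metapage = [x for x in capped_query(anchored) if x not in previous]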