Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-27 22:21:17 +01:00)
[youtube] use itertools.count instead of a "while True" loop and a manual counter
This commit is contained in:
parent 43ba5456b1
commit 755eb0320e
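For reference, a minimal standalone sketch of the refactor this commit applies (the names below are illustrative, not the extractor's own): a "while True" loop driven by a manually maintained counter is replaced by itertools.count(), which yields consecutive integers forever and leaves termination entirely to the existing break conditions.

import itertools

def visited_pages_old(last_page):
    # Old shape: counter initialised by hand and bumped at the end of each pass.
    seen = []
    page_num = 1
    while True:
        seen.append(page_num)
        if page_num == last_page:
            break
        page_num += 1          # manual counter maintenance
    return seen

def visited_pages_new(last_page):
    # New shape: itertools.count(1) yields 1, 2, 3, ... with no bookkeeping.
    seen = []
    for page_num in itertools.count(1):
        seen.append(page_num)
        if page_num == last_page:
            break              # count() never stops on its own, so the break still matters
    return seen

assert visited_pages_old(3) == visited_pages_new(3) == [1, 2, 3]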
@@ -705,10 +705,9 @@ def _real_extract(self, url):
 
         # Download playlist videos from API
         playlist_id = mobj.group(1) or mobj.group(2)
-        page_num = 1
         videos = []
 
-        while True:
+        for page_num in itertools.count(1):
             start_index = self._MAX_RESULTS * (page_num - 1) + 1
             if start_index >= 1000:
                 self._downloader.report_warning(u'Max number of results reached')
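The page arithmetic in this hunk is unchanged by the rewrite; a quick hedged check, using 50 as a stand-in for self._MAX_RESULTS (which is defined elsewhere in the class and not shown here):

import itertools

MAX_RESULTS = 50  # stand-in value; the extractor uses self._MAX_RESULTS

start_indices = []
for page_num in itertools.count(1):
    start_index = MAX_RESULTS * (page_num - 1) + 1
    if start_index >= 1000:
        # the extractor warns here (u'Max number of results reached'); this sketch just stops
        break
    start_indices.append(start_index)

# Same 1-based offsets the old while-True loop produced: 1, 51, 101, ...
assert start_indices[:3] == [1, 51, 101]
assert start_indices[-1] == 951  # last start index below the 1000 cap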
@@ -732,7 +731,6 @@ def _real_extract(self, url):
                 index = entry['yt$position']['$t']
                 if 'media$group' in entry and 'media$player' in entry['media$group']:
                     videos.append((index, entry['media$group']['media$player']['url']))
-            page_num += 1
 
         videos = [v[1] for v in sorted(videos)]
 
@@ -776,9 +774,7 @@ def _real_extract(self, url):
 
         # Download any subsequent channel pages using the json-based channel_ajax query
         if self._MORE_PAGES_INDICATOR in page:
-            while True:
-                pagenum = pagenum + 1
-
+            for pagenum in itertools.count(1):
                 url = self._MORE_PAGES_URL % (pagenum, channel_id)
                 page = self._download_webpage(url, channel_id,
                                               u'Downloading page #%s' % pagenum)
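One counting detail in this hunk: the old loop bumped pagenum at the top of each pass, so (assuming pagenum was initialised to 0 before the loop, which is outside this hunk) the body always saw 1, 2, 3, ...; itertools.count(1) produces that same sequence directly.

import itertools

# Old shape (top-of-loop increment), assuming pagenum started at 0 before the loop
# (the initialisation is not shown in the hunk):
old_seq = []
pagenum = 0
while True:
    pagenum = pagenum + 1   # increment first, so the loop body saw 1, 2, 3, ...
    old_seq.append(pagenum)
    if pagenum == 3:
        break

# New shape: count(1) already starts at the first value the body should see.
new_seq = list(itertools.islice(itertools.count(1), 3))

assert old_seq == new_seq == [1, 2, 3]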
@@ -821,9 +817,8 @@ def _real_extract(self, url):
         # all of them.
 
         video_ids = []
-        pagenum = 0
 
-        while True:
+        for pagenum in itertools.count(0):
             start_index = pagenum * self._GDATA_PAGE_SIZE + 1
 
             gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
@@ -848,8 +843,6 @@ def _real_extract(self, url):
             if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                 break
 
-            pagenum += 1
-
         urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
         url_results = [self.url_result(rurl, 'Youtube') for rurl in urls]
         return [self.playlist_result(url_results, playlist_title = username)]
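Taken together, the last two hunks leave the user-uploads pagination working as before: count(0) keeps the 1-based start_index offsets, and the loop still ends when a page comes back short. A rough sketch of the resulting loop shape, using a hypothetical fetch_ids(start_index) helper and a stand-in page size in place of the real gdata download and parsing (the accumulation into video_ids between the two hunks is paraphrased here, not quoted):

import itertools

GDATA_PAGE_SIZE = 50  # stand-in for self._GDATA_PAGE_SIZE

def collect_video_ids(fetch_ids):
    # fetch_ids(start_index) is a hypothetical helper returning one page of ids.
    video_ids = []
    for pagenum in itertools.count(0):
        # count(0) keeps the offsets unchanged: 1, 51, 101, ...
        start_index = pagenum * GDATA_PAGE_SIZE + 1
        ids_in_page = fetch_ids(start_index)
        video_ids.extend(ids_in_page)
        # A short page means the feed is exhausted; the trailing
        # 'pagenum += 1' of the old loop is no longer needed.
        if len(ids_in_page) < GDATA_PAGE_SIZE:
            break
    return video_ids

# Example: 120 fake ids served in pages of GDATA_PAGE_SIZE.
fake_ids = ['id%03d' % i for i in range(120)]
fetched = collect_video_ids(lambda start: fake_ids[start - 1:start - 1 + GDATA_PAGE_SIZE])
assert fetched == fake_ids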