Fix W504 and disable W503 (closes #20863)

Sergey M․ 2019-05-11 03:56:22 +07:00
parent d23e85515a
commit 3089bc748c
49 changed files with 211 additions and 211 deletions
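For context: in flake8 (pycodestyle), W503 flags a line break before a binary operator, while W504 flags a line break after one; the two warnings are mutually exclusive, and current PEP 8 recommends breaking before operators. This commit therefore rewraps multi-line expressions so the operator leads the continuation line and adds W503 to the ignore list in setup.cfg. A minimal sketch of the two styles (hypothetical variable names, not taken from the diff below):

    # Hypothetical values, for illustration only.
    first_term, second_term = 1, 2

    # W504 style (flagged and removed by this commit): operator trails the broken line.
    total = (first_term +
             second_term)

    # W503 style (adopted here, with the W503 warning disabled): operator leads the continuation.
    total = (first_term
             + second_term)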


@@ -45,12 +45,12 @@
     RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)
-    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
-                   test['info_dict']['age_limit'] != 18):
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
+                   or test['info_dict']['age_limit'] != 18):
         print('\nPotential missing age_limit check: {0}'.format(test['name']))
-    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
-                         test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
+                         and test['info_dict']['age_limit'] == 18):
         print('\nPotential false negative: {0}'.format(test['name']))
     else:


@@ -3,4 +3,4 @@ universal = True
 [flake8]
 exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git,venv
-ignore = E402,E501,E731,E741
+ignore = E402,E501,E731,E741,W503


@@ -44,16 +44,16 @@ def test_cbc_encrypt(self):
     def test_decrypt_text(self):
         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 16))
         self.assertEqual(decrypted, self.secret_msg)
         password = intlist_to_bytes(self.key).decode('utf-8')
         encrypted = base64.b64encode(
-            intlist_to_bytes(self.iv[:8]) +
-            b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
+            intlist_to_bytes(self.iv[:8])
+            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
         ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 32))
         self.assertEqual(decrypted, self.secret_msg)


@@ -34,8 +34,8 @@ def _make_testfunc(testfile):
     def test_func(self):
         as_file = os.path.join(TEST_DIR, testfile)
         swf_file = os.path.join(TEST_DIR, test_id + '.swf')
-        if ((not os.path.exists(swf_file)) or
-                os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+        if ((not os.path.exists(swf_file))
+                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
                 subprocess.check_call([


@@ -400,9 +400,9 @@ def check_deprecated(param, option, suggestion):
             else:
                 raise
-        if (sys.platform != 'win32' and
-                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
-                not params.get('restrictfilenames', False)):
+        if (sys.platform != 'win32'
+                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
+                and not params.get('restrictfilenames', False)):
             # Unicode filesystem API will throw errors (#1474, #13027)
             self.report_warning(
                 'Assuming --restrict-filenames since file system encoding '
@@ -440,9 +440,9 @@ def warn_if_short_id(self, argv):
                 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
         if idxs:
             correct_argv = (
-                ['youtube-dl'] +
-                [a for i, a in enumerate(argv) if i not in idxs] +
-                ['--'] + [argv[i] for i in idxs]
+                ['youtube-dl']
+                + [a for i, a in enumerate(argv) if i not in idxs]
+                + ['--'] + [argv[i] for i in idxs]
             )
             self.report_warning(
                 'Long argument string detected. '
@@ -850,8 +850,8 @@ def process_ie_result(self, ie_result, download=True, extra_info={}):
         if result_type in ('url', 'url_transparent'):
             ie_result['url'] = sanitize_url(ie_result['url'])
         extract_flat = self.params.get('extract_flat', False)
-        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
-                extract_flat is True):
+        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
+                or extract_flat is True):
             if self.params.get('forcejson', False):
                 self.to_stdout(json.dumps(ie_result))
             return ie_result
@@ -1619,9 +1619,9 @@ def is_wellformed(f):
                 # https://github.com/ytdl-org/youtube-dl/issues/10083).
                 incomplete_formats = (
                     # All formats are video-only or
-                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
+                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                     # all formats are audio-only
-                    all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
+                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
                 ctx = {
                     'formats': formats,
@@ -1947,8 +1947,8 @@ def compatible_formats(formats):
             else:
                 assert fixup_policy in ('ignore', 'never')
-        if (info_dict.get('requested_formats') is None and
-                info_dict.get('container') == 'm4a_dash'):
+        if (info_dict.get('requested_formats') is None
+                and info_dict.get('container') == 'm4a_dash'):
             if fixup_policy == 'warn':
                 self.report_warning(
                     '%s: writing DASH m4a. '
@@ -1967,9 +1967,9 @@ def compatible_formats(formats):
             else:
                 assert fixup_policy in ('ignore', 'never')
-        if (info_dict.get('protocol') == 'm3u8_native' or
-                info_dict.get('protocol') == 'm3u8' and
-                self.params.get('hls_prefer_native')):
+        if (info_dict.get('protocol') == 'm3u8_native'
+                or info_dict.get('protocol') == 'm3u8'
+                and self.params.get('hls_prefer_native')):
             if fixup_policy == 'warn':
                 self.report_warning('%s: malformed AAC bitstream detected.' % (
                     info_dict['id']))
@@ -1995,10 +1995,10 @@ def compatible_formats(formats):
     def download(self, url_list):
         """Download a given list of URLs."""
         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
-        if (len(url_list) > 1 and
-                outtmpl != '-' and
-                '%' not in outtmpl and
-                self.params.get('max_downloads') != 1):
+        if (len(url_list) > 1
+                and outtmpl != '-'
+                and '%' not in outtmpl
+                and self.params.get('max_downloads') != 1):
             raise SameFileError(outtmpl)
         for url in url_list:
@@ -2143,8 +2143,8 @@ def _format_note(self, fdict):
             if res:
                 res += ', '
             res += '%s container' % fdict['container']
-        if (fdict.get('vcodec') is not None and
-                fdict.get('vcodec') != 'none'):
+        if (fdict.get('vcodec') is not None
+                and fdict.get('vcodec') != 'none'):
             if res:
                 res += ', '
             res += fdict['vcodec']


@@ -230,14 +230,14 @@ def parse_retries(retries):
     if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
-    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
-               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
-               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
-               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
-               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
-               (opts.useid and '%(id)s.%(ext)s') or
-               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
-               DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'


@@ -2649,9 +2649,9 @@ def compat_shlex_quote(s):
     try:
         args = shlex.split('中文')
-        assert (isinstance(args, list) and
-                isinstance(args[0], compat_str) and
-                args[0] == '中文')
+        assert (isinstance(args, list)
+                and isinstance(args[0], compat_str)
+                and args[0] == '中文')
         compat_shlex_split = shlex.split
     except (AssertionError, UnicodeEncodeError):
         # Working around shlex issue with unicode strings on some python 2


@@ -330,15 +330,15 @@ def download(self, filename, info_dict):
         """
         nooverwrites_and_exists = (
-            self.params.get('nooverwrites', False) and
-            os.path.exists(encodeFilename(filename))
+            self.params.get('nooverwrites', False)
+            and os.path.exists(encodeFilename(filename))
         )
         if not hasattr(filename, 'write'):
             continuedl_and_exists = (
-                self.params.get('continuedl', True) and
-                os.path.isfile(encodeFilename(filename)) and
-                not self.params.get('nopart', False)
+                self.params.get('continuedl', True)
+                and os.path.isfile(encodeFilename(filename))
+                and not self.params.get('nopart', False)
             )
             # Check file already present


@@ -238,8 +238,8 @@ def write_metadata_tag(stream, metadata):
 def remove_encrypted_media(media):
-    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
-                                 'drmAdditionalHeaderSetId' not in e.attrib,
+    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib
+                                 and 'drmAdditionalHeaderSetId' not in e.attrib,
                        media))
@@ -267,8 +267,8 @@ def _get_unencrypted_media(self, doc):
         media = doc.findall(_add_ns('media'))
         if not media:
             self.report_error('No media found')
-        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
-                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
+        for e in (doc.findall(_add_ns('drmAdditionalHeader'))
+                  + doc.findall(_add_ns('drmAdditionalHeaderSet'))):
             # If id attribute is missing it's valid for all media nodes
             # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
             if 'id' not in e.attrib:


@@ -219,8 +219,8 @@ def frag_progress_hook(s):
             frag_total_bytes = s.get('total_bytes') or 0
             if not ctx['live']:
                 estimated_size = (
-                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) /
-                    (state['fragment_index'] + 1) * total_frags)
+                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
+                    / (state['fragment_index'] + 1) * total_frags)
                 state['total_bytes_estimate'] = estimated_size
             if s['status'] == 'finished':


@@ -76,12 +76,12 @@ def real_download(self, filename, info_dict):
                 return fd.real_download(filename, info_dict)
         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s or
-                    s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
+                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
         media_frags = 0
         ad_frags = 0


@@ -46,8 +46,8 @@ class DownloadContext(dict):
         is_test = self.params.get('test', False)
         chunk_size = self._TEST_FILE_SIZE if is_test else (
-            info_dict.get('downloader_options', {}).get('http_chunk_size') or
-            self.params.get('http_chunk_size') or 0)
+            info_dict.get('downloader_options', {}).get('http_chunk_size')
+            or self.params.get('http_chunk_size') or 0)
         ctx.open_mode = 'wb'
         ctx.resume_len = 0
@@ -123,11 +123,11 @@ def establish_connection():
                     content_len = int_or_none(content_range_m.group(3))
                     accept_content_len = (
                         # Non-chunked download
-                        not ctx.chunk_size or
+                        not ctx.chunk_size
                         # Chunked download and requested piece or
                         # its part is promised to be served
-                        content_range_end == range_end or
-                        content_len < range_end)
+                        or content_range_end == range_end
+                        or content_len < range_end)
                     if accept_content_len:
                         ctx.data_len = content_len
                         return
@@ -152,8 +152,8 @@ def establish_connection():
                     raise
             else:
                 # Examine the reported length
-                if (content_length is not None and
-                        (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
+                if (content_length is not None
+                        and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                     # The file had already been fully downloaded.
                     # Explanation to the above condition: in issue #175 it was revealed that
                     # YouTube sometimes adds or removes a few bytes from the end of the file,


@@ -59,9 +59,9 @@ def _real_extract(self, url):
             parsed_url = compat_urllib_parse_urlparse(url)
             av_val = av_res + len(parsed_url.netloc)
             confirm_url = (
-                parsed_url.scheme + '://' + parsed_url.netloc +
-                action + '?' +
-                compat_urllib_parse_urlencode({
+                parsed_url.scheme + '://' + parsed_url.netloc
+                + action + '?'
+                + compat_urllib_parse_urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,


@@ -32,8 +32,8 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         display_id = video_id[:8]
-        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
-                   'video=%s' % video_id)
+        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
+                   + 'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         duration = None


@@ -542,11 +542,11 @@ def extract(self, url):
             raise ExtractorError('An extractor error has occurred.', cause=e)
     def __maybe_fake_ip_and_retry(self, countries):
-        if (not self._downloader.params.get('geo_bypass_country', None) and
-                self._GEO_BYPASS and
-                self._downloader.params.get('geo_bypass', True) and
-                not self._x_forwarded_for_ip and
-                countries):
+        if (not self._downloader.params.get('geo_bypass_country', None)
+                and self._GEO_BYPASS
+                and self._downloader.params.get('geo_bypass', True)
+                and not self._x_forwarded_for_ip
+                and countries):
             country_code = random.choice(countries)
             self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
             if self._x_forwarded_for_ip:
@@ -682,8 +682,8 @@ def _guess_encoding_from_content(content_type, webpage_bytes):
     def __check_blocked(self, content):
         first_block = content[:512]
-        if ('<title>Access to this site is blocked</title>' in content and
-                'Websense' in first_block):
+        if ('<title>Access to this site is blocked</title>' in content
+                and 'Websense' in first_block):
             msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
             blocked_iframe = self._html_search_regex(
                 r'<iframe src="([^"]+)"', content,
@@ -701,8 +701,8 @@ def __check_blocked(self, content):
             if block_msg:
                 msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
             raise ExtractorError(msg, expected=True)
-        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
-                'blocklist.rkn.gov.ru' in content):
+        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
+                and 'blocklist.rkn.gov.ru' in content):
             raise ExtractorError(
                 'Access to this webpage has been blocked by decision of the Russian government. '
                 'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
@@ -1709,8 +1709,8 @@ def build_stream_name():
                     continue
                 else:
                     tbr = float_or_none(
-                        last_stream_inf.get('AVERAGE-BANDWIDTH') or
-                        last_stream_inf.get('BANDWIDTH'), scale=1000)
+                        last_stream_inf.get('AVERAGE-BANDWIDTH')
+                        or last_stream_inf.get('BANDWIDTH'), scale=1000)
                     format_id = []
                     if m3u8_id:
                         format_id.append(m3u8_id)
@@ -2504,8 +2504,8 @@ def _media_formats(src, cur_media_type, type_info={}):
                         if str_or_none(s_attr.get(lbl))
                     ]
                     width = int_or_none(s_attr.get('width'))
-                    height = (int_or_none(s_attr.get('height')) or
-                              int_or_none(s_attr.get('res')))
+                    height = (int_or_none(s_attr.get('height'))
+                              or int_or_none(s_attr.get('res')))
                     if not width or not height:
                         for lbl in labels:
                             resolution = parse_resolution(lbl)
@@ -2847,8 +2847,8 @@ def is_suitable(self, age_limit):
         return not any_restricted
     def extract_subtitles(self, *args, **kwargs):
-        if (self._downloader.params.get('writesubtitles', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writesubtitles', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_subtitles(*args, **kwargs)
         return {}
@@ -2873,8 +2873,8 @@ def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
         return ret
     def extract_automatic_captions(self, *args, **kwargs):
-        if (self._downloader.params.get('writeautomaticsub', False) or
-                self._downloader.params.get('listsubtitles')):
+        if (self._downloader.params.get('writeautomaticsub', False)
+                or self._downloader.params.get('listsubtitles')):
             return self._get_automatic_captions(*args, **kwargs)
         return {}
@@ -2882,9 +2882,9 @@ def _get_automatic_captions(self, *args, **kwargs):
         raise NotImplementedError('This method must be implemented by subclasses')
     def mark_watched(self, *args, **kwargs):
-        if (self._downloader.params.get('mark_watched', False) and
-                (self._get_login_info()[0] is not None or
-                 self._downloader.params.get('cookiefile') is not None)):
+        if (self._downloader.params.get('mark_watched', False)
+                and (self._get_login_info()[0] is not None
+                     or self._downloader.params.get('cookiefile') is not None)):
             self._mark_watched(*args, **kwargs)
     def _mark_watched(self, *args, **kwargs):


@@ -45,8 +45,8 @@ def _real_extract(self, url):
         sources_url = (try_get(
             video_data,
             (lambda x: x['plugins']['sources']['url'],
-             lambda x: x['sources']['url']), compat_str) or
-            'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
+             lambda x: x['sources']['url']), compat_str)
+            or 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
         video_sources = self._download_json(sources_url, video_id)
         body = video_sources.get('body')


@@ -70,8 +70,8 @@ def _real_extract(self, url):
             endpoint = next(
                 server['endpoint']
                 for server in servers
-                if url_or_none(server.get('endpoint')) and
-                'cloudfront' in server['endpoint'])
+                if url_or_none(server.get('endpoint'))
+                and 'cloudfront' in server['endpoint'])
         else:
             endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'


@@ -82,8 +82,8 @@ def extract_data(name):
         title = info.get('titleRaw') or data['title']
         description = info.get('descriptionRaw')
         thumbnail = info.get('socialMediaImage') or data.get('image')
-        duration = int_or_none(info.get('videoTotalSecondsDuration') or
-                               data.get('totalSecondsDuration'))
+        duration = int_or_none(info.get('videoTotalSecondsDuration')
+                               or data.get('totalSecondsDuration'))
         timestamp = unified_timestamp(info.get('publishDate'))
         return {


@@ -94,8 +94,8 @@ def _extract_lesson(chapters, lesson_id, lesson):
         chapter_number = None
         index = lesson.get('index')
         element_index = lesson.get('elementIndex')
-        if (isinstance(index, int) and isinstance(element_index, int) and
-                index < element_index):
+        if (isinstance(index, int) and isinstance(element_index, int)
+                and index < element_index):
             chapter_number = element_index - index
         chapter = (chapters[chapter_number - 1]
                    if chapter_number - 1 < len(chapters) else None)


@@ -2549,11 +2549,11 @@ def _real_extract(self, url):
             return self.url_result(mobj.group('url'))
         # Look for Ooyala videos
-        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
-                re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
-                re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
-                re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+        mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
+                or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+                or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
+                or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
         if mobj is not None:
             embed_token = self._search_regex(
                 r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
@@ -3221,8 +3221,8 @@ def _real_extract(self, url):
                 else:
                     formats.append({
                         'url': src,
-                        'ext': (mimetype2ext(src_type) or
-                                ext if ext in KNOWN_EXTENSIONS else 'mp4'),
+                        'ext': (mimetype2ext(src_type)
+                                or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
                     })
             if formats:
                 self._sort_formats(formats)


@@ -155,8 +155,8 @@ def extract_title(default=NO_DEFAULT):
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or
-                          self._og_search_thumbnail(webpage)),
+            'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image')
+                          or self._og_search_thumbnail(webpage)),
             'timestamp': parse_iso8601(
                 self._html_search_meta('date', webpage)),
             'formats': formats,


@@ -58,8 +58,8 @@ def _extract_metadata(self, url, video_id):
         title = video_meta.get('media_status')
         alt_title = video_meta.get('media_title')
         description = clean_html(
-            video_meta.get('media_description') or
-            video_meta.get('media_description_md'))
+            video_meta.get('media_description')
+            or video_meta.get('media_description_md'))
         duration = float_or_none(video_meta.get('media_duration'))
         uploader = video_meta.get('media_user_name')
         views = int_or_none(video_meta.get('media_views'))


@@ -47,8 +47,8 @@ def _real_extract(self, url):
         tags = [
             t['text']
             for t in tags_list
-            if isinstance(t, dict) and t.get('text') and
-            isinstance(t['text'], compat_str)]
+            if isinstance(t, dict) and t.get('text')
+            and isinstance(t['text'], compat_str)]
         return {
             'id': video_id,


@@ -77,13 +77,13 @@ def _real_extract(self, url):
         title = (
             self._html_search_meta(
-                ('ed_title', 'search.ed_title'), webpage, default=None) or
-            self._search_regex(
+                ('ed_title', 'search.ed_title'), webpage, default=None)
+            or self._search_regex(
                 r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1',
-                webpage, 'title', default=None, group='url') or
-            self._html_search_regex(
-                r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or
-            self._og_search_title(webpage)
+                webpage, 'title', default=None, group='url')
+            or self._html_search_regex(
+                r'<h1>([^<]+)</h1>', webpage, 'title', default=None)
+            or self._og_search_title(webpage)
         )
         file_id = self._search_regex(


@@ -60,8 +60,8 @@ def _initialize_api(self):
             language=self._APP_LANGUAGE,
             application_id=self._APP_PUBLICATION_ID)
-        self._login_url = (modules['user']['resources']['login']['uri'] +
-                           '/format/json').format(session_id=self._session_id)
+        self._login_url = (modules['user']['resources']['login']['uri']
+                           + '/format/json').format(session_id=self._session_id)
         self._logout_url = modules['user']['resources']['logout']['uri']


@@ -122,9 +122,9 @@ def _real_extract(self, url):
             formats = self._extract_bokecc_formats(webpage, video_id)
         else:
             formats = (
-                self._extract_rtmp_video(webpage) +
-                self._extract_http_video(webpage) +
-                self._extract_http_audio(webpage, video_id))
+                self._extract_rtmp_video(webpage)
+                + self._extract_http_video(webpage)
+                + self._extract_http_audio(webpage, video_id))
         self._sort_formats(formats)


@@ -383,9 +383,9 @@ def _real_extract(self, url):
                 self._sleep(5, video_id)
         self._sort_formats(formats)
-        title = (get_element_by_id('widget-videotitle', webpage) or
-                 clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or
-                 self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
+        title = (get_element_by_id('widget-videotitle', webpage)
+                 or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
+                 or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
         return {
             'id': video_id,


@@ -77,10 +77,10 @@ def _add_sub_element(element, name):
             return etree.SubElement(element, _add_ns(name))
         production_id = (
-            params.get('data-video-autoplay-id') or
-            '%s#001' % (
-                params.get('data-video-episode-id') or
-                video_id.replace('a', '/')))
+            params.get('data-video-autoplay-id')
+            or '%s#001' % (
+                params.get('data-video-episode-id')
+                or video_id.replace('a', '/')))
         req_env = etree.Element(_add_ns('soapenv:Envelope'))
         _add_sub_element(req_env, 'soapenv:Header')


@@ -118,8 +118,8 @@ def _extract_url(webpage):
                     (?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
                     (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
                     (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
-                """, webpage) or
-            re.search(
+                """, webpage)
+            or re.search(
                 r'''(?xs)
                     (?P<q1>["'])
                     (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
@@ -132,8 +132,8 @@ def _extract_url(webpage):
                     \[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
                 )
                 (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
-                ''', webpage) or
-            re.search(
+                ''', webpage)
+            or re.search(
                 r'''(?xs)
                     <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
                     (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)


@@ -47,8 +47,8 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)
-        title = (self._html_search_meta('title', webpage, default=None) or
-                 self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
+        title = (self._html_search_meta('title', webpage, default=None)
+                 or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
         video_id = self._search_regex(
             r'/config/video/(.+?)\.xml', webpage, 'video id')


@@ -80,8 +80,8 @@ def _real_extract(self, url):
         video_url = (self._html_search_regex(
             (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
              r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
-            webpage, 'video URL', default=None, group='url') or
-            'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
+            webpage, 'video URL', default=None, group='url')
+            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
         age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
             r'<strong>Views</strong>\s+([^<]+)<',


@@ -84,8 +84,8 @@ def _real_extract(self, url):
         # '__title' does not contain extra words such as sub-site name, "Video" etc.
         title = compat_urllib_parse_unquote_plus(
-            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or
-            self._og_search_title(webpage))
+            self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
+            or self._og_search_title(webpage))
         filename = self._search_regex(
             r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')


@@ -180,8 +180,8 @@ class AppleDailyIE(NextMediaIE):
     _URL_PATTERN = r'\{url: \'(.+)\'\}'
     def _fetch_title(self, page):
-        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
-                self._html_search_meta('description', page, 'news title'))
+        return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None)
+                or self._html_search_meta('description', page, 'news title'))
     def _fetch_thumbnail(self, page):
         return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)


@@ -369,14 +369,14 @@ def get_video_info(items):
         video_detail = watch_api_data.get('videoDetail', {})
         thumbnail = (
-            get_video_info(['thumbnail_url', 'thumbnailURL']) or
-            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
-            video_detail.get('thumbnail'))
+            get_video_info(['thumbnail_url', 'thumbnailURL'])
+            or self._html_search_meta('image', webpage, 'thumbnail', default=None)
+            or video_detail.get('thumbnail'))
         description = get_video_info('description')
-        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
-                     unified_timestamp(get_video_info('postedDateTime')))
+        timestamp = (parse_iso8601(get_video_info('first_retrieve'))
+                     or unified_timestamp(get_video_info('postedDateTime')))
         if not timestamp:
             match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
             if match:
@@ -395,9 +395,9 @@ def get_video_info(items):
                 view_count = int_or_none(match.replace(',', ''))
         view_count = view_count or video_detail.get('viewCount')
-        comment_count = (int_or_none(get_video_info('comment_num')) or
-                         video_detail.get('commentCount') or
-                         try_get(api_data, lambda x: x['thread']['commentCount']))
+        comment_count = (int_or_none(get_video_info('comment_num'))
+                         or video_detail.get('commentCount')
+                         or try_get(api_data, lambda x: x['thread']['commentCount']))
         if not comment_count:
             match = self._html_search_regex(
                 r'>Comments: <strong[^>]*>([^<]+)</strong>',
@@ -406,11 +406,11 @@ def get_video_info(items):
                 comment_count = int_or_none(match.replace(',', ''))
         duration = (parse_duration(
-            get_video_info('length') or
-            self._html_search_meta(
-                'video:duration', webpage, 'video duration', default=None)) or
-            video_detail.get('length') or
-            get_video_info('duration'))
+            get_video_info('length')
+            or self._html_search_meta(
+                'video:duration', webpage, 'video duration', default=None))
+            or video_detail.get('length')
+            or get_video_info('duration'))
         webpage_url = get_video_info('watch_url') or url


@@ -45,8 +45,8 @@ def _real_extract(self, url):
         entries = []
         conviva = data.get('convivaStatistics') or {}
-        live = (data.get('mediaElementType') == 'Live' or
-                data.get('isLive') is True or conviva.get('isLive'))
+        live = (data.get('mediaElementType') == 'Live'
+                or data.get('isLive') is True or conviva.get('isLive'))
         def make_title(t):
             return self._live_title(t) if live else t


@@ -31,8 +31,8 @@ def _extract(self, content_tree_url, video_id, domain='example.org', supportedfo
         title = metadata['title']
         auth_data = self._download_json(
-            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
-            compat_urllib_parse_urlencode({
+            self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code)
+            + compat_urllib_parse_urlencode({
                 'domain': domain,
                 'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
                 'embedToken': embed_token,


@@ -43,9 +43,9 @@ def cookie_to_dict(cookie):
     if cookie.discard is not None:
         cookie_dict['discard'] = cookie.discard
     try:
-        if (cookie.has_nonstandard_attr('httpOnly') or
-                cookie.has_nonstandard_attr('httponly') or
-                cookie.has_nonstandard_attr('HttpOnly')):
+        if (cookie.has_nonstandard_attr('httpOnly')
+                or cookie.has_nonstandard_attr('httponly')
+                or cookie.has_nonstandard_attr('HttpOnly')):
            cookie_dict['httponly'] = True
    except TypeError:
        pass


@@ -50,8 +50,8 @@ def _real_extract(self, url):
         video_id = mobj.group('id')
         channel = mobj.group('channel') or mobj.group('channel_2')
-        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
-                     '?permalink=true&rtmp=0') %
+        json_url = (('%s://%s.podomatic.com/entry/embed_params/%s'
+                     + '?permalink=true&rtmp=0') %
                     (mobj.group('proto'), channel, video_id))
         data_json = self._download_webpage(
             json_url, video_id, 'Downloading video info')


@@ -91,8 +91,8 @@ def extract_formats(node):
                     extract_formats(child)
                 elif child.tag.endswith('File'):
                     video_url = child.text
-                    if (not video_url or video_url in processed_urls or
-                            any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
+                    if (not video_url or video_url in processed_urls
+                            or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
                         continue
                     processed_urls.append(video_url)
                     ext = determine_ext(video_url)


@@ -55,8 +55,8 @@ def _real_extract(self, url):
             raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
         urls = player_params['releaseUrls']
-        theplatform_url = (urls.get('progressive') or urls.get('html') or
-                           urls.get('standard') or player_params['relatedItemsURL'])
+        theplatform_url = (urls.get('progressive') or urls.get('html')
+                           or urls.get('standard') or player_params['relatedItemsURL'])
         return {
             '_type': 'url_transparent',


@@ -275,8 +275,8 @@ def _real_extract(self, url):
         genres = video_info.get('genres')
         genre = (
-            genres[0] if genres and isinstance(genres, list) and
-            isinstance(genres[0], compat_str) else None)
+            genres[0] if genres and isinstance(genres, list)
+            and isinstance(genres[0], compat_str) else None)
         is_explicit = video_info.get('isExplicit')
         if is_explicit is True:


@@ -443,8 +443,8 @@ def _real_extract(self, url):
                 format_url = url_or_none(format_url)
                 if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
                     continue
-                if (format_id.startswith(('url', 'cache')) or
-                        format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
+                if (format_id.startswith(('url', 'cache'))
+                        or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
                     height = int_or_none(self._search_regex(
                         r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
                     formats.append({


@@ -70,9 +70,9 @@ def _real_extract(self, url):
         description = content.get('description')
         thumbnail = content.get('thumbnail')
-        timestamp = (int_or_none(content.get('release_date')) or
-                     int_or_none(content.get('release_date_ut')) or
-                     int_or_none(content.get('start_time')))
+        timestamp = (int_or_none(content.get('release_date'))
+                     or int_or_none(content.get('release_date_ut'))
+                     or int_or_none(content.get('start_time')))
         duration = int_or_none(content.get('duration'))
         series = content.get('program_title')
         age_limit = int_or_none(content.get('restriction_age'))


@@ -258,8 +258,8 @@ def _extract_entries(self, playlist_data_url, show_id, note, query):
             transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
         if playlist_data is None:
             return [None, None]
-        drama_list = (get_element_by_class('p-drama-grid', playlist_data) or
-                      get_element_by_class('p-drama-half-row', playlist_data))
+        drama_list = (get_element_by_class('p-drama-grid', playlist_data)
+                      or get_element_by_class('p-drama-half-row', playlist_data))
         if drama_list is None:
             raise ExtractorError('No episodes found')
         video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)


@@ -2052,8 +2052,8 @@ def _extract_filesize(media_url):
                 url_or_none(try_get(
                     player_response,
                     lambda x: x['streamingData']['hlsManifestUrl'],
-                    compat_str)) or
-                url_or_none(try_get(
+                    compat_str))
+                or url_or_none(try_get(
                     video_info, lambda x: x['hlsvp'][0], compat_str)))
             if manifest_url:
                 formats = []
@@ -2102,10 +2102,10 @@ def _extract_filesize(media_url):
                 self._downloader.report_warning('unable to extract uploader nickname')
         channel_id = (
-            str_or_none(video_details.get('channelId')) or
-            self._html_search_meta(
-                'channelId', video_webpage, 'channel id', default=None) or
-            self._search_regex(
+            str_or_none(video_details.get('channelId'))
+            or self._html_search_meta(
+                'channelId', video_webpage, 'channel id', default=None)
+            or self._search_regex(
                 r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
                 video_webpage, 'channel id', default=None, group='id'))
         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
@@ -2564,9 +2564,9 @@ def _extract_mix(self, playlist_id):
         search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
         title_span = (
-            search_title('playlist-title') or
-            search_title('title long-title') or
-            search_title('title'))
+            search_title('playlist-title')
+            or search_title('title long-title')
+            or search_title('title'))
         title = clean_html(title_span)
         return self.playlist_result(url_results, playlist_id, title)


@@ -86,8 +86,8 @@ def _extract_cid(self, video_id, channel_name):
             return next(
                 chan['cid'] for chan in channel_list
                 if chan.get('cid') and (
-                    chan.get('display_alias') == channel_name or
-                    chan.get('cid') == channel_name))
+                    chan.get('display_alias') == channel_name
+                    or chan.get('cid') == channel_name))
         except StopIteration:
             raise ExtractorError('Could not extract channel id')


@@ -221,9 +221,9 @@ def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
         # avconv does not have repeat option
         if self.basename == 'ffmpeg':
             cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
-        cmd += (files_cmd +
-                [encodeArgument(o) for o in opts] +
-                [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
+        cmd += (files_cmd
+                + [encodeArgument(o) for o in opts]
+                + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
         if self._downloader.params.get('verbose', False):
             self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
@@ -326,8 +326,8 @@ def run(self, information):
             information['ext'] = extension
         # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
-        if (new_path == path or
-                (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
+        if (new_path == path
+                or (self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
             self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
             return [], information


@@ -64,8 +64,8 @@ def run(self, info):
         except XAttrMetadataError as e:
             if e.reason == 'NO_SPACE':
                 self._downloader.report_warning(
-                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. ' +
-                    (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
+                    'There\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '
+                    + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())
             elif e.reason == 'VALUE_TOO_LONG':
                 self._downloader.report_warning(
                     'Unable to write extended attributes due to too long values.')


@@ -861,8 +861,8 @@ def __init__(self, code=None, msg='Unknown error'):
         self.msg = msg
         # Parsing code and msg
-        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
-                'No space left' in self.msg or 'Disk quota excedded' in self.msg):
+        if (self.code in (errno.ENOSPC, errno.EDQUOT)
+                or 'No space left' in self.msg or 'Disk quota excedded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
@@ -1453,8 +1453,8 @@ def _windows_write_string(s, out):
     def not_a_console(handle):
         if handle == INVALID_HANDLE_VALUE or handle is None:
             return True
-        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
-                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
+        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
+                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
     if not_a_console(h):
         return False
@@ -1490,8 +1490,8 @@ def write_string(s, out=None, encoding=None):
         if _windows_write_string(s, out):
             return
-    if ('b' in getattr(out, 'mode', '') or
-            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
+    if ('b' in getattr(out, 'mode', '')
+            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
         byt = s.encode(encoding or preferredencoding(), 'ignore')
         out.write(byt)
     elif hasattr(out, 'buffer'):
@@ -2328,10 +2328,10 @@ def merge_dicts(*dicts):
         for k, v in a_dict.items():
             if v is None:
                 continue
-            if (k not in merged or
-                    (isinstance(v, compat_str) and v and
-                     isinstance(merged[k], compat_str) and
-                     not merged[k])):
+            if (k not in merged
+                    or (isinstance(v, compat_str) and v
+                        and isinstance(merged[k], compat_str)
+                        and not merged[k])):
                 merged[k] = v
     return merged
@@ -2657,14 +2657,14 @@ def _match_one(filter_part, dct):
     if m:
         op = COMPARISON_OPERATORS[m.group('op')]
         actual_value = dct.get(m.group('key'))
-        if (m.group('quotedstrval') is not None or
-            m.group('strval') is not None or
+        if (m.group('quotedstrval') is not None
+                or m.group('strval') is not None
                 # If the original field is a string and matching comparisonvalue is
                 # a number we should respect the origin of the original field
                 # and process comparison value as a string (see
                 # https://github.com/ytdl-org/youtube-dl/issues/11082).
-            actual_value is not None and m.group('intval') is not None and
-            isinstance(actual_value, compat_str)):
+                or actual_value is not None and m.group('intval') is not None
+                and isinstance(actual_value, compat_str)):
             if m.group('op') not in ('=', '!='):
                 raise ValueError(
                     'Operator %s does not support string values!' % m.group('op'))
@@ -3973,9 +3973,9 @@ def write_xattr(path, key, value):
                 executable = 'xattr'
                 opts = ['-w', key, value]
-            cmd = ([encodeFilename(executable, True)] +
-                   [encodeArgument(o) for o in opts] +
-                   [encodeFilename(path, True)])
+            cmd = ([encodeFilename(executable, True)]
+                   + [encodeArgument(o) for o in opts]
+                   + [encodeFilename(path, True)])
             try:
                 p = subprocess.Popen(