@@ -12,10 +12,12 @@ __author__ = (
     'Rogério Brito',
     'Philipp Hagemeister',
     'Sören Schulze',
+    'Kevin Ngo',
+    'Ori Avtalion',
     )
 
 __license__ = 'Public Domain'
-__version__ = '2011.10.19'
+__version__ = '2011.11.23'
 
 UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
 
@@ -77,8 +79,6 @@ std_headers = {
     'Accept-Language': 'en-us,en;q=0.5',
 }
 
-simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
-
 try:
     import json
 except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
@@ -277,6 +277,9 @@ def timeconvert(timestr):
         timestamp = email.utils.mktime_tz(timetuple)
     return timestamp
 
+def _simplify_title(title):
+    expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE)
+    return expr.sub(u'_', title).strip(u'_')
 
 class DownloadError(Exception):
     """Download Error exception.
@@ -1289,8 +1292,7 @@ class YoutubeIE(InfoExtractor):
         video_title = sanitize_title(video_title)
 
         # simplified title
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-        simple_title = simple_title.strip(ur'_')
+        simple_title = _simplify_title(video_title)
 
         # thumbnail image
         if 'thumbnail_url' not in video_info:
@@ -1560,9 +1562,6 @@ class DailymotionIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1651,9 +1650,6 @@ class GoogleIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1697,7 +1693,7 @@ class GoogleIE(InfoExtractor):
             return
         video_title = mobj.group(1).decode('utf-8')
         video_title = sanitize_title(video_title)
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+        simple_title = _simplify_title(video_title)
 
         # Extract video description
         mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
@@ -1758,9 +1754,6 @@ class PhotobucketIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1799,7 +1792,7 @@ class PhotobucketIE(InfoExtractor):
             return
         video_title = mobj.group(1).decode('utf-8')
         video_title = sanitize_title(video_title)
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+        simple_title = _simplify_title(video_title)
 
         video_uploader = mobj.group(2).decode('utf-8')
 
@@ -1840,9 +1833,6 @@ class YahooIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url, new_video=True):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1896,7 +1886,7 @@ class YahooIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+        simple_title = _simplify_title(video_title)
 
         mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
         if mobj is None:
@@ -1993,9 +1983,6 @@ class VimeoIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url, new_video=True):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
@@ -2027,7 +2014,7 @@ class VimeoIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
         video_title = mobj.group(1).decode('utf-8')
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+        simple_title = _simplify_title(video_title)
 
         # Extract uploader
         mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
@@ -2118,9 +2105,6 @@ class GenericIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url):
         # At this point we have a new video
         self._downloader.increment_downloads()
@@ -2174,7 +2158,7 @@ class GenericIE(InfoExtractor):
             return
         video_title = mobj.group(1).decode('utf-8')
         video_title = sanitize_title(video_title)
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+        simple_title = _simplify_title(video_title)
 
         # video uploader is domain name
         mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
@@ -2470,7 +2454,7 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
     """Information Extractor for YouTube playlists."""
 
-    _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+    _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
     _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
     _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
@@ -2514,7 +2498,8 @@ class YoutubePlaylistIE(InfoExtractor):
 
         while True:
             self.report_download_page(playlist_id, pagenum)
-            request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
+            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
+            request = urllib2.Request(url)
             try:
                 page = urllib2.urlopen(request).read()
             except (urllib2.URLError, httplib.HTTPException, socket.error), err:
@@ -2548,7 +2533,7 @@ class YoutubeUserIE(InfoExtractor):
     _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
     _GDATA_PAGE_SIZE = 50
     _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-    _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
     _youtube_ie = None
     IE_NAME = u'youtube:user'
 
@@ -2646,9 +2631,6 @@ class DepositFilesIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self, url):
         # At this point we have a new file
         self._downloader.increment_downloads()
@@ -2709,11 +2691,12 @@ class DepositFilesIE(InfoExtractor):
 class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""
 
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/video/video\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
     _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
     _NETRC_MACHINE = 'facebook'
-    _available_formats = ['highqual', 'lowqual']
+    _available_formats = ['video', 'highqual', 'lowqual']
     _video_extensions = {
+        'video': 'mp4',
         'highqual': 'mp4',
         'lowqual': 'mp4',
     }
@@ -2741,10 +2724,9 @@ class FacebookIE(InfoExtractor):
     def _parse_page(self, video_webpage):
         """Extract video information from page"""
         # General data
-        data = {'title': r'class="video_title datawrap">(.*?)</',
+        data = {'title': r'\("video_title", "(.*?)"\)',
             'description': r'<div class="datawrap">(.*?)</div>',
             'owner': r'\("video_owner_name", "(.*?)"\)',
-            'upload_date': r'data-date="(.*?)"',
             'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
             }
         video_info = {}
@@ -2846,9 +2828,7 @@ class FacebookIE(InfoExtractor):
         video_title = video_title.decode('utf-8')
         video_title = sanitize_title(video_title)
 
-        # simplified title
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-        simple_title = simple_title.strip(ur'_')
+        simple_title = _simplify_title(video_title)
 
         # thumbnail image
         if 'thumbnail' not in video_info:
@@ -2939,11 +2919,6 @@ class BlipTVIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
 
-    def _simplify_title(self, title):
-        res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-        res = res.strip(ur'_')
-        return res
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -2963,13 +2938,14 @@ class BlipTVIE(InfoExtractor):
         if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
             basename = url.split('/')[-1]
             title,ext = os.path.splitext(basename)
+            title = title.decode('UTF-8')
             ext = ext.replace('.', '')
             self.report_direct_download(title)
             info = {
                 'id': title,
                 'url': url,
                 'title': title,
-                'stitle': self._simplify_title(title),
+                'stitle': _simplify_title(title),
                 'ext': ext,
                 'urlhandle': urlh
             }
@@ -3003,7 +2979,7 @@ class BlipTVIE(InfoExtractor):
                 'uploader': data['display_name'],
                 'upload_date': upload_date,
                 'title': data['title'],
-                'stitle': self._simplify_title(data['title']),
+                'stitle': _simplify_title(data['title']),
                 'ext': ext,
                 'format': data['media']['mimeType'],
                 'thumbnail': data['thumbnailUrl'],
@@ -3039,9 +3015,6 @@ class MyVideoIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
 
-    def _real_initialize(self):
-        return
-
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3049,10 +3022,6 @@ class MyVideoIE(InfoExtractor):
             return
 
         video_id = mobj.group(1)
-        simple_title = mobj.group(2).decode('utf-8')
-        # should actually not be necessary
-        simple_title = sanitize_title(simple_title)
-        simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', simple_title)
 
         # Get video webpage
         request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
|
|
|
|
|
video_title = mobj.group(1)
|
|
|
|
|
video_title = sanitize_title(video_title)
|
|
|
|
|
|
|
|
|
|
simple_title = _simplify_title(video_title)
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
self._downloader.process_info({
|
|
|
|
|
'id': video_id,
|
|
|
|
@@ -3112,11 +3083,6 @@ class ComedyCentralIE(InfoExtractor):
     def report_player_url(self, episode_id):
         self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
 
-    def _simplify_title(self, title):
-        res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-        res = res.strip(ur'_')
-        return res
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3125,9 +3091,9 @@ class ComedyCentralIE(InfoExtractor):
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
-                url = 'http://www.thedailyshow.com/full-episodes/'
+                url = u'http://www.thedailyshow.com/full-episodes/'
             else:
-                url = 'http://www.colbertnation.com/full-episodes/'
+                url = u'http://www.colbertnation.com/full-episodes/'
             mobj = re.match(self._VALID_URL, url)
             assert mobj is not None
 
@@ -3213,14 +3179,14 @@ class ComedyCentralIE(InfoExtractor):
 
             self._downloader.increment_downloads()
 
-            effTitle = showId + '-' + epTitle
+            effTitle = showId + u'-' + epTitle
             info = {
                 'id': shortMediaId,
                 'url': video_url,
                 'uploader': showId,
                 'upload_date': officialDate,
                 'title': effTitle,
-                'stitle': self._simplify_title(effTitle),
+                'stitle': _simplify_title(effTitle),
                 'ext': 'mp4',
                 'format': format,
                 'thumbnail': None,
@@ -3247,11 +3213,6 @@ class EscapistIE(InfoExtractor):
     def report_config_download(self, showName):
         self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
 
-    def _simplify_title(self, title):
-        res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-        res = res.strip(ur'_')
-        return res
-
     def _real_extract(self, url):
         htmlParser = HTMLParser.HTMLParser()
 
@@ -3304,7 +3265,7 @@ class EscapistIE(InfoExtractor):
             'uploader': showName,
             'upload_date': None,
             'title': showName,
-            'stitle': self._simplify_title(showName),
+            'stitle': _simplify_title(showName),
             'ext': 'flv',
             'format': 'flv',
             'thumbnail': imgUrl,
|
|
|
|
|
"""Report information extraction."""
|
|
|
|
|
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
|
|
|
|
|
|
|
|
|
|
def _simplify_title(self, title):
|
|
|
|
|
res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
|
|
|
|
|
res = res.strip(ur'_')
|
|
|
|
|
return res
|
|
|
|
|
|
|
|
|
|
def _real_extract(self, url):
|
|
|
|
|
htmlParser = HTMLParser.HTMLParser()
|
|
|
|
|
|
|
|
|
@@ -3378,7 +3334,7 @@ class CollegeHumorIE(InfoExtractor):
         videoNode = mdoc.findall('./video')[0]
         info['description'] = videoNode.findall('./description')[0].text
         info['title'] = videoNode.findall('./caption')[0].text
-        info['stitle'] = self._simplify_title(info['title'])
+        info['stitle'] = _simplify_title(info['title'])
         info['url'] = videoNode.findall('./file')[0].text
         info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
         info['ext'] = info['url'].rpartition('.')[2]
@@ -3409,11 +3365,6 @@ class XVideosIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 
-    def _simplify_title(self, title):
-        res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
-        res = res.strip(ur'_')
-        return res
-
     def _real_extract(self, url):
         htmlParser = HTMLParser.HTMLParser()
 
@@ -3467,7 +3418,7 @@ class XVideosIE(InfoExtractor):
             'uploader': None,
             'upload_date': None,
             'title': video_title,
-            'stitle': self._simplify_title(video_title),
+            'stitle': _simplify_title(video_title),
             'ext': 'flv',
             'format': 'flv',
             'thumbnail': video_thumbnail,
@@ -3481,6 +3432,184 @@ class XVideosIE(InfoExtractor):
             self._downloader.trouble(u'\nERROR: unable to download ' + video_id)
 
 
+class SoundcloudIE(InfoExtractor):
+    """Information extractor for soundcloud.com
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting from an url composed
+       of the stream token and uid
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
+    IE_NAME = u'soundcloud'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_webpage(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        htmlParser = HTMLParser.HTMLParser()
+
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1).decode('utf-8')
+        # extract simple title (uploader + slug of song title)
+        slug_title = mobj.group(2).decode('utf-8')
+        simple_title = uploader + '-' + slug_title
+
+        self.report_webpage('%s/%s' % (uploader, slug_title))
+
+        request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
+        try:
+            webpage = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+            return
+
+        self.report_extraction('%s/%s' % (uploader, slug_title))
+
+        # extract uid and stream token that soundcloud hands out for access
+        mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
+        if mobj:
+            video_id = mobj.group(1)
+            stream_token = mobj.group(2)
+
+        # extract unsimplified title
+        mobj = re.search('"title":"(.*?)",', webpage)
+        if mobj:
+            title = mobj.group(1)
+
+        # construct media url (with uid/token)
+        mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
+        mediaURL = mediaURL % (video_id, stream_token)
+
+        # description
+        description = u'No description available'
+        mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
+        if mobj:
+            description = mobj.group(1)
+
+        # upload date
+        upload_date = None
+        mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
+        if mobj:
+            try:
+                upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
+            except Exception, e:
+                print str(e)
+
+        # for soundcloud, a request to a cross domain is required for cookies
+        request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
+
+        try:
+            self._downloader.process_info({
+                'id': video_id.decode('utf-8'),
+                'url': mediaURL,
+                'uploader': uploader.decode('utf-8'),
+                'upload_date': upload_date,
+                'title': simple_title.decode('utf-8'),
+                'stitle': simple_title.decode('utf-8'),
+                'ext': u'mp3',
+                'format': u'NA',
+                'player_url': None,
+                'description': description.decode('utf-8')
+            })
+        except UnavailableVideoError:
+            self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class InfoQIE(InfoExtractor):
+    """Information extractor for infoq.com"""
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
+    IE_NAME = u'infoq'
+
+    def report_webpage(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        htmlParser = HTMLParser.HTMLParser()
+
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        self.report_webpage(url)
+
+        request = urllib2.Request(url)
+        try:
+            webpage = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+            return
+
+        self.report_extraction(url)
+
+
+        # Extract video URL
+        mobj = re.search(r"jsclassref='([^']*)'", webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64'))
+
+
+        # Extract title
+        mobj = re.search(r'contentTitle = "(.*?)";', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        # Extract description
+        video_description = u'No description available.'
+        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
+        if mobj is not None:
+            video_description = mobj.group(1).decode('utf-8')
+
+        video_filename = video_url.split('/')[-1]
+        video_id, extension = video_filename.split('.')
+
+        self._downloader.increment_downloads()
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'stitle': _simplify_title(video_title),
+            'ext': extension,
+            'format': extension, # Extension is always(?) mp4, but seems to be flv
+            'thumbnail': None,
+            'description': video_description,
+            'player_url': None,
+        }
+
+        try:
+            self._downloader.process_info(info)
+        except UnavailableVideoError, err:
+            self._downloader.trouble(u'\nERROR: unable to download ' + video_url)
+
+
 
 class PostProcessor(object):
     """Post Processor class.
@@ -3877,11 +4006,13 @@ def gen_extractors():
         EscapistIE(),
         CollegeHumorIE(),
         XVideosIE(),
+        SoundcloudIE(),
+        InfoQIE(),
 
         GenericIE()
     ]
 
-def main():
+def _real_main():
     parser, opts, args = parseOpts()
 
     # Open appropriate CookieJar
@@ -4041,10 +4172,9 @@ def main():
 
     sys.exit(retcode)
 
-
-if __name__ == '__main__':
+def main():
     try:
-        main()
+        _real_main()
     except DownloadError:
         sys.exit(1)
     except SameFileError:
@@ -4052,4 +4182,7 @@ if __name__ == '__main__':
     except KeyboardInterrupt:
         sys.exit(u'\nERROR: Interrupted by user')
 
+if __name__ == '__main__':
+    main()
+
 # vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: