Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions Contents/Code/Parsing.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,8 @@ def GetSources(url):

# Extract out provider name from source.
if (providerElem.span):
provider = providerElem.span["title"][len("Hosted By "):]
provider = "".join(map(lambda x: str(x).title() if str(x).isupper() else str(x), providerElem.span.findAll(text=True)))
#Log(provider)
else:
provider = providerElem.img["title"][len("Hosted By "):]

Expand Down Expand Up @@ -332,7 +333,7 @@ def GetMediaInfo(url, mediainfo, query_external=False):
imdb_id = mediainfo.id
else:
soup = BeautifulSoup(HTTP.Request(ICEFILMS_URL + url).content)
imdb_link = soup.find('a','iframe')['href']
imdb_link = soup.find('a','NOiframe')['href']
imdb_id = re.search("(tt\d+)", str(imdb_link)).group()

if (query_external):
Expand Down
6 changes: 3 additions & 3 deletions Contents/Code/RecentItems.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,10 @@ def __init__(self):

def add(self, mediaInfo, providerURLs, path, caller=None):

self.items.append([mediaInfo, providerURLs, path, caller])
self.items.insert(0, [mediaInfo, providerURLs, path, caller])

while (len(self.items) > 50):
self.items.pop(0)
self.items.pop()

def getCaller(self, url):

Expand All @@ -34,7 +34,7 @@ def getByURL(self, url):
result = [elem for elem in self.items if url in elem[1]]

if (len(result) > 0):
return [result[-1][0], result[-1][2]]
return [result[0][0], result[0][2]]
else:
return None

Expand Down
37 changes: 28 additions & 9 deletions Contents/Code/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import copy
import sys
import base64
import md5

from datetime import date, datetime, timedelta
from dateutil import tz
Expand Down Expand Up @@ -1246,7 +1247,7 @@ def CaptchaRequiredMenu(mediainfo, source_item, url, parent_name=None, replace_p
tagline="This provider requires that you solve this Captcha.",
summary="This provider requires that you solve this Captcha.",
thumb=PLUGIN_URL + "/proxy?" + urllib.urlencode({'url':captcha_img_URL}),
art=mediainfo.background,
art=PLUGIN_URL + "/proxy?" + urllib.urlencode({'url':captcha_img_URL}),
)
)

Expand Down Expand Up @@ -1301,7 +1302,7 @@ def CaptchaProcessMenu(query, mediainfo, source_item, url, solve_captcha_url, pa
return oc

# Utility methods for captchas. All requests in the Captcha cycle must come from the same User-Agent
# If just let the clients load the Captcha image, we get different User-Agents. Some us libcurl and
# If just let the clients load the Captcha image, we get different User-Agents. Some use libcurl and
# it'd be possible to force a specific user agent using the "url|extraparams" notation, however some
clients use the transcoder which does its own separate thing and doesn't understand libcurl params.
# So, instead, we rewrite the Captcha's image URL to pass through this, so we can forcibly set
Expand All @@ -1312,8 +1313,26 @@ def CaptchaProcessMenu(query, mediainfo, source_item, url, solve_captcha_url, pa
def Proxy(url):

#Log(url)
return HTTP.Request(url,headers={'User-Agent':USER_AGENT}).content
key = "CAPTCHA-" + md5.new(url).hexdigest()

#Log("WAITING " + key);
Thread.AcquireLock(key)

try:
if (not Data.Exists(key)):
#Log("REQUESTING CAPTCHA")
captcha = HTTP.Request(url,headers={'User-Agent':USER_AGENT}, cacheTime=10).content
#Log("SAVING CAPTCHA")
Data.Save(key, captcha)
#Log("SLEEPING")
time.sleep(10)
except Exception, ex:
pass

#Log("UNBLOCKING " + key);
Thread.ReleaseLock(key)

return Data.Load(key)

####################################################################################################
def SearchResultsMenu(query, type, parent_name=None):
Expand Down Expand Up @@ -2338,7 +2357,7 @@ def GetAdditionalSources(imdb_id, title, year=None, season_num=None, ep_num=None
# to let the original plugin know when the user decides to play one of our sources.
if ('Referer' in Request.Headers):

match = re.search("/video/([^/]+)/", Request.Headers['Referer'])
match = re.search("/video/([^/]+)", Request.Headers['Referer'])
caller = match.group(1) if match else None

# Work out what type of search to carry out.
Expand Down Expand Up @@ -2477,9 +2496,9 @@ def PlaybackStarted(url):

# Use the information from the mediainfo to call the PlaybackStarted method of
# whatever plugin requested this.
url = PLEX_URL + '/video/%s/playback/external/%s' % (caller, mediainfo['id'])
if (mediainfo['ep_num']):
url += "/%s/%s" % (str(mediainfo['season']), str(mediainfo['ep_num']))
url = PLEX_URL + '/video/%s/playback/external/%s' % (caller, mediainfo.id)
if (hasattr(mediainfo, 'ep_num') and mediainfo.ep_num is not None):
url += "/%s/%s" % (str(mediainfo.season), str(mediainfo.ep_num))

request = urllib2.Request(url)
response = urllib2.urlopen(request)
Expand Down Expand Up @@ -2512,9 +2531,9 @@ def PlaybackStartedExternal(id, season_num=None, ep_num=None):
browsedItems = cerealizer.loads(Data.Load(BROWSED_ITEMS_KEY))

# See if the URL being played is on our recently browsed list.
info = browsedItems.getByID(id, season_num, ep_num)
item = browsedItems.getByID(id, season_num, ep_num)

if (info is None):
if (item is None):
Log("****** ERROR: Watching Item which hasn't been browsed to")
return ""

Expand Down
22 changes: 14 additions & 8 deletions Contents/Libraries/Shared/xgoogle/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,16 @@ def __str__(self):
return 'Google Search Result: "%s"' % self.title

class GoogleSearch(object):
SEARCH_URL_0 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&btnG=Google+Search&complete=0"
NEXT_PAGE_0 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&start=%(start)d&complete=0"
SEARCH_URL_1 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&num=%(num)d&btnG=Google+Search&complete=0"
NEXT_PAGE_1 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&num=%(num)d&start=%(start)d&complete=0"
SEARCH_URL_0 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&btnG=Google+Search"
NEXT_PAGE_0 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&start=%(start)d"
SEARCH_URL_1 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&num=%(num)d&btnG=Google+Search"
NEXT_PAGE_1 = "http://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&num=%(num)d&start=%(start)d"

# Use IceFilm's CSE. Prevents problems with query limiting. Other methods have also been adjusted accordingly.
SEARCH_URL_0 = "http://www.google.%(tld)s/cse?hl=%(lang)s&cx=010591583107216882486:bafpv02vxuq&cof=FORID:9&nojs=1&q=%(query)s"
NEXT_PAGE_0 = "http://www.google.%(tld)s/cse?hl=%(lang)s&cx=010591583107216882486:bafpv02vxuq&cof=FORID:9&nojs=1&q=%(query)s&start=%(start)d"
SEARCH_URL_1 = "http://www.google.%(tld)s/cse?hl=%(lang)s&cx=010591583107216882486:bafpv02vxuq&cof=FORID:9&nojs=1&q=%(query)s&num=%(num)d"
NEXT_PAGE_1 = "http://www.google.%(tld)s/cse?hl=%(lang)s&cx=010591583107216882486:bafpv02vxuq&cof=FORID:9&nojs=1&q=%(query)s&num=%(num)d&start=%(start)d"

def __init__(self, query, random_agent=False, debug=False, lang="en", tld="com", re_search_strings=None):
self.query = query
Expand Down Expand Up @@ -226,7 +232,7 @@ def _extract_info(self, soup):
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}

def _extract_results(self, soup):
results = soup.findAll('li', {'class': 'g'})
results = soup.findAll('div', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
Expand All @@ -250,13 +256,13 @@ def _extract_title_url(self, result):
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.match(r'/url\?(url|q)=(http[^&]+)&', url)
match = re.match(r'/url\?q=(http[^&]+)&', url)
if match:
url = urllib.unquote(match.group(2))
url = urllib.unquote(match.group(1))
return title, url

def _extract_description(self, result):
desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
desc_div = result.find('span', {'class': re.compile(r'\bs\b')})
if not desc_div:
self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
return None
Expand Down
48 changes: 48 additions & 0 deletions Contents/Libraries/Shared/xgoogle/test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
from search import GoogleSearch
import re

def GetSearchResults(query=None, type=None, exact=False):
    """Run a Google search scoped to icefilms.info and return matching items.

    Each returned item is a dict with keys 'type', 'title' and 'id', plus
    'year' when the result title carries a "(YYYY)" suffix.

    query -- the search terms.
    type  -- "movies" to search films, anything else searches TV shows.
    exact -- accepted for interface compatibility; not used here.
    """
    if (type == "movies"):
        # Movie search: omit TV-show pages by excluding pages carrying the
        # "Episode List" / "Series Rating" markers.
        search = 'intitle:%s -"Episode List" -"Series Rating"' % (query)
    else:
        # TV search: only pages whose title includes "Episode List".
        search = 'allintitle:%s "Episode List"' % (query)

    gs = GoogleSearch(search)
    gs.results_per_page = 25
    gs.page = 0
    # get_results() advances the page internally, so calling it twice
    # collects the first two pages of results.
    results = gs.get_results() + gs.get_results()
    items = []

    for result in results:

        # Strip markup and site boilerplate out of the result title.
        name = re.sub(
            r'(<em>|</em>|<a>|</a>|DivX|-|icefilms(\.info)?|<b>\.\.\.</b>|Episode List|links)',
            '',
            result.title.encode('utf8')
        ).strip()

        url_match = re.search(r"icefilms\.info(/.*)", result.url)
        if (url_match is None):
            # Skip results that don't point back at icefilms.info rather
            # than crashing on a failed match.
            continue
        video_url = url_match.group(1)

        item = {}

        item['type'] = type
        item['title'] = name

        # Split a trailing "(YYYY)" year out of the title when present.
        year_match = re.search(r"(.*)\((\d*)\)", item['title'])

        if (year_match and year_match.group(2)):
            item['title'] = year_match.group(1).strip()
            item['year'] = int(year_match.group(2).strip())

        item['id'] = video_url

        items.append(item)

    return items

# Ad-hoc manual test: run a TV-show search for "the" and dump the results.
# NOTE(review): Python 2 print statements; run under the plugin's runtime.
items = GetSearchResults("the", "tv")
print items
print len(items)
2 changes: 1 addition & 1 deletion Contents/Services/ServiceInfo.plist
Original file line number Diff line number Diff line change
@@ -1 +1 @@
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"><plist version="1.0"><dict> <key>URL</key> <dict> <key>IceFilms</key> <dict> <key>URLPatterns</key> <array> <string>(external|captcha)://icefilms\.info/\d+/\d+</string> <string>play://icefilms\.info/</string> <string>providerinfo://icefilms/.*</string> <!-- Temporarily put old style url back in until we update other plugins to use new style urls. --> <string>http://providerinfo.icefilms/.*</string> </array> </dict> <key>ShareBees</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?sharebees.com</string> <string>providerinfo://sharebees/.*(icefilms)</string> </array> </dict> <key>RapidShare</key> <dict> <key>URLPatterns</key> <array> <string>https?://(www\.)?rapidshare.com</string> <string>providerinfo://rapidshare/.*(icefilms)</string> </array> </dict> <key>BillionUploads</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?billionuploads.com</string> <string>providerinfo://billionuploads/.*(icefilms)</string> </array> </dict> <key>2Shared</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?2shared.com</string> <string>providerinfo://2shared/.*(icefilms)</string> </array> </dict> <key>180Upload</key> <dict> <key>URLPatterns</key> <array> <string>(captcha|solve|play)://(www\.)?(180Upload.com|epicshare.net)/</string> <string>providerinfo://(www\.)?(180upload(\.com)?|epicshare(\.net)?)/.*(icefilms)</string> </array> </dict> <key>MegaRelease</key> <dict> <key>URLPatterns</key> <array> <string>(captcha|solve|play)://(www\.)?(megarelease\.org|lemuploads\.com)/</string> <string>providerinfo://(www\.)?(megarelease(\.org)?|lemuploads(\.com)?)/.*(icefilms)</string> </array> </dict> <key>MovReel</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?movreel.com</string> <string>providerinfo://movreel/.*(icefilms)</string> </array> </dict> <key>VidHog</key> <dict> 
<key>URLPatterns</key> <array> <string>http://(www\.)?(vidhog)\.(com|net)/[\d\w]{8,}</string> <string>providerinfo://vidhog/.*(icefilms)</string> </array> </dict> <key>HugeFiles</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?hugefiles\.net</string> <string>providerinfo://hugefiles/.*(icefilms)</string> </array> </dict> </dict></dict></plist>
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"><plist version="1.0"><dict> <key>URL</key> <dict> <key>IceFilms</key> <dict> <key>URLPatterns</key> <array> <string>(external|captcha)://icefilms\.info/\d+/\d+</string> <string>play://icefilms\.info/</string> <string>providerinfo://icefilms/.*</string> <!-- Temporarily put old style url back in until we update other plugins to use new style urls. --> <string>http://providerinfo.icefilms/.*</string> </array> </dict> <key>ShareBees</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?sharebees.com</string> <string>providerinfo://sharebees/.*(icefilms)</string> </array> </dict> <key>RapidShare</key> <dict> <key>URLPatterns</key> <array> <string>https?://(www\.)?rapidshare.com</string> <string>providerinfo://rapidshare/.*(icefilms)</string> </array> </dict> <key>BillionUploads</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?billionuploads.com</string> <string>providerinfo://billionuploads/.*(icefilms)</string> </array> </dict> <key>2Shared</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?2shared.com</string> <string>providerinfo://2shared/.*(icefilms)</string> </array> </dict> <key>180Upload</key> <dict> <key>URLPatterns</key> <array> <string>(captcha|solve|play)://(www\.)?(180Upload.com|epicshare.net)/</string> <string>providerinfo://(www\.)?(180upload(\.com)?|epicshare(\.net)?)/.*(icefilms)</string> </array> </dict> <key>MegaRelease</key> <dict> <key>URLPatterns</key> <array> <string>(captcha|solve|play)://(www\.)?(megarelease\.org|lemuploads\.com)/</string> <string>providerinfo://(www\.)?(megarelease(\.org)?|lemuploads(\.com)?)/.*(icefilms)</string> </array> </dict> <key>MovReel</key> <dict> <key>URLPatterns</key> <array> <string>http://(www\.)?movreel.com</string> <string>providerinfo://movreel/.*(icefilms)</string> </array> </dict> <key>VidHog</key> <dict> 
<key>URLPatterns</key> <array> <string>http://(www\.)?(vidhog)\.(com|net)/[\d\w]{8,}</string> <string>providerinfo://vidhog/.*(icefilms)</string> </array> </dict> <key>HugeFiles</key> <dict> <key>URLPatterns</key> <array> <string>(captcha|solve|play)://(www\.)?hugefiles\.net</string> <string>providerinfo://hugefiles/.*(icefilms)</string> </array> </dict> </dict></dict></plist>
Expand Down
44 changes: 4 additions & 40 deletions Contents/Services/URL/180Upload/ServiceCode.pys
Original file line number Diff line number Diff line change
Expand Up @@ -124,58 +124,22 @@ def MediaObjectsForURL(url):

if ('180upload' in url):
link = re.search('<a\s+id="lnk_download"\s+href="(.+?)">', html)
final_url = "play://180upload.com/?" + urllib.urlencode({'url':link.group(1)}).replace(" ", "+")
final_url = link.group(1).replace(" ", "+")
elif ('epicshare' in url):
link = re.search('<a\s+id="lnk_download"\s+href="(.+?)">Regular', html)
final_url = "play://180upload.com/?" + urllib.urlencode({'url':link.group(1)}).replace(" ", "+")
final_url = link.group(1).replace(" ", "+")

return [
MediaObject(
parts = [
PartObject(
key=final_url
)
],
)
]
Log("final url: " + final_url)

elif ('play://' in url):

return [
MediaObject(
parts = [
PartObject(
key=Callback(PlayVideo, url=url.replace("play://", "http://"))
key=final_url
)
],
)
]



@indirect
def PlayVideo(url):

# Extract out video URL.
url_parts = urlparse.urlparse(url)

# Break down query string.
data = dict(cgi.parse_qsl(url_parts.query))
final_url = data['url'].replace(" ","+")

Log(final_url)

return ObjectContainer(
objects = [
VideoClipObject(
items = [
MediaObject(
parts = [PartObject(key=final_url)],
)
]
)
]
)


def LogProviderError(msg="", ex=None):
Expand Down
Loading