forked from ckorn/Upstream-watchers
Some upstreams do not include the version number in the download link but publish it elsewhere. These scripts find out the current upstream version and provide a link with the version number in it, so the debian/watch files can check for new versions as usual.
Christoph Korn committed on Nov 6, 2012 (0 parents, commit 0ad6eb0)
Showing 10 changed files with 187 additions and 0 deletions.
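The generated pages are then scanned like any other download index. As a rough sketch, a debian/watch entry for the FrostWire page could look like the following, assuming the HTML produced by frostwire.py is published at a hypothetical URL:

version=3
# Hypothetical location of the generated page; the pattern matches the
# tarball links that frostwire.py prints.
http://example.org/upstream-watchers/frostwire.html .*/frostwire-([\d.]+)\.noarch\.tar\.gz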
@@ -0,0 +1,3 @@
*.html
*.log
*.pyc
@@ -0,0 +1,34 @@
#!/usr/bin/python
# Watch the FrostWire download directory listing and print links to the
# noarch tarballs, so the version number shows up in the link itself.
import urllib2
import re
import sys
DIR_URL = "http://main1.frostwire.com/frostwire/"

def printlink(spiFile, spiDirectory):
    print "<a href='%s'>%s</a>"%(spiDirectory+spiFile,spiFile)

def download(spiUrl):
    request = urllib2.Request(url=spiUrl)
    result = urllib2.urlopen(request, timeout=2)
    return result.read()

if __name__ == "__main__":
    data = download(DIR_URL)
    data = data.split("\n")
    # Sub-directories of the listing are named after the version number (e.g. "5.5.1/").
    href_re = re.compile('<a href="(?P<sub_dir>\d[\d\.]+/)">')
    dirs = []
    for line in data:
        search_result = re.search(href_re, line)
        if not search_result: continue
        sub_dir = search_result.group("sub_dir")
        dirs += [sub_dir]
    # Newest directories come last in the listing, so reverse the order.
    dirs = dirs[::-1]
    for dir in dirs:
        download_dir = DIR_URL+dir
        data = download(download_dir).split("\n")
        tarball_re = re.compile('<a href="(?P<file>frostwire-([\d\.]+)\.noarch\.tar\.(?:gz|bz2|xz))">')
        for line in data:
            search_result = re.search(tarball_re, line)
            if not search_result: continue
            file = search_result.group("file")
            printlink(file, download_dir)
@@ -0,0 +1,67 @@
#!/usr/bin/python
# Watch segaretro.org for the current Gens/GS source tarball (Gens-gs-r<rev>.tar.gz).
# Everything after the early sys.exit() would solve the site's JavaScript
# anti-DDOS challenge but is currently skipped.
import urllib2
import urllib
import re
import sys
import subprocess
import shlex

DOMAIN="http://segaretro.org"
URL="%s/Gens/GS"%(DOMAIN)
try:
    r=urllib2.urlopen(URL)
    html=r.read()
except urllib2.HTTPError, e:
    html=e.read()

# This odd DDOS filter seems to be disabled again
print html
sys.exit()

jschl_vc_finder = re.compile('(?:.*)<input type="hidden" name="jschl_vc" value="(?P<value>[^"]+)"/>(?:.*)', re.DOTALL)
m=jschl_vc_finder.match(html)
if not m: sys.exit()

jschl_vc=m.group("value")
#print jschl_vc

jschl_answer_finder = re.compile("(?:.*)\$\('#jschl_answer'\).val\((?P<value>[^)]+)\);(?:.*)", re.DOTALL)
m=jschl_answer_finder.match(html)
if not m: sys.exit()

jschl_answer=m.group("value")
#print jschl_answer

# The challenge answer is an arithmetic expression embedded in the page.
jschl_answer=eval("str(int(%s))"%(jschl_answer))
#print jschl_answer

formdata = { "act" : "jschl", "jschl_vc": jschl_vc, "jschl_answer" : jschl_answer }
#print formdata
data_encoded = urllib.urlencode(formdata)

# Posting the form with urllib2 is not working, so shell out to wget instead.
command="/usr/bin/wget -O- %s --post-data '%s'"%(URL,data_encoded)
#command="/usr/bin/curl %s -d '%s' -D-"%(URL,data_encoded)
args = shlex.split(command)
html = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]

url_finder = re.compile('(?:.*)href="(?P<URL>[a-zA-Z0-9/]+Gens-gs-r(?:[\d]+)\.tar\.gz)"(?:.*)', re.DOTALL)
m=url_finder.match(html)
if not m: sys.exit()

url="%s%s"%(DOMAIN,m.group("URL"))
print "<a href='%s'>%s</a>"%(url,url)

"""
print "curl %s -d '%s' -D-"%(URL,data_encoded)
print "wget -O- %s --post-data '%s' | grep Source"%(URL,data_encoded)
try:
    txheaders={'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
    req=urllib2.Request(URL,data_encoded,txheaders)
    r=urllib2.urlopen(req)
    html=r.read()
except urllib2.HTTPError, e:
    print e.info()
    html=e.read()
"""
@@ -0,0 +1,12 @@
#!/usr/bin/python
# Watch the Jag download page and print a link carrying the current
# source archive name (and therefore the version number).
import urllib2
import re

r=urllib2.urlopen("http://jag.xlabsoft.com/download.php")
html=r.read()

finder=re.compile("(?:.*)<a href='(?:[^']+)'>(?P<version>jag-(?:[\d\.]+)-src\.(?:zip|xz|gz|bz2))</a>(?:.*)", re.DOTALL)
m=finder.match(html)
if m:
    v=m.group('version')
    print "<a href='%s'>%s</a>"%(v,v)
@@ -0,0 +1,9 @@
#!/usr/bin/python
# Watch the Legends download page and print one link per Linux build found.
import urllib2
import re
h=urllib2.urlopen("http://legendsthegame.net/index.php?m=fileswap").read()
f=re.compile('title="Download (?:.*) Linux (?:.*) (?P<version>[\d\.]+)"', re.M)
m=f.finditer(h)
for x in m:
    v=x.group("version")
    print "<a href='http://legendsthegame.net/%s'>%s</a>"%(v,v)
@@ -0,0 +1,10 @@
#!/usr/bin/python
# Read the current version of Nikki from the first line of the upstream CHANGELOG.
import urllib2
import re
r=urllib2.urlopen("http://code.joyridelabs.de/nikki/src/CHANGELOG")
h=r.read()
finder=re.compile("^(?P<version>[\d\.]+)(?:.*)")
m=finder.match(h)
if m:
    v=m.group("version")
    print("<a href='http://joyridelabs.de/game/code/%s'>%s</a>"%(v,v))
@@ -0,0 +1,14 @@
#!/usr/bin/python
# Follow the Second Life viewer-development "quicklink" page and print a link
# to the Linux tarball its URL= redirect points at.
import urllib2
import re
URL="http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/integration_viewer-development/arch/Linux/quicklink.html"

if __name__ == "__main__":
    f=urllib2.urlopen(URL)
    html=f.read()
    url_finder=re.compile("(?:.*)URL=(?P<url>.*\.tar\.(?:bz2|xz|gz))(?:.*)", flags=re.DOTALL)

    result=url_finder.match(html)
    if result:
        g=result.group("url")
        print "<a href='%s'>%s</a>"%(g,g)
@@ -0,0 +1,9 @@
#!/bin/sh
# Run every watcher script; each one writes its HTML page and a log file.
python frostwire.py > frostwire.html 2> frostwire.log
python warsow.py > warsow.html 2> warsow.log
python snowstorm.py > snowstorm.html 2> snowstorm.log
python wakeonplan.py > wakeonplan.html 2> wakeonplan.log
python gens-gs.py > gens-gs.html 2> gens-gs.log
python jag.py > jag.html 2> jag.log
python nikki.py > nikki.html 2> nikki.log
python legends.py > legends.html 2> legends.log
@@ -0,0 +1,16 @@
#!/usr/bin/python
# Watch the wakeonplan Bazaar branch on Launchpad and print a link to the
# changes page of the latest revision.
import urllib2
import re

URL="http://bazaar.launchpad.net/~xintx-ua/wakeonplan/wakeonplan/files"
TARGET_URL="http://bazaar.launchpad.net/~xintx-ua/wakeonplan/wakeonplan/changes/%s"

if __name__ == "__main__":
    r=urllib2.urlopen(URL)
    html = r.read()

    # The files page shows "(revision N)" for the latest revision.
    rev_finder=re.compile("(?:.*)</span> \(revision (?P<rev>[\d]+)\)</span>(?:.*)", re.DOTALL)
    finding=rev_finder.match(html)
    if finding:
        url=TARGET_URL%(finding.group("rev"))
        print "<a href='%s'>%s</a>"%(url,url)
@@ -0,0 +1,13 @@
#!/usr/bin/python
# Ask the Warsow download redirector for the Linux build with a HEAD request
# and print the tarball URL from its Location header.
import httplib
for link in ["linux102"]:
    conn = httplib.HTTPConnection("www.warsow.net")
    conn.request("HEAD", "/download?dl=%s"%(link))
    res = conn.getresponse()
    # Emit the HTTP status as an HTML comment for debugging.
    print "<!-- %d %s -->"%(res.status, res.reason)
    h = res.getheaders()
    #print h
    for k,v in h:
        if k == "location":
            # Link the unified tarball instead of the plain one.
            v = v.replace(".tar.gz", "_unified.tar.gz")
            print "<a href='%s'>%s</a>"%(v,v)