diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a1590bb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+*.html
+*.log
+*.pyc
diff --git a/frostwire.py b/frostwire.py
new file mode 100644
index 0000000..1806618
--- /dev/null
+++ b/frostwire.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+import urllib2
+import re
+import sys
+DIR_URL = "http://main1.frostwire.com/frostwire/"
+
+def printlink(spiFile, spiDirectory):
+ print "%s"%(spiDirectory+spiFile,spiFile)
+
+def download(spiUrl):
+    request = urllib2.Request(url=spiUrl)
+    result = urllib2.urlopen(request, timeout=2)
+    return result.read()
+
+if __name__ == "__main__":
+    data = download(DIR_URL)
+    data = data.split("\n")
+    href_re = re.compile('<a href="(?P<sub_dir>[^"]+/)">')  # subdirectory links in the index listing
+    dirs = []
+    for line in data:
+        search_result = re.search(href_re, line)
+        if not search_result: continue
+        sub_dir = search_result.group("sub_dir")
+        dirs += [sub_dir]
+    dirs = dirs[::-1]
+    for dir in dirs:
+        download_dir = DIR_URL+dir
+        data = download(download_dir).split("\n")
+        tarball_re = re.compile('<a href="(?P<file>[^"]+\.tar\.gz)">')  # tarball links in the subdirectory listing
+        for line in data:
+            search_result = re.search(tarball_re, line)
+            if not search_result: continue
+            file = search_result.group("file")
+            printlink(file, download_dir)
diff --git a/gens-gs.py b/gens-gs.py
new file mode 100644
index 0000000..4976054
--- /dev/null
+++ b/gens-gs.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+import urllib2
+import urllib
+import re
+import sys
+import subprocess
+import shlex
+
+DOMAIN="http://segaretro.org"
+URL="%s/Gens/GS"%(DOMAIN)
+try:
+    r=urllib2.urlopen(URL)
+    html=r.read()
+except urllib2.HTTPError, e:
+    html=e.read()
+
+# The odd DDoS filter seems to be disabled again, so just dump the page and stop here
+print html
+sys.exit()
+
+jschl_vc_finder = re.compile('(?:.*)<input type="hidden" name="jschl_vc" value="(?P<value>[^"]+)"/>(?:.*)', re.DOTALL)
+m=jschl_vc_finder.match(html)
+if not m: sys.exit()
+
+jschl_vc=m.group("value")
+#print jschl_vc
+
+jschl_answer_finder = re.compile("(?:.*)\$\('#jschl_answer'\).val\((?P<value>[^)]+)\);(?:.*)", re.DOTALL)
+m=jschl_answer_finder.match(html)
+if not m: sys.exit()
+
+jschl_answer=m.group("value")
+#print jschl_answer
+
+jschl_answer=eval("str(int(%s))"%(jschl_answer))
+#print jschl_answer
+
+formdata = { "act" : "jschl", "jschl_vc": jschl_vc, "jschl_answer" : jschl_answer }
+#print formdata
+data_encoded = urllib.urlencode(formdata)
+
+# This POST does not work with urllib2, so shell out to wget instead
+command="/usr/bin/wget -O- %s --post-data '%s'"%(URL,data_encoded)
+#command="/usr/bin/curl %s -d '%s' -D-"%(URL,data_encoded)
+args = shlex.split(command)
+html = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
+
+url_finder = re.compile('(?:.*)href="(?P<URL>[a-zA-Z0-9/]+Gens-gs-r(?:[\d]+)\.tar\.gz)"(?:.*)', re.DOTALL)
+m=url_finder.match(html)
+if not m: sys.exit()
+
+url="%s%s"%(DOMAIN,m.group("URL"))
+print "%s"%(url,url)
+
+"""
+print "curl %s -d '%s' -D-"%(URL,data_encoded)
+print "wget -O- %s --post-data '%s' | grep Source"%(URL,data_encoded)
+try:
+    txheaders={'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
+
+    req=urllib2.Request(URL,data_encoded,txheaders)
+    r=urllib2.urlopen(req)
+    html=r.read()
+except urllib2.HTTPError, e:
+    print e.info()
+    html=e.read()
+"""
diff --git a/jag.py b/jag.py
new file mode 100644
index 0000000..1bfc973
--- /dev/null
+++ b/jag.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+import urllib2
+import re
+
+r=urllib2.urlopen("http://jag.xlabsoft.com/download.php")
+html=r.read()
+
+finder=re.compile("(?:.*)(?P<version>jag-(?:[\d\.]+)-src\.(?:zip|xz|gz|bz2))(?:.*)", re.DOTALL)
+m=finder.match(html)
+if m:
+    v=m.group('version')
+    print "<a href=\"%s\">%s</a>"%(v,v)
diff --git a/legends.py b/legends.py
new file mode 100644
index 0000000..3b1713e
--- /dev/null
+++ b/legends.py
@@ -0,0 +1,9 @@
+#!/usr/bin/python
+import urllib2
+import re
+h=urllib2.urlopen("http://legendsthegame.net/index.php?m=fileswap").read()
+f=re.compile('title="Download (?:.*) Linux (?:.*) (?P<version>[\d\.]+)"', re.M)
+m=f.finditer(h)
+for x in m:
+ v=x.group("version")
+ print "%s"%(v,v)
diff --git a/nikki.py b/nikki.py
new file mode 100644
index 0000000..0c7f7f9
--- /dev/null
+++ b/nikki.py
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+import urllib2
+import re
+r=urllib2.urlopen("http://code.joyridelabs.de/nikki/src/CHANGELOG")
+h=r.read()
+finder=re.compile("^(?P<version>[\d\.]+)(?:.*)")
+m=finder.match(h)
+if m:
+ v=m.group("version")
+ print("%s"%(v,v))
diff --git a/snowstorm.py b/snowstorm.py
new file mode 100644
index 0000000..ef44e13
--- /dev/null
+++ b/snowstorm.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+import urllib2
+import re
+URL="http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/integration_viewer-development/arch/Linux/quicklink.html"
+
+if __name__ == "__main__":
+    f=urllib2.urlopen(URL)
+    html=f.read()
+    url_finder=re.compile("(?:.*)URL=(?P<url>.*\.tar\.(?:bz2|xz|gz))(?:.*)", flags=re.DOTALL)
+
+    result=url_finder.match(html)
+    if result:
+        g=result.group("url")
+        print "<a href=\"%s\">%s</a>"%(g,g)
diff --git a/update.sh b/update.sh
new file mode 100644
index 0000000..7744880
--- /dev/null
+++ b/update.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+python frostwire.py > frostwire.html 2> frostwire.log
+python warsow.py > warsow.html 2> warsow.log
+python snowstorm.py > snowstorm.html 2> snowstorm.log
+python wakeonplan.py > wakeonplan.html 2> wakeonplan.log
+python gens-gs.py > gens-gs.html 2> gens-gs.log
+python jag.py > jag.html 2> jag.log
+python nikki.py > nikki.html 2> nikki.log
+python legends.py > legends.html 2> legends.log
diff --git a/wakeonplan.py b/wakeonplan.py
new file mode 100644
index 0000000..5eae92d
--- /dev/null
+++ b/wakeonplan.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+import urllib2
+import re
+
+URL="http://bazaar.launchpad.net/~xintx-ua/wakeonplan/wakeonplan/files"
+TARGET_URL="http://bazaar.launchpad.net/~xintx-ua/wakeonplan/wakeonplan/changes/%s"
+
+if __name__ == "__main__":
+    r=urllib2.urlopen(URL)
+    html = r.read()
+
+    rev_finder=re.compile("(?:.*) \(revision (?P<rev>[\d]+)\)(?:.*)", re.DOTALL)
+    finding=rev_finder.match(html)
+    if finding:
+        url=TARGET_URL%(finding.group("rev"))
+        print "<a href=\"%s\">%s</a>"%(url,url)
diff --git a/warsow.py b/warsow.py
new file mode 100644
index 0000000..0686f54
--- /dev/null
+++ b/warsow.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+import httplib
+for link in ["linux102"]:
+    conn = httplib.HTTPConnection("www.warsow.net")
+    conn.request("HEAD", "/download?dl=%s"%(link))
+    res = conn.getresponse()
+ print ""%(res.status, res.reason)
+    h = res.getheaders()
+    #print h
+    for k,v in h:
+        if k == "location":
+            v = v.replace(".tar.gz", "_unified.tar.gz")
+ print "%s"%(v,v)