-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathudhr_length_ranking.py
More file actions
37 lines (28 loc) · 1.11 KB
/
udhr_length_ranking.py
File metadata and controls
37 lines (28 loc) · 1.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# Website url: http://research.ics.aalto.fi/cog/data/udhr/
# Import libraries
import requests
from bs4 import BeautifulSoup
import PyPDF2, io, requests
import re
if __name__ == "__main__":
    # Index page of UDHR translations. Each "pdf" anchor links to one
    # language's PDF; a sibling anchor whose href contains "LangID=XXX"
    # carries that language's display name.
    url = 'http://research.ics.aalto.fi/cog/data/udhr/'

    # Fetch and parse the index page. A timeout prevents an indefinite
    # hang; raise_for_status stops early on an HTTP error page.
    page = requests.get(url, timeout=30)
    page.raise_for_status()
    soup = BeautifulSoup(page.text, 'lxml')

    # Collect (language name, pdf href) pairs.
    # NOTE(review): href[4:7] assumes links look like "pdf/XXX..." where
    # XXX is the 3-character language id — confirm against the live page.
    hrefs = []
    names = []
    for a in soup.find_all('a', href=True, string="pdf"):  # string= replaces deprecated text=
        lang_id = a['href'][4:7]
        name_link = soup.find('a', href=re.compile("LangID=" + re.escape(lang_id)))
        if name_link is None:
            # No matching language-name anchor; skip rather than crash on .text.
            continue
        hrefs.append(a['href'])
        names.append(name_link.text)

    # Download each PDF and record its page count per language.
    page_counts = {}
    for name, href in zip(names, hrefs):
        response = requests.get(url + href, timeout=60)
        response.raise_for_status()
        pdf_file = io.BytesIO(response.content)
        try:
            # Modern PyPDF2 (>= 2.0) API.
            num_pages = len(PyPDF2.PdfReader(pdf_file).pages)
        except AttributeError:
            # Legacy PyPDF2 (< 2.0): PdfReader does not exist yet.
            num_pages = PyPDF2.PdfFileReader(pdf_file).numPages
        page_counts[name] = num_pages

    # Rank languages from shortest to longest declaration (by page count).
    print(sorted(page_counts.items(), key=lambda item: item[1]))