-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsherdog_scraper.py
97 lines (72 loc) · 2.34 KB
/
sherdog_scraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import re
import os
#https://realpython.com/python-web-scraping-practical-introduction/
def simple_get(url):
    """
    Fetch `url` with an HTTP GET and return the raw body.

    Returns the response content only when the response looks like
    HTML/XML (as judged by `is_good_response`); returns None on a
    non-HTML response or on any request failure, logging the error.
    """
    try:
        resp = get(url, stream=True)
        with closing(resp):
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        # Network/protocol failures are logged, not raised — callers
        # treat None as "could not fetch".
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Return True if the response looks like an HTML page, False otherwise.

    A "good" response has status code 200 and a Content-Type header
    containing 'html'. Uses dict.get with a default so a response with
    no Content-Type header yields False instead of raising KeyError
    (the original indexed resp.headers['Content-Type'] directly, and
    its `content_type is not None` check was dead code — .lower()
    would already have raised on None).
    """
    content_type = resp.headers.get('Content-Type', '').lower()
    return (resp.status_code == 200
            and 'html' in content_type)
def log_error(e):
    """
    Record an error.

    For now this just writes the message to stdout; swap in real
    logging here if the scraper ever needs it.
    """
    print(e)
# simple_get(catch_link)
def get_all_html(url):
    """
    Fetch `url`, print the parsed HTML document, and return it.

    Returns:
        BeautifulSoup: the parsed document.

    Raises:
        Exception: if the page could not be retrieved.
    """
    response = simple_get(url)
    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        print(html)  # keep the original script's debug output
        # Bug fix: the original did `return print(html)`, which always
        # returned None since print() returns None; return the document.
        return html
    # Raise an exception if we failed to get any data from the url
    raise Exception('Error retrieving contents at {}'.format(url))
# get_all_html(welter_link)
def get_fighters(url):
    """
    Scrape a Sherdog listing page and print basic info about its fighter rows.

    Prints the number of <tr> elements found, then the fourth row and
    its type (exploratory output, matching the original script).

    Raises:
        Exception: if the page could not be retrieved.
    """
    response = simple_get(url)
    if response is None:
        # Raise an exception if we failed to get any data from the url.
        # Guard clause up front, so html_tr below is always bound —
        # the original printed html_tr after the if/else, relying on
        # the else branch raising.
        raise Exception('Error retrieving contents at {}'.format(url))
    html = BeautifulSoup(response, 'html.parser')
    html_tr = html.find_all('tr')
    print(len(html_tr))
    print(html_tr[3])
    print(type(html_tr[3]))
#get_fighters(heavy_link)