# news_classifier.py
import pandas as pd
from nltk.probability import FreqDist
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# Adapted from Documents/building-features-text-data/code
# (http://localhost:8888/notebooks/Documents/building-features-text-data/code/12b-Stemmer_HashingVectorizer_NaiveBayesClassifier.ipynb).
# In this example the label is Y and X is the text, as in the dbpedia part
# of the original notebook.
def stemmed_words(doc):
    # Stem every token produced by the base analyzer; used by the
    # commented-out stemming vectorizer below.
    return (stemmer.stem(w) for w in analyzer(doc))
def summarize_classification(y_test, y_pred):
    # Report accuracy as a rate and as a raw count of correct predictions,
    # plus weighted precision and recall across the comment-count classes.
    acc = accuracy_score(y_test, y_pred, normalize=True)
    num_acc = accuracy_score(y_test, y_pred, normalize=False)
    prec = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    print("Length of testing data: ", len(y_test))
    print("accuracy_count : ", num_acc)
    print("accuracy_score : ", acc)
    print("precision_score : ", prec)
    print("recall_score : ", recall)
stemmer = SnowballStemmer('english')
analyzer = HashingVectorizer().build_analyzer()  # default word analyzer
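# Added illustration (the sample text is invented, not from the dataset):
# the stemming analyzer lowercases, tokenizes, and stems, e.g.
#   list(stemmed_words("Running quickly matters"))  # -> ['run', 'quick', 'matter']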
df = pd.read_csv('./archive/nyt-articles-2020.csv')
X = df['headline']    # input text
Y = df['n_comments']  # label: comment count per article
# Frequency filtering: treat any token that appears at least 100 times
# across all headlines as an extra stop word.
tokens = word_tokenize("\n".join(X.values))
freq = FreqDist(tokens)
frequent_words = []
for key, value in freq.items():
    if value >= 100:
        frequent_words.append(key.lower())
stop_words = text.ENGLISH_STOP_WORDS.union(frequent_words)
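# Added sanity check (optional): list the highest-frequency tokens that the
# >= 100 cutoff folds into the stop-word list.
# print(freq.most_common(10))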
# Stemming variant (kept for reference): 34 correct test predictions (accuracy_count).
#stem_vectorizer = HashingVectorizer(n_features=2**10, norm='l2', analyzer=stemmed_words, ngram_range=(2,5))
# Word 2-3 grams with the extended stop-word list: 38 correct test predictions.
stem_vectorizer = HashingVectorizer(n_features=2**10, norm='l2', ngram_range=(2,3), stop_words=stop_words)
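# Note (added): HashingVectorizer is stateless: the hashing trick maps each
# n-gram directly to one of the n_features columns, so transform() works
# without a prior fit() and no vocabulary is stored in memory.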
feature_vector = stem_vectorizer.transform(X)
print(feature_vector.shape)

# GaussianNB cannot consume a sparse matrix, so densify the features first
# (toarray() returns an ndarray, which sklearn handles more reliably than
# the np.matrix returned by todense()).
X_dense = feature_vector.toarray()
print(X_dense.shape)

x_train, x_test, y_train, y_test = train_test_split(X_dense, Y, test_size=0.2)
print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
# Train a Gaussian Naive Bayes classifier on the dense hashed features.
clf = GaussianNB().fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(y_test)
print(y_pred)
summarize_classification(y_test, y_pred)
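# Added variant (a sketch, not the approach used above): GaussianNB requires
# dense input, which is memory-hungry for large corpora. A linear model such
# as sklearn's SGDClassifier accepts the sparse hashed features directly, so
# the toarray() step can be skipped entirely:
#
# from sklearn.linear_model import SGDClassifier
# xs_train, xs_test, ys_train, ys_test = train_test_split(feature_vector, Y, test_size=0.2)
# sparse_clf = SGDClassifier().fit(xs_train, ys_train)
# summarize_classification(ys_test, sparse_clf.predict(xs_test))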