# Python 3 code for preprocessing text into a Bag of Words representation
import heapq
import re

import nltk
import numpy as np

# Download the Punkt tokenizer models used by sent_tokenize and word_tokenize
nltk.download('punkt')
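# Note: recent NLTK releases may also need the 'punkt_tab' resource; if
# tokenization raises a LookupError, try nltk.download('punkt_tab') as well.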
# Sample text to process:
text = "Beans. I was trying to explain to somebody as we were flying in, that’s corn. That’s beans. And they were very impressed at my agricultural knowledge. Please give it up for Amaury once again for that outstanding introduction. I have a bunch of good friends here today, including somebody who I served with, who is one of the finest senators in the country, and we’re lucky to have him, your Senator, Dick Durbin is here. I also noticed, by the way, former Governor Edgar here, who I haven’t seen in a long time, and somehow he has not aged and I have. And it’s great to see you, Governor. I want to thank President Killeen and everybody at the U of I System for making it possible for me to be here today. And I am deeply honored at the Paul Douglas Award that is being given to me. He is somebody who set the path for so much outstanding public service here in Illinois. Now, I want to start by addressing the elephant in the room. I know people are still wondering why I didn’t speak at the commencement."
# Split the text into sentences, then normalize each one
dataset = nltk.sent_tokenize(text)
for i in range(len(dataset)):
    dataset[i] = dataset[i].lower()
    dataset[i] = re.sub(r'\W', ' ', dataset[i])   # replace non-word characters with spaces
    dataset[i] = re.sub(r'\s+', ' ', dataset[i])  # collapse repeated whitespace
print("\nSentence dataset:\n")
for sentence in dataset:
    print(sentence)
# Creating the Bag of Words model: count how often each word occurs
word2count = {}
for data in dataset:
    words = nltk.word_tokenize(data)
    for word in words:
        if word not in word2count:
            word2count[word] = 1
        else:
            word2count[word] += 1

print("\nWords:\n")
for word in word2count:
    print(word)

print("\nWord counts:\n")
print(word2count)
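# The same counts could also be built with collections.Counter; this
# equivalent alternative (not in the original script) is shown for comparison:
from collections import Counter

word2count_alt = Counter(
    word for data in dataset for word in nltk.word_tokenize(data)
)
assert word2count_alt == word2count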
# Keep the 100 most frequent words as the vocabulary
freq_words = heapq.nlargest(100, word2count, key=word2count.get)
# Build a binary document-term matrix: one row per sentence and one
# column per vocabulary word (1 if the word occurs in the sentence, else 0)
X = []
for data in dataset:
    sentence_words = set(nltk.word_tokenize(data))  # tokenize once per sentence
    vector = []
    for word in freq_words:
        if word in sentence_words:
            vector.append(1)
        else:
            vector.append(0)
    X.append(vector)
X = np.asarray(X)

print("\nBag of words:\n")
print(X)
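# For comparison, scikit-learn's CountVectorizer builds a similar binary
# document-term matrix in a few lines. This is an optional alternative
# sketch, not part of the original script; it assumes scikit-learn is
# installed, and its built-in tokenizer may yield a slightly different
# vocabulary than the NLTK pipeline above.
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(binary=True, max_features=100)
X_sklearn = vectorizer.fit_transform(dataset).toarray()
print("\nBag of words (scikit-learn):\n")
print(X_sklearn)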