project_final_project_word_cloud.py
# FINAL PROJECT OF SYSTEM AUTOMATION:
# Using word cloud.
# Create a dictionary with words and word frequencies that can be passed to the
# generate_from_frequencies function of the WordCloud class.
# Snippet for generating an image with WordCloud:
cloud = wordcloud.WordCloud()
cloud.generate_from_frequencies(frequencies)
cloud.to_file("myfile.jpg")
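
# A minimal, self-contained sketch of the snippet above (not part of the
# original assignment): the frequencies argument is just a dict mapping words
# to counts, and the sample words/counts below are made up for illustration.
import wordcloud
from matplotlib import pyplot as plt

example_frequencies = {"python": 10, "cloud": 7, "word": 5, "project": 3}  # hypothetical data
example_cloud = wordcloud.WordCloud()
example_cloud.generate_from_frequencies(example_frequencies)
plt.imshow(example_cloud.to_array(), interpolation='nearest')
plt.axis('off')
plt.show()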
# start writing code:
# Here are all the installs and imports you will need for your word cloud script and uploader widget
!pip install wordcloud
!pip install fileupload
!pip install ipywidgets
!jupyter nbextension install --py --user fileupload
!jupyter nbextension enable --py fileupload
import wordcloud
import numpy as np
from matplotlib import pyplot as plt
from IPython.display import display
import fileupload
import io
import sys
# Load the file with text.
# This is the uploader widget:
def _upload():
    _upload_widget = fileupload.FileUploadWidget()

    def _cb(change):
        global file_contents
        decoded = io.StringIO(change['owner'].data.decode('utf-8'))
        filename = change['owner'].filename
        print('Uploaded `{}` ({:.2f} kB)'.format(
            filename, len(decoded.read()) / 2 ** 10))
        file_contents = decoded.getvalue()

    _upload_widget.observe(_cb, names='data')
    display(_upload_widget)

_upload()
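
# The fileupload widget above only runs inside a classic Jupyter notebook.
# As a fallback sketch (not part of the original assignment), file_contents
# can also be filled by reading a local text file; 'sample_text.txt' below is
# a hypothetical path and is only used if such a file exists.
import os
if os.path.exists('sample_text.txt'):  # hypothetical path
    with open('sample_text.txt', encoding='utf-8') as f:
        file_contents = f.read()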
# Remove uninteresting words and build the word-frequency dictionary.
def calculate_frequencies(file_contents):
    # Here is a list of punctuations and uninteresting words you can use to process your text
    punctuations = '''!()-[]{};:'"\\,<>./?@#$%^&*_~'''
    uninteresting_words = ["the", "a", "to", "if", "is", "it", "of", "and", "or", "an", "as", "i", "me", "my",
                           "we", "our", "ours", "you", "your", "yours", "he", "she", "him", "his", "her", "hers", "its", "they", "them",
                           "their", "what", "which", "who", "whom", "this", "that", "am", "are", "was", "were", "be", "been", "being",
                           "have", "has", "had", "do", "does", "did", "but", "at", "by", "with", "from", "here", "when", "where", "how",
                           "all", "any", "both", "each", "few", "more", "some", "such", "no", "nor", "too", "very", "can", "will", "just"]
    # Write code to complete the task: fill `frequencies` with word counts
    # (see the final solution below).
    frequencies = {}

    # wordcloud
    cloud = wordcloud.WordCloud()
    cloud.generate_from_frequencies(frequencies)
    return cloud.to_array()
# Final solution
# (The installs, imports, and uploader widget are the same as above.)
def calculate_frequencies(file_contents):
    # Here is a list of punctuations and uninteresting words you can use to process your text
    punctuations = '''!()-[]{};:'"\\,<>./?@#$%^&*_~'''
    uninteresting_words = ["the", "a", "to", "if", "is", "it", "of", "and", "or", "an", "as", "i", "me", "my",
                           "we", "our", "ours", "you", "your", "yours", "he", "she", "him", "his", "her", "hers", "its", "they", "them",
                           "their", "what", "which", "who", "whom", "this", "that", "am", "are", "was", "were", "be", "been", "being",
                           "have", "has", "had", "do", "does", "did", "but", "at", "by", "with", "from", "here", "when", "where", "how",
                           "all", "any", "both", "each", "few", "more", "some", "such", "no", "nor", "too", "very", "can", "will", "just"]
    # LEARNER CODE START HERE
    # Lowercase the text, split it into words, and count every purely
    # alphabetic word that is not in the uninteresting_words list.
    frequencies = {}
    file_contents = file_contents.lower()
    words = file_contents.split()
    for word in words:
        if word not in uninteresting_words:
            if word.isalpha():
                if word not in frequencies:
                    frequencies[word] = 1
                else:
                    frequencies[word] += 1

    # Build the word cloud from the frequency dictionary and return it as an image array.
    cloud = wordcloud.WordCloud()
    cloud.generate_from_frequencies(frequencies)
    return cloud.to_array()
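
# Note: the punctuations string above comes from the assignment template but is
# not actually used; the solution simply drops any token that is not purely
# alphabetic. A hedged alternative sketch (not the assignment's required
# solution, and the function name is our own): strip the punctuation characters
# with str.translate and count words with collections.Counter, whose result is
# a dict subclass that generate_from_frequencies also accepts.
from collections import Counter

def count_word_frequencies(text, stop_words, punctuations='''!()-[]{};:'"\\,<>./?@#$%^&*_~'''):
    # Remove punctuation characters, lowercase, and split into words.
    cleaned = text.lower().translate(str.maketrans('', '', punctuations))
    # Count every purely alphabetic word that is not a stop word.
    return Counter(w for w in cleaned.split() if w.isalpha() and w not in stop_words)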
# Display your wordcloud image
myimage = calculate_frequencies(file_contents)
plt.imshow(myimage, interpolation='nearest')
plt.axis('off')
plt.show()
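
# Optionally save the rendered word cloud to disk. calculate_frequencies
# returns a plain image array, so matplotlib's imsave can write it directly;
# the output filename below is just an example, not part of the assignment.
plt.imsave('my_wordcloud.png', myimage)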