antplanner.py
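# AntPlanner: a web.py application for Google App Engine that scrapes the
# UCI WebSoc course listings (websoc.reg.uci.edu), looks up professors on
# RateMyProfessors.com, and saves/loads user schedules. Scraped pages are
# cached in memcache to limit outbound fetches. (Summary inferred from the
# handlers below.)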
import web
import urllib
import scraper
import schedule
from admin import *  # provides the admin handler classes mapped below (admin, adminFlushCache, latestWebSoc, deleteOldSchedules)
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.runtime import DeadlineExceededError
from django.utils import simplejson as json
import logging
urls = (
    '/', 'index',
    '/search', 'search',
    '/schedules', 'schedules',
    '/schedule/save', 'saveSchedule',
    '/schedule/load', 'loadSchedule',
    '/admin', 'admin',
    '/admin/flush-cache', 'adminFlushCache',
    '/admin/latest-web-soc', 'latestWebSoc',
    '/admin/delete-old-schedules', 'deleteOldSchedules',
    #'/prof', 'getProf'
)
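# web.py dispatches each URL pattern above to the class named in the string
# that follows it; the GET/POST methods on that class handle matching requests.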
render = web.template.render('templates/')
class index:
    def GET(self):
        return render.index()
class search:
    def GET(self):
        # Cache-aside: serve the stripped search page from memcache,
        # scraping websoc.reg.uci.edu only on a cache miss.
        search_page = memcache.get("SEARCH")
        if search_page is None:
            try:
                raw_page = urlfetch.fetch("http://websoc.reg.uci.edu")
                search_page = scraper.strip_search(raw_page.content)
                memcache.set("SEARCH", search_page, 24 * 60 * 60)  # 24 hours
            except urlfetch.Error:
                search_page = "UCI webpage is not available at the moment"
        return render.search(search_page)
class schedules:
    def POST(self):
        p = web.input()
        # Mirror the WebSoc search form fields into a POST payload.
        form_fields = {
            "Breadth": p.Breadth,
            "CancelledCourses": p.CancelledCourses,
            "ClassType": p.ClassType,
            "CourseCodes": p.CourseCodes,
            "CourseNum": p.CourseNum,
            "CourseTitle": p.CourseTitle,
            "Days": p.Days,
            "Dept": p.Dept,
            "Division": p.Division,
            "EndTime": p.EndTime,
            "FontSize": p.FontSize,
            "FullCourses": p.FullCourses,
            "InstrName": p.InstrName,
            "MaxCap": p.MaxCap,
            "StartTime": p.StartTime,
            "Submit": p.Submit,
            "Units": p.Units,
            "YearTerm": p.YearTerm,
        }
        try:
            # These checkbox fields are only present when checked.
            form_fields["ShowComments"] = p.ShowComments
            form_fields["ShowFinals"] = p.ShowFinals
        except AttributeError:
            pass
        form_data = urllib.urlencode(form_fields)
        # Sort the values so the cache key is stable across requests;
        # dict iteration order is not guaranteed.
        form_hash = ''.join(sorted(form_fields.values()))
        schedule_page = memcache.get(form_hash)
        if schedule_page is None:
            try:
                raw_page = urlfetch.fetch("http://websoc.reg.uci.edu",
                                          payload=form_data,
                                          method=urlfetch.POST,
                                          headers={'Content-Type': 'application/x-www-form-urlencoded'})
                schedule_page = scraper.strip_schedule(raw_page.content)
                memcache.set(form_hash, schedule_page, 6 * 60 * 60)  # 6 hours
            except urlfetch.Error:
                schedule_page = "UCI webpage is not available at the moment"
        return render.schedule(schedule_page)
class saveSchedule:
    def POST(self):
        p = web.input()
        # TODO: error handling
        schedule.save_schedule(p.username, p.caldata)
        return '{"success":"true"}'
class loadSchedule:
    def GET(self):
        return schedule.load_schedule(web.input().username)
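# Example requests for the two schedule endpoints above (hypothetical values):
#   POST /schedule/save  with form fields username=<name>, caldata=<calendar data>
#        -> '{"success":"true"}'
#   GET  /schedule/load?username=<name>
#        -> the caldata previously stored for that user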
class getProf:
    # Note: the '/prof' route for this handler is commented out in urls above.
    def GET(self):
        p = web.input(names=[])
        # web.input() always returns a Storage object, so only the list
        # itself needs checking.
        if not p.names:
            return '{"success": "Invalid or empty names parameter"}'
        found = []
        for n in p.names:
            prof = memcache.get(n)
            if prof is None:
                try:
                    raw_page = urlfetch.fetch("http://www.ratemyprofessors.com/SelectTeacher.jsp?the_dept=All&sid=1074&orderby=TLName&letter=" + n.split(',')[0],
                                              method=urlfetch.GET,
                                              deadline=10)
                    prof = scraper.strip_professors(raw_page.content, unicode(n))
                except urlfetch.DownloadError:
                    data = {"success": "urlfetch.DownloadError: RateMyProfessors.com request exceeded maximum of 10 seconds"}
                    return json.dumps(data)
                except urlfetch.Error:
                    data = {"success": "urlfetch.Error: RateMyProfessors.com is not available at the moment"}
                    return json.dumps(data)
                except DeadlineExceededError:
                    data = {"success": "DeadlineExceededError: Request to RateMyProfessors.com timed out."}
                    return json.dumps(data)
            found.extend(prof)
            memcache.set(n, prof, 24 * 60)  # note: memcache TTL is in seconds, so this is 24 minutes
        data = {'success': 'true', 'professors': found}
        return json.dumps(data)
if __name__ == "__main__":
    app = web.application(urls, globals())
    app.cgirun()
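# app.cgirun() serves the application through CGI, which is how web.py apps
# typically ran on the Python 2 Google App Engine runtime.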