-
Notifications
You must be signed in to change notification settings - Fork 33
/
Copy pathDir-Xcan6.py
264 lines (231 loc) · 8.92 KB
/
Dir-Xcan6.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
#!/usr/bin/python
# LAST UPDATE 18/12/14
#
# DIR-XCAN5.PY
# This program is for finding hidden directories that are not directly linked on a website. It finds HTTP response code 200 directories and outputs each URL to a file.
# THIS PROGRAM IS A PYTHON VERSION OF THE OWASP'S DIRBUSTER PROJECT THAT IS NOW CLOSED
# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project
#
# This script uses OWASP's DirBuster list - directory-list-2.3-medium.txt
#
# Copyright 2007 James Fisher
#
# This work is licensed under the Creative Commons
# Attribution-Share Alike 3.0 License. To view a copy of this
# license, visit http://creativecommons.org/licenses/by-sa/3.0/
# or send a letter to Creative Commons, 171 Second Street,
# Suite 300, San Francisco, California, 94105, USA.
#
# ADD ME ON TWITTER @NOOBIEDOG
#Changelog:
# [DONE] - Http Authentication
# [DONE] - Add COLOR.
# [DONE] - http Proxy options.
# [DONE] - Kill threads on Ctrl+C.
# [DONE] - Defaults added to Arguments.
# [DONE] - Now using Requests instead of Urllib2.
# [DONE] - Verbose modes added, prints found and Non-Authed folders.
# [DONE] - Added User-Agent option.
# [DONE] - Cookie Authentication (with multiple cookies)
# [DONE] - SOCKS Proxy options # To use TOR socks5://127.0.0.1:9050 or socks4://127.0.0.1:9050
#TODO:
# Change number of threads based on response time from server.
# Fix error reporting for connection issues.
# Add Pause/Stop/Start functions to script.
# Add XML output option.
# Custom 404 page option.
# Add NTLM Authentication
__author__ = '@NoobieDog'
from sys import argv
import argparse
import Queue
import sys
import threading
#import requests
import requesocks
import re
import time
# Console colors: ANSI escape sequences used throughout to colorize
# terminal output (reset with W / NB).
W = '\033[0m' # white (normal / reset)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
GR = '\033[37m' # gray
BB = '\033[1m' # Bold
NB = '\033[0m' # Not bold (reset)
def mapcount(listing):
    """Return the number of lines in the word-list file *listing*.

    Streams the file line by line, so even very large word lists are
    counted without loading them into memory.  Raises IOError if the
    file cannot be opened.
    """
    # The original pre-initialised a `lines = 0` local that was
    # immediately overwritten; the sum over the file iterator is enough.
    with open(listing) as f:
        return sum(1 for line in f)
# ---------------------------------------------------------------------------
# Command-line interface.
# NOTE(review): the `version=` keyword on ArgumentParser was deprecated in
# Python 2.7 and removed in Python 3, where this call raises TypeError.  The
# script targets Python 2 (see the `except IOError, msg` syntax below).
parser = argparse.ArgumentParser(
    version='5.0',
    description='A Python version of DirBuster',
    epilog='Dir-Xcan is a multi threaded python application designed to brute force directories on web/application servers.')
parser.add_argument('-s', action="store", help='Website Domain or IP')
parser.add_argument('-d', action="store", help='Directory word list', default="directorylist.txt")
parser.add_argument('-o', action="store", help='Output file name (HTML)', default="Dir-Xcan-results.html")
parser.add_argument('-n', action="store", help='Number of threads', default="5")
parser.add_argument('-p', action="store", help='Proxy address and port (host:port)')
parser.add_argument('-a', action="store", help='Authentication BasicHTTP(username:password)')
parser.add_argument('-c', action="store", help='use a previously established sessions cookie', default=None)
parser.add_argument('-u', action="store", help='User-Agent', default="Mozilla/5.0")
parser.add_argument("-V", action="store_true", help="Output information about new data.")
try:
    # vars() turns the Namespace into a plain dict: option flag -> value.
    results = vars(parser.parse_args())
    #results = parser.parse_args()
except IOError, msg:
    parser.error(str(msg))
# ASCII-art banner; the %s placeholders are filled with the colour codes
# defined above (Python 2 print statement).
print O + '''
 %s _____ _____ _____ __ _______ _ _
| __ \_ _| __ \ \ \ / / ____| /\ | \ | |
| | | || | | |__) |____\ V / | / \ | \| |
| | | || | | _ /______> <| | / /\ \ | . ` |
| |__| || |_| | \ \ / . \ |____ / ____ \| |\ |
|_____/_____|_| \_\ /_/ \_\_____/_/ \_\_| \_|%s
%sRelease Date%s: 06/10/2014
%sRelease Version%s: V.5.0
%sCode%s: stuart@sensepost.com // @NoobieDog
%sVisit%s: www.sensepost.com // @sensepost
''' %(BB,NB,R,W,R,W,R,W,R,W)
# Runtime option flags; each is flipped on below once the corresponding
# command-line argument is present.  These (and the Proxies/headers/
# Auth_User/Auth_Pwd globals set here) are read by GetURL() in every
# worker thread.
ProxyOpt = False
AuthOpt = False
CookiesOpt = False
# -s (target) and -d (word list) are mandatory; show usage and quit when
# either is missing.
if not results['s'] or not results['d']:
    parser.print_help()
    exit()
else:
    target = results['s']
    # The HTTP library needs a full URL, scheme included.
    if not target.startswith("http"):
        print R + ' Please include the http:// or https:// parts' + W
        exit()
    list_file = results['d']
    outputname = results['o']
    ThreadNumber = int(results['n'])
    Proxy_Addr = results['p']
    Auth_Data = results['a']  # NOTE(review): never read again; Auth_User/Auth_Pwd below are used instead
    Usr_Agent = results['u']
if results['p']:
    ProxyOpt = True
    # Same proxy is used for both plain and TLS traffic.
    Proxies = {
        "http": Proxy_Addr,
        "https": Proxy_Addr
    }
if results['a']:
    AuthOpt = True
    # -a is "username:password"; split on the first ':' only so passwords
    # containing ':' survive intact.
    Auth_User, Auth_Pwd = results['a'].split(':', 1)
if results['u']:
    # -u has a default, so `headers` is always defined in practice.
    headers = {
        'User-Agent': Usr_Agent,
    }
# -c: re-use an established session cookie.  Accepts either a single
# "name=value" pair or several separated by ';' (browser Cookie-header
# style).  Builds the `cookies` dict consumed by GetURL().
#
# BUG FIX: the original multi-cookie branch validated entries with the
# regex '[":_-/a-zA-Z0-9]', whose `_-/` is a reversed character range and
# raises "bad character range" at runtime; it also split on every '=',
# corrupting values that themselves contain '='.  Both are fixed below.
if results['c'] is not None:
    CookiesOpt = True
    cookies = {}
    # Splitting on ';' handles the single-cookie case too (one element).
    for raw_cookie in results['c'].split(';'):
        raw_cookie = raw_cookie.strip()
        if not raw_cookie:
            # Tolerate a trailing ';' or doubled separators.
            continue
        if '=' in raw_cookie:
            # Split on the FIRST '=' only so values containing '=' survive.
            name, value = raw_cookie.split('=', 1)
            cookies[name] = value
        else:
            # No '=' at all: malformed cookie fragment.
            print(' Error in Cookie - Sort your shit out!')
# Announce the size of the job, then load the whole word list into memory
# (one candidate directory per element, trailing newline still attached —
# GetURL() rstrips it before building the URL).
print O + ' lines to try..' + str(mapcount(list_file)) + W
with open(list_file) as f:
    directorys = f.readlines()
# Work queue shared by the scanner threads.
queue = Queue.Queue()
# Per-status-code tallies, updated by GetURL() from every worker thread.
NotFound = 0
NotAuthorised = 0  # NOTE(review): never incremented — 401s are counted under Other
Found = 0
Forbidden = 0
Other = 0
LinesLeft = len(directorys)  # countdown driving the progress percentage
Lines = len(directorys)
def GetURL(host, target):
    """Request target/host, classify the HTTP status code into the global
    tallies, and append 200 / 401 hits to the HTML report.

    host   -- one candidate directory name from the word list (trailing
              newline is stripped before the request)
    target -- base URL including scheme, e.g. http://example.com

    Terminates the whole process on a connection error.
    """
    global NotFound, Found, Forbidden, Other, LinesLeft, Lines
    # Redraw the single-line progress counter (\r + clear-to-end-of-line).
    sys.stdout.write("\r\x1b[K \033[31m%d \033[0mFound, \033[33m%d \033[0mForbidden, \033[32m%d \033[0mNotFound, \033[37m%d \033[0mOther, \033[37m%d \033[0mPercent Left" % (Found, Forbidden, NotFound, Other, LinesLeft*100/Lines))
    sys.stdout.flush()
    try:
        candidate = target + '/' + str(host.rstrip())
        # BUG FIX: the combined auth+proxy case must be checked FIRST.  In
        # the original it sat AFTER the single-option branches, making it
        # unreachable, and it passed the bare address string (Proxy_Addr)
        # instead of the Proxies dict that requests-style APIs expect.
        if AuthOpt and ProxyOpt:
            url = requesocks.get(candidate, proxies=Proxies, auth=(Auth_User, Auth_Pwd), headers=headers)
        elif AuthOpt:
            url = requesocks.get(candidate, auth=(Auth_User, Auth_Pwd), headers=headers)
        elif ProxyOpt:
            url = requesocks.get(candidate, proxies=Proxies, headers=headers)
        elif CookiesOpt:
            url = requesocks.get(candidate, cookies=cookies, headers=headers)
        else:
            url = requesocks.get(candidate, headers=headers)
        code = url.status_code
        # NOTE(review): the counter updates below are not atomic across
        # threads, so tallies may drift slightly under high thread counts.
        if code == 401:
            Other += 1
            LinesLeft -= 1
            outputfile.write("<A HREF='" + target + "/" + host + "'>" + target + '/' + host + " - <STRONG>REQUIRES AUTHENTICATION</STRONG><br>\n")
            if results['V']:
                sys.stdout.write("\r\x1b[K\033[33m %s/%s\033[0m-REQUIRES AUTHENTICATION" % (target, host))
                sys.stdout.flush()
        elif code == 403:
            Forbidden += 1
            LinesLeft -= 1
        elif code == 404:
            NotFound += 1
            LinesLeft -= 1
        elif code == 200:
            Found += 1
            LinesLeft -= 1
            outputfile.write("<A HREF='" + target + "/" + host + "'>" + target + '/' + host + "<br>\n")
            if results['V']:
                sys.stdout.write("\r\x1b[K\033[31m %s/%s\033[0m-FOUND" % (target, host))
                sys.stdout.flush()
        else:
            # Anything else (3xx, 5xx, ...) is lumped into Other.
            Other += 1
            LinesLeft -= 1
    except requesocks.ConnectionError:
        # Close out the report so the HTML written so far stays valid.
        outputfile.write("We failed to reach a server.<br>Reason: Connection Error</BODY></HTML>")
        outputfile.close()
        print(R + "\n ERROR: Connection Error - Check target is correct or exists" + W)
        sys.exit()
class ThreadUrl(threading.Thread):
    """Worker thread: pulls word-list entries off the shared queue and
    probes each one against the target via GetURL()."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        # Spin until the process exits; these threads are started as
        # daemons, so they die with the interpreter.
        while True:
            try:
                entry = self.queue.get()
                GetURL(entry, target)
                self.queue.task_done()
            except SystemExit:
                print(R + '\n Shutting down! ' + W + '....')
def main():
    """Start the worker pool, enqueue every word-list entry, then block
    until the queue has been fully processed."""
    for _ in range(ThreadNumber):
        worker = ThreadUrl(queue)
        worker.daemon = True  # don't keep the interpreter alive on exit
        worker.start()
    try:
        # BUG FIX: queue.join() used to sit INSIDE this loop, so the main
        # thread waited for the queue to drain after every single put(),
        # serialising the scan and defeating the thread pool.  Enqueue
        # everything first, then join once.
        for host in directorys:
            queue.put(host)
        queue.join()
    except (KeyboardInterrupt, SystemExit):
        print(R + '\n Ctrl+C Detected! ' + W + '....' + R + '\n Shutting down! ' + W + '....')
        sys.exit()
# --- script entry -----------------------------------------------------------
# Open the HTML report (GetURL() writes into `outputfile` from the worker
# threads), run the scan, close the report, and print elapsed wall-clock time.
outputfile = open(outputname, "wb")
outputfile.write("<HTML><HEAD><TITLE>" + target + "</TITLE></HEAD><BODY>\n");
start = time.time()
main()
outputfile.write("</BODY></HTML>");
outputfile.close()
print O + '\n Elapsed Time: \033[0m%s' % (time.time() - start)