Goal: write a small web directory scanner in Python 3.
Features: 1. Scan a specified site
2. Scan against a specified web script type
3. Configurable thread count
4. Optionally save the scan results
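Assuming the finished script is saved as dirscan.py (the file name here is only an illustration), a scan could be launched like this:

python3 dirscan.py -u http://example.com/ -e php -t 20 -r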
First, define a function that parses the command-line arguments:
def parse_option():
    parser = argparse.ArgumentParser(description="The Argument of DirScan")
    parser.add_argument("-u", "--url", dest="url", help="The Url to Scan")
    parser.add_argument("-e", "--extensions", dest="extensions", help="Web Extensions")
    parser.add_argument("-t", "--thread", dest="thread", default=10, type=int, help="The Thread to Scan")
    parser.add_argument("-r", "--report", action="store_true", help="Save The Result of Scan")
    args = parser.parse_args()
    return args
Four arguments are defined: -u specifies the URL to scan, -e specifies the site's script type (which selects the wordlist), -t sets the number of threads (default 10), and -r saves the scan results. parse_args() collects the values into args, which is returned.
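As a quick sanity check (a standalone sketch, not part of the tool itself), the same parser can be fed an explicit argument list to show the defaults in action; the URL below is only an example:

import argparse

parser = argparse.ArgumentParser(description="The Argument of DirScan")
parser.add_argument("-u", "--url", dest="url", help="The Url to Scan")
parser.add_argument("-e", "--extensions", dest="extensions", help="Web Extensions")
parser.add_argument("-t", "--thread", dest="thread", default=10, type=int, help="The Thread to Scan")
parser.add_argument("-r", "--report", action="store_true", help="Save The Result of Scan")

# -t and -r are omitted, so their defaults (10 and False) apply
args = parser.parse_args(["-u", "http://example.com", "-e", "php"])
print(args.url, args.extensions, args.thread, args.report)
# -> http://example.com php 10 False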
Next, define a start function that normalizes the URL and launches the worker threads:
def start(url, ext, count):
    queue = Queue()  # queue of full URLs to scan
    print('\033[34;1m Status Url Path\033[0m')
    global url_r  # used later as the report file name
    # drop any trailing slash from the URL
    if url.endswith("/"):
        url = url.rstrip("/")
    # strip the scheme so url_r can serve as a file name
    # (str.lstrip() strips a character set, not a prefix, so slice instead)
    if url.startswith("https://"):
        url_r = url[len("https://"):]
    elif url.startswith("http://"):
        url_r = url[len("http://"):]
    # open the wordlist and join each entry onto the URL
    with open('%s.txt' % ext, 'r') as f:
        for i in f:
            queue.put(url + i.rstrip('\n'))
    threads = []
    for i in range(count):
        threads.append(Dirscan(queue))  # create the worker threads
    for t in threads:
        t.start()  # start every worker
    for t in threads:
        t.join()  # wait for all workers to finish
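Since each wordlist entry is concatenated directly onto the URL, every line of the wordlist (php.txt for -e php, asp.txt for -e asp, and so on) should begin with a slash. A few illustrative entries:

/index.php
/admin.php
/login.php
/phpinfo.php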
A subclass of threading.Thread does the actual scanning:
class Dirscan(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
    def run(self):
        # load the user-agent list once per thread instead of once per request
        with open("user-agents.txt", "r") as u:
            headers_list = [line.strip() for line in u]
        while not self.queue.empty():
            url_path = self.queue.get()  # take a URL from the queue
            # send each request with a random User-Agent
            headers = {"User-Agent": random.choice(headers_list)}
            try:
                r = requests.get(url=url_path, headers=headers, timeout=6, allow_redirects=False)
                status_code = r.status_code
                if status_code == 200:
                    print("\033[32;1m[+] [%s] %s\033[0m" % (status_code, url_path))
                    if cmd_args.report:  # -r was given: record the hit
                        write_report(url_path)
            except Exception as e:
                print("\033[41;1m%s\033[0m" % e)
Next comes the function that writes the scan results:
def write_report(url):
    # append each hit as a clickable link; the with block closes the file
    with open("%s.html" % url_r, "a") as r:
        r.write('<a href="' + url + '" target="_blank">' + url + '</a><br>')
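For example, if a scan of http://example.com found /admin.php (a hypothetical hit), example.com.html would gain a line like:

<a href="http://example.com/admin.php" target="_blank">http://example.com/admin.php</a><br>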
Finally, the entry point of the program:
if __name__ == '__main__':
    cmd_args = parse_option()        # parsed command-line arguments
    url = cmd_args.url               # the target URL
    extension = cmd_args.extensions  # the site's script type
    thread = cmd_args.thread         # the thread count
    start(url, extension, thread)    # hand the three values to start()
Saving the results as HTML makes the report easy to open directly in a browser.
The complete code:
# -*- coding:utf-8 -*-
__author__ = "MuT6 Sch01aR"

import requests
import argparse
import threading
from queue import Queue
import random


class Dirscan(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        # load the user-agent list once per thread instead of once per request
        with open("user-agents.txt", "r") as u:
            headers_list = [line.strip() for line in u]
        while not self.queue.empty():
            url_path = self.queue.get()
            headers = {"User-Agent": random.choice(headers_list)}
            try:
                r = requests.get(url=url_path, headers=headers, timeout=6, allow_redirects=False)
                status_code = r.status_code
                if status_code == 200:
                    print("\033[32;1m[+] [%s] %s\033[0m" % (status_code, url_path))
                    if cmd_args.report:
                        write_report(url_path)
            except Exception as e:
                print("\033[41;1m%s\033[0m" % e)


def write_report(url):
    with open("%s.html" % url_r, "a") as r:
        r.write('<a href="' + url + '" target="_blank">' + url + '</a><br>')


def start(url, ext, count):
    queue = Queue()
    print('\033[34;1m Status Url Path\033[0m')
    global url_r
    if url.endswith("/"):
        url = url.rstrip("/")
    # str.lstrip() strips a character set, not a prefix, so slice instead
    if url.startswith("https://"):
        url_r = url[len("https://"):]
    elif url.startswith("http://"):
        url_r = url[len("http://"):]
    with open('%s.txt' % ext, 'r') as f:
        for i in f:
            queue.put(url + i.rstrip('\n'))
    threads = []
    for i in range(count):
        threads.append(Dirscan(queue))
    for t in threads:
        t.start()
    for t in threads:
        t.join()


def parse_option():
    parser = argparse.ArgumentParser(description="The Argument of DirScan")
    parser.add_argument("-u", "--url", dest="url", help="The Url to Scan")
    parser.add_argument("-e", "--extensions", dest="extensions", help="Web Extensions")
    parser.add_argument("-t", "--thread", dest="thread", default=10, type=int, help="The Thread to Scan")
    parser.add_argument("-r", "--report", action="store_true", help="Save The Result of Scan")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    cmd_args = parse_option()
    url = cmd_args.url
    extension = cmd_args.extensions
    thread = cmd_args.thread
    start(url, extension, thread)
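A run against a target would print something like the following (hypothetical output, derived from the print statements above; hits appear in green in the terminal):

 Status Url Path
[+] [200] http://example.com/index.php
[+] [200] http://example.com/admin.php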