1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from concurrent.futures import ThreadPoolExecutor
import requests
import time
import threading

success_url = []  # result lines for responsive subdomains, appended by get_subdomain(), written out by download()


def download(ls, filename):
    """Write the collected result lines to ``<filename>.txt``.

    Args:
        ls: iterable of pre-formatted result lines (each already ends with '\n').
        filename: base name of the output file; '.txt' is appended.
    """
    # Bug fix: the original ignored `filename` and wrote to a hard-coded
    # literal path ('(unknown).txt' — an f-string with no placeholder).
    with open(f'{filename}.txt', 'w') as f:
        f.writelines(ls)

list = []  # candidate URL queue; NOTE(review): shadows the builtin `list` — rename (e.g. `urls`) when callers can be updated together
print_lock = threading.Lock()  # serializes console output so worker threads don't interleave lines


def get_subdomain(url):
    """Probe ``url`` and record it if it answers with 200, 301 or 302.

    On success, prints a tab-aligned line and appends a matching result line
    to the module-level ``success_url`` list (both under ``print_lock`` so
    concurrent workers don't interleave).  Unreachable or invalid hosts are
    skipped silently — this is a deliberate best-effort scan.
    """
    # Keep the try body minimal: only the HTTP request can legitimately fail,
    # and we only want to swallow network-level errors, not our own bugs.
    try:
        response = requests.get(url, timeout=5)
    except requests.RequestException:
        return  # host does not resolve / timed out — not a hit
    if response.status_code in [200, 301, 302]:
        host = url.split('://')[-1]
        # Tab padding so the status/length columns roughly line up
        # regardless of hostname length ('\t' * negative yields '').
        count_t = 6 - int(len(host) / 4)
        with print_lock:
            print(f'发现域名:\t' + host + '\t'*count_t + f'{response.status_code}' + f'\t{len(response.text)}')
            success_url.append(host + '\t'*count_t + f'{response.status_code}' + f'\t\t{len(response.text)}\n')


if __name__ == '__main__':
    domain = input('请输入需要爆破的根域名:\n')  # root domain to brute-force
    # Build the candidate URL list (http + https per wordlist entry).
    # Local `urls` avoids relying on the module-level builtin-shadowing `list`.
    urls = []
    with open('domain.txt', 'r') as f:
        for line in f:  # iterate the file lazily; readlines() is unnecessary
            sub = line.strip('\n')
            urls.append('http://' + sub + '.' + domain)
            urls.append('https://' + sub + '.' + domain)
    start = time.time()
    # Bug fix: the pool was never shut down; the context manager joins all
    # worker threads on exit.
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(get_subdomain, url) for url in urls]
        for future in futures:
            future.result()  # block until done; re-raises any worker exception
    end = time.time()
    print(f'总用时:{end - start:.2f}秒')
    download(success_url, domain)