Script:
import requests

# Usage:
# Create the file D:\Desktop\url.txt and add targets with http:// or https://, one per line.
# Change D:\Desktop\url.txt (it appears twice below) to url.txt to read url.txt from the script's own directory instead.

proxy = {"http": "127.0.0.1:8080"}  # proxy (defined here but not passed to requests.get below)

# inurl: fixed backup paths probed on every target
inurl = {"/www.zip", "/www.rar", "/www.tar.gz",
         "/wwwroot.zip", "/wwwroot.rar", "/wwwroot.tar.gz",
         "/web.zip", "/web.rar", "/web.tar.gz", "/.svn"}

# hz: archive suffixes appended to the bare domain, producing names like xxx.com.zip
hz = {".zip", ".rar", ".tar.gz"}

count = len(open(r'D:\Desktop\url.txt', 'r').readlines())
count = count * 13  # requests per target: len(inurl) + len(hz)

with open(r"D:\Desktop\url.txt") as f:
    n = 0
    data = []    # URLs that returned 200
    nodata = []  # URLs that could not be reached
    for line in f:
        line = line.replace("\n", "").split()
        try:
            for x in line:
                # probe the fixed backup paths
                for s in list(inurl):
                    url = x + s
                    n = n + 1
                    html = requests.get(url, allow_redirects=False)
                    html.encoding = 'utf-8'
                    html = html.status_code
                    if html == 200:
                        data.append(url)
                        print('Progress: (%s/%s) ----- Status: %s ----- File exists ----- Target: %s' % (n, count, html, url))
                    else:
                        print('Progress: (%s/%s) ----- Status: %s ----- Not found ----- Target: %s' % (n, count, html, url))
                # probe domain-named archives, e.g. https://xxx.com/xxx.com.zip
                if x.startswith("https://"):
                    for index in hz:
                        url = x + '/' + x.replace("https://", "") + index
                        n = n + 1
                        html = requests.get(url, allow_redirects=False)
                        html.encoding = 'utf-8'
                        html = html.status_code
                        if html == 200:
                            data.append(url)
                            print('Progress: (%s/%s) ----- Status: %s ----- File exists ----- Target: %s' % (n, count, html, url))
                        else:
                            print('Progress: (%s/%s) ----- Status: %s ----- Not found ----- Target: %s' % (n, count, html, url))
                elif x.startswith("http://"):
                    for index in hz:
                        url = x + '/' + x.replace("http://", "") + index
                        n = n + 1
                        html = requests.get(url, allow_redirects=False)
                        html.encoding = 'utf-8'
                        html = html.status_code
                        if html == 200:
                            data.append(url)
                            print('Progress: (%s/%s) ----- Status: %s ----- File exists ----- Target: %s' % (n, count, html, url))
                        else:
                            print('Progress: (%s/%s) ----- Status: %s ----- Not found ----- Target: %s' % (n, count, html, url))
        except:
            nodata.append(url)
            print('Progress: (%s/%s) ----- Status: %s ----- Unreachable ----- Target: %s' % (n, count, html, url))

print('Scan complete')
print('----------- Hits -----------')
print("\n".join(str(i) for i in data))
print('----------- Unreachable -----------')
print("\n".join(str(i) for i in nodata))
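Note that the proxy dict at the top is defined but never passed to requests.get, so the scan actually goes out directly. Below is a minimal sketch of wiring it in, together with a timeout so a dead host does not stall the loop; the proxy address, the example URL, and the 5-second timeout are assumptions, not part of the original script:

import requests

# Assumed local intercepting proxy on 127.0.0.1:8080 and an assumed 5-second timeout.
proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}
resp = requests.get("http://example.com/www.zip", allow_redirects=False,
                    proxies=proxies, timeout=5)
print(resp.status_code)

Each requests.get call in the script could take the same proxies= and timeout= arguments.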
Notes:
Common backup file names are as follows:
www.zip
www.rar
www.tar.gz
wwwroot.zip
wwwroot.rar
wwwroot.tar.gz
web.zip
web.rar
web.tar.gz
.svn
This script checks whether any of the backup files listed above, as well as domain-named backup files (e.g. www.xx.com/www.xx.com.zip), exist on the targets. Feel free to bookmark it.
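To make the probing pattern concrete, here is a small standalone snippet (not part of the script) that prints every URL the scanner would request for one hypothetical target, using the www.xx.com placeholder from above:

inurl = ["/www.zip", "/www.rar", "/www.tar.gz", "/wwwroot.zip", "/wwwroot.rar",
         "/wwwroot.tar.gz", "/web.zip", "/web.rar", "/web.tar.gz", "/.svn"]
hz = [".zip", ".rar", ".tar.gz"]

target = "https://www.xx.com"
for path in inurl:
    # fixed backup paths, e.g. https://www.xx.com/www.zip
    print(target + path)
for suffix in hz:
    # domain-named archives, e.g. https://www.xx.com/www.xx.com.zip
    print(target + "/" + target.replace("https://", "") + suffix)

That is 13 URLs per target in total, which is where the count * 13 in the script comes from.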
Because I frequently tinker with the site's code, some articles may occasionally display out of place or have other bugs that affect reading; if you spot anything, please leave a comment under the article to let me know, thank you!
This article was written by Mr.Wu; please credit the author when reposting.
C:\Users\Administrator\Desktop\新建文件夹>1.py
Traceback (most recent call last):
  File "C:\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen
    chunked=chunked,
  File "C:\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request
    self._validate_conn(conn)
  File "C:\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn
    conn.connect()
  File "C:\Python\Python37\lib\site-packages\urllib3\connection.py", line 464, in connect
    _match_hostname(cert, self.assert_hostname or server_hostname)
  File "C:\Python\Python37\lib\site-packages\urllib3\connection.py", line 508, in _match_hostname
    match_hostname(cert, asserted_hostname)
  File "C:\Python\Python37\lib\ssl.py", line 323, in match_hostname
    % (hostname, ', '.join(map(repr, dnsnames))))
ssl.SSLCertVerificationError: ("hostname '106.3.87.70' doesn't match either of '35458dj.com', 'www.35458dj.com'",)
During handling of the above exception, another exception occurred:
@1 ssl.SSLCertVerificationError: ("hostname '106.3.87.70' doesn't match either of '35458dj.com', 'www.35458dj.com'",)
Doesn't the message spell it out already? It's an SSL certificate error.
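If you want the scan to keep going despite certificate problems like this, a common workaround is to disable certificate verification on the request. A minimal sketch follows (the IP is taken from the traceback above; verify=False deliberately skips validation, so use it only for this kind of scanning):

import requests
import urllib3

# Silence the InsecureRequestWarning that urllib3 emits when verify=False is used.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# With verify=False the hostname mismatch no longer raises SSLCertVerificationError.
resp = requests.get("https://106.3.87.70/www.zip", allow_redirects=False, verify=False, timeout=5)
print(resp.status_code)

In the script itself, adding verify=False to each requests.get call has the same effect.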