# --- Patch provenance (reconstructed from collapsed mail-patch header) -------
# From 68bb09a8a21f5f953078fbb6c889c9ffc91270d3 Mon Sep 17 00:00:00 2001
# From: 本间白猫 <139044047+honmashironeko@users.noreply.github.com>
# Date: Tue, 23 Apr 2024 16:04:08 +0800
# Subject: [PATCH] Add files via upload
# Creates icpscan-V2.1.py (294 lines added).
# -----------------------------------------------------------------------------

from urllib.parse import urlparse
import pandas as pd
import tldextract
import argparse
import requests
import urllib3
import asyncio
import aiohttp
import base64
import re

# Self-signed / invalid certs are tolerated on purpose (scanner tool); silence
# the warning that every such request would otherwise print.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Regex used to pull result links out of a FOFA HTML page.
# NOTE(review): the original pattern literal appears to have been destroyed by
# HTML-tag stripping during extraction (it almost certainly matched an
# ``<a href="...">`` construct). The empty pattern below reproduces the source
# as given — TODO: restore the real pattern from the upstream repository.
FOFA_LINK_PATTERN = re.compile(r'')

# Browser-like request headers sent with every FOFA query.
HEADERSF = {
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7'
}


def extract_domain(domain):
    """Return the registered (eTLD+1) domain of *domain*."""
    return tldextract.extract(domain).registered_domain


def ipdomain(string):
    """Split *string* into its IPv4-looking and domain-looking tokens.

    Returns a tuple ``(ips, domains)`` of two lists of matches.
    """
    ip_pattern = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
    domain_pattern = r'(?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}'
    return re.findall(ip_pattern, string), re.findall(domain_pattern, string)


def read_file(file_path):
    """Read *file_path* as UTF-8 and return its lines without newlines."""
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read().splitlines()


def extract_links(response_text):
    """Return every FOFA result link found in *response_text*."""
    return re.findall(FOFA_LINK_PATTERN, response_text)


def extract_domains(links):
    """Return the hostname of every link in *links* that has one."""
    return [urlparse(link).hostname for link in links if urlparse(link).hostname]


def base64_encode(ip):
    """Base64-encode the FOFA query string ``is_domain=true && ip="<ip>"``."""
    return base64.b64encode(f'is_domain=true && ip="{ip}"'.encode('utf-8')).decode('utf-8')
# --- FOFA asynchronous query -------------------------------------------------
# Cap concurrent outbound queries at 8 to stay under the services' rate limits.
sem = asyncio.Semaphore(8)


async def fofa_async(b64, ip, proxyip, retry_count=3):
    """Query FOFA for domains hosted on *ip*.

    *b64* is the base64-encoded query (see ``base64_encode``); *proxyip* is an
    optional ``host:port`` / ``user:pass@host:port`` HTTP proxy.
    Returns ``(domains, ip)``; *domains* is empty after *retry_count* failed
    attempts or when FOFA reports the daily query quota is exhausted.
    """
    async with sem:
        url = f"https://fofa.info/result?qbase64={b64}"
        for _ in range(retry_count):
            try:
                proxies = f"http://{proxyip}" if proxyip else None
                async with aiohttp.ClientSession() as session:
                    async with session.get(url, headers=HEADERSF, timeout=10, verify_ssl=False, proxy=proxies) as response:
                        if response.status == 200:
                            text = await response.text()
                            if "资源访问每天限制" in text:
                                print("当前IP已达到查询上限,请更换IP或使用Zoomeye")
                                break
                            # Fix: the original awaited response.text() a second
                            # time here; reuse the body already read above.
                            links = extract_links(text)
                            results = extract_domains(links)
                            print(f"FOFA查询完毕 IP: {ip}")
                            return results, ip
                        else:
                            print(f"FOFA请求失败!错误码: {response.status} - {ip}")
            except aiohttp.ClientError as e:
                print(f"FOFA请求失败: {e} - {ip}")

        print(f"多次尝试后无法成功获取结果 - {ip}")
        return [], ip


# Zoomeye asynchronous query
async def zoomeye_async(ip, auth, retry_count=3):
    """Query ZoomEye for sites on *ip* using API key *auth*.

    Returns ``(sites, ip)``; *sites* is empty after *retry_count* failures.
    """
    async with sem:
        url = f"https://api.zoomeye.org/web/search?query=ip%3A%22{ip}%22&page=1"
        for _ in range(retry_count):
            try:
                async with aiohttp.ClientSession() as session:
                    headers = {
                        "API-KEY": auth
                    }
                    async with session.get(url, headers=headers, timeout=10) as response:
                        if response.status == 200:
                            data = await response.json()
                            # Scrape 'site' values out of the stringified JSON.
                            results = re.findall(r"'site': '(.*?)'", str(data))
                            print(f"ZoomEye查询完毕 IP: {ip}")
                            return results, ip
                        else:
                            print(f"ZoomEye请求失败!错误码: {response.status} - {ip}")
            except aiohttp.ClientError as e:
                print(f"ZoomEye请求失败: {e} - {ip}")

        print(f"多次尝试后无法成功获取结果 - {ip}")
        return [], ip


# Aizhan weight lookup
def get_aizhan_rank(domaindj):
    """Look up Baidu PC weight and PR for *domaindj* via aizhan.com.

    NOTE(review): everything past the first status check was lost when this
    source was extracted; the tail below is a minimal reconstruction that
    preserves the visible "0"/"0" defaults. Confirm against the upstream file.
    """
    pcrank = "0"
    prrank = "0"
    url1 = f"https://rank.aizhan.com/{domaindj}/"
    url2 = f"https://pr.aizhan.com/{domaindj}/"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
    }
    try:
        response1 = requests.get(url1, headers=headers, timeout=10)
        if response1.status_code == 200:
            # TODO(review): original parsing of response1 (and the request to
            # url2 for PR) was truncated in extraction — restore from upstream.
            pass
    except requests.RequestException:
        pass
    return pcrank, prrank
def update_module():
    """Check for a newer icpscan release and prompt the user if one exists.

    NOTE(review): this function's opening — the HTTP request that defines
    ``response`` and the constant ``icpscan_time`` — was lost when the source
    was extracted, and the surviving regex literals were mangled by HTML-tag
    stripping. The body below preserves the visible tail verbatim and must be
    reconciled with the upstream repository before this file is run.
    """
    # NOTE(review): pattern tail truncated in extraction; ``response`` is
    # defined in the lost portion of this function.
    pattern = r'id="br_pc_br">(.*?)'
    matches = re.search(pattern, response.text, re.DOTALL)
    content_array = []

    if matches:
        inner_content = matches.group(1)
        # NOTE(review): the <p>...</p> tag literals of this pattern were
        # stripped during extraction — restore from upstream.
        p_matches = re.findall(r'(.*?)', inner_content)
        content_array.extend(p_matches)
    # content_array[3] presumably holds the published release date — verify.
    if icpscan_time == content_array[3]:
        pass
    else:
        text1 = """
        Icpscan存在最新更新,请前往以下任意地址获取更新:
        https://pan.quark.cn/s/39b4b5674570/
        https://github.com/honmashironeko/icpscan/
        https://pan.baidu.com/s/1C9LVC9aiaQeYFSj_2mWH1w?pwd=13r5/
        """
        print(text1)
        input("请输入回车键继续运行工具")


def print_icpscan_banner():
    """Print the ICPScan startup banner with version and author links."""
    print("=" * 70)
    print("""
  __  ______  ______  ______  ______  ______  __  __
/\\ \\ /\\ ___\\ /\\ == \\ /\\ ___\\ /\\ ___\\ /\\ __ \\ /\\ "-.\\ \\
\\ \\ \\ \\ \\ \\____ \\ \\ _-/ \\ \\___ \\ \\ \\ \\____ \\ \\ __ \\ \\ \\ \\-. \\
 \\ \\_\\ \\ \\_____\\ \\ \\_\\ \\/\\_____\\ \\ \\_____\\ \\ \\_\\ \\_\\ \\ \\_\\\\"\\_\\
  \\/_/ \\/_____/ \\/_/ \\/_____/ \\/_____/ \\/_/\\/_/ \\/_/ \\/_/
""")
    print("\t\t\t\t\t\t\tVersion:2.1")
    print("\t\t\t\t\t微信公众号:樱花庄的本间白猫")
    print("\t\t\t\t博客地址:https://y.shironekosan.cn")
    print("=" * 70)
    print("\t\tIcpScan开始执行")


if __name__ == "__main__":
    update_module()
    print_icpscan_banner()
    parser = argparse.ArgumentParser(description='ICPScan由本间白猫开发,旨在快速反查IP、域名归属')
    parser.add_argument('-f', dest='file_path', required=True, help='指定使用的路径文件 -f url.txt')
    parser.add_argument('-qz', dest='qz_auth', action='store_true', help='增加权重查询 -qz')
    parser.add_argument('-key', dest='zoomeye_auth', help='指定ZoomEye的API-KEY认证信息 -key API-KEY')
    parser.add_argument('-p', dest='proxyip', help='指定代理地址 -p 127.0.0.1:8080 或者 -p user:pass@127.0.0.1:8080')
    args = parser.parse_args()
    file_path = args.file_path
    qz_auth = args.qz_auth  # NOTE(review): parsed but not passed to main_async in the visible code
    zoomeye_auth = args.zoomeye_auth
    proxyip = args.proxyip
    # Fix: replace the deprecated get_event_loop()/ensure_future() pattern with
    # asyncio.run(), which creates, runs and closes a fresh event loop.
    asyncio.run(main_async(file_path, zoomeye_auth, proxyip))