summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author韩丁康 <[email protected]>2022-04-18 16:55:57 +0800
committerHDK1999 <[email protected]>2022-04-18 16:55:57 +0800
commit0acd839507a2c98fc9e2a14ab933f82122431bbd (patch)
treeefef8ed1d075964468af476140f01898e2d12d6a
parent1b5cd1c157eb0a8b7b01a39c649ecd8b77e57473 (diff)
DNSv6 代码及说明
-rw-r--r--DNSv6/Code/README.md9
-rw-r--r--DNSv6/Code/allv6.py20
-rw-r--r--DNSv6/Code/ch_dns.py23
-rw-r--r--DNSv6/Code/dnsfound_util.py51
-rw-r--r--DNSv6/Code/ipmatch.py44
5 files changed, 147 insertions, 0 deletions
diff --git a/DNSv6/Code/README.md b/DNSv6/Code/README.md
new file mode 100644
index 0000000..64baf35
--- /dev/null
+++ b/DNSv6/Code/README.md
@@ -0,0 +1,9 @@
+## 组织结构
+
+```
+allv6.py #汇总所有v6地址结果
+ch_dns.py #全国探针发送程序
+dnsfound_util.py #DNSv6工具包
+ipmatch.py #IP段匹配(山东)
+```
+
diff --git a/DNSv6/Code/allv6.py b/DNSv6/Code/allv6.py
new file mode 100644
index 0000000..b6f76fd
--- /dev/null
+++ b/DNSv6/Code/allv6.py
@@ -0,0 +1,20 @@
'''
Merge per-nameserver pcap export results into a single CSV
(the alternative approach is to merge the pcap files first and export once).
'''
import pandas as pd

# Where the merged result is written.
res_path = "./result/v6/allv6dns.csv"

# Create the result file with a header row only; per-file rows are appended below.
# Fix: use res_path instead of repeating the literal path (they had drifted apart
# would silently break the merge if one was edited).
allip = pd.DataFrame(columns=["IPv6", "Count"])
allip.to_csv(res_path, encoding='gbk', header=True, index=False)

# Input files are named v6-1.csv ... v6-5.csv.
for i in range(1, 6):
    # Source path for this nameserver's export.
    path = "./result/v6/v6-" + str(i) + ".csv"
    # First two lines are export preamble; column names come from the tool's layout.
    ip_datas = pd.read_csv(path, skiprows=2,
                           names=["level", "parent", "IPv6", "count", "ave",
                                  "min", "max", "rate", "per", "BR", "BS"])
    # Keep only the address and count columns, skipping the first data row
    # (a per-file summary line).
    data = ip_datas.iloc[1:, [2, 3]]
    data.to_csv(res_path, mode="a", encoding='gbk', header=False, index=False)
diff --git a/DNSv6/Code/ch_dns.py b/DNSv6/Code/ch_dns.py
new file mode 100644
index 0000000..5a8e3be
--- /dev/null
+++ b/DNSv6/Code/ch_dns.py
@@ -0,0 +1,23 @@
'''
Probe-sending main program: queries every recursive DNS server listed in the
input spreadsheet (via dnsfound_util.dnsresolver) and saves the outcomes.
'''
import pandas as pd
import dnsfound_util as dnsu

alphabet=dnsu.alphabet
# Path to the spreadsheet of resolver IPv4 addresses (forwarders).
spath="./res_data/china/forwarder.xlsx"
# Where the returned results are saved.
dpath="./result/china/forward/res-5.csv"
ch_dns=pd.read_excel(spath,names=["rdns","loc","company"])
# For directly-responding DNS servers, use this input shape instead:
# ch_dns=pd.read_excel(spath,names=["rdns","dns"])
# dns_result = pd.DataFrame(columns=["rdns", "result"],)

# Collect all multithreaded result handles, one per resolver row.
# NOTE(review): res.result() below implies dnsresolver returns a Future
# (tomorrow3 thread decorator); the visible dnsfound_util definition returns
# a plain list — confirm the decorator is present in the deployed util.
List=[dnsu.dnsresolver(i,ch_dns) for i in dnsu.tqdm(range(ch_dns.shape[0]))]

# Drain every handle and assemble one (rdns, result) row per resolver.
dns_result=pd.concat([pd.DataFrame([res.result()],columns=["rdns","result"]) for res in List],ignore_index=True)

dns_result.to_csv(dpath)
diff --git a/DNSv6/Code/dnsfound_util.py b/DNSv6/Code/dnsfound_util.py
new file mode 100644
index 0000000..15e3da9
--- /dev/null
+++ b/DNSv6/Code/dnsfound_util.py
@@ -0,0 +1,51 @@
'''
DNSv6 toolkit. Note: tomorrow3 cannot be used on ARM processors (Apple M1).
'''
import dns.resolver
import pandas as pd
import random as rd
import tomorrow3 as tm
from tqdm import tqdm


# Characters used to build random probe subdomains (defeats resolver caching).
alphabet = "abcdefghijklmnopqrstuvwxyz1234567890"


# Accumulators used by the commented-out __main__ driver below.
# NOTE(review): "ressult" is a typo, but the name is module-public — left as-is.
result = pd.DataFrame(columns=["rdns", "result"])
dot_ressult = pd.DataFrame(columns=["dot", "result"])
+
+
+
# val locates the row; dataframe supplies the data source.
def dnsresolver(val, dataframe):
    """Probe one recursive DNS server for IPv6 (AAAA) resolution.

    Sends an AAAA query for a random 10-character subdomain of
    v4.testv4-v6.live through the server at row *val* of *dataframe*
    (column "rdns").

    Parameters
    ----------
    val : int
        Row label locating the server in *dataframe*.
    dataframe : pandas.DataFrame
        Must contain an "rdns" column of resolver IP address strings.

    Returns
    -------
    list
        [server_ip, rcode] on a completed exchange, or the sentinel
        [server_ip, 1] when the query raises (timeout, refusal, ...).
    """
    # Random subdomain so the probed resolver cannot answer from cache.
    characters = "".join(rd.sample(alphabet, 10))
    test = dataframe.loc[val, "rdns"]
    reso = dns.resolver.Resolver()
    reso.nameservers = [test]
    reso.timeout = 10    # per-attempt timeout (seconds)
    # Fix: dnspython caps the *total* query time with `lifetime` (default 5 s),
    # so setting only `timeout` never enforced the intended 10-second wait.
    reso.lifetime = 10
    try:
        AAAA = reso.resolve(characters + ".v4.testv4-v6.live", "AAAA").response
        return [test, AAAA.rcode()]
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt/
        # SystemExit. Any resolution failure is recorded with sentinel code 1.
        return [test, 1]
+
+
+
+# if __name__=="__main__":
+# mode="main"
+# rdns = pd.read_csv("./res_data/rdns-shandong.csv", names=["rdns"])
+# dot = pd.read_csv("./res_data/853-shandong.csv", names=["dot"])
+# ch_rdns = pd.read_excel("./res_data/全国-递归DNS测量结果.xlsx", names=["rdns", "loc", "company"])
+#
+# for i in tqdm(range(ch_rdns.shape[0])):
+# dnsresolver(i,ch_rdns,result)
+#
+# if (mode == "rdns"):
+# result.to_csv("./result/"+str(2)+"-ch_rdns.csv")
+# else:
+# dot_ressult.to_csv("./result/dot.csv")
+
+
diff --git a/DNSv6/Code/ipmatch.py b/DNSv6/Code/ipmatch.py
new file mode 100644
index 0000000..929b271
--- /dev/null
+++ b/DNSv6/Code/ipmatch.py
@@ -0,0 +1,44 @@
'''
Match observed IPv6 DNS addresses against per-carrier IP ranges
(Shandong province).
'''
import ipaddress as ipaddr
import pandas as pd
+
def makecidr(DATAframe):
    """Collapse IPv6 start/end address ranges into CIDR network blocks.

    Rows whose "start_ip" contains no ":" (i.e. IPv4 rows) are skipped.

    Parameters
    ----------
    DATAframe : pandas.DataFrame
        Must have "start_ip" and "end_ip" columns of address strings.

    Returns
    -------
    pandas.DataFrame
        A single unnamed column (position 0) of ipaddress network objects,
        one row per summarized CIDR block.
    """
    networks = []
    for i in range(DATAframe.shape[0]):
        # ":" marks an IPv6 address; IPv4 ranges are ignored.
        if ":" in DATAframe.loc[i, "start_ip"]:
            start_ip = ipaddr.ip_address(DATAframe.loc[i, "start_ip"])
            end_ip = ipaddr.ip_address(DATAframe.loc[i, "end_ip"])
            # summarize_address_range yields the minimal CIDR cover of the range.
            for ips in ipaddr.summarize_address_range(start_ip, end_ip):
                networks.append([ips])
    # Fix: DataFrame.append was removed in pandas 2.0 (and was O(n^2));
    # collect the rows first and construct the frame once.
    return pd.DataFrame(networks)
def matchIP(ip, cidrs):
    """Return the carrier key whose CIDR table contains *ip*, else None.

    *cidrs* maps a carrier label to a DataFrame whose first column holds
    IPv6 CIDR blocks (strings or ipaddress network objects). The first
    matching carrier, in dict order, wins.
    """
    for company, table in cidrs.items():
        for _, row in table.iterrows():
            if ipaddr.IPv6Address(ip) in ipaddr.IPv6Network(row.iloc[0]):
                return str(company)
    return None
+if __name__=="__main__":
+ # 读取原始数据
+ cidrs=[]
+ dx = pd.read_excel("./res_data/IPrange/山东电信.xlsx", names=["time", "start_ip", "end_ip", "organization", "company"])
+ yd = pd.read_excel("./res_data/IPrange/山东移动.xlsx", names=["time", "start_ip", "end_ip", "organization", "company"])
+ lt = pd.read_excel("./res_data/IPrange/山东联通.xlsx", names=["time", "start_ip", "end_ip", "organization", "company"])
+ ips=pd.read_csv("./result/v6/allv6dns.csv",header=0)
+
+ # dx_cidr=makecidr(dx)
+ # yd_cidr=makecidr(yd)
+ # lt_cidr=makecidr(lt)
+ cidrs={"dx":makecidr(dx),"yd":makecidr(yd),"lt":makecidr(lt)}
+ ips["company"]=ips["IPv6"].map(lambda x:matchIP(x,cidrs))
+ ips_n=pd.pivot_table(ips,values=["Count"],index=["IPv6"],aggfunc=sum)
+ ips_c=ips.drop_duplicates(subset=["IPv6"],keep="first")
+ ips_L=ips_n.merge(ips_c.loc[:,["IPv6","company"]],how="left",on="IPv6")
+ ips_L.to_csv("./result/v6/v6DNSs.csv",index=False)
+ # for i in range(ips.shape[0]):
+
+