# osil #2.py
import os
import re
import subprocess

# Ask for the primary domain to scan
DMname = input("What is the domain? ")

# Read the list of target domains from the file
with open('target.txt', 'r') as f:
    target_domains = f.read().splitlines()
print(target_domains)

# Create a working directory for each target domain, then move into the
# directory named after the primary domain
for domain in target_domains:
    os.makedirs(domain, exist_ok=True)
os.chdir(DMname)
# Domain scan: run the `dnsmap` command and save the output in the `out` variable.
# Note: the loop variable is not used here; dnsmap is run against DMname on each
# pass and the result is written to the same scanned.txt, as in the original flow.
for domain in target_domains:
    out, error = subprocess.Popen(["dnsmap", DMname],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()
    out = out.decode()
    with open("scanned.txt", "w") as f:
        f.write(out)
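# For reference: dnsmap typically prints each discovered subdomain on its own
# line followed by a line such as "IP address #1: 93.184.216.34" (hypothetical
# value; exact wording can vary by dnsmap version). The parsing below relies on
# that subdomain-then-IP pairing.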
# Scanning IPs: pull the IP addresses out of the dnsmap output
def extract_ip_addresses(text):
    # Use a regular expression to find all IP addresses in the text
    return re.findall(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', text)

with open('scanned.txt', 'r') as f:
    # Read the contents of the file into a variable
    text = f.read()

# Test the function
ip_addresses = extract_ip_addresses(text)

# Split the text into lines
lines = text.split("\n")
# Dictionary mapping each subdomain to its corresponding IP address
subdomains = {}
for i, line in enumerate(lines):
    # If the line contains an IP address, pair it with the subdomain on the previous line
    if re.search(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', line):
        subdomain = lines[i - 1]
        ip_address = line.split(":")[1].strip()
        subdomains[subdomain] = ip_address
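# Illustrative example (hypothetical values): after this loop, subdomains could
# look like {'mail.example.com': '93.184.216.34', 'www.example.com': '93.184.216.35'}.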
# Show the subdomains and their corresponding IP addresses
print(subdomains.items())
for items in subdomains.items():
    print(items)

# Collect just the IP addresses (and count how many were found)
anum = []
x = 0
for subdomain, ip_address in subdomains.items():
    x = x + 1
    a = f"{subdomain}: {ip_address}\n"
    b = ip_address
    # print(a)
    anum.append(b)
print(anum)
print(x)

ip_list = anum
# Write each IP address to a separate line in ips.txt
with open('ips.txt', 'w') as f:
    for ip in ip_list:
        f.write(ip + '\n')
# Write each subdomain to a separate line in domain.txt
with open('domain.txt', 'w') as f:
    for subdomain in subdomains:
        f.write(subdomain + '\n')
# Run the `nmap` command against the collected IPs; the console output goes to
# `out` and an XML report (scan_results.xml) is written for parsing below
out, error = subprocess.Popen(
    ["nmap", "-Pn", "--system-dns", "-sV", "-iL", "ips.txt", "-oX", "scan_results.xml"],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
# Convert the `out` variable to a string object
out = out.decode()
# Write the console output to a log file
with open("log_nmap.txt", "w") as f:
    f.write(out)
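# For reference (simplified): in nmap's XML output each <host> element carries
# an <address addr="..."/> child and a <ports> block whose <port portid="..."
# protocol="..."> entries contain a <service name="..." product="..."/> element.
# The XPath lookups below rely on that layout.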
# Putting the results in Excel
import xml.etree.ElementTree as ET
import pandas as pd

# Parse the nmap XML report
tree = ET.parse('scan_results.xml')
root = tree.getroot()

# Iterate over the hosts in the XML file and build one line per port
removed = []
for host in root.findall('./host'):
    # Extract the IP address
    ip_address = host.find('./address').attrib['addr']
    # Iterate over the ports for the host
    for port in host.findall('./ports/port'):
        # Extract the port number and protocol
        port_num = port.attrib['portid']
        protocol = port.attrib['protocol']
        # Extract the service name and product name (if available)
        service_name = port.find('./service').attrib.get('name', 'unknown')
        product_name = port.find('./service').attrib.get('product', 'unknown')
        # Format the IP address, port number, protocol, service name and product name
        output = f'{ip_address}:{port_num} ({protocol}) - {service_name} ({product_name})'
        # print(output)
        # Blank out lines that mention 80 or 443 (filters out the common web ports)
        pattern = r".*(?:80|443).*$"
        filtered_output = "\n".join(re.sub(pattern, "", line) for line in output.split("\n"))
        removed.append(filtered_output)
        # print(removed)

# Keep only the non-empty entries
filtered_list = [x for x in removed if x]
print(filtered_list)
# The filtered scan lines are stored in `array1`
array1 = filtered_list
# Use a regular expression to extract the relevant fields from each line
pattern = re.compile(r'(?P<IP>\d+\.\d+\.\d+\.\d+):(?P<Port>\d+) \((?P<Protocol>\w+)\) - (?P<Service>\w+) (?:\((?P<Description>\w+)\))?')
# Create a list of dictionaries representing the rows of data
data_list = []
for line in array1:
    match = pattern.match(line)
    if match:
        data_list.append(match.groupdict())
# Create a pandas DataFrame from the list of dictionaries
df = pd.DataFrame(data_list)
# Save the DataFrame to an Excel file
df.to_excel("data.xlsx")
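# Note: pandas' to_excel() needs an Excel writer backend (e.g. openpyxl)
# installed; if that is not available, df.to_csv("data.csv") would be a
# drop-in alternative for a plain-text export.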
# CherryTree formatting: re-read the dnsmap output
with open('scanned.txt', 'r') as f:
    text = f.read()
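# Note: `cherry` below is assumed to be a helper module (not part of the
# standard library) exposing newdoc(), addnode() and create() for building
# CherryTree .ctd documents; those names are taken as given from this script.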
from cherry import *
from os import path

# Create a directory called "Domain" to hold the CherryTree document
os.makedirs("Domain", exist_ok=True)
DIR_EXAMPLE = "Domain"
FIRST_EXAMPLE = path.join(DIR_EXAMPLE, "domain.mit.ctd")
# FIRST_EXAMPLE1 = path.join(DIR_EXAMPLE, "first1.ctd")
ct = newdoc("root")
# (This repeats the earlier extraction of subdomains and IPs from scanned.txt.)
def extract_ip_addresses(text):
    # Use a regular expression to find all IP addresses in the text
    return re.findall(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', text)

# Test the function
ip_addresses = extract_ip_addresses(text)

# Split the text into lines
lines = text.split("\n")
# Dictionary mapping each subdomain to its corresponding IP address
subdomains = {}
for i, line in enumerate(lines):
    # If the line contains an IP address, pair it with the subdomain on the previous line
    if re.search(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', line):
        subdomain = lines[i - 1]
        ip_address = line.split(":")[1].strip()
        subdomains[subdomain] = ip_address
# Show the subdomains and their corresponding IP addresses
print(subdomains.items())
for items in subdomains.items():
    print(items)

# Build "subdomain: ip" strings and add each one as a node under "root"
anum = []
for subdomain, ip_address in subdomains.items():
    a = f"{subdomain}: {ip_address}\n"
    anum.append(a)
print(anum)
for elements in anum:
    ct = addnode(ct, "root", elements)

# Extract the IP addresses from the text
ip_addresses = extract_ip_addresses(text)
# Write the CherryTree document for the domains
create(ct, FIRST_EXAMPLE)
print(FIRST_EXAMPLE)
# Second CherryTree document: one node per discovered IP address
os.makedirs("IP", exist_ok=True)
DIR_EXAMPLE = "IP"
FIRST_EXAMPLE1 = path.join(DIR_EXAMPLE, "IP.ctd")
ct = newdoc("IP.mit.edu")
for ip_address in ip_addresses:
    # Add each IP address as a child node of the root node
    ct = addnode(ct, "IP.mit.edu", ip_address)
# Write the CherryTree document for the IP addresses
create(ct, FIRST_EXAMPLE1)
print(FIRST_EXAMPLE1)
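# Usage note (illustrative): the script expects a target.txt file in the
# current directory and the dnsmap and nmap binaries on PATH; pandas (with an
# Excel writer backend) and the cherry helper module must be importable as well.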