Simple HTTP prober
"""Simple HTTP prober: prints the HTTP status code returned by a website.

Usage: python prober.py <url>
"""
import sys

import requests

# Suppress urllib3 warnings (e.g. InsecureRequestWarning for unverified
# HTTPS requests) so the output stays to one clean line.
requests.urllib3.disable_warnings()

# Well-formed User-Agent ("Mozilla/5.0", not the malformed "Mozilla//5.0")
# so the request looks like a regular browser.
headers = {'User-agent': 'Mozilla/5.0'}

if len(sys.argv) < 2:
    # Fail early with a usage hint instead of an IndexError.
    sys.exit("usage: prober.py <url>")

url = sys.argv[1]
# Only prepend a scheme when none is present; the original substring check
# would mangle an http:// URL into "https://http://...".
if not url.startswith(("http://", "https://")):
    url = "https://" + url

# Bounded timeout so the script cannot hang forever on an unresponsive host.
pedido = requests.get(url, headers=headers, timeout=5)
estado = pedido.status_code
print(estado, url)
Diz qual é o código de estado HTTP devolvido por um website.
Cookie extractor
"""Cookie extractor: lists a site's cookies and flags CSRF-related ones.

Usage: python cookies.py <url>
"""
import sys

import requests
from urllib3.exceptions import InsecureRequestWarning

# Silence only the warning we deliberately trigger with verify=False.
# (The original imported InsecureRequestWarning but never used it.)
requests.urllib3.disable_warnings(InsecureRequestWarning)

if len(sys.argv) < 2:
    # Fail early with a usage hint instead of an IndexError.
    sys.exit("usage: cookies.py <url>")

url = sys.argv[1]
# Only prepend a scheme when none is present; the original substring check
# would mangle an http:// URL into "https://http://...".
if not url.startswith(("http://", "https://")):
    url = "https://" + url

# Well-formed User-Agent (the original "Mozilla//10.0" was malformed).
headers = {'User-agent': 'Mozilla/5.0'}

# verify=False: we probe sites that may have broken TLS; the timeout keeps
# the script from hanging on an unresponsive host.
r = requests.get(url=url, verify=False, headers=headers, timeout=5)

if "server" in r.headers:
    print("server: ", r.headers["server"])

print("there are", len(r.cookies), "cookies")
print(r.cookies.values())
for cookievalues in r.cookies:  # print each cookie on its own line
    print(cookievalues)

# Case-insensitive substring match so common names such as "csrftoken" or
# "XSRF-TOKEN" are caught — the original only matched a cookie literally
# named "CSRF".
if any("csrf" in name.lower() or "xsrf" in name.lower() for name in r.cookies.keys()):
    print("there are csrf tokens or values")
Extrair cookies e verificar se há algum referente a tokens de CSRF (Cross Site Request Forgery)
Website endpoints
"""Endpoint extractor: prints path-like endpoints found in a page's HTML.

Usage: python endpoints.py <url>
"""
import re
import sys

import requests
from urllib3.exceptions import InsecureRequestWarning

# Silence only the warning we deliberately trigger with verify=False.
requests.urllib3.disable_warnings(InsecureRequestWarning)

# Raw string: "\/", "\w" and "\d" are invalid escape sequences in a normal
# string literal and raise DeprecationWarning on modern Pythons. Compiled
# once up front instead of per scan. (The original also built a second
# pattern and called re.findall with it, but never used the result.)
ENDPOINT_RE = re.compile(r"(/\w*)+(\d*)")

if len(sys.argv) < 2:
    # Fail early with a usage hint instead of an IndexError.
    sys.exit("usage: endpoints.py <url>")

url = sys.argv[1]
# Only prepend a scheme when none is present; the original substring check
# would mangle an http:// URL into "https://http://...".
if not url.startswith(("http://", "https://")):
    url = "https://" + url

# Well-formed User-Agent (the original "Mozilla//5.0" was malformed).
headers = {'User-agent': 'Mozilla/5.0'}
r = requests.get(url=url, verify=False, headers=headers, timeout=2)
getcontent = r.content.decode("utf-8", "ignore")

# dict.fromkeys keeps first-seen order while dropping duplicate matches,
# so each endpoint is printed once.
for endpoint in dict.fromkeys(m.group() for m in ENDPOINT_RE.finditer(getcontent)):
    print(endpoint)
Extrai os endpoints presentes numa página web