获取FoFa10000条数据爬虫脚本
fofa-spider.sh
用法:
字符串为要搜索的字符串,无须base64。
保存文件名,会自动创建文件夹并存放三个文件,分别为搜索记录,ip文件,ip-port文件
获取前多少页的数据,只能在1000页以内;默认每翻一页等待10秒,可以修改脚本中的sleep等待时间。
#!/bin/bash
# FoFa search spider: fetches up to 10000 results (max 1000 pages, 10 per
# page). Prompts for a search string (base64 handled automatically) and an
# output name, then creates a directory holding three files: the search
# record, the host:port list, and the bare host list.

# fofa.so session cookie — set your own value here.
#read -r -p "fofapro_ars_sessions:" fofapro_ars_session
fofapro_ars_session="xxxxxxxxxxxxxxxxxxxxx"

# Read the search string and base64-encode it without a trailing newline.
echo -e "\033[33m 搜索的字符串: \033[0m"
read -r search
search_base64=$(printf '%s' "$search" | base64 -w 0)

echo -e "\033[33m 保存的文件名: \033[0m"
read -r filename
mkdir -p -- "$filename"

url1="https://fofa.so/result?qbase64=${search_base64}"
echo -e "\033[37m url: \033[0m" "$url1"
# Record the query and its URL for later reference.
{
  echo "$search"
  echo "$url1"
} >> "$filename/search"

# Scrape the total distinct-IP count from the first result page.
Num=$(curl -s --cookie "_fofapro_ars_session=$fofapro_ars_session" "$url1" \
  | grep distinct_ips | cut -d ">" -f 2 | cut -d "<" -f 1 | sed "s/,//g")
echo -e 共有"\033[33m $Num \033[0m"条数据,每页10个,共"\033[33m $((Num/10)) \033[0m"页"\033[33m $((Num%10)) \033[0m"条

echo -e "\033[33m 获取多少页前数据: \033[0m"
read -r end_page

# Page snapshots go into a private temp file, removed on any exit path.
tmp_file=$(mktemp) || exit 1
trap 'rm -f -- "$tmp_file"' EXIT

for ((i=1; i<=end_page; i++)); do
  echo -e "\033[33m Waite 正在读取第 $i 页数据 \033[0m"
  curl -s --cookie "_fofapro_ars_session=$fofapro_ars_session" \
    "https://fofa.so/result?file=&page=${i}&qbase64=${search_base64}" -o "$tmp_file"
  # host:port entries — drop UI links (_blank/#api_info) and blank lines.
  grep target "$tmp_file" | cut -d "\"" -f 4 \
    | sed "/\(_blank\|#api_info\)/d" | sed "/^$/d" >> "$filename/$filename-port"
  # Bare hosts.
  grep hosts "$tmp_file" | cut -d "\"" -f 2 | cut -d / -f 3 \
    | sed "/^$/d" >> "$filename/$filename"
  sleep 10   # be polite to the server between pages
done
exit
检测端口存活可以用masscan、nmap;检测http存活和响应可以用whatweb、curl。
# HTTP liveness check: for each host listed in $FileName, request
# http://host/ and, on a 200 response, fingerprint it with whatweb and
# append the host to $FileName-live.
while IFS= read -r host; do
  echo "$host"
  # 10s total / connect timeout; only the numeric status code is captured.
  Status_Code=$(curl -m 10 --connect-timeout 10 -o /dev/null -s -w '%{http_code}' "$host"/)
  echo "$Status_Code"
  if (( Status_Code == 200 )); then
    whatweb "$host"/
    echo "$host" >> "$FileName-live"
  fi
done < "$FileName"
exit
masscan检测端口开放
# Scan port 3306 over the host list and keep field 6 of masscan's
# "Discovered open port ... on <ip>" lines (the IP address).
masscan -iL filename -p3306 | cut -d " " -f 6 > filename-live