分享个自己写的C段扫描器
首页 > 工具    作者:Iamyc   2019年10月30日 19:35 星期三   热度:231°   百度已收录  
时间:2019-10-30 19:35   热度:231° 

前两天有个朋友又活过来了,找我写个工具。

干的是全网扫描,后缀:

/plus/erraddsave.php

把所有有这个的url记录下来,然后卖钱。

想着不犯法,所以打算帮ta写下。

查了下,这个链接是织梦的站,所以估计哪个黑产灰产的盯着织梦来搞了。

先去网上找了下,C段查询主要记下三个:爱站、站长之家、webscan

然后写了个小工具,可以选择这三个进行C段url查询。

用法:目录下放置url.txt,直接python2环境运行,输入密码:hai+yc,输入aizhan或者chinaz或者webscan,再输入线程。

然后调用相关api把C段域名扫描出来,再逐一访问,检测是否存在/plus/erraddsave.php后缀

c段url保存到domain.txt,need.txt保存存在后缀的域名





#!/usr/bin/env python2
#-*- encoding:utf-8 -*-
# YC:http://www.lang-v.com

import base64
import requests
import json
import re
import time
import sys
import os
import time
import ctypes
import threading
import Queue
import random
import socket





def logo():
    log = """
    '*'        '*'    '*'*''*'
     '*'      '*'    *'*'
      '*'    '*'    *'*'
        '*' '*'     '*'
          '*'       '*'
          '*'       '*'
          '*'       *'*'
          '*'        *'*'
          '*'         '*'*''*'
    \n
    by yc:www.lang-v.com
    """
    print log

# --- shared state for the worker threads -----------------------------------
# NOTE(review): `all` shadows the builtin of the same name module-wide.
all = Queue.Queue()  # aizhan worker: domains waiting to be written to domain.txt
all_tool = Queue.Queue()  # chinaz worker: domains waiting to be written/probed
all_just_save = Queue.Queue()  # aizhan worker: domains waiting for the /plus/erraddsave.php probe

# NOTE(review): apparently unused in the visible code — presumably left over
# from an earlier feature; confirm before removing.
character = "abcdefghijklmnopqrstuvwxyz"
numberic = "0123456789-"
# Headers used when probing target sites directly for the backdoor path.
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
# Headers for dns.aizhan.com; the hard-coded CSRF cookie was captured from a
# browser session and will stop working when it expires.
aizhan_header={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Content-Type':'application/x-www-form-urlencoded',
            'Cookie':'_csrf=15b61cce4d661fbcef302be90ebcacdbe0bb2da5cc14afb578720a9fa09b159da:2:{i:0;s:5:"_csrfphpino()";i:1;s:32:"_VMb8EpgCcOjqNO1ObO_1pGXRmEUiIyv";}',
            'Upgrade-Insecure-Requests': '1'
}
# Headers for s.tool.chinaz.com queries.
tool_header={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Content-Type':'application/x-www-form-urlencoded',
            'Upgrade-Insecure-Requests': '1'
}
# Serializes appends to domain.txt / need.txt across worker threads.
lock = threading.RLock()

def get_suff():
    """Scrape a list of vulnerable URL suffixes from hulian.top and append
    them to suff.txt.

    Extracts every ``<td>...</td>`` cell, strips literal escape sequences,
    and skips any entry that still contains a backslash (e.g. ``\\x``
    hex-escape residue).  Network/parse errors propagate to the caller.
    """
    url = "https://www.hulian.top/zixun/post/4417.html"
    html = requests.get(url)
    suff = re.findall(r'<td>(.*?)</td>', str(html.content))
    # Fix: the original opened suff.txt and never closed it; `with`
    # guarantees the handle is released even if a write fails.
    with open('suff.txt', 'a') as fx_suff:
        for x in suff:
            x = x.replace("\\r", "").replace("\\n", "").replace("\\t", "")
            if "\\" in x:  # residual escape sequence -> not a clean suffix
                continue
            print(x)
            fx_suff.write(str(x) + "\n")




def make():
    way_raw = raw_input("Inut api:->(aizhan webscan chinaz)")
    thread_raw = raw_input("Input thread:->")
    lists = open('url.txt','r').readlines()
    q = Queue.Queue()
    try:
        for raw in lists:
            raw = raw.replace("\n","")
            raw = raw.replace("http://","")
            raw = raw.replace("https://","")
            ip = socket.getaddrinfo(raw,'http')[0][4][0]
            last_point = ip.rfind(".")
            ip_sub = ip[0:last_point]+"."
            q.put(ip_sub)
    
    except:
        pass
    '''
    for th in range(1,int(thread_raw)):
        t = threading.Thread(target=get_domain_tool,args=(q,))
        t.start()
    '''
    

    if way_raw=="aizhan":
    
        for th in range(1,int(thread_raw)):#threading

        
            t1 = threading.Thread(target=get_domain_aizhan,args=(q,))
            t1.start()
    elif way_raw=="chinaz":
        for th in range(1,int(thread_raw)):
            t2 = threading.Thread(target=get_domain_tool,args=(q,))
            t2.start()
    elif way_raw=="webscan":
        for th in range(1,int(thread_raw)):
            t3 = threading.Thread(target=get_domain_webscan,args=(q,))
            t3.start()
    
    print "url read over"
    '''
    get_domain_aizhan(q)
    
    #get_domain_tool(q)
    
    #get_domain_webscan(q)
    #get_domain_tool(q)
    '''


def get_domain_aizhan(q):
    print "aizhan Beging"
    while not q.empty():
        ip = q.get()
        for num in range(0,255):
            try:
                html = requests.get("http://dns.aizhan.com/"+ip+str(num)+"/",headers=aizhan_header,timeout=5)
                time.sleep(1/3)
                domain = re.findall('_blank">(.*?)</a>',str(html.content))
                domain = domain[0:len(domain)-1]
                for xx in domain:
                    if "." not in xx:
                        pass
                    else:
                        all.put(xx)
                        all_just_save.put(xx)
                last_page = re.findall('href="(.*?)"><i class="ico-pager-end',str(html.content))
                if len(last_page)!=0:
                    last = re.findall("/(.*?)/",str(re.findall("/\d{1,2}/",last_page[0])[0]))[0]#last page number
                    last_page_number = int(last)
                    print ip+str(num)+": Having "+str(last_page_number)+" pages"
                    for i in range(2,last_page_number+1):
                        time.sleep(1/3)
                        sub_url = "http://dns.aizhan.com/"+ip+str(num)+"/"+str(i)+"/"
                        print "Page "+str(i)+" OK"
                        sub_page = requests.get(sub_url,headers=aizhan_header)
                        other_domain = re.findall('<a href="(.*?)" rel="nofollow"',str(sub_page.content))
                        other_domain = other_domain[0:len(other_domain)-1]
                        for mm in other_domain:
                            all.put(mm)
                            all_just_save.put(mm)
                        
                else:#only one page
                    pass
                print ip+str(num)+":"+str(all.qsize())
                while not all.empty():
                    do1 = all.get()
                    with lock:
                        try:
                            fxx = open('domain.txt','a')
                            try:
                                fxx.write(str(do1)+"\n")
                            finally:
                                fxx.close()
                        except:
                            pass
                
                #print all
                while not all_just_save.empty():
                    do = all_just_save.get()
                    
                    try:
                        if "http" not in do:
                            do = "http://"+do
                        html1 = requests.get(do+"/plus/erraddsave.php",headers=header,timeout=3)
                        if html1.status_code==200:
                            print "[+] aizhan : "+do+"/plus/erraddsave.php"
                            
                            try:
                                fxn = open('need.txt','a')
                                try:
                                    fxn.write(str(do)+"/plus/erraddsave.php\n")
                                finally:
                                    fxn.close()
                            except:
                                pass
                    except:
                        pass
            except:
                pass
                
def get_domain_tool(q):
    print "tool Beging"
    while not q.empty():
        ip = q.get()
        for num in range(0,255):
            try:
                html = requests.get("http://s.tool.chinaz.com/same?s="+ip+str(num)+"&page=1",headers=tool_header,timeout=3)
                time.sleep(1/3)
                domain = re.findall('_blank>(.*?)</a>',str(html.content))
                for xxx in domain:
                    if "." not in xxx:
                        pass
                    else:
                        all_tool.put(xxx)
                        #print xxx
                last_page_tool = re.findall('col-blue02">(.*?)</i',str(html.content))
                print ip+str(num)+" : "+str(last_page_tool[0])
                
                if len(last_page_tool)!=0:
                    
                    page_num = int(last_page_tool[0])/20
                    mod = int(last_page_tool[0])%20
                    if mod!=0:
                        page_num = page_num+1
                    
                
                    for jj in range(2,page_num+1):
                        
                        try:
                            
                            html_i = requests.get("http://s.tool.chinaz.com/same?s="+ip+str(num)+"&page="+str(jj),headers=tool_header,timeout=3)
                            other = re.findall('target=_blank>(.*?)</a>',str(html_i.content))
                            for xxx in other:
                                all_tool.put(xxx)
                        except:
                            pass
                while not all_tool.empty():
                    do = all_tool.get()
                    with lock:
                        try:
                            fxx = open('domain.txt','a')
                            try:
                                fxx.write(str(do)+"\n")
                            finally:
                                fxx.close()
                        except:
                            pass
                    try:
                        if "http" not in do:
                            do = "http://"+do
                        html1 = requests.get(do+"/plus/erraddsave.php",headers=header,timeout=3)
                        if html1.status_code==200:
                            print "[+] tool "+do+"/plus/erraddsave"
                            
                            try:
                                fxn = open('need.txt','a')
                                try:
                                    fxn.write(str(do)+"/plus/erraddsave.php\n")
                                finally:
                                    fxn.close()
                            except:
                                pass
                    except:
                        pass
            except:
                pass
               
def get_domain_webscan(q):
    print "webscan Beging"
    while not q.empty():
        ip = q.get()
        print ip+"\n"
        for num in range(0,255):
            try:
                print ip+str(num)
                #aizhan html = requests.get("http://dns.aizhan.com/"+ip+str(num)+"/",headers=header,timeout=5)
                html = requests.get("http://api.webscan.cc/?action=query&ip="+ip+str(num)+"&type=xml",timeout=3)
                time.sleep(1/5)
                domain = re.findall('domain>(.*?)</domain>',str(html.content))
                for do in domain:
                    print "[*] url:->"+do
                    with lock:
                        try:
                            fxx = open('domain.txt','a')
                            try:
                                fxx.write(str(do)+"\n")
                            finally:
                                fxx.close()
                        except:
                            pass
                    try:
                        if "http" not in do:
                            do = "http://"+do
                            
                        html1 = requests.get(do+"/plus/erraddsave.php",headers=header,timeout=3)
                        if html1.status_code==200:
                            print "[+] webscan : "+do+"/plus/erraddsave"
                            
                            try:
                                fxn = open('need.txt','a')
                                try:
                                    fxn.write(str(do)+"/plus/erraddsave.php\n")
                                finally:
                                    fxn.close()
                            except:
                                pass
                    except:
                        pass
            except:
                pass
                       
        
def main():
    try:
        make()
    except:
        pass
    #get_suff()
    '''
    while True:
        length = random.randint(1,15)
        make(length)
    '''
if __name__ == '__main__':
    logo()
    # Simple access gate: the tool only runs with the shared password.
    yc = raw_input("YC password: ")
    if yc=="hai+yc":
        
        main()

    else:
        print "Bad Password"
        

ok,现在是11月4日。特此更新下工具。

一开始上面那个版本0.0的,在骚海这“产品经理”的折磨下,更新到了v1.1。。。

ok,主要总结下更新内容:

1、优化线程,提高url转ip速率

2、支持windows、linux版本(linux读取文件最后是\r\n,windows是\n)

3、加入漏洞检测功能,今天四台云服挂了一天,出了40条shell左右

4、优化资源分配,合理调用系统资源

windows版本:windows版本下载

linux版本下载请联系 YC夜程


二维码加载中...
本文作者:Iamyc      文章标题: 分享个自己写的C段扫描器
本文地址:http://lang-v.com/first_cms/yc/emlog/src/?post=114
版权声明:若无注明,本文皆为“YC's Blog”原创,转载请保留文章出处。

返回顶部    首页    手机版本    后花园  
版权所有:YC's Blog    站长: Iamyc    程序:emlog