아미(아름다운미소)

[python] crawler sample

유키공 2017. 12. 21. 18:30
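
The script below is a minimal breadth-first web crawler for Python 2. get_links() fetches a page with urllib2, extracts every <a href> with BeautifulSoup, and expands relative links against the page URI; crawl() works through a deque of pending URIs until the queue is empty or the limit is reached; main() writes the sorted list of discovered URIs to uris.csv.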

# encoding: utf-8
import codecs
from collections import deque
import urllib2, urlparse
import socket
from BeautifulSoup import BeautifulSoup

def get_links(uri, startswith=""):
    """Return a list of referenced URIs (without duplicates) found in the
    document returned for the input URI."""
    results = set()
    try:
        page = urllib2.urlopen(uri)
        soup = BeautifulSoup(page)
        for link in soup.findAll('a'):  # every <a href="..."> element
            try:
                link = link['href']
                # skip pseudo-protocols that cannot be crawled
                if not link.startswith("javascript:") \
                        and not link.startswith("mailto:") \
                        and not link.startswith("skype:"):
                    link = urlparse.urljoin(uri, link)  # expand relative URIs
                    if link.startswith(startswith):
                        results.add(link)
            except KeyError:
                print "Missing href attribute in %s" % link
    except Exception:
        # network errors, timeouts and unparsable pages are skipped silently
        pass
    return list(results)
        
def crawl(seed_uris, timeout=5, limit=1000, debug=True, startswith=""):
    """Return a list of URIs found by following all links (breadth-first)
    from the list of seed URIs given, up to `limit` URIs in total."""
    queue = deque(seed_uris)   # URIs still waiting to be fetched
    results = seed_uris[:]     # every URI seen so far, seeds included
    socket.setdefaulttimeout(timeout)  # applies to all subsequent socket operations

    while len(queue) > 0 and len(results) < limit:
        uri = queue.popleft()
        if debug:
            print "Analyzing %s" % uri
        links = get_links(uri, startswith)
        new_links = [link for link in links if link not in results]
        if debug:
            print "%i links found, of which %i are new" % (len(links), len(new_links))
        results.extend(new_links)
        queue.extend(new_links)
        if debug:
            print "Status: %i URIs known, %i URIs in queue" % (len(results), len(queue))
    if debug:
        print "Completed."
        print "URI count after analysing all linked pages: %i distinct URLs" % len(results)

    results.sort()
    return results
        
def main():
#   SEED_URIS = ["http://www.heppnetz.de/", "http://www.unibw.de"]
    SEED_URIS = ["http://www.cafe24.com/"]
    results = crawl(seed_uris=SEED_URIS, limit=1000, startswith="")
    f = codecs.open('uris.csv', 'wt', 'utf-8')  # one URI per line, UTF-8
    for line in results:
        f.write(line + "\n")
    f.close()

if __name__ == '__main__':
    main()
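
The listing above targets Python 2: urllib2 and urlparse were removed in Python 3, and the BeautifulSoup import here is the old 3.x module. As a rough sketch of the same get_links() idea for Python 3, using only the standard library, the version below swaps BeautifulSoup for html.parser.HTMLParser; the LinkCollector class name is illustrative, not part of the original post.

# A minimal Python 3 sketch of get_links(), standard library only.
# LinkCollector is an illustrative name, not from the original post.
import socket
import urllib.request
from html.parser import HTMLParser
from urllib.parse import urljoin

class LinkCollector(HTMLParser):
    """Collects the href value of every <a> tag."""
    def __init__(self):
        super().__init__()
        self.hrefs = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for name, value in attrs:
                if name == 'href' and value:
                    self.hrefs.append(value)

def get_links(uri, startswith=""):
    """Return the de-duplicated list of crawlable URIs linked from uri."""
    results = set()
    try:
        parser = LinkCollector()
        with urllib.request.urlopen(uri) as page:
            parser.feed(page.read().decode('utf-8', 'replace'))
        for href in parser.hrefs:
            if href.startswith(("javascript:", "mailto:", "skype:")):
                continue
            link = urljoin(uri, href)  # expand relative URIs
            if link.startswith(startswith):
                results.add(link)
    except Exception:
        pass  # skip pages that fail to load or parse, as in the original
    return list(results)

if __name__ == '__main__':
    socket.setdefaulttimeout(5)
    print(get_links("http://www.cafe24.com/"))

crawl() and main() carry over almost unchanged once their print statements become print() calls.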