How do I implement a web crawler that scrapes ad links?

To get training data, I wrote a crawler that follows the Alexa top 500 websites to a depth of 2 and writes every link it finds to a file. Right now it collects all the links in the HTML and writes them out. The problem is that the crawler misses all of the links that point to ads, some of which sit in iframes or in CSS files. How do I change my web crawler so that it scrapes all links, including ads? My crawler code is further below.
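To be concrete about the iframe/CSS part, something along these lines is roughly the extraction I think I am missing. This is only a hypothetical sketch: extract_extra_links and CSS_URL_RE are illustrative names that are not in my code, it assumes the page has already been parsed with BeautifulSoup, and external stylesheets would still have to be fetched and scanned separately.

import re
import urlparse

CSS_URL_RE = re.compile(r'url\(["\']?([^"\')]+)["\']?\)')

def extract_extra_links(base_url, soup):
    """Hypothetical helper: collect link sources that plain <a href> scraping misses."""
    links = []
    # iframe / script / stylesheet references frequently point at ad servers
    for tag in soup(['iframe', 'script', 'link']):
        src = tag.get('src') or tag.get('href')
        if src:
            links.append(urlparse.urljoin(base_url, src))
    # url(...) references inside inline <style> blocks
    for style in soup(['style']):
        for ref in CSS_URL_RE.findall(style.string or ''):
            links.append(urlparse.urljoin(base_url, ref))
    return links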

class Crawler(object):

def __init__(self, root, depth, locked=True):
    self.root = root
    self.depth = depth
    self.locked = locked
    self.host = urlparse.urlparse(root)[1]
    self.urls = []
    self.links = 0
    self.followed = 0


def crawl(self):
    #print " in crawl"
    page = Fetcher(self.root)
    q = Queue()
    #print "made fetcher"
    try:
        page.fetch()
        if page.urls == []:
            print "Error: could not fetch urls for %s" % (self.root)
            return
            #raise KeyboardInterrupt
        else: 
            target = open("output.txt", 'w')
            for url in page.urls:
                q.put(url)
                target.write((url+'\n').encode('utf-8'))
            followed = [self.root]
            target.close()

    except Exception as e:
        print('Error: could not fetch urls')
        raise KeyboardInterrupt

    n = 0

    while True:
        try:
            url = q.get(block=False)  # non-blocking, so the loop ends once the queue is drained
        except QueueEmpty:
            break

        n += 1

        if url not in followed:
            try:
                host = urlparse.urlparse(url)[1]

                if self.locked and re.match(".*%s" % self.host, host):
                    followed.append(url)
                    #print url
                    self.followed += 1
                    page = Fetcher(url)
                    page.fetch()
                    for i, url in enumerate(page):
                        if url not in self.urls:
                            self.links += 1
                            q.put(url)
                            self.urls.append(url)
                            with open("data.out", 'w') as f:
                               f.write(url)
                    if n > self.depth and self.depth > 0:
                        break
            except Exception, e:
                print "ERROR: Can't process url '%s' (%s)" % (url, e)
                print format_exc()

class Fetcher(object):

def __init__(self, url):
    self.url = url
    self.urls = []

def __getitem__(self, x):
    return self.urls[x]

def _addHeaders(self, request):
    request.add_header("User-Agent", AGENT)

def open(self):
    url = self.url
    try:
        request = urllib2.Request(url)
        handle = urllib2.build_opener()
    except IOError:
        return None
    return (request, handle)

def fetch(self):
    request, handle = self.open()
    self._addHeaders(request)
    if handle:
        try:
            content = unicode(handle.open(request).read(), "utf-8",
                    errors="replace")
            soup = BeautifulSoup(content)
            tags = soup('a')  # only <a> tags are collected, so links in iframes, scripts or CSS are never seen
        except urllib2.HTTPError, error:
            if error.code == 404:
                print >> sys.stderr, "ERROR: %s -> %s" % (error, error.url)
            else:
                print >> sys.stderr, "ERROR: %s" % error
            tags = []
        except urllib2.URLError, error:
            print >> sys.stderr, "ERROR: %s" % error
            tags = []
        for tag in tags:
            href = tag.get("href")
            if href is not None:
                url = urlparse.urljoin(self.url, escape(href))
                if url not in self:
                    self.urls.append(url)

def getLinks(url):
    page = Fetcher(url)
    page.fetch()
    for i, url in enumerate(page):
        print "%d. %s" % (i, url)

Static method:

def main():
    depth = 2
    file_in = []
    reload(sys)
    sys.setdefaultencoding('utf-8')
    filename = "stuff.txt"
    text = open(filename)
    for line in text:
        file_in.append(line.rstrip())


    for i in file_in:
        print "Crawling %s (Max Depth: %d)" % (i, depth)
        crawler = Crawler(i, depth)
        crawler.crawl()
        print "\n".join(crawler.urls)

A lot of ads are served by asynchronous JavaScript that executes on the page. If you only scrape the server's initial output, you won't get those extra links. One approach is to use a headless browser such as PhantomJS to render the HTML to a file and then run your script on that file. There are other possibilities as well.
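As a rough sketch of that approach, assuming Selenium with a PhantomJS binary on your PATH (newer Selenium releases deprecate the PhantomJS driver; a headless Chrome or Firefox driver can be swapped in the same way):

from selenium import webdriver
from bs4 import BeautifulSoup

def fetch_rendered_links(url):
    # Render the page in a headless browser so links injected by async
    # JavaScript (typical for ads) are present in the DOM.
    driver = webdriver.PhantomJS()    # or a headless Chrome/Firefox driver
    try:
        driver.get(url)               # executes the page's JavaScript
        html = driver.page_source     # DOM after scripts have run
    finally:
        driver.quit()

    soup = BeautifulSoup(html)
    links = []
    for tag in soup(['a', 'iframe']):
        href = tag.get('href') or tag.get('src')
        if href:
            links.append(href)
    return links

print "\n".join(fetch_rendered_links("http://example.com"))

You could also simply dump driver.page_source to a file and run your existing parsing code over that file, which matches the render-to-a-file idea above.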