Firstcry.com Scraper Issues

I am trying to scrape the following website - www.firstcry.com. The site uses AJAX (in the form of XHR) to display its search results.

Now, if you look at my code, the jsonresponse variable contains the JSON output of the site. When I try to print it, it contains a lot of \ (backslashes).

Just below the jsonresponse variable in my code, you can see several lines I have commented out. Those were my attempts to remove all the backslashes, and also the u' prefixes that show up there.

But after all those attempts, I am unable to remove ALL of the backslashes and u' prefixes.

If I don't remove all of them, I can't access the JSON response by its keys, so removing them is very important for me.

Please help me solve this problem. It would be even better if you could provide code specific to my case (problem), rather than generic code!

Here is my code:

from twisted.internet import reactor
from scrapy.crawler import CrawlerProcess, CrawlerRunner
import scrapy
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
from scrapy.settings import Settings
import datetime
from multiprocessing import Process, Queue
import os
from scrapy.http import Request
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from scrapy.signalmanager import SignalManager
import json , simplejson , ujson

#query=raw_input("Enter a product to search for= ")
query='bag'
query1=query.replace(" ", "+")  


class DmozItem(scrapy.Item):

    productname = scrapy.Field()
    product_link = scrapy.Field()
    current_price = scrapy.Field()
    mrp = scrapy.Field()
    offer = scrapy.Field()
    imageurl = scrapy.Field()
    outofstock_status = scrapy.Field()

class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["http://www.firstcry.com"]


    def start_requests(self):

        task_urls = [
        ]
        i=1
        for i in range(1,2):
            temp = "http://www.firstcry.com/svcs/search.svc/GetSearchPagingProducts_new?PageNo=" + str(i) + "&PageSize=20&SortExpression=Relevance&SubCatId=&BrandId=&Price=&OUTOFSTOCK=&DISCOUNT=&Q=" + query1 + "&rating="
            task_urls.append(temp)
            i=i+1

        start_urls = (task_urls)
        p=len(task_urls)
        return [ Request(url = start_url) for start_url in start_urls ]


    def parse(self, response):
        print response

        items = []
        jsonresponse = dict(ujson.loads(response.body_as_unicode()))
#       jsonresponse = jsonresponse.replace("\","")
#       jsonresponse = jsonresponse.decode('string_escape')
#       jsonresponse = ("%r" % json.loads(response.body_as_unicode()))
#       d= jsonresponse.json()
        #jsonresponse = jsonresponse.strip("/")
#       print jsonresponse
#       print d
#       print json.dumps("%r" % jsonresponse, indent=4, sort_keys=True)
#       a = simplejson.dumps(simplejson.loads(response.body_as_unicode()).replace("u\'","\'"), indent=4, sort_keys=True)
        #a= json.dumps(json.JSONDecoder().decode(jsonresponse))
        #a = ujson.dumps((ujson.loads(response.body_as_unicode())) , indent=4 )
        a=json.dumps(jsonresponse, indent=4)
        a=a.decode('string_escape')
        a=(a.decode('string_escape'))
#       a.gsub('\', '')
        #a = a.strip('/')
        #print (jsonresponse)
        print a
        #print "%r" % a
#       print "%r" % json.loads(response.body_as_unicode())

        p=(jsonresponse["hits"])["hit"]
#       print p
#       raw_input()
        for x in p:
            item = DmozItem()
            item['productname'] = str(x['title'])
            item['product_link'] = "http://www.yepme.com/Deals1.aspx?CampId="+str(x["uniqueId"])
            item['current_price']='Rs. ' + str(x["price"])

            try:            
                p=x["marketprice"]
                item['mrp'] = 'Rs. ' + str(p)

            except:
                item['mrp'] = item['current_price']

            try:            
                item['offer'] = str(x["promotionalMsg"])
            except:
                item['offer'] = str('No additional offer available')

            item['imageurl'] = "http://staticaky.yepme.com/newcampaign/"+str(x["uniqueId"])[:-1]+"/"+str(x["smallimage"])
            item['outofstock_status'] = str('In Stock')
            items.append(item)

        print (items)

spider1 = DmozSpider()
settings = Settings()
settings.set("PROJECT", "dmoz")
settings.set("CONCURRENT_REQUESTS" , 100)
settings.set( "DEPTH_PRIORITY" , 1)
settings.set("SCHEDULER_DISK_QUEUE" , "scrapy.squeues.PickleFifoDiskQueue")
settings.set( "SCHEDULER_MEMORY_QUEUE" , "scrapy.squeues.FifoMemoryQueue")
crawler = CrawlerProcess(settings)
crawler.crawl(spider1)
crawler.start()

There is no need to overcomplicate everything. Instead of running ujson on response.body_as_unicode() and then converting the result to a dict, just use the regular json module on response.body:

$ scrapy shell "http://www.firstcry.com/svcs/search.svc/GetSearchPagingProducts_new?PageNo=1&PageSize=20&SortExpression=Relevence&SubCatId=&BrandId=&Price=&OUTOFSTOCK=&DISCOUNT=&Q=bag&rating="
...
>>> jsonresponse = json.loads(response.body)
>>> jsonresponse.keys()
[u'ProductResponse']
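
Inside the spider, that one call replaces the whole ujson / string_escape dance. Here is a minimal sketch of the simplified parse(), assuming the same request as above:

import json

def parse(self, response):
    # response.body is plain JSON text, so json.loads() handles it
    # directly; no unescaping or dict() conversion is needed
    jsonresponse = json.loads(response.body)
    print jsonresponse.keys()   # [u'ProductResponse']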

Your example works fine for me. It looks like you went a bit too deep into "hacking around for an answer" mode ;)

I will note that this line...

p=(jsonresponse["hits"])["hit"]

... will not work in your code. The only key available in jsonresponse after parsing the JSON is "ProductResponse". That key contains another JSON object, which you can then access like this:

>>> product_response = json.loads(jsonresponse['ProductResponse'])
>>> product_response['hits']['hit']
[{u'fields': {u'_score': u'56.258633',
    u'bname': u'My Milestones',
    u'brandid': u'450',
...

I think this will give you what you were hoping to have in your p variable.
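
Putting the two fixes together, here is a sketch of how your parse() could look. Note the two json.loads() calls: ProductResponse holds a JSON string embedded inside JSON, which is exactly where all of your backslashes were coming from. The field names below are assumptions carried over from your original code and the sample output above (each hit seems to nest its data under a "fields" key), so verify them against the actual response:

import json

def parse(self, response):
    # first decode: the outer JSON envelope
    jsonresponse = json.loads(response.body)

    # second decode: "ProductResponse" is itself a JSON-encoded string;
    # that embedded encoding is the source of the backslashes
    product_response = json.loads(jsonresponse['ProductResponse'])

    for hit in product_response['hits']['hit']:
        # the per-product data appears to live under "fields"
        # (an assumption based on the sample output above)
        fields = hit.get('fields', hit)
        item = DmozItem()
        item['productname'] = fields.get('title')
        item['current_price'] = 'Rs. ' + str(fields.get('price', ''))
        yield item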