使用Scrapy发送请求负载的POST请求

6

我该如何爬取这个网站? 我如何使用负载发送POST请求并获取数据?如果我使用此代码,我能够爬取第一页,但是我如何爬取第二页?我需要使用Selenium还是Scrapy就足够了?

import scrapy
from scrapy import log
from scrapy.http import *
import urllib2
class myntra_spider(scrapy.Spider):
    name="myntra"
    allowed_domain=[]
    start_urls=["http://www.myntra.com/men-footwear"]
    logfile=open('testlog.log','w')
    log_observer=log.ScrapyFileLogObserver(logfile,level=log.ERROR)
    log_observer.start()
    # sub_category=[]



    def parse(self,response):
        print "response url ",response.url

        link=response.xpath("//ul[@class='results small']/li/a/@href").extract()
        print links
        yield Request('http://www.myntra.com/search-service/searchservice/search/filteredSearch', callback=self.nextpages,body="")



    def nextpages(self,response):
        link=response.xpath("//ul[@class='results small']/li/a/@href").extract()
        for i in range(10):
            print "link ",link[i]
1个回答

7
您无需使用Selenium。请检查浏览器中所需发送的有效负载,并将其附加到请求中。
我已经在您的网站上尝试过,以下代码片段可行 -
def start_requests(self):
    """Start the crawl by POSTing the search query straight to the
    site's search-service endpoint, bypassing the HTML landing page.

    The payload below was captured from the browser's network tab and
    must be sent verbatim; paging is driven by the "start"/"rows"
    fields (first 96 results requested here).
    """
    url = "http://www.myntra.com/search-service/searchservice/search/filteredSearch"
    # NOTE(review): 'count_options_availbale' is misspelled on the server
    # side as well -- do not "correct" it, the API expects this exact key.
    payload = [{
        "query": "(global_attr_age_group:(\"Adults-Unisex\" OR \"Adults-Women\") AND global_attr_master_category:(\"Footwear\"))",
        "start": 0,
        "rows": 96,
        "facetField": [],
        "pivotFacets": [],
        "fq": ["count_options_availbale:[1 TO *]"],
        "sort": [
            {"sort_field": "count_options_availbale", "order_by": "desc"},
            {"sort_field": "score", "order_by": "desc"},
            {"sort_field": "style_store1_female_sort_field", "order_by": "desc"},
            {"sort_field": "potential_revenue_female_sort_field", "order_by": "desc"},
            {"sort_field": "global_attr_catalog_add_date", "order_by": "desc"}
        ],
        "return_docs": True,
        "colour_grouping": True,
        "useCache": True,
        "flatshot": False,
        "outOfStock": False,
        "showInactiveStyles": False,
        "facet": True
    }]
    # Explicit POST with the JSON-encoded payload as the request body.
    yield Request(url, self.parse, method="POST", body=json.dumps(payload))

def parse(self, response):
    data = json.loads(response.body)
    print data

这是否意味着每次我需要打开浏览器并查找有效载荷?那么我该如何自动化爬取呢? - sangharsh

网页内容由 Stack Overflow 提供;点击上方的原文链接可查看英文原文。