As the title says, here is my code:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import re
from jingdong.items import JingdongItem
from jingdong.settings import *


class GoodsSpider(scrapy.Spider):
    name = "goods"
    allowed_domains = ["jd.com"]
    start_urls = ["http://jd.com/"]
    # search page, e.g. https://search.jd.com/Search?keyword=&enc=utf-8&wq=&page=1
    url = "https://search.jd.com/Search?keyword={KEYWORDS}&enc=utf-8&wq={KEYWORDS}&page={page}"
    # e-book price endpoint
    Eprice_url = "https://c.3.cn/book?skuId={skuId}&cat={cat}&area=1_72_2799_0&callback=book_jsonp_callback"
    # regular price endpoint
    price_url = "https://p.3.cn/prices/mgets?type=1&area=1_72_2799_0&pdtk=&pduid=1771569446&pdpin=&pdbp=0&skuIds=J_{skuId}&ext=11100000&callback=jQuery3021180&_=1547383556702"
    # fallback price endpoint (stock interface), used when the first price request returns nothing
    price2_url = "https://c0.3.cn/stock?skuId={skuId}&venderId=1000005720&cat={cat}&area=1_72_2799_0&buyNum=1&extraParam={%22originid%22:%221%22}&ch=1&pduid=1771569446&pdpin=&fqsp=0&callback=getStockCallback"
    # comment endpoint
    comment_url = "https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv39228&productId={skuId}&score=0&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1"
    def start_requests(self):
        # JD search results use odd page numbers for full pages
        for k in range(1, PAGE_NUM):
            yield Request(url=self.url.format(KEYWORDS=KEYWORDS, page=2 * k - 1),
                          callback=self.page_parse)

    def page_parse(self, response):
        # collect the goods IDs from the search result list
        goodsID = response.xpath("//li/@data-sku").extract()
        print(goodsID)
        for each in goodsID:
            goodsurl = "https://item.jd.com/{}.html".format(each)
            yield Request(url=goodsurl, callback=self.get_goods_info)
    def get_goods_info(self, response):
        item = JingdongItem()
        # product link
        item["link"] = response.url
        # title
        item["title"] = response.xpath('//div[@class="sku-name"]/text()').extract()[0].strip()
        # author(s)
        item["writer"] = response.xpath('//div[@class="p-author"]/a/text()').extract()
        # pull the sku ID out of the product URL
        skuId = re.compile(r"https://item\.jd\.com/(\d+)\.html").findall(response.url)[0]
        item["Id"] = skuId
        cat = re.compile(r"pcat:\[(.*?)\],").findall(response.text)
        cat = re.sub(r"\|", ",", cat[0]).strip('"')
        item["catId"] = cat
        print(skuId)
        print(cat)
        # e-book price
        yield Request(url=self.Eprice_url.format(skuId=skuId, cat=cat),
                      meta={"item": item}, callback=self.price_parse)
        # regular price
        yield Request(url=self.price_url.format(skuId=skuId),
                      meta={"item": item}, callback=self.jingdong_price_parse)
        # comments (json)
        yield Request(url=self.comment_url.format(skuId=skuId),
                      meta={"item": item}, callback=self.comment_parse)
    def price_parse(self, response):
        item = response.meta["item"]
        # e-book price
        item["e_price"] = re.compile(r'"p":"(.*?)",').findall(response.text)[0]
        yield item

    def jingdong_price_parse(self, response):
        item = response.meta["item"]
        # current price
        item["n_price"] = re.compile(r'"op":"(.*?)",').findall(response.text)[0]
        print(item["n_price"])
        # original price
        item["o_price"] = re.compile(r'"m":"(.*?)",').findall(response.text)[0]
        if item["n_price"] is None and item["o_price"] is None:
            # fall back to the stock endpoint when both prices are missing
            yield Request(url=self.price2_url.format(skuId=item["Id"], cat=item["catId"]),
                          meta={"item": item}, callback=self.jingdong_price_parse)
        else:
            yield item

    def comment_parse(self, response):
        item = response.meta["item"]
        # comment contents
        item["comment"] = re.compile(r'"content":"(.*?)",').findall(response.text)
        yield item
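For completeness, the spider depends on an item class and two settings defined elsewhere in the project. My items.py declares one field per attribute assigned above; this is a minimal sketch reconstructed from those assignments:

import scrapy

class JingdongItem(scrapy.Item):
    link = scrapy.Field()     # product page URL
    title = scrapy.Field()    # product name
    writer = scrapy.Field()   # author list
    Id = scrapy.Field()       # sku ID
    catId = scrapy.Field()    # category ID string
    e_price = scrapy.Field()  # e-book price
    n_price = scrapy.Field()  # current price
    o_price = scrapy.Field()  # original price
    comment = scrapy.Field()  # comment contents

settings.py additionally defines KEYWORDS (the search term) and PAGE_NUM (how many search pages to crawl).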
Of the following three requests, only the bottom one (the comments request) ever executes its callback. For the other two I have already pulled the data successfully with the same regexes by fetching the URLs directly with the requests library (a quick sketch of that check is at the bottom of this post), so I don't understand why they don't work inside the spider:
        # e-book price
        yield Request(url=self.Eprice_url.format(skuId=skuId, cat=cat),
                      meta={"item": item}, callback=self.price_parse)
        # regular price
        yield Request(url=self.price_url.format(skuId=skuId),
                      meta={"item": item}, callback=self.jingdong_price_parse)
        # comments (json)
        yield Request(url=self.comment_url.format(skuId=skuId),
                      meta={"item": item}, callback=self.comment_parse)
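For reference, this is roughly how I verified the price endpoint outside Scrapy (a minimal sketch; the skuId value is a placeholder, and I checked Eprice_url the same way):

import re
import requests

# placeholder sku ID; substitute one taken from a real product page
skuId = "11757834"

price_url = ("https://p.3.cn/prices/mgets?type=1&area=1_72_2799_0&pdtk=&pduid=1771569446"
             "&pdpin=&pdbp=0&skuIds=J_{skuId}&ext=11100000&callback=jQuery3021180"
             "&_=1547383556702").format(skuId=skuId)

resp = requests.get(price_url)
# the same regex the spider uses extracts the current price from the JSONP body
print(re.compile(r'"op":"(.*?)",').findall(resp.text))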