# 爬取不同商品时只需更换 url 中的 productId（必要时同步更新 Cookie / User-Agent）
# 单线程方法
#coding=gbk
import re
import time
import csv
import os
import requests
import html
# HTTP request headers captured from a real browser session.
# NOTE(review): the 'cookie' value is tied to one logged-in session and will
# expire — refresh it from the browser when the server starts rejecting or
# empty-answering requests. The sec-fetch-* / User-Agent fields make the
# request look like a normal page navigation.
headers = {
'cookie': 'shshshfp=22dd633052035d21be92463ffa35684d; shshshfpa=ab283f84-c40f-9710-db89-84a8d3366a81-1586333030; __jda=122270672.1586333031101106032014.1586333031.1586333031.1586333031.1; __jdv=122270672|direct|-|none|-|1586333031103; __jdc=122270672; shshshfpb=bUe7tI9%2FOOaJKd7vP0EtSOg%3D%3D; __jdu=1586333031101106032014; areaId=22; ipLoc-djd=22-1977-1980-0; 3AB9D23F7A4B3C9B=7XEQD4BFTGEH44EK7LN7HLFCHJW6W2NS5VJOQOCHABZVI7LXJJIW3K2IX5MTPZ4TBERBLY6TRQR5CA3S3IYVLQ2JGI; jwotest_product=99; shshshsID=a7457cee6a4a9fa285fe2cff44c6bd17_4_1586333142454; __jdb=122270672.4.1586333031101106032014|1.1586333031; JSESSIONID=8C21549A613B83F0CB86EF1F38FD63D3.s1',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36'
}
def comment_crawl(page, writer):
    """Fetch one page of JD product comments and append each row to *writer*.

    page: zero-based page index, interpolated into the request URL.
    writer: a csv.writer-like object with a writerow() method.
    """
    print('当前正在下载第%d页评论' % (page + 1))
    # Bug fix: the page index must be injected into the query string —
    # the original hard-coded page=0, so every call downloaded the same
    # first page 100 times over.
    url = ('https://club.jd.com/comment/productPageComments.action'
           '?callback=fetchJSON_comment98&productId=10026434916372'
           '&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&fold=1' % page)
    text = requests.get(url, headers=headers).text
    # Scrape the fields out of the JSONP payload with a regex; brittle, but
    # avoids stripping the fetchJSON_comment98(...) wrapper to parse JSON.
    comment_list = re.findall(
        r'guid":".*?"content":"(.*?)".*?"creationTime":"(.*?)",".*?"replyCount":(\d+),"score":(\d+).*?usefulVoteCount":(\d+).*?imageCount":(\d+).*?images":',
        text)
    for raw_content, comment_time, reply_count, score, vote_count, image_count in comment_list:
        # Unescape HTML entities and flatten newlines so each comment
        # occupies a single CSV row.
        content = html.unescape(raw_content).replace('\n', ' ')
        writer.writerow((comment_time, score, reply_count, vote_count, image_count, content))
if __name__ == '__main__':
    start = time.time()
    # Start from a clean output file (EAFP instead of an exists() probe).
    try:
        os.remove('DATA3.csv')
    except FileNotFoundError:
        pass
    # gb18030 keeps the Chinese header readable in Excel on Windows.
    with open('DATA3.csv', 'a+', newline='', encoding='gb18030') as f:
        csv_out = csv.writer(f)
        csv_out.writerow(('留言时间', '评分', '回复数', '点赞数', '图片数', '评论内容'))
        for page_no in range(100):
            comment_crawl(page_no, csv_out)
    elapsed = time.time() - start
    print('运行时间为%d分钟%d秒。' % (elapsed // 60, elapsed % 60))
# 多线程方法
#coding=gbk
import re
import time
import csv
import os
from concurrent.futures import ThreadPoolExecutor
import requests
import html
# HTTP request headers captured from a real browser session.
# NOTE(review): the 'cookie' value is tied to one logged-in session and will
# expire — refresh it from the browser when the server starts rejecting or
# empty-answering requests.
headers = {
'cookie': 'shshshfp=22dd633052035d21be92463ffa35684d; shshshfpa=ab283f84-c40f-9710-db89-84a8d3366a81-1586333030; __jda=122270672.1586333031101106032014.1586333031.1586333031.1586333031.1; __jdv=122270672|direct|-|none|-|1586333031103; __jdc=122270672; shshshfpb=bUe7tI9%2FOOaJKd7vP0EtSOg%3D%3D; __jdu=1586333031101106032014; areaId=22; ipLoc-djd=22-1977-1980-0; 3AB9D23F7A4B3C9B=7XEQD4BFTGEH44EK7LN7HLFCHJW6W2NS5VJOQOCHABZVI7LXJJIW3K2IX5MTPZ4TBERBLY6TRQR5CA3S3IYVLQ2JGI; jwotest_product=99; shshshsID=a7457cee6a4a9fa285fe2cff44c6bd17_4_1586333142454; __jdb=122270672.4.1586333031101106032014|1.1586333031; JSESSIONID=8C21549A613B83F0CB86EF1F38FD63D3.s1',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36'
}
def comment_crawl(page, writer):
    """Fetch one page of JD product comments and append each row to *writer*.

    Intended to run as a thread-pool task; *writer* is shared across threads.
    page: zero-based page index, interpolated into the request URL.
    writer: a csv.writer-like object with a writerow() method.
    """
    print('当前正在下载第%d页评论' % (page + 1))
    # Bug fix: the page index must be injected into the query string —
    # the original hard-coded page=0, so every task downloaded the same
    # first page. Leftover debug prints ("dsgffg"/"wefgweg") removed.
    url = ('https://club.jd.com/comment/productPageComments.action'
           '?callback=fetchJSON_comment98&productId=10026434916372'
           '&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&fold=1' % page)
    text = requests.get(url, headers=headers).text
    comment_list = re.findall(
        r'guid":".*?"content":"(.*?)".*?"replyCount":(\d+),"score":(\d+).*?usefulVoteCount":(\d+).*?imageCount":(\d+).*?images":',
        text)
    # NOTE(review): csv.writer.writerow is called concurrently from several
    # worker threads here; it is not documented thread-safe — consider
    # funneling rows through a queue or a lock if output rows interleave.
    for raw_content, comment_time, reply_count, score, vote_count, image_count in comment_list:
        # Unescape HTML entities and flatten newlines so each comment
        # occupies a single CSV row.
        content = html.unescape(raw_content).replace('\n', ' ')
        writer.writerow((comment_time, score, reply_count, vote_count, image_count, content))
if __name__ == '__main__':
    start = time.time()
    # Start from a clean output file.
    if os.path.exists('DATA3.csv'):
        os.remove('DATA3.csv')
    with open('DATA3.csv', 'a+', newline='', encoding='gb18030') as f:
        writer = csv.writer(f)
        writer.writerow(('留言时间', '评分', '回复数', '点赞数', '图片数', '评论内容'))
        # Bug fix: the original never shut the pool down, so the file's
        # `with` block exited (closing f) and the elapsed time printed while
        # worker threads were still writing. Using the executor as a context
        # manager blocks here until every submitted task has finished, while
        # the file is still open.
        with ThreadPoolExecutor(5) as pool:
            for page in range(100):
                pool.submit(comment_crawl, page, writer)
    run_time = time.time() - start
    print('运行时间为%d分钟%d秒。' % (run_time // 60, run_time % 60))