
Danmu Data Scraping and Visualization (a Python Implementation)


Data Scraping

from urllib import request
from bs4 import BeautifulSoup

def get_html(url):
    # Fetch a page and decode it as UTF-8 text
    req = request.urlopen(url)
    html = req.read().decode('utf-8')
    return html

# Scrape the URL of each episode
start_url = 'https://v.qq.com/detail/x/xbd1y6fvwl3maoz.html'
# Fetch the page content
html = get_html(start_url)
# Parse it
soup = BeautifulSoup(html, 'html.parser')
# CSS selector for the episode links
episodes = soup.select('div.mod_episode span.item a')
for episode in episodes:
    # Episode number
    eno = int(episode.get_text().strip())
    # URL of the episode page
    eurl = episode['href']
    print('{:02} {}'.format(eno, eurl))
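To feed these pages into the danmu scraping below, the same loop can also collect the links into a dictionary keyed by episode number. This is just a convenience sketch reusing the variables above:

# Map episode number -> episode page URL for later batch scraping
episode_urls = {}
for episode in episodes:
    eno = int(episode.get_text().strip())
    episode_urls[eno] = episode['href']
print(episode_urls)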


# Get the danmu (bullet comments)
import re
# Link to episode 1 of 亲爱的热爱的
episode_url = 'https://v.qq.com/x/cover/xbd1y6fvwl3maoz/t00313mumzy.html'
# The vid is the second-to-last path segment of the episode URL
vid = re.split(r'[/.]', episode_url)[-2]
print('vid={}'.format(vid))
# Look up the danmu target_id for this vid
target_url = 'http://bullet.video.qq.com/fcgi-bin/target/regist?otype=json&vid=' + vid
html = get_html(target_url)
target_id = re.search(r'(?<=targetid=)\d+', html).group()
print(target_id)
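If several episodes are to be scraped, the vid and target_id lookups can be wrapped into a small helper that reuses the two endpoints above; a minimal sketch:

def get_target_id(ep_url):
    # The vid is the second-to-last path segment of the episode URL
    v = re.split(r'[/.]', ep_url)[-2]
    # Ask the regist endpoint for the danmu target_id of this vid
    reg = get_html('http://bullet.video.qq.com/fcgi-bin/target/regist?otype=json&vid=' + v)
    return re.search(r'(?<=targetid=)\d+', reg).group()

For example, get_target_id(episode_url) returns the same id as the step-by-step code above.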
# Fetch the first window of danmu (timestamp=0)
bullet_url = 'http://mfm.video.qq.com/danmu?timestamp={}&target_id={}'.format(0, target_id)
html = get_html(bullet_url)
import json
data = json.loads(html, strict=False)
# Save the comments into a DataFrame
import pandas as pd
df = pd.DataFrame(columns=['commentid', 'content', 'name', 'upcount', 'degree', 'timepoint'])

for item in data['comments']:
    # Strip all whitespace and double quotes from the comment text
    content = re.sub(r'"', '', ''.join(item['content'].split()))
    # Same cleanup for the commenter's name ('opername' is the assumed field name in the danmu JSON)
    name = re.sub(r'"', '', ''.join(item['opername'].split()))
    # Append one row per comment (the remaining field names are assumed from the danmu JSON)
    df.loc[len(df)] = [item['commentid'], content, name,
                       item['upcount'], item['uservip_degree'], item['timepoint']]
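The request above with timestamp=0 only returns the first window of comments. Later comments can be pulled by advancing the timestamp parameter; the sketch below assumes the endpoint pages in roughly 30-second windows and caps the scan at one hour, so adjust the range to the actual episode length.

# Page through the danmu endpoint by advancing the timestamp
# (assumption: each request covers roughly a 30-second window)
all_comments = []
for ts in range(0, 3600, 30):
    page_url = 'http://mfm.video.qq.com/danmu?timestamp={}&target_id={}'.format(ts, target_id)
    page = json.loads(get_html(page_url), strict=False)
    all_comments.extend(page.get('comments') or [])
print('fetched {} comments in total'.format(len(all_comments)))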