[Python Tutorial] Scraping the Video Data of Bilibili's Top 100 Ranking

What you need:

Python

A sharp mind

A pair of diligent hands

 

Note: this site provides tutorials only, not finished programs, tools, or software download links. Everything here is for study and research purposes only.

 

1. Importing the third-party libraries

from bs4 import BeautifulSoup # parse the page
import re # regular expressions for text matching
import urllib.request,urllib.error # request the page data like a browser
import sqlite3 # lightweight database
import time # get the current date
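Of these, only BeautifulSoup is a third-party package; it is published on PyPI as beautifulsoup4 (pip install beautifulsoup4). re, urllib, sqlite3, and time all ship with the Python standard library.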

2. The program's main function

The scraping process has three steps: declare the page to scrape -> fetch and parse the page data -> save the data.

def main():
    # Declare the page to scrape
    baseurl = "https://www.bilibili.com/v/popular/rank/all"
    # Fetch and parse the page
    datalist = getData(baseurl)
    # print(datalist)
    # Save the data
    dbname = time.strftime("%Y-%m-%d", time.localtime())
    dbpath = "BiliBiliTop100 " + dbname
    saveData(datalist,dbpath)

(1) Fetching: the request is disguised as coming from a browser by sending a browser User-Agent header;
(2) Parsing: BeautifulSoup picks the ranking entries out of the page source, and re regular expressions match the individual fields (the count normalization this requires is sketched below);
(3) Saving: since the Bilibili ranking refreshes daily, the current date is used to name the database file.
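Point (2) has one detail worth previewing: play and comment counts appear on the page as strings like "123.4万" (万 = 10,000) and have to be normalized to plain integers. A minimal sketch of that conversion, written as the countToInt helper used in the full source below:

def countToInt(text):
    # "123.4万" -> "1234000"; plain digit strings pass through unchanged
    text = text.replace(" ", "").replace("\n", "")
    if text.endswith("万"):
        return str(int(float(text[:-1]) * 10000))
    return text

print(countToInt("123.4万")) # 1234000
print(countToInt("4567")) # 4567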

3. Program output


Each database row holds 7 fields: rank, video link, title, play count, comment count, author, and composite score.
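To spot-check what was saved, the database can be read back with sqlite3. A minimal sketch, assuming the script has already run today so the file exists:

import sqlite3
import time

# Open today's database file, matching the naming scheme used in main()
dbpath = "BiliBiliTop100 " + time.strftime("%Y-%m-%d", time.localtime())
conn = sqlite3.connect(dbpath)
for row in conn.execute("select id, title, play, score from Top100 order by id limit 10"):
    print(row)
conn.close()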


4. Full source code

from bs4 import BeautifulSoup # parse the page
import re # regular expressions for text matching
import urllib.request,urllib.error # request the page data like a browser
import sqlite3 # lightweight database
import time # get the current date
def main():
    # Declare the page to scrape
    baseurl = "https://www.bilibili.com/v/popular/rank/all"
    # Fetch and parse the page
    datalist = getData(baseurl)
    # print(datalist)
    # Save the data
    dbname = time.strftime("%Y-%m-%d", time.localtime())
    dbpath = "BiliBiliTop100 " + dbname
    saveData(datalist,dbpath)
# re regular expressions for the individual fields
findLink = re.compile(r'<a class="title" href="(.*?)" rel="external nofollow" ') # video link
findOrder = re.compile(r'<div class="num">(.*?)</div>') # rank on the list
findTitle = re.compile(r'<a class="title" href=".*?" rel="external nofollow" rel="external nofollow" target="_blank">(.*?)</a>') # video title
findPlay = re.compile(r'<span class="data-box"><i class="b-icon play"></i>([\s\S]*)(.*?)</span> <span class="data-box">') # play count
findView = re.compile(r'<span class="data-box"><i class="b-icon view"></i>([\s\S]*)(.*?)</span> <a href=".*?" rel="external nofollow" rel="external nofollow" target="_blank"><span class="data-box up-name">') # comment count
findName = re.compile(r'<i class="b-icon author"></i>(.*?)</span></a>',re.S) # video author
findScore = re.compile(r'<div class="pts"><div>(.*?)</div>综合得分',re.S) # composite score
def countToInt(text):
    # Normalize a count such as "123.4万" (万 = 10,000) to a plain integer string
    text = text.replace(" ", "").replace("\n", "")
    if text.endswith("万"):
        return str(int(float(text[:-1]) * 10000))
    return text

def getData(baseurl):
    datalist = []
    html = askURL(baseurl)
    # print(html)
    soup = BeautifulSoup(html,'html.parser') # parser
    for item in soup.find_all('li',class_="rank-item"):
        # print(item)
        data = []
        item = str(item)
        Order = re.findall(findOrder,item)[0]
        data.append(Order)
        # print(Order)
        Link = re.findall(findLink,item)[0]
        Link = 'https:' + Link # the page uses protocol-relative links
        data.append(Link)
        # print(Link)
        Title = re.findall(findTitle,item)[0]
        data.append(Title)
        # print(Title)
        Play = countToInt(re.findall(findPlay,item)[0][0])
        data.append(Play)
        # print(Play)
        View = countToInt(re.findall(findView,item)[0][0])
        data.append(View)
        # print(View)
        Name = re.findall(findName,item)[0]
        Name = Name.replace(" ","").replace("\n","")
        data.append(Name)
        # print(Name)
        Score = re.findall(findScore,item)[0]
        data.append(Score)
        # print(Score)
        datalist.append(data)
    return datalist
def askURL(url):
    # Request header that makes the request look like it comes from a browser
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
    }
    request = urllib.request.Request(url, headers = head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
        # print(html)
    except urllib.error.URLError as e:
        if hasattr(e,"code"):
            print(e.code)
        if hasattr(e,"reason"):
            print(e.reason)
    return html
def saveData(datalist,dbpath):
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    for data in datalist:
        sql = '''
        insert into Top100(
        id,info_link,title,play,view,name,score)
        values("%s","%s","%s","%s","%s","%s","%s")'''%(data[0],data[1],data[2],data[3],data[4],data[5],data[6])
        print(sql)
        cur.execute(sql)
    conn.commit()
    cur.close()
    conn.close()
def init_db(dbpath):
    sql = '''
    create table Top100
    (
        id integer primary key autoincrement,
        info_link text,
        title text,
        play numeric,
        view numeric,
        name text,
        score numeric
    )
    '''
    conn = sqlite3.connect(dbpath)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    conn.close()

if __name__ == "__main__":
    main()
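One caveat on saveData: it builds the INSERT statement with string formatting, so a title that contains a double quote would break the query. A safer variant, shown here as a sketch that keeps the same table layout, passes the values as query parameters instead:

cur.execute(
    "insert into Top100(id,info_link,title,play,view,name,score)"
    " values (?,?,?,?,?,?,?)",
    data,
)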
