600字范文,内容丰富有趣,生活中的好帮手!
600字范文 > Python实现爬取豆瓣电影|python豆瓣全栈爬虫:电影系列全爬虫系统1.0:(信息 短评

Python实现爬取豆瓣电影|python豆瓣全栈爬虫:电影系列全爬虫系统1.0:(信息 短评

时间:2022-06-24 23:58:36

相关推荐

Python实现爬取豆瓣电影|python豆瓣全栈爬虫:电影系列全爬虫系统1.0:(信息 短评

写在前面:

此博客仅用于记录个人学习进度,学识浅薄,若有错误观点欢迎评论区指出。欢迎各位前来交流。(部分材料来源网络,若有侵权,立即删除)

豆瓣电影全系列爬虫系统

目录:免责声明 / 情况说明 / 效果展示(主菜单、单部电影检索、获取电影详细信息、电影影评查看、电影短评查看、电影海报查看、多部电影检索)/ 代码展示 / 后续说明

免责声明

代码仅用于学习,如被转载并用于其他非法行为,责任自负。代码全部为原创,未经允许不得转载,转载即属侵权。

情况说明

python爬虫实现了对电影信息,电影短评,电影影评,电影海报的详细内容爬取

效果展示

主菜单

分为单电影查看和多电影信息查看

单部电影检索

获取电影详细信息

可以看到电影主页有的信息都可以显示出来

电影影评查看

这里爬取的影评是完整的影评内容,而不是只显示在外部的摘要;同时还爬取了评论的相关信息,总之很详细。

电影短评查看

很详细就完了

电影海报查看

爬的是超链接

多部电影检索

首先需要在目录下创建一个名为“电影id.txt”的文件,其中存储大量豆瓣电影的唯一标识 sid。关于电影 id 的获取,这部分可以看我其他的博客,在此不赘述。获取到的内容同单部电影检索无任何区别;在此之上,还可以存储到本地 csv 文件中,就不放图了。

代码展示

"""Douban movie crawler: fetches movie info, long reviews, short comments and posters.

Interactive console tool: single-movie lookup by title, or batch crawling of
many movies from a local id file, with results appended to CSV files.
"""
from bs4 import BeautifulSoup
import requests
import re
import csv
import time
import random

# NOTE(review): the original post stripped the scheme/host from every URL.
# These douban hosts are reconstructed from the URL paths — confirm before use.
SEARCH_HOST = "https://www.douban.com"
MOVIE_HOST = "https://movie.douban.com"

# Fill in your own browser User-Agent and a logged-in Cookie here.
# (The original source had a broken dict literal: the inline comment
# swallowed the comma between the two entries.)
headers = {
    'User-Agent': '',  # add your own
    'Cookie': '',      # add your own
}


def get_single_id(name):
    """Resolve a movie title to its douban subject id (sid).

    Searches douban for *name* and extracts the first run of 4+ digits
    from the first search result's ``onclick`` attribute.

    :param name: movie title to search for
    :return: the sid as a string
    """
    params = {"q": name}
    r = requests.get(SEARCH_HOST + "/search", params=params, headers=headers)
    soup = BeautifulSoup(r.content, 'lxml')
    first_movie_info = soup.find('a', {'class': 'nbg'})['onclick']
    # BUG FIX: original had the garbled call ``pile('\d{4,}')``; it must be
    # ``re.compile`` with a raw string.
    pattern = re.compile(r'\d{4,}')
    return str(pattern.search(first_movie_info).group())


def _info_field(info_text, label):
    """Return the value after ``<label>: `` in the movie's #info text, or ''."""
    m = re.search(r'{}: (.*)'.format(label), info_text, re.M | re.I)
    return m.group(1) if m else ''


def get_films_info(sid):
    """Scrape a movie's subject page and return its details.

    :param sid: douban subject id
    :return: a list containing one row:
        [sid, title, summary, director, writer, actors, genre, country,
         language, release date, runtime, aka, rating, rating count,
         short-comment count text, review count text]
    """
    results = []
    url = '{}/subject/{}/'.format(MOVIE_HOST, sid)
    session = requests.session()
    r = session.get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'lxml')

    title = soup.find('span', {'property': 'v:itemreviewed'}).text
    info_text = soup.find('div', {'id': 'info'}).text

    # Strip spaces, ideographic spaces and newlines from the summary.
    # NOTE(review): the original class also contained a literal '1', which
    # deletes the digit 1 from summaries — kept for behavioral parity, but
    # this looks like a typo; verify intent.
    summary = re.sub('[ 1\u3000\n]', '', soup.find('span', {'property': 'v:summary'}).text)

    rating = soup.find('strong', {'property': 'v:average'}).text
    rating_count = soup.find('span', {'property': 'v:votes'}).text
    short_comment_people = soup.find(
        'a', {'href': '/subject/{}/comments?status=P'.format(sid)}).text
    review_people = soup.find('a', {'href': 'reviews'}).text

    row = [
        sid,
        title,
        summary,
        _info_field(info_text, '导演'),
        _info_field(info_text, '编剧'),
        _info_field(info_text, '主演'),
        _info_field(info_text, '类型'),
        _info_field(info_text, '制片国家/地区'),
        _info_field(info_text, '语言'),
        _info_field(info_text, '上映日期'),
        _info_field(info_text, '片长'),
        _info_field(info_text, '又名'),
        rating,
        rating_count,
        short_comment_people,
        review_people,
    ]
    results.append(row)
    return results


def get_the_file_comments(sid):
    """Scrape up to 10 full-text reviews of a movie.

    :param sid: douban subject id
    :return: list of rows
        [sid, author, score, time, useful, useless, reply count, title, body]
    """

    def getthecomment(review_id):
        """Fetch one review's full HTML fragment via the JSON endpoint."""
        url = '{}/j/review/{}/full'.format(MOVIE_HOST, review_id)
        r = requests.session().get(url, headers=headers)
        r.encoding = 'utf-8'
        # The JSON body is split on the marker preceding the html payload.
        body = r.text.split('},"html":"', 1)[1]
        # NOTE(review): this is a character class, not literal tag removal —
        # it also strips the ASCII letters b/r/n/s/p from the text. Kept
        # byte-identical for behavioral parity; mostly harmless on Chinese text.
        return re.sub('[\n <br> &nbsp; / \\\\]', '', body)

    results = []
    url = '{}/subject/{}/reviews'.format(MOVIE_HOST, sid)
    r = requests.session().get(url, headers=headers)
    random_sleep(1.5, 0.4)
    soup = BeautifulSoup(r.content, 'lxml')

    for count, item in enumerate(soup.find_all('div', {'class': 'main review-item'}), 1):
        # The second inner div's id embeds the numeric review id at [7:15].
        review_id = item.find_all('div')[1]['id'][7:15]
        comment = getthecomment(review_id)

        header = item.find('header', {'class': 'main-hd'})
        author = header.find('a', {'class': 'name'}).text
        # The star rating is encoded in the span's class name, e.g. allstar40.
        score = header.find_all('span')[0]['class'][0][-2]
        when = header.find('span', {'class': 'main-meta'}).text

        body = item.find('div', {'class': 'main-bd'})
        title = body.find('h2').text
        useful = re.sub('[\n ]', '', body.find('a', {'class': 'action-btn up'}).find('span').text)
        useless = re.sub('[\n ]', '', body.find('a', {'class': 'action-btn down'}).find('span').text)
        reply = body.find('a', {'class': 'reply'}).text

        results.append([sid, author, score, when, useful, useless, reply, title, comment])
        if count == 10:  # cap at 10 reviews per movie
            break
    return results


def get_comments(sid):
    """Scrape one page of short comments for a movie.

    :param sid: douban subject id
    :return: list of rows [sid, author, time, comment, votes, score]
    """
    results = []
    # NOTE(review): start=40 is hard-coded (page 3) even though the original
    # had a loop — kept for behavioral parity; verify which page is wanted.
    url = '{}/subject/{}/comments?start={}&limit=20&sort=new_score&status=P'.format(
        MOVIE_HOST, sid, 40)
    r = requests.session().get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'lxml')

    for item in soup.find_all('div', {'class': 'comment'}):
        comment = item.find('span', {'class': 'short'}).text
        vote = item.find('span', {'class': 'votes vote-count'}).text
        info = item.find('span', {'class': 'comment-info'})
        author = info.find('a').text
        score = info.find_all('span')[1]['class'][0][-2]
        when = info.find('span', {'class': 'comment-time'})['title']
        data = [sid, author, when, comment, vote, score]
        results.append(data)
        print(data)
    return results


def get_picture(sid):
    """Scrape the poster image URL of a movie.

    :param sid: douban subject id
    :return: a list containing one row [sid, poster url]
    """
    url = '{}/subject/{}/'.format(MOVIE_HOST, sid)
    r = requests.session().get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'lxml')
    img = str(soup.find('div', {'id': 'mainpic'}).find('img', {'rel': 'v:image'}))
    # Extract the picture id between '/public/p' and '.jpg"'.
    pid = img.split('/public/p', 1)[1].split('.jpg"', 1)[0]
    poster_url = '{}/view/photo/l/public/p{}.webp'.format(MOVIE_HOST, pid)
    return [[sid, poster_url]]


def get_id_list():
    """Read movie ids from the local file ``id.txt``.

    Each row is appended as its ``str()`` form (e.g. "['123456']"); callers
    strip the non-digits with a regex before use.
    """
    with open("id.txt", "r", encoding="utf-8", newline="") as csvfile:
        return [str(row) for row in csv.reader(csvfile)]


def random_sleep(mu, sigma):
    """Sleep a normally-distributed random time to avoid an IP ban.

    :param mu: mean seconds
    :param sigma: standard deviation (controls the spread)
    """
    secs = random.normalvariate(mu, sigma)
    if secs <= 0:
        secs = mu  # too small: fall back to the mean
    time.sleep(secs)


def main():
    """Top-level menu loop."""
    print_menu1()
    while True:
        try:
            num = int(input("请输入需要的操作:"))
        except ValueError:
            print("输入错误,请重新输入(1.2.3)")
            continue
        if num == 1:
            single()  # single-movie operations
            print_menu1()
        elif num == 2:
            more()  # batch operations
            print_menu1()
        elif num == 3:
            break
        else:
            print("输入错误")


def _run_single(fetch, printer):
    """Ask for a title, resolve its sid, fetch with *fetch*, display with *printer*."""
    try:
        sid = get_single_id(get_filmname())
        printer(fetch(sid))
    except Exception:
        print('error')
    input()  # pause so the user can read the output
    print_menu21()


def single():
    """Menu loop for single-movie operations."""
    print_menu21()
    while True:
        try:
            num = int(input("请输入需要的操作:"))
        except ValueError:
            print("输入错误,请重新输入(1.2.3.4.5.6)")
            continue
        if num == 1:
            # Print every field of the movie info on its own line.
            _run_single(get_films_info,
                        lambda rows: [print(f) for row in rows for f in row])
        elif num == 2:
            _run_single(get_the_file_comments,
                        lambda rows: [print(row) for row in rows])
        elif num == 3:
            _run_single(get_comments,
                        lambda rows: [print(row) for row in rows])
        elif num == 4:
            _run_single(get_picture, print)
        elif num == 5:
            print_menu21()
        elif num == 6:
            break
        else:
            print("输入错误")


def _crawl_many(ids, filename, fetch):
    """Crawl every id with *fetch* and append the rows to *filename* (CSV).

    Aborts (after logging) on the first error, like the original code.
    """
    try:
        with open(filename, "a", encoding="gb18030", newline="") as csvfile:
            writer = csv.writer(csvfile)
            for j, raw in enumerate(ids, 1):
                print("[INFO]爬取电影ID为{}".format(raw))
                sid = re.sub(r"\D", "", str(raw))  # strip list punctuation from id.txt rows
                print("[INFO]正在爬取第{}部电影".format(j))
                result = fetch(sid)
                print(result)
                random_sleep(0.8, 0.4)
                print("正在写入csv文件中")
                writer.writerows(result)
    except Exception:
        # BUG FIX: original message had no {} placeholder for .format(I).
        print("[ERROR]:出错请检查 {}".format(raw))
        random_sleep(1.8, 0.4)


def more():
    """Menu loop for batch operations over all ids in id.txt."""
    print_menu22()
    ids = get_id_list()
    # Menu choice -> (output csv, fetch function)
    actions = {
        1: ("电影信息.csv", get_films_info),
        2: ("电影影评.csv", get_the_file_comments),
        3: ("电影短评.csv", get_comments),
        4: ("电影海报.csv", get_picture),
    }
    while True:
        try:
            num = int(input("请输入需要的操作:"))
        except ValueError:
            print("输入错误,请重新输入(1.2.3.4.5.6)")
            continue
        if num in actions:
            filename, fetch = actions[num]
            _crawl_many(ids, filename, fetch)
        elif num == 5:
            pass
        elif num == 6:
            break
        else:
            print("输入错误")


def print_menu1():
    """Print the main menu."""
    print("=" * 100)
    print("1. 单部电影检索查看")
    print("2. 多部电影检索查看并保存结果")
    print("3. 退出系统")
    print("=" * 100)
    print("****注意:操作2为将对应的txt文本中的所有id对应的电影相关进行获取,主要用于爬取大量数据****")
    print("****注意:操作3的工作量较为繁杂,不建议大量使用                                    ****")
    print("=" * 100)


def print_menu21():
    """Print the single-movie sub-menu."""
    print("=" * 50)
    print("1. 获取电影详细信息")
    print("2. 获取电影影评及评论相关信息")
    # BUG FIX: original printed "2." twice; this entry is option 3.
    print("3. 获取电影短评及评论相关信息")
    print("4. 获取电影海报")
    print("5. 显示所有选项")
    print("6. 退出系统")
    print("=" * 50)


def get_filmname():
    """Prompt the user for a movie title and return it."""
    print("=" * 50)
    name = input('请输入你想要查询的电影名称:')
    print("=" * 50)
    return name


def print_menu22():
    """Print the batch sub-menu."""
    print("=" * 50)
    print("1. 获取电影详细信息")
    print("2. 获取电影影评及评论相关信息")
    print("3. 获取电影短评及评论相关信息")
    print("4. 获取电影海报")
    print("5. 显示所有选项")
    print("6. 退出系统")
    print("=" * 50)


if __name__ == "__main__":
    print("欢迎使用zack的豆瓣电影查询系统")
    main()

后续说明

具体分部讲解会在其他博客进行讲解

end

光明正大的开始摸鱼

Python实现爬取豆瓣电影|python豆瓣全栈爬虫:电影系列全爬虫系统1.0:(信息 短评 影评 海报)|你想爬的都有

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。