reversed_weibo_event / 1.2 reptile_of_comment.py
'''
author ID author_id; author URL author_url;
publish time text_time; content text_content; post URL text_url; post/comment flag text_type;
like count like_num; comment count comment_num
'''
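# A hypothetical appended row, matching the write at the bottom of this script
# (made-up values; text_url is left empty and text_type is 2, marking the row
# as a comment rather than a post):
# some_user,https://weibo.com/u/1234567890,2020-03-21 12:00,some comment,,2,5,0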
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import pandas as pd
import selenium.webdriver.support.ui as ui
import time
import re
import os
# Log in to Weibo in the opened browser window
def login():
    driver.get('https://weibo.com/login.php')
    driver.maximize_window()
    time.sleep(3)
    title = driver.title
    print(title)
    # block until the user completes the login by hand: the page title
    # switches to the home-page title once login succeeds
    while title != "微博 – 随时随地发现新鲜事":
        time.sleep(1)
        title = driver.title
        print(title)
    time.sleep(1)
# Open Chrome and log in to Weibo
driver = webdriver.Chrome()
wait = ui.WebDriverWait(driver, 10)
kk = re.compile(r'\d+')  # matches runs of digits
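# kk pulls the digit run out of UI labels, e.g. (assuming a reply label
# shaped like "3条回复"):
# re.findall(kk, "3条回复") -> ['3']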
login()
for i in range(28):
    # load the Weibo posts collected earlier for case i
    csv_name = "weibo_case" + str(i) + ".csv"
    print(csv_name)
    if not os.path.exists(csv_name):
        continue
    pd_data = pd.read_csv(csv_name, encoding="utf-8")
    urls = pd_data["text_url"].tolist()
    comments = pd_data["comment_num"].tolist()
    Num = len(urls) + 1  # running count of scraped rows
    fp = open(csv_name, 'a', encoding='utf-8')
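    # A sturdier way to append rows than the hand-rolled string written at the
    # bottom of the inner loop (a sketch, not what this script does):
    # csv.writer quotes fields that contain commas, so the full-width-comma
    # workaround below would become unnecessary.
    # import csv
    # writer = csv.writer(fp)
    # ...then at the write site:
    # writer.writerow([name, aurl, _time, comment, "", 2, like, reply])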
    for k in range(len(urls)):
        print(k, "_of_", len(urls))
        time.sleep(1)
        # not enough comments to crawl: the count cell holds the literal
        # label "评论" when a post shows no comment count
        if comments[k] == "评论":
            continue
        # optionally skip posts with too few comments:
        #if int(comments[k]) < 100:
        #    continue
        url_name = urls[k]
        driver.get(url_name)
        time.sleep(2)
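        # The WebDriverWait created above is never used; a sketch of how it
        # could replace the fixed sleep, assuming the comment container keeps
        # the "scroller" id taken from the XPaths below:
        # from selenium.webdriver.support import expected_conditions as EC
        # wait.until(EC.presence_of_element_located((By.ID, "scroller")))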
        # start scraping the comments under this post
        author_id = []
        text_comment = []
        no_fresh = 0  # scroll passes since the last new comment
        for j in range(2000):
            # all comment divs currently rendered in the window
            div_list = driver.find_elements(By.XPATH, '//*[@id="scroller"]/div[1]/div')
            no_fresh += 1
            for div in div_list:
                try:
                    _time = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[2]/div[1]').text  # publish time
                    name = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/a[1]').text  # commenter's ID
                    aurl = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/a[1]').get_attribute('href')  # commenter's URL
                    comment = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/span').text  # comment text
                except NoSuchElementException:
                    continue  # not a comment card (e.g. a loading placeholder)
                # swap ASCII commas for full-width ones so the hand-rolled CSV line stays intact
                comment = comment.replace(',', ',')
                # dedup: skip already-scraped comments and blank names
                if ((name in author_id) and (comment in text_comment)) or (len(name) <= 1):
                    #print("Already crawled!")
                    continue
                # like count (the span is absent when there are no likes)
                ele = div.find_elements(By.XPATH, './div/div/div/div[1]/div[2]/div[2]/div[2]/div[4]/button/span[2]')
                if len(ele) == 1:
                    like = ele[0].text
                else:
                    like = 0
                # reply count
                ele = div.find_elements(By.XPATH, './div/div/div/div[2]/div/div/div/span')
                reply = 0
                if len(ele) == 1:
                    x = re.findall(kk, ele[0].text)  # pull the number out of the label
                    if len(x) == 1:
                        reply = int(x[0])
print("No. ", Num, "(", k, "_of_", len(urls), ")")
print("Time:", _time)
print("Name:", name)
print("Comment:", comment)
print("Like:", like)
print("Reply:", reply)
# 爬取完毕,添加到数据列表当中
text_comment.append(comment)
author_id.append(name)
Num += 1
no_fresh = 0
fp.writelines(name + "," + aurl + "," + _time + "," + comment + ",,2," + str(like) + "," + str(reply) + "\n")
            if no_fresh >= 5:
                # five scroll passes without a new comment: assume the end was reached
                break
            else:
                driver.execute_script("window.scrollBy(0,500)")  # scroll down to load more comments
                time.sleep(2)
    fp.close()
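# Note on the dedup above: membership tests on the author_id / text_comment
# lists cost O(n) per comment and grow with every comment scraped. A set of
# (name, comment) pairs would do a similar job in O(1) (a sketch, with
# slightly stricter semantics than the original two-list check):
# seen = set()
# if (name, comment) in seen or len(name) <= 1:
#     continue
# seen.add((name, comment))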