zhihu_selenium.py
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import sys
import re
import os  # used to create the ./answer output directory before saving answers
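
# Crawls Zhihu with Selenium: log in, search for a topic, collect the question
# links listed on the topic page, then open each question and save its answers
# to text files (question.txt and ./answer/answer<N>.txt).
# Note: the find_element_by_* calls below require Selenium 3.x (they were
# removed in Selenium 4), and the 'xxx' account/password placeholders must be
# filled in before running.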
class zhihuSpider(object):
    def __init__(self):
        self.driver = webdriver.Firefox()
        self.homePageUrl = 'https://www.zhihu.com/'
        self.topic = 'JAVA'
        self.startUrl = 'https://www.zhihu.com/search?q=' + self.topic + '&type=topic'
        self.topicUrl = ''
        self.account = 'xxx'
        self.password = 'xxx'
        self.times = 0
        self.QueTitle = []
        self.QueFirstAns = []
        self.QueUrl = []
        self.QueDic = {
            'url': self.QueUrl,
            'title': self.QueTitle,
            'author': self.QueFirstAns
        }
        self.Ans = []
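
    # Log in from the home page by typing the account and password into the
    # SignFlow form and clicking the submit button; exits if anything fails.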
    def login(self):
        try:
            self.driver.get(self.homePageUrl)
            self.driver.find_element_by_name('username').send_keys(self.account)
            time.sleep(1)
            self.driver.find_element_by_name('password').send_keys(self.password)
            time.sleep(1)
            self.driver.find_element_by_class_name('SignFlow-submitButton').click()
            time.sleep(1)
        except:
            print('login error!:(')
            sys.exit(-1)
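
    # Helpers for locating the topic: fetch the search-result page, pull the
    # first TopicLink out of it, then load that topic page (scrolling to load
    # more questions) and return its HTML.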
    def _getTopicPage(self, url=None):
        try:
            self.driver.get(url)
            return self.driver.page_source
        except:
            print('get topic failed!:(')
            sys.exit(-1)
    def _getTopicUrl(self, html=None):
        try:
            soup = BeautifulSoup(html, 'html.parser')
            tag = soup.find('a', attrs={'class': 'TopicLink', 'target': '_blank'})
            url = 'https://www.zhihu.com' + tag.get('href')
            return url
        except:
            print('No such topic! Please enter a different topic and restart the spider.')
            sys.exit(-1)
    def _getTopicHtml(self, url=None):
        try:
            self.driver.get(url)
            self._windowScroll(self.times)
            return self.driver.page_source
        except:
            print('get question Html error!:(')
            return ""
    def _getQuestionInfo(self, html=None):
        try:
            soup = BeautifulSoup(html, 'html.parser')
            queTag = soup.find_all('a', attrs={'class': 'question_link'})
            queFirstAnsTag = soup.find_all('a', attrs={'class': 'author-link'})
            for i in range(len(queTag)):
                try:
                    self.QueUrl.append(re.sub('\n', '', 'https://www.zhihu.com' + queTag[i]['href']))
                    self.QueTitle.append(re.sub('\n', '', queTag[i].string))
                    self.QueFirstAns.append(re.sub('\n', '', queFirstAnsTag[i].string))
                except:
                    continue
        except:
            print('get question url error!:(')
            sys.exit(-1)

    def _windowScroll(self, times):
        for i in range(times + 1):
            self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
            time.sleep(3)
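
    # _saveQuestion writes the collected questions to question.txt, one numbered
    # entry per question with its title (标题), first answerer (回答者) and URL;
    # getQuestion chains the search -> topic page -> parse -> save pipeline.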
    def _saveQuestion(self):
        k = 1
        with open('question.txt', 'w', encoding='utf-8') as f:
            for i in range(len(self.QueUrl)):
                try:
                    f.write(str(k) + '\t' + '标题:' + str(self.QueTitle[i]) + '\n' +
                            '\t' + '回答者:' + str(self.QueFirstAns[i]) + '\n' +
                            '\t' + 'url:' + str(self.QueUrl[i]))
                    f.write('\n')
                    k = k + 1
                except:
                    continue

    def getQuestion(self):
        topichtml = self._getTopicPage(self.startUrl)
        self.topicUrl = self._getTopicUrl(topichtml)
        quehtml = self._getTopicHtml(self.topicUrl)
        self._getQuestionInfo(quehtml)
        self._saveQuestion()
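
    # Answer phase: open each question page, expand the question description if
    # a 'QuestionRichText-more' button is present, scroll to load more answers,
    # hand the HTML to AnsPage for parsing, and save one ./answer/answer<N>.txt
    # file per question.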
    def _getAnswerHtml(self, url=None):
        try:
            self.driver.get(url)
            try:
                self.driver.find_element_by_class_name('QuestionRichText-more').click()
                time.sleep(1)
            except:
                pass
            self._windowScroll(self.times)
            return self.driver.page_source
        except:
            print('get answer html failed!:(')
            return ""

    def _getAnswerInfo(self, html=None):
        ansInfo = AnsPage(html)
        ansInfo.getAnsInfoDic()
        ansDic = ansInfo.ansInfoDic
        return ansDic
    def _saveAns(self, infoDic=None, i=0):
        # Make sure the output directory exists before writing (the original code
        # assumed ./answer had already been created by hand).
        os.makedirs('./answer', exist_ok=True)
        fpath = './answer/answer' + str(i+1) + '.txt'
        with open(fpath, 'w', encoding='utf-8') as f:
            f.write('问题 ' + str(i+1) + '\n')
            f.write('问题标题:' + '\t' + str(self.QueTitle[i]) + '\n')
            f.write('关注者:' + '\t' + str(infoDic['noticerNum']) + '\t' + '被浏览:' + '\t' + str(infoDic['lookNum']) + '\n')
            f.write('问题描述:' + '\t' + str(infoDic['queDis']) + '\n\n')
            for j in range(len(infoDic['ansAuthor'])):
                try:
                    f.write('回答者' + str(j+1) + ':' + '\t' + str(infoDic['ansAuthor'][j]) + '\n')
                    f.write('点赞数:' + '\t\t' + str(infoDic['ansPraise'][j]) + '\n')
                    f.write('回答内容:' + '\t' + str(infoDic['ansText'][j]) + '\n\n')
                except:
                    continue
    def getAnswer(self):
        index = 0
        for url in self.QueUrl:
            html = self._getAnswerHtml(url)
            ansInfoDic = self._getAnswerInfo(html)
            self.Ans.append(ansInfoDic)
            self._saveAns(ansInfoDic, index)
            index = index + 1

    def stopSpider(self):
        print('Spider end!')
        sys.exit(-1)

    def runSpider(self):
        self.login()
        self.getQuestion()
        self.getAnswer()
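
# Parses a single question page's HTML with BeautifulSoup and exposes the
# extracted fields (question description, follower/view counts, answer authors,
# upvote counts and answer bodies) through the ansInfoDic dictionary.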
class AnsPage(object):
    def __init__(self, html):
        self.html = html
        self.soup = BeautifulSoup(self.html, 'html.parser')
        self.queDis = ''
        self.noticerNum = ''
        self.lookNum = ''
        self.ansAuthor = []
        self.ansPraise = []
        self.ansText = []
        self.ansInfoDic = {
            'queDis': self.queDis,
            'noticerNum': self.noticerNum,
            'lookNum': self.lookNum,
            'ansAuthor': self.ansAuthor,
            'ansPraise': self.ansPraise,
            'ansText': self.ansText
        }
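
    # The helpers below each scrape one field from the saved soup: the question
    # description text, the follower/view counters, the answerer names (taken
    # from avatar alt text), the upvote counts and the answer bodies.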
    def _getQueDiscribe(self):
        try:
            queDis = ''
            queDisTag = self.soup.find('span', attrs={'class': 'RichText', 'itemprop': 'text'})
            if queDisTag.get('class') == ['RichText']:
                for i in queDisTag.strings:
                    queDis = queDis + i
            else:
                queDis = None
            return queDis
        except:
            print('get question description failed!')
            return None

    def _getNoticerAndLookNum(self):
        num = []
        try:
            Tag = self.soup.find_all('strong', attrs={'class': 'NumberBoard-itemValue'})
            for elem in Tag:
                num.append(elem.get('title'))
            return num
        except:
            num.append(None)
            num.append(None)
            return num
    def _getAnswerer(self):
        try:
            authorPic = self.soup.find_all('img', attrs={'class': ['Avatar', 'AuthorInfo-avatar'],
                                                         'width': '38', 'height': '38'})
            author = []
            for elem in authorPic:
                author.append(elem.get('alt'))
            return author
        except:
            print('get answerer failed!:(')
            return None

    def _getAnsPraise(self):
        try:
            praiseTag = self.soup.find_all('button', attrs={
                'class': ['Button', 'VoteButton', 'VoteButton--up'], 'aria-label': '赞同'})
            praiseNum = []
            for elem in praiseTag:
                try:
                    for string in elem.strings:
                        praiseNum.append(string)
                except:
                    continue
            return praiseNum
        except:
            print('get praise number failed!:(')
            return None
    def _getAnsText(self):
        try:
            ansTextTag = self.soup.find_all('span', attrs={'class': ['RichText', 'CopyrightRichText-richText'],
                                                           'itemprop': 'text'})
            ansTextTag = [elem for elem in ansTextTag
                          if elem.get('class') == ['RichText', 'CopyrightRichText-richText']]
            ansText = []
            tempText = ''
            for i in range(len(ansTextTag)):
                try:
                    try:
                        # Remove embedded <figure> elements (images) until none are
                        # left; the resulting exception falls through to the text
                        # extraction below.
                        while True:
                            ansTextTag[i].figure.decompose()
                    except:
                        for child_str in ansTextTag[i].strings:
                            tempText = tempText + child_str
                        ansText.append(tempText)
                        tempText = ''
                except:
                    continue
            return ansText
        except:
            print('get answer text failed!:(')
            return None
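
    # Collect every field and rebuild ansInfoDic so that callers
    # (zhihuSpider._getAnswerInfo) can read all parsed data from one dictionary.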
    def getAnsInfoDic(self):
        num = self._getNoticerAndLookNum()
        self.queDis = self._getQueDiscribe()
        self.noticerNum = num[0]
        self.lookNum = num[1]
        self.ansAuthor = self._getAnswerer()
        self.ansPraise = self._getAnsPraise()
        self.ansText = self._getAnsText()
        self.ansInfoDic = {
            'queDis': self.queDis,
            'noticerNum': self.noticerNum,
            'lookNum': self.lookNum,
            'ansAuthor': self.ansAuthor,
            'ansPraise': self.ansPraise,
            'ansText': self.ansText
        }
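
# Running the module directly logs in, scrapes the configured topic, and prints
# how many question URLs were collected.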
if __name__ == '__main__':
    zhihu = zhihuSpider()
    zhihu.runSpider()
    print(len(zhihu.QueUrl))