Compare commits

..

No commits in common. "ee0478010dffe000529461ee69fafc8a2a619b2b" and "2546508a01ed52496f1346e382f406727f390aa5" have entirely different histories.

2 changed files with 10 additions and 49 deletions

View File

@ -106,7 +106,6 @@ python.exe crawler.py --think --article --answer --MarkDown
### 注意
1、需要较好的网速本机网速测试结果为下载100Mbps、上传60Mbps低一点也可以不要太慢太卡即可[https://www.speedtest.cn/](https://www.speedtest.cn/)<br>
2、爬取时设置了睡眠时间, 避免给知乎服务器带来太大压力,可以日间调试好,然后深夜运行爬取人少, 给其他小伙伴更好的用户体验, 避免知乎顺着网线过来找人,默认**6**s<br>
3、若是一直停在登录页面可能是之前保存的cookie失效了需要再次登录并保存cookie
### blogs
[https://www.aliyundrive.com/s/NikyVRJq8JV 阿里云分享的](https://www.aliyundrive.com/s/NikyVRJq8JV) `提取 0h3l` <br>

View File

@ -345,16 +345,10 @@ def parser_beautiful(innerHTML, article, number, dircrea, bk=False):
if 'class' in chi.attrs.keys():
classc = chi.attrs["class"]
if datatex and classc and 'ztext-math' in classc:
content = chi.attrs["data-tex"]
while len(content) > 0 and ' '==content[0]:
content = content[1:]
while len(content) > 0 and ' '==content[-1]:
content = content[:-1]
if len(content) > 0:
if article[-3-1:]=='<br>' or article[-1:]=='\n':
article += "\n$" + content + "$"
else:
article += "$" + content + "$"
if article[-3-1:]=='<br>' or article[-1:]=='\n':
article += "\n$" + chi.attrs["data-tex"] + "$"
else:
article += "$" + chi.attrs["data-tex"] + "$"
else:
article, number = parser_beautiful(chi, article, number, dircrea, bk)
# article += nod.text
@ -410,9 +404,6 @@ def parser_beautiful(innerHTML, article, number, dircrea, bk=False):
if len(prenode) > 0:
for i in prenode:
article += "\n\n```\n" + i.text + "\n```\n\n"
else:
article, number = parser_beautiful(chi, article, number, dircrea, bk)
article += "\n\n"
if bk:
article += "**"
return article, number
@ -925,14 +916,7 @@ def login_loadsavecookie():
try:
load_cookie(driver, cookie_path)
driver.get(r"https://www.zhihu.com/")
WebDriverWait(driver, timeout=10).until(lambda d: d.find_element(By.ID, 'Popover15-toggle'))
toggle = driver.find_element(By.ID, 'Popover15-toggle')
except Exception as e:
if os.path.exists(cookie_path):
os.remove(cookie_path)
print("浏览器cookie失效了删除了之前的cookie需要再次登录并保存cookie。")
else:
print("需要登陆并保存cookie下次就不用登录了。")
except:
driver = login(driver)
save_cookie(driver, cookie_path)
driver.quit()
@ -950,24 +934,10 @@ def login_loadsavecookie():
username = url.split("/")[-1]
return driver, username
def downloaddriver():
url = "https://msedgedriver.azureedge.net/116.0.1938.62/edgedriver_win64.zip"
def zhihu():
# #crawl articles links
if not os.path.exists(driverpath):
ret = requests.get("https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/")
if ret.status_code!=200:
assert ret.status_code!=200
ret = BeautifulSoup(ret.content, 'html.parser')
# divall = ret.find_all('div', class_=r'common-card--lightblue')
ddl = ret.find_all('a')
for k in ddl:
key = k.attrs.keys()
if 'href' not in key:
continue
href = k.attrs['href']
if 'href' in key and "win64" in href and ".zip" in href:
url = href
break
response = requests.get(url)
response = requests.get("https://msedgedriver.azureedge.net/114.0.1823.67/edgedriver_win64.zip")
if response.status_code==200:
with open(os.path.join(abspath, 'msedgedriver/edgedriver.zip'), 'wb') as obj:
obj.write(response.content)
@ -988,15 +958,7 @@ def downloaddriver():
if kk < 0:
break
def zhihu():
# #crawl articles links
try:
downloaddriver()
driver, username = login_loadsavecookie()
except Exception as e:
os.remove(os.path.join(abspath, 'msedgedriver', "msedgedriver.exe"))
downloaddriver()
driver, username = login_loadsavecookie()
driver, username = login_loadsavecookie()
# #crawl think links
if crawl_think:
@ -1096,4 +1058,4 @@ if __name__ == "__main__":
# except:
# time.sleep(600)
# zhihu()
logfp.close()
logfp.close()