python 爬取百度文库并下载(免费文章限定)
(编辑:jimmy 日期: 2024/11/19 浏览:3 次 )
# -*- coding: utf-8 -*-
"""Download free documents from Baidu Wenku (wenku.baidu.com).

Dispatches on the page's docType: 'doc' pages are reassembled from JSON
text chunks, 'txt' pages via the getdocinfo API, and anything else
(ppt/pdf) is saved page-by-page as JPEG images.

NOTE(review): the original source was garbled by web extraction — the whole
script was collapsed onto one line and several regex/string literals were
cut off mid-pattern.  This is a reconstruction from the recoverable
fragments; the restored regex patterns follow the form this widely
circulated script normally uses, but should be verified against a live
page before being relied on.
"""
import json
import os
import re

import requests

# One shared session so cookies persist between the page fetch and the
# follow-up content/API requests.
session = requests.session()


def fetch_url(url):
    """Fetch *url* with the shared session and decode the body as GBK."""
    return session.get(url).content.decode('gbk')


def get_doc_id(url):
    """Extract the document id from a wenku URL like .../view/<id>.html."""
    return re.findall('view/(.*).html', url)[0]


def parse_type(content):
    """Pull the docType value ('doc', 'txt', 'ppt', ...) out of the page HTML.

    NOTE(review): pattern restored from a truncated original — confirm it
    matches the current page markup.
    """
    return re.findall(r"docType.*?:.*?'(.*?)',", content)[0]


def parse_title(content):
    """Pull the document title out of the page HTML.

    NOTE(review): pattern restored from a truncated original — confirm it
    matches the current page markup.
    """
    return re.findall(r"title.*?:.*?'(.*?)',", content)[0]


def parse_doc(content):
    """Assemble the text of a 'doc'-type document.

    The page HTML embeds a list of escaped JSON chunk URLs; each chunk
    carries text runs ("c") with a vertical position ("y").  A change in
    "y" starts a new output line.  The trailing URLs in the list are
    metadata rather than text, hence the [:-5] slice.
    """
    result = ''
    url_list = re.findall('(https.*?0.json.*?)\\\\x22}', content)
    # The URLs arrive with escaped slashes ("\\/") — unescape them.
    url_list = [addr.replace("\\\\\\/", "/") for addr in url_list]
    for url in url_list[:-5]:
        content = fetch_url(url)
        y = 0
        txtlists = re.findall('"c":"(.*?)".*?"y":(.*?),', content)
        for item in txtlists:
            # A new vertical position means a new line of text.
            if not int(item[1]) == y:
                y = int(item[1])
                n = '\n'
            else:
                n = ''
            result += n
            # Chunk text is unicode-escaped inside the JSON payload.
            result += item[0].encode('utf-8').decode('unicode_escape', 'ignore')
    return result


def parse_txt(doc_id):
    """Assemble the text of a 'txt'-type document via the getdocinfo API."""
    content_url = ('https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id='
                   + doc_id)
    content = fetch_url(content_url)
    # md5sum already carries its leading '&'-separated query fragment.
    md5 = re.findall('"md5sum":"(.*?)"', content)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', content)[0]
    rsign = re.findall('"rsign":"(.*?)"', content)[0]
    content_url = ('https://wkretype.bdimg.com/retype/text/' + doc_id
                   + '?rn=' + pn + '&type=txt' + md5 + '&rsign=' + rsign)
    content = json.loads(fetch_url(content_url))
    result = ''
    for item in content:
        for i in item['parags']:
            # Paragraph text embeds literal "\r"/"\n" escape sequences.
            result += i['c'].replace('\\r', '\r').replace('\\n', '\n')
    return result


def parse_other(doc_id):
    """Download every page of a ppt/pdf-type document as JPEG images.

    Images are written to a directory named after the doc id, one file
    per page: 0.jpg, 1.jpg, ...
    """
    content_url = ("https://wenku.baidu.com/browse/getbcsurl?doc_id="
                   + doc_id + "&pn=1&rn=99999&type=ppt")
    content = fetch_url(content_url)
    url_list = re.findall('{"zoom":"(.*?)","page"', content)
    url_list = [item.replace("\\", '') for item in url_list]
    if not os.path.exists(doc_id):
        os.mkdir(doc_id)
    for index, url in enumerate(url_list):
        content = session.get(url).content
        path = os.path.join(doc_id, str(index) + '.jpg')
        with open(path, 'wb') as f:
            f.write(content)
    print("图片保存在" + doc_id + "文件夹")


def save_file(filename, content):
    """Write *content* to *filename* as UTF-8 text."""
    with open(filename, 'w', encoding='utf8') as f:
        f.write(content)
    print('已保存为:' + filename)


def main():
    """Ask for a wenku URL and dispatch on the document's type.

    NOTE(review): the body of main() was lost in the garbled original
    (only the __main__ guard survived); this dispatch is reconstructed
    from the functions the script defines.
    """
    url = input('请输入要下载的文库url地址_')
    content = fetch_url(url)
    doc_id = get_doc_id(url)
    doc_type = parse_type(content)
    title = parse_title(content)
    if doc_type == 'doc':
        result = parse_doc(content)
        save_file(title + '.txt', result)
    elif doc_type == 'txt':
        result = parse_txt(doc_id)
        save_file(title + '.txt', result)
    else:
        parse_other(doc_id)


# test_txt_url = 'https://wenku.baidu.com/view/cbb4af8b783e0912a3162a89.html'
if __name__ == "__main__":
    main()
爬取结果
以上就是python 爬取百度文库并下载的详细内容,更多关于python 爬取百度文库的资料请关注其它相关文章!
下一篇:Python 实现PS滤镜的旋涡特效