用 Python 标准库 urllib.request 下载金庸小说 (Download Jin Yong's novels with Python's urllib.request)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-03-18 13:02:32
# @Author : KK (xxxxx@xxxxx.com)
# @Link : http://default.com
# @Version : $Id$

# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup

def get_chapter(url):
    """Fetch one chapter page and return ``(title, body)``.

    Parameters:
        url: Chapter page URL on jinyong.zuopinj.com.

    Returns:
        Tuple of the chapter title (text of the page's first ``<h1>``)
        and the formatted chapter body (text of ``div#htmlContent``).

    Raises:
        urllib.error.URLError: on network failure.
        AttributeError: if the expected ``<h1>`` / ``div#htmlContent``
        elements are missing from the page.
    """
    # Context manager guarantees the connection is closed even if
    # read()/decode() raises (the original leaked the handle on error).
    with urllib.request.urlopen(url) as resp:
        content = resp.read().decode('utf8')
    soup = BeautifulSoup(content, "lxml")
    title = soup.find('h1').text  # chapter title
    text = soup.find('div', id='htmlContent')  # chapter body container
    # NOTE(review): 'br/' is get_text()'s SECOND positional argument,
    # i.e. `strip` — any truthy value enables stripping. It looks like a
    # separator/types mix-up, but it is kept as-is to preserve the exact
    # output formatting. TODO confirm intent.
    content = text.get_text('\n', 'br/').replace('\n', '\n ')
    # Turn double spaces into paragraph-style line breaks.
    content = content.replace('  ', '\n  ')
    return title, ' ' + content

def main(out_dir='F://'):
    """Download all 15 Jin Yong novels chapter by chapter into text files.

    Each book's chapters are appended to ``<out_dir><book>.txt`` encoded
    as gb18030. Failures on individual chapters are printed and skipped
    (best-effort scrape).

    Parameters:
        out_dir: Prefix prepended to each output file name. Defaults to
            the original hard-coded ``'F://'`` for backward compatibility.
    """
    books = ['射雕英雄传','天龙八部','鹿鼎记','神雕侠侣','笑傲江湖','碧血剑','倚天屠龙记',
             '飞狐外传','书剑恩仇录','连城诀','侠客行','越女剑','鸳鸯刀','白马啸西风',
             '雪山飞狐']
    # Site-internal book id for each entry of `books` (site order differs
    # from list order).
    order = [1,2,3,4,5,6,7,8,10,11,12,14,15,13,9]
    # Page-number boundaries: book i spans pages
    # page_range[i] .. page_range[i+1]-1.
    page_range = [1,43,94,145,185,225,248,289,309,329,341,362,363,364,375,385]

    for i, book in enumerate(books):
        for num in range(page_range[i], page_range[i + 1]):
            url = "http://jinyong.zuopinj.com/%s/%s.html" % (order[i], num)
            try:
                title, chapter = get_chapter(url)
                # Append mode: chapters of one book accumulate in one file.
                with open('%s%s.txt' % (out_dir, book), 'a', encoding='gb18030') as f:
                    print(book+':'+title+'-->写入成功!')
                    f.write(title + '\n\n\n')
                    f.write(chapter + '\n\n\n')
            except Exception as e:
                # Best-effort: report the failed chapter and keep going.
                print(e)
    print('全部写入完毕!')
# Guard the entry point so importing this module does not start a scrape.
if __name__ == "__main__":
    main()