import urllib.request
import urllib.parse
from lxml import html
import chardet
import urllib
import os
import time
from urllib import error
import lxml.html
# Keep novel_base_url and the Host header consistent.
novel_base_url = 'https://www.blqukk.cc'
# Index page of the novel; chapter links are resolved relative to the base.
novel_url = urllib.parse.urljoin(novel_base_url, '38_38836/')
# Absolute chapter-page URLs, populated by fetch_chapter_urls().
chapter_url_list = []
headers = {
    # Host must be a bare host name — 'http://www.blqukk.cc' is not a valid
    # Host header value (no scheme allowed) and did not match the https base.
    'Host': 'www.blqukk.cc',
    'Referer': 'https://www.blqukk.cc/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0'
}
def fetch_chapter_urls():
    """Fetch the novel index page and fill chapter_url_list with absolute chapter URLs.

    Repopulates chapter_url_list in place so repeated calls do not accumulate
    duplicate entries. Errors are reported to stdout (best-effort, no raise).
    """
    try:
        req = urllib.request.Request(url=novel_url, headers=headers)
        # Close the connection deterministically instead of leaking it.
        with urllib.request.urlopen(req) as response:
            content = response.read()
        # Auto-detect the page encoding; the site does not reliably serve UTF-8.
        detected_encoding = chardet.detect(content)['encoding']
        if not detected_encoding:
            print("无法检测编码,尝试使用 UTF-8 解码")
            detected_encoding = 'utf-8'
        html_content = content.decode(detected_encoding, errors='ignore')
        tree = html.fromstring(html_content)
        hrefs = tree.xpath('//dd/a/@href')
        # Skip the first 12 <dd> entries ("latest chapters" box repeats real
        # chapters — assumed from the original offset; TODO confirm on the page).
        # Slice-assign so a second call replaces rather than appends.
        chapter_url_list[:] = [
            urllib.parse.urljoin(novel_base_url, href) for href in hrefs[12:]
        ]
        print(chapter_url_list)
    except Exception as e:
        # Deliberately broad: this scraper is best-effort and must not crash the run.
        print(f"发生错误: {e}")
# NOTE(review): removed a stray `if __name__ == '__main__': fetch_chapter_urls()`
# that duplicated the real entry point further down; running both filled
# chapter_url_list with every chapter URL twice.
# Directory where downloaded chapters are cached as .txt files.
nove_save_dir = os.path.join(os.getcwd(), 'novel_cache/')
def parsing_chapter(url):
    """Download one chapter page, extract its title and body, and save it.

    Args:
        url: absolute URL of a chapter page.
    """
    req = urllib.request.Request(url=url, headers=headers)
    with urllib.request.urlopen(req) as response:
        raw = response.read()
    # Decode with the detected encoding, consistent with fetch_chapter_urls();
    # feeding the raw stream to lxml risked mojibake on non-UTF-8 pages.
    encoding = chardet.detect(raw)['encoding'] or 'utf-8'
    # Local name `page` no longer shadows the `html` module imported from lxml.
    page = lxml.html.fromstring(raw.decode(encoding, errors='ignore'))
    titles = page.xpath('//h1/text()')
    if not titles:
        # Guard the former `[0]`, which raised IndexError on pages without <h1>.
        print(f"发生错误: {url}")
        return
    title = titles[0]
    # Join the text fragments of the content div, stripping per-fragment whitespace.
    content = ''.join(piece.strip() for piece in page.xpath('//*[@id="content"]/text()'))
    save_novel(title, content)
def save_novel(name, content, save_dir=None):
    """Write a chapter to `<save_dir>/<name>.txt`.

    Args:
        name: chapter title, used as the file name.
        content: chapter body; surrounding whitespace is stripped before writing.
        save_dir: target directory; defaults to the module-level nove_save_dir.
    """
    if save_dir is None:
        save_dir = nove_save_dir
    try:
        # Explicit UTF-8 avoids UnicodeEncodeError under Windows locale codecs.
        with open(os.path.join(save_dir, name + '.txt'), 'w', encoding='utf-8') as f:
            f.write(content.strip())
    # Only filesystem errors can occur here; the former error.HTTPError member
    # was dead (open/write never raise it).
    except OSError as reason:
        print(str(reason))
    else:
        print("下载完成:" + name)
if __name__ == '__main__':
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair and also
    # creates intermediate directories if needed.
    os.makedirs(nove_save_dir, exist_ok=True)
    fetch_chapter_urls()
    for chapter in chapter_url_list:
        # Throttle requests to be polite to the server.
        time.sleep(1)
        parsing_chapter(chapter)
# NOTE(review): this entire section was an accidental second paste of the script
# above — the same constants, fetch_chapter_urls, parsing_chapter, save_novel,
# and `__main__` guards, minus the `import urllib.request` line. Executing it
# redefined every name and re-ran fetch_chapter_urls() plus the download loop,
# duplicating every chapter URL and re-downloading every chapter. Removed; the
# first copy is the single source of truth.