为了爬取的高效性,实现的过程中我利用了python的threading模块,下面是threads.py模块,定义了下载解析页面的线程,下载图片的线程以及线程池
import threading
import urllib2
import Queue
import re
# Module-wide re-entrant lock shared by all worker threads; used by the
# image threads to serialize access to the shared set of already-saved
# image urls (see fetch_img.run).
thread_lock = threading.RLock()
#下载页面的一个函数,header中没有任何内容也可以顺利的下载,就省去了
def download_page(html_url):
try:
req = urllib2.Request(html_url)
response = urllib2.urlopen(req)
page = response.read()
return page
except Exception:
print ‘download %s failed’ % html_url
return None
#下载图片的一个方法,和上面的函数很像,只不过添加了一个文件头
#因为在测试的过程中发现天涯对于没有如下文件头的图片链接是不会返回正确的图片的
def download_image(image_url, referer):
try:
req = urllib2.Request(image_url)
req.add_header(‘Host’, ‘img3.laibafile.cn’)
req.add_header(‘User-Agent’, ‘Mozilla/5.0 (Windows NT 6.3; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0’)
req.add_header(‘Accept’, ‘image/png,image/*;q=0.8,*/*;q=0.5’)
req.add_header(‘Accept-Language’, ‘zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3’)
req.add_header(‘Referer’, referer)
req.add_header(‘Origin’, ‘http://bbs.tianya.cn’)
req.add_header(‘Connection’, ‘keep-alive’)
response = urllib2.urlopen(req)
image = response.read()
return image
except Exception:
print ‘download %s failed’ % image_url
return None
#下载和解析一个页面的线程类
class download_html_page(threading.Thread):
#name:线程的名字
#page_range:用户输入的页面范围
#page_contents:解析之后楼主的内容
#img_urls:解析之后楼主贴的图的链接
#html_url:输入的页面url
#first_page:第一次已经下载好的页面,主要是考虑效率,不重复下载
def __init__(self, name, page_range, page_contents, img_urls, html_url, first_page):
threading.Thread.__init__(self)
self.name = name
self.page_range = page_range
self.page_contents = page_contents
self.img_urls = img_urls
self.html\_url = html\_url
self.first\_page = first\_page
#判断是不是楼主的内容
def is\_louzhu(self, s):
result = re.search(r'<!-- <div class="host-ico">(.\*?)</div> -->', s, re.S)
return (result is not None)
#获得页面里属于楼主图片的url
def get\_img\_url(self, s, page\_url):
#判断是不是楼主给其他用户的评论,如果是的话,直接过滤掉(本人从不看评论)
is\_louzhu\_answer = re.search(r'-{15,}<br>', s, re.S)
if is\_louzhu\_answer is None:
imgurl = re.findall(r'<img.\*?original="(?P<imgurl>.\*?)".\*?/><br>', s, flags = re.S)
url\_path = \[\]
for one\_url in imgurl:
self.img\_urls.put(one\_url + '|' + page\_url)
path = re.search('\\w+\\.jpg', one\_url).group(0)
url\_path.append('img/' + path)
segments = re.split(r'<img .\*?/><br>', s.strip())
content = segments\[0\].strip()
for i in range(len(url\_path)):
content += '\\n<img src = "' + url\_path\[i\] + '" />\\n<br>'
content += segments\[i+1\].strip()
return content
#解析夜歌页面
def parse\_page(self, html\_page, page\_url):
html\_page.decode('utf-8')
Items = re.findall(r'<div class="atl-content">(?P<islouzhu>.+?)<div class="bbs-content.\*?">(?P<content>.+?)</div>', html\_page, re.S)
page\_content = ''
for item in Items:
if self.is\_louzhu(item\[0\]):
one\_div = self.get\_img\_url(item\[1\], page\_url)
if one\_div is not None:
page\_content += one\_div
return page\_content
def run(self):
while self.page\_range.qsize() > 0:
page\_number = self.page\_range.get()
page\_url = re.sub('-(\\d+?)\\.shtml', '-' + str(page\_number) + '.shtml', self.html\_url)
page\_content = ''
print 'thread %s is downloading %s' % (self.name, page\_url)
if page\_url == self.html\_url:
page\_content = self.parse\_page(self.first\_page, page\_url)
else:
page = download\_page(page\_url)
if page is not None:
page\_content = self.parse\_page(page, page\_url)
#thread\_lock.acquire()
#self.page\_contents\[page\_number\] = page\_content
#thread\_lock.release()
self.page\_contents.put(page\_content, page\_number)
self.img\_urls.put('finished')
#下载图片的线程
class fetch_img(threading.Thread):
def __init__(self, name, img_urls, download_img):
threading.Thread.__init__(self)
self.name = name
self.img_urls = img_urls
self.download_img = download_img
def run(self):
while True:
message = self.img\_urls.get().split('|')
img\_url = message\[0\]
if img\_url == 'finished':
self.img\_urls.put('finished')
break
else:
thread\_lock.acquire()
if img\_url in self.download\_img:
thread\_lock.release()
continue
else:
thread\_lock.release()
print 'fetching image %s' % img\_url
referer = message\[1\]
image = download\_image(img\_url, referer)
image\_name = re.search('\\w+\\.jpg', img\_url).group(0)
with open(r'img\\%s' % image\_name, 'wb') as img:
img.write(image)
thread\_lock.acquire()
self.download\_img.add(img\_url)
thread\_lock.release()
# A small thread pool that owns both kinds of worker thread.
class thread_pool:
    """Creates, starts and joins the page-parsing threads and the
    image-fetching threads, wiring up the state they share.

    page_range    -- Queue.Queue of page numbers to fetch
    page_contents -- dict mapping page number -> parsed content
    html_url      -- url of the first page of the forum thread
    first_page    -- HTML of the first page, already downloaded
    """
    def __init__(self, page_range, page_contents, html_url, first_page):
        self.page_range = page_range
        self.page_contents = page_contents
        self.img_urls = Queue.Queue()   # producer/consumer channel between the two pools
        self.html_url = html_url
        self.first_page = first_page
        self.download_img = set()       # urls already saved to disk
        self.page_thread_pool = []
        self.image_thread_pool = []

    def build_thread(self, page, image):
        """Create (without starting) `page` parser threads and `image`
        fetcher threads."""
        for i in range(page):
            t = download_html_page('page thread%d' % i, self.page_range,
                                   self.page_contents, self.img_urls,
                                   self.html_url, self.first_page)
            self.page_thread_pool.append(t)
        for i in range(image):
            t = fetch_img('image thread%d' % i, self.img_urls, self.download_img)
            self.image_thread_pool.append(t)

    def all_start(self):
        """Start every worker thread."""
        for t in self.page_thread_pool:
            t.start()
        for t in self.image_thread_pool:
            t.start()

    def all_join(self):
        """Block until every worker thread has finished."""
        for t in self.page_thread_pool:
            t.join()
        for t in self.image_thread_pool:
            t.join()
下面是主线程的代码:
# -*- coding: utf-8 -*-
import re
import Queue
import threads
if __name__ == ‘__main__’:
html_url = raw_input('enter the url: ')
html_page = threads.download_page(html_url)
max\_page = 0
title = ''
if html\_page is not None:
search\_title = re.search(r'<span class="s\_title"><span style="\\S+?">(?P<title>.+?)</span></span>', html\_page, re.S)
title = search\_title.groupdict()\['title'\]
search\_page = re.findall(r'<a href="/post-\\S+?-\\d+?-(?P<page>\\d+?)\\.shtml">(?P=page)</a>', html\_page, re.S)
for page\_number in search\_page:
page\_number = int(page\_number)
if page\_number > max\_page:
max\_page = page\_number
print 'title:%s' % title
print 'max page number: %s' % max\_page
start\_page = 0
while start\_page < 1 or start\_page > max\_page:
start\_page = input('input the start page number:')
end\_page = 0
while end\_page < start\_page or end\_page > max\_page:
end\_page = input('input the end page number:')
page\_range = Queue.Queue()
for i in range(start\_page, end\_page + 1):
page\_range.put(i)
page\_contents = {}
thread\_pool = threads.thread\_pool(page\_range, page\_contents, html\_url, html\_page)
thread\_pool.build\_thread(1, 1)
thread\_pool.all\_start()
thread\_pool.all\_join()
本文仅作项目练习,且勿商用!!!
由于文章篇幅有限,文档资料内容较多,需要这些文档的朋友,可以加小助手微信免费获取,【保证100%免费】,中国人不骗中国人。
全套Python学习资料分享:
一、Python所有方向的学习路线
Python所有方向路线就是把Python常用的技术点做整理,形成各个领域的知识点汇总,它的用处就在于,你可以按照上面的知识点去找对应的学习资源,保证自己学得较为全面。
二、学习软件
工欲善其事必先利其器。学习Python常用的开发软件都在这里了,还有环境配置的教程,给大家节省了很多时间。
三、全套PDF电子书
书籍的好处就在于权威和体系健全,刚开始学习的时候你可以只看视频或者听某个人讲课,但等你学完之后,你觉得你掌握了,这时候建议还是得去看一下书籍,看权威技术书籍也是每个程序员必经之路。
四、入门学习视频全套
我们在看视频学习的时候,不能光动眼动脑不动手,比较科学的学习方法是在理解之后运用它们,这时候练手项目就很适合了。
五、实战案例
光学理论是没用的,要学会跟着一起敲,要动手实操,才能将自己的所学运用到实际当中去,这时候可以搞点实战案例来学习。
今天就分享到这里啦,感谢大家收看!