Python crawler learning: hello world


Code notes from the 慕课网 (iMooc) course 《Python开发简单爬虫》 (Developing a Simple Crawler with Python).

一、URL downloader

#encoding=utf-8
import cookielib
import urllib2

url = "http://www.baidu.com"

print 'Testing method 1'
response1 = urllib2.urlopen(url)    # the fetched content comes back as a response object
print response1.getcode()           # status code: 200 means the request succeeded
print len(response1.read())         # read() returns the page content; print its length

print 'Testing method 2'
# Disguise the crawler as a browser
request = urllib2.Request(url)                    # build a Request object
request.add_header("user-agent", "Mozilla/5.0")   # pretend to be a browser
response2 = urllib2.urlopen(request)              # submit the download request
print response2.getcode()
print len(response2.read())

print 'Testing method 3'
# Add cookie handling to cope with more scenarios
cj = cookielib.CookieJar()                                      # a container for cookies
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))  # handler built from the jar, used to create an opener
urllib2.install_opener(opener)                                  # install the opener so urllib2 gains cookie support
response3 = urllib2.urlopen(url)
print response3.getcode()
print cj
print response3.read()
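The code above is Python 2 (urllib2, cookielib, print statements). For reference, a minimal Python 3 sketch of the same three methods, assuming only the standard library: urllib2 and cookielib were merged into urllib.request and http.cookiejar.

# Python 3 sketch of the same three download methods
import urllib.request
import http.cookiejar

url = "http://www.baidu.com"

# Method 1: plain urlopen
response1 = urllib.request.urlopen(url)
print(response1.getcode(), len(response1.read()))

# Method 2: send a browser User-Agent
request = urllib.request.Request(url, headers={"user-agent": "Mozilla/5.0"})
response2 = urllib.request.urlopen(request)
print(response2.getcode(), len(response2.read()))

# Method 3: cookie handling through an opener
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(opener)
response3 = urllib.request.urlopen(url)
print(response3.getcode(), cj)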

二、HTML parser: Beautiful Soup

#encoding=utf-8
from bs4 import BeautifulSoup
import re

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

# Create a BeautifulSoup object from the HTML string:
# document string, HTML parser, document encoding
soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')

print 'All links'
links = soup.find_all('a')
for link in links:
    print link.name, link['href'], link.get_text()

print "Lacie's link"
link_node = soup.find('a', href='http://example.com/lacie')
print link_node.name, link_node['href'], link_node.get_text()

print 'Regex match'
link_node = soup.find('a', href=re.compile(r"ill"))
print link_node.name, link_node['href'], link_node.get_text()

print 'Text of the p paragraph'
p_node = soup.find('p', class_="title")
print p_node.name, p_node.get_text()
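find and find_all search by tag name and attributes; bs4 also accepts CSS selectors through select(). A small sketch against the same html_doc and soup built above:

# CSS selectors as an alternative to find/find_all
for link in soup.select('p.story a.sister'):    # every <a class="sister"> inside a <p class="story">
    print link['href'], link.get_text()

print soup.select('p.title b')[0].get_text()    # the bold title text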

三、A full crawler example in Python

Crawl strategy

Goal: the title and summary of the Python entry on Baidu Baike, plus those of its related entries
Entry page: https://baike.baidu.com/item/Python
URL format: entry pages use relative URLs such as /view/125370.htm (not full URLs)
Page encoding: UTF-8
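Because entry links are relative, each one must be resolved against the page it was found on before it can be downloaded. A quick illustration with urlparse (Python 2; in Python 3 the same function lives in urllib.parse):

import urlparse

page_url = "https://baike.baidu.com/item/Python"
print urlparse.urljoin(page_url, "/view/125370.htm")
# https://baike.baidu.com/view/125370.htm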


Code

URL manager

# coding:utf8
class UrlManager(object):
    # Maintains two URL sets: new_urls (waiting to be crawled) and old_urls (already crawled)

    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        # Add a single new URL to the manager
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        # Add a batch of URLs to the manager
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        # Are there still URLs waiting to be crawled?
        return len(self.new_urls) != 0

    def get_new_url(self):
        # Take one URL to crawl and move it to the crawled set
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
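A quick check of the deduplication behaviour (a usage sketch, not part of the course code):

manager = UrlManager()
manager.add_new_url("https://baike.baidu.com/item/Python")
manager.add_new_url("https://baike.baidu.com/item/Python")   # duplicate: silently ignored
print manager.has_new_url()    # True
print manager.get_new_url()    # the URL moves into old_urls
print manager.has_new_url()    # False; re-adding the same URL would also be ignored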

HTML downloader

# coding:utf8
import urllib2

class HtmlDownloader(object):

    def download(self, url):
        if url is None:
            return None
        # The target pages are simple, so the simplest urllib2 call is enough
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
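If the site starts rejecting the default urllib2 user agent, the downloader can reuse the browser disguise from method 2 in section 一. A sketch of that variant (the header value is just an illustration):

# coding:utf8
import urllib2

class HtmlDownloader(object):

    def download(self, url):
        # Drop-in variant that disguises the crawler as a browser
        if url is None:
            return None
        request = urllib2.Request(url)
        request.add_header("user-agent", "Mozilla/5.0")   # same trick as section 一
        response = urllib2.urlopen(request)
        if response.getcode() != 200:
            return None
        return response.read()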

HTML parser

# coding:utf8
import re
import urlparse
from bs4 import BeautifulSoup

class HtmlParser(object):

    def _get_new_urls(self, page_url, soup):
        # Collect the URLs of all other entries linked from this page
        new_urls = set()
        # Entry links match /item
        links = soup.find_all('a', href=re.compile(r"/item"))
        for link in links:
            new_url = link['href']
            # Resolve the relative URL against page_url to get a full URL
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        # Extract the data: title and summary
        res_data = {}               # dictionary holding the extracted data
        res_data['url'] = page_url  # keep the URL for later use
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find("h1")
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        # Parse the downloaded content into two results: a new URL list and the data
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')  # load the content into a soup
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
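The parser can be smoke-tested without touching the network by feeding it a tiny hand-written page that mimics the Baike markup (the HTML below is made up, just enough structure for both helpers to match):

html_cont = '''
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/item/Guido">Guido</a>
</body></html>
'''

parser = HtmlParser()
new_urls, new_data = parser.parse("https://baike.baidu.com/item/Python", html_cont)
print new_urls                                     # absolute URL built from /item/Guido
print new_data['title'], '-', new_data['summary']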

HTML outputer

# coding:utf8
class HtmlOutputer(object):

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')   # output file object
        fout.write("<html>")
        fout.write("<head><meta charset=\"utf-8\"></head>")
        fout.write("<body>")
        fout.write("<table>")             # render the results as a table
        # Python 2 file writes default to ASCII, so encode the unicode fields as UTF-8
        for data in self.datas:
            fout.write("<tr>")            # open a table row
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")           # close the row
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
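The manual encode calls can be avoided by opening the file in text mode with an explicit encoding; io.open (available in Python 2.7 as well as Python 3) accepts unicode directly. A sketch of a drop-in replacement for output_html:

import io

def output_html(self):
    # Text-mode file with an explicit encoding: unicode can be written directly
    fout = io.open('output.html', 'w', encoding='utf-8')
    fout.write(u"<html><head><meta charset=\"utf-8\"></head><body><table>")
    for data in self.datas:
        fout.write(u"<tr><td>%s</td><td>%s</td><td>%s</td></tr>"
                   % (data['url'], data['title'], data['summary']))
    fout.write(u"</table></body></html>")
    fout.close()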

main

# coding:utf8
import url_manager, html_downloader, html_parser, html_outputer

class SpiderMain(object):

    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        # The crawler's scheduler
        count = 1                          # number of the URL currently being crawled
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():     # while the manager has URLs waiting to be crawled
            try:                           # some pages have nothing to extract
                new_url = self.urls.get_new_url()              # take one URL
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)  # download the page and keep the content
                # parse it into a new URL list and the extracted data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)               # batch-add the URLs to the manager
                self.outputer.collect_data(new_data)           # collect the data
                if count == 20:
                    break
                count = count + 1
            except:
                print 'craw failed'
        self.outputer.output_html()

if __name__ == "__main__":
    root_url = "https://baike.baidu.com/view/21087.htm"   # the Python entry's old-style /view/ URL (see the note below)
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)

Note: the link-matching pattern must be adjusted whenever the site changes its URL scheme, as in the sketch below.
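Baidu Baike, for instance, moved from numbered /view/125370.htm pages to named /item/... pages; only the regex inside _get_new_urls needs to follow (the patterns below are illustrative):

# Old scheme: numbered pages under /view/
links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))

# Newer scheme: named entries under /item/
links = soup.find_all('a', href=re.compile(r"/item/"))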

附 (references):
Python regular expressions: http://www.runoob.com/python/python-reg-expressions.html and http://www.cnblogs.com/huxi/archive/2010/07/04/1771073.htm
PyCharm shortcuts: http://www.cnblogs.com/littleseven/p/5599019.html
Fixing garbled text when crawling: http://blog.csdn.net/github_35160620/article/details/52529435

