Scrapy: Crawling Dynamic Content, Downloading Images, and Integrating Selenium



Crawling Dynamic Content

On many pages, part of the data is loaded asynchronously via AJAX, so a crawler that simply fetches the page HTML never sees it.

In that case we can get the data by requesting the underlying data API endpoint directly.

To find that endpoint, open the developer tools on the target page and switch to Network → XHR, refresh the page to capture the asynchronous requests, study the request URLs to work out the pattern by which data is loaded, then inspect the returned fields and pick out the ones you need.
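Before wiring this into a spider, it can help to request the endpoint once by hand and inspect the JSON. Here is a minimal sketch, assuming the requests library is installed; the URL, parameters and field names are the ones the spider below relies on:

# Quick manual check of the 360 Images JSON endpoint (assumes `pip install requests`).
import requests

resp = requests.get(
    'http://image.so.com/zj',
    params={'ch': 'beauty', 'listtype': 'new', 'temp': 1, 'sn': 0},
)
data = resp.json()
# Print a few of the fields the spider will later map to item attributes
for elem in data['list'][:3]:
    print(elem['group_title'], elem['qhimg_url'])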

Below is a spider that scrapes 360 Images. Because we build the URLs to crawl ourselves, the start_urls list is no longer needed; instead we override the start_requests method.

# -*- coding: utf-8 -*-
from json import loads
from urllib.parse import urlencode

import scrapy

from img360.items import ImgItem


class ImageSpider(scrapy.Spider):
    name = 'image'
    allowed_domains = ['image.so.com']

    # Override this method (no start_urls list is needed)
    def start_requests(self):
        base_url = 'http://image.so.com/zj?'
        param = {'ch': 'beauty', 'listtype': 'new', 'temp': 1}
        for page in range(10):
            param['sn'] = page * 30
            # urlencode turns the dict into a query string;
            # Chinese characters are percent-encoded automatically
            full_url = base_url + urlencode(param)
            yield scrapy.Request(url=full_url, callback=self.parse)

    def parse(self, response):
        # The response body is JSON, so parse it with json.loads.
        # The endpoint and its fields were found by inspecting the XHR
        # requests in the browser's Network panel.
        model_dict = loads(response.text)
        for elem in model_dict['list']:
            item = ImgItem()
            item['title'] = elem['group_title']
            item['tag'] = elem['tag']
            item['width'] = elem['cover_width']
            item['height'] = elem['cover_height']
            item['url'] = elem['qhimg_url']
            yield item

Downloading Images Locally

Scrapy ships with built-in support for downloading images: subclass ImagesPipeline in the pipelines file, override its get_media_requests, item_completed and file_path methods, and configure the download directory in settings. The images are then saved to disk while the items are being scraped. The implementation is shown below.

pipelines.py:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import logging

import pymongo
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline

# Use a named logger for this pipeline
logger = logging.getLogger('SaveImgPipeline')


# The ImagesPipeline requires Pillow: pip install pillow
class SaveImgPipeline(ImagesPipeline):

    # Issue a download request for each item's image URL
    def get_media_requests(self, item, info):
        yield Request(url=item['url'])

    def item_completed(self, results, item, info):
        # results is a list of (success, file_info) tuples; the first element
        # of each tuple is a boolean telling whether the download succeeded
        if not results[0][0]:
            raise DropItem('Download failed')
        logger.debug('Download finished')
        return item

    def file_path(self, request, response=None, info=None):
        filename = request.url.split('/')[-1]
        # Only the file name is returned; Scrapy stores it under
        # the IMAGES_STORE directory configured in settings
        return filename


class SaveToMongoPipeline(object):

    def __init__(self, mongo_url, db_name, coll_name):
        self.mongo_url = mongo_url
        self.db_name = db_name
        self.coll_name = coll_name
        self.client = None
        self.db = None
        self.coll = None

    def process_item(self, item, spider):
        self.coll.insert_one(dict(item))
        return item

    # Connect to the database when the spider opens
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_url)
        self.db = self.client[self.db_name]
        self.coll = self.db[self.coll_name]

    # Release the connection when the spider closes
    def close_spider(self, spider):
        self.client.close()

    # Dependency injection, a handy technique: the crawler object represents
    # the running project, and this hook lets us pull the parameters we need
    # from it.
    @classmethod
    def from_crawler(cls, crawler):
        # The project settings are reachable through the crawler
        return cls(crawler.settings.get('MONGO_URL'),
                   crawler.settings.get('MONGO_DB'),
                   crawler.settings.get('MONGO_COLL'))

settings.py, with the extra pipeline entries and middleware configured for this project:

# -*- coding: utf-8 -*-

# Scrapy settings for img360 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'img360'

SPIDER_MODULES = ['img360.spiders']
NEWSPIDER_MODULE = 'img360.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)' \
             ' Chrome/67.0.3396.62 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 2

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# Randomize the download delay
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'img360.middlewares.Img360SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'img360.middlewares.Img360DownloaderMiddleware': 543,
    'img360.middlewares.TaoDaoDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Directory where ImagesPipeline stores downloaded files
IMAGES_STORE = './resourses/'

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # The numbers set the execution order
    'img360.pipelines.SaveImgPipeline': 300,
    'img360.pipelines.SaveToMongoPipeline': 301,
}

# Log level
LOG_LEVEL = 'DEBUG'

MONGO_URL = 'mongodb://47.98.172.171:27017'
MONGO_DB = 'image360'
MONGO_COLL = 'beauty'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

items.py:

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy


class ImgItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    tag = scrapy.Field()
    width = scrapy.Field()
    height = scrapy.Field()
    url = scrapy.Field()
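With the spider, pipelines, settings and items in place, the crawl is started with scrapy crawl image. A quick way to check that both pipelines did their job is to look into ./resourses/ for the downloaded files and query MongoDB; here is a minimal sketch, assuming pymongo 3.7+ and the MONGO_* values from the settings above:

# Verify the MongoDB side of the pipeline (connection details taken from settings.py above).
import pymongo

client = pymongo.MongoClient('mongodb://47.98.172.171:27017')
coll = client['image360']['beauty']
print(coll.count_documents({}))        # how many items were stored
print(coll.find_one({}, {'_id': 0}))   # one stored item with its fields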

StringIO

StringIO() behaves like a mutable string buffer and is the right tool for building a string out of many fragments. A normal str is immutable, so repeated concatenation keeps creating throwaway intermediate strings that waste memory, whereas StringIO accumulates the pieces in a single buffer.
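A minimal sketch of the difference (the fragment list is just an illustration):

from io import StringIO

parts = ['scrapy', ' ', 'selenium', ' ', 'pipelines']

# Immutable str: every += builds a brand-new string object
result = ''
for p in parts:
    result += p

# StringIO: the fragments are written into one in-memory buffer
buf = StringIO()
for p in parts:
    buf.write(p)
result = buf.getvalue()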

Packet Capture Tools

Wireshark is a powerful general-purpose packet capture tool, while Charles can only capture HTTP/HTTPS traffic; see a packet capture guide for setup details. The Taobao search spider below uses StringIO to stitch each product title together from several text fragments:

# -*- coding: utf-8 -*-
from io import StringIO
from urllib.parse import urlencode
import re

import scrapy

# NOTE: GoodsItem (fields: price, deal, title) is assumed to be defined in
# this project's items.py and imported from there.


class TaobaoSpider(scrapy.Spider):
    name = 'taobao'
    allowed_domains = ['www.taobao.com']

    def start_requests(self):
        base_url = 'https://s.taobao.com/search?'
        params = {}
        for keyword in ['ipad', 'iphone', '小米手机']:
            params['q'] = keyword
            for page in range(10):
                params['s'] = page * 44
                full_url = base_url + urlencode(params)
                yield scrapy.Request(url=full_url, callback=self.parse)

    def parse(self, response):
        goods_list = response.xpath('//*[@id="mainsrp-itemlist"]/div/div/div[1]/div')
        for goods in goods_list:
            item = GoodsItem()
            item['price'] = goods.xpath('div[2]/div[1]/div[1]/strong/text()').extract_first()
            item['deal'] = goods.xpath('div[2]/div[1]/div[2]/text()').extract_first()
            segments = goods.xpath('div[2]/div[2]/a/text()').extract()
            # Assemble the title from the text fragments, stripping whitespace
            title = StringIO()
            for segment in segments:
                title.write(re.sub(r'\s', '', segment))
            item['title'] = title.getvalue()
            yield item

Adding Cookies to Requests

Log in manually first and copy the cookies from that session, then attach those cookies to your own requests; this lets the crawler bypass the login step.
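A minimal sketch of attaching cookies to a Scrapy request; the spider name, URL and cookie values here are placeholders, and the real values would be copied from the browser's developer tools after logging in:

import scrapy


class LoginBypassSpider(scrapy.Spider):
    name = 'login_bypass'

    # Placeholder cookies: copy the real ones from the browser after a manual login
    cookies = {'sessionid': 'xxxx', 'csrftoken': 'yyyy'}

    def start_requests(self):
        yield scrapy.Request(
            'https://example.com/user/profile',  # hypothetical page that requires login
            cookies=self.cookies,                # Scrapy sends these cookies with the request
            callback=self.parse,
        )

    def parse(self, response):
        self.logger.info('Fetched %s as a logged-in user', response.url)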

Using Selenium in Place of Scrapy's Downloader

First, a quick look at how a request normally travels from the engine to the downloader: the engine hands the request to the downloader, and the downloader fetches the page. If we want Selenium to do the fetching instead, we have to interrupt that flow and let Selenium take the downloader's place, which we do by overriding process_request in a downloader middleware. process_request can return three kinds of results:

- None: the request keeps flowing and Scrapy's built-in downloader fetches the page.
- a Request object: nothing is downloaded; the request is handed back to the scheduler.
- a Response object: the downloader is bypassed and processing continues with this response.

So all we have to do is drive Selenium inside this method and return the rendered page as a ready-made response. middlewares.py:

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import time

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from scrapy import signals
from scrapy.http import HtmlResponse


class Img360SpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class Img360DownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        #   (the built-in downloader will fetch the page)
        # - or return a Response object
        #   (the page was already fetched here, e.g. with Selenium,
        #    so the downloader is skipped)
        # - or return a Request object
        #   (nothing is downloaded; the request is handed back to the scheduler)
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class TaoDaoDownloaderMiddleware(object):

    def __init__(self, timeout=None):
        self.timeout = timeout
        self.browser = webdriver.Chrome()
        # If a proxy is needed you can run your own proxy server
        # (CCProxy / TinyProxy / Squid) or buy one from a well-known Chinese
        # provider (Kuaidaili / Xundaili / Abuyun).
        # A proxy pool maintains a set of proxies and hands out a random one each time.
        # Setting a proxy (uid: username, pwd: password):
        # options = webdriver.ChromeOptions()
        # options.add_argument('--proxy-server=http://uid:pwd@1.2.3.5:808')
        # self.browser = webdriver.Chrome(options=options)
        # Set the browser window size
        self.browser.set_window_size(1000, 600)
        # Set the page load timeout
        self.browser.set_page_load_timeout(self.timeout)

    def __del__(self):
        self.browser.close()

    def process_request(self, request, spider):
        try:
            self.browser.get(request.url)
            return HtmlResponse(url=request.url, body=self.browser.page_source,
                                request=request, encoding='utf-8', status=200)
        except TimeoutException:
            return HtmlResponse(url=request.url, status=500, request=request)

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    @classmethod
    def from_crawler(cls, crawler):
        return cls(timeout=10)

A recommended blog for learning web scraping: Cui Qingcai's 静觅, which also provides code on GitHub.
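As a side note, when a Selenium-backed middleware like the one above runs on a server, Chrome is usually started headless and explicit waits replace fixed sleeps. Here is a minimal sketch, assuming a recent Selenium with chromedriver on the PATH; the CSS selector reuses the Taobao item-list id from the spider above:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

options = webdriver.ChromeOptions()
options.add_argument('--headless')           # run Chrome without a visible window
browser = webdriver.Chrome(options=options)

browser.get('https://s.taobao.com/search?q=ipad')
# Wait up to 10 seconds for the item-list container instead of sleeping
WebDriverWait(browser, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist'))
)
html = browser.page_source
browser.quit()

The same options object could be passed to webdriver.Chrome() inside TaoDaoDownloaderMiddleware.__init__.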
Please credit the original source when reposting: https://www.6miu.com/read-2627942.html
