Scraping news data (titles and article content) from NetEase News (https://news.163.com/):
1. Parse the detail-page URLs of the five major sections out of the NetEase News homepage (the homepage itself is not dynamically loaded).
2. The news titles inside each section page are dynamically loaded (rendered by JavaScript), so a plain Scrapy request cannot see them.
3. Parse each news item's detail-page URL, fetch the detail page's source, and extract the article content.

Only two of the sections are needed here: Domestic (国内) and International (国际). Both links sit in li tags under the same ul.
XPath locator: //*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li
They are the 3rd and 4th li elements, i.e. indexes 2 and 3 of the selector list.
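The spider below imports WangyiproItem from wangyiPro.items, but items.py is not shown in these notes. A minimal sketch of it, assuming it declares exactly the two fields the spider populates:

# items.py -- minimal sketch (assumed: only the two fields used by the spider)
import scrapy


class WangyiproItem(scrapy.Item):
    title = scrapy.Field()    # news headline
    content = scrapy.Field()  # article body text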
import scrapy
from selenium import webdriver

from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']
    url_list = []  # detail-page URLs of the target sections

    def __init__(self):
        super().__init__()
        # One shared browser instance for the whole crawl; the downloader
        # middleware uses it to render the dynamically loaded section pages.
        self.bro = webdriver.Chrome()

    # Parse the section detail-page URLs out of the homepage.
    def parse(self, response):
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        index_list = [2, 3]  # the Domestic and International sections
        for index in index_list:
            url = li_list[index].xpath('./a/@href').extract_first()
            self.url_list.append(url)

        for url in self.url_list:
            yield scrapy.Request(url, callback=self.parse_model)

    # The titles in each section page are loaded dynamically; by the time this
    # callback runs, the downloader middleware has already swapped in a
    # Selenium-rendered response, so the XPath below can see them.
    def parse_model(self, response):
        # Parse each news title and its detail-page URL out of the section page.
        div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div[1]/div/ul/li/div/div')
        for div in div_list:
            title = div.xpath('./div/div/h3/a/text()').extract_first()
            detail_url = div.xpath('./div/div/h3/a/@href').extract_first()
            if not title or not detail_url:
                continue  # skip ad slots and empty list entries

            item = WangyiproItem()
            item['title'] = title
            # Request the news detail page, passing the item along via meta.
            yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item})

    # Parse the article content out of the detail page.
    def parse_detail(self, response):
        content = response.xpath('//div[@id="content"]/div[2]//text()').extract()
        content = ''.join(content).strip()
        item = response.meta['item']
        item['content'] = content

        yield item

    # Called once when the spider closes; shut down the shared browser.
    def closed(self, spider):
        self.bro.quit()

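The code above is the spider (wangyi.py); what follows is middlewares.py. The downloader middleware only takes effect once it is enabled in settings.py, which the original notes do not show. A minimal sketch, assuming the default project layout and typical tutorial settings:

# settings.py -- minimal sketch (assumed values; adjust to your project)
USER_AGENT = 'Mozilla/5.0'  # placeholder; substitute a real browser UA string
ROBOTSTXT_OBEY = False      # typical for this setup; obeying robots.txt may block the crawl
LOG_LEVEL = 'ERROR'         # optional: keeps the console output readable

DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}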
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from time import sleep

from scrapy import signals
from scrapy.http import HtmlResponse

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter

class WangyiproSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WangyiproDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    # Intercept the responses of the section pages and replace them with
    # Selenium-rendered ones.
    def process_response(self, request, response, spider):
        bro = spider.bro  # the browser instance created in the spider class
        # Pick out exactly the responses that need tampering: the URL
        # identifies the request, and the request identifies the response.
        if request.url in spider.url_list:
            bro.get(request.url)  # let the browser load the section page
            sleep(3)  # crude wait so the dynamically loaded news list can render
            page_text = bro.page_source  # now contains the dynamically loaded news data

            # Build a new response object carrying the rendered page source and
            # return it in place of the original, JavaScript-free response.
            new_response = HtmlResponse(url=request.url, body=page_text, encoding='utf-8', request=request)
            return new_response
        else:
            # Every other response passes through untouched.
            return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
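To actually persist the scraped items, a pipeline is still needed; pipelines.py is not shown in these notes either. A minimal sketch that writes each title/content pair to a local text file (enable it in settings.py with ITEM_PIPELINES = {'wangyiPro.pipelines.WangyiproPipeline': 300}, then run the crawl with scrapy crawl wangyi):

# pipelines.py -- minimal sketch (assumed: plain text-file persistence)
class WangyiproPipeline:
    def open_spider(self, spider):
        # Open the output file once when the spider starts.
        self.fp = open('news.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # Write one title/content pair per item.
        self.fp.write(item['title'] + '\n' + item['content'] + '\n\n')
        return item

    def close_spider(self, spider):
        self.fp.close()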