def parse_detail(self, response):
    item = response.meta["item"]
    item["content"] = response.xpath("//div[@class='wzy1']/table[2]//td[@class='txt16_3']/text()").extract()
    # print(item["content"])
    item["content_img"] = response.xpath("//div[@class='wzy1']/table[2]//td[@class='txt16_3']//img/@src").extract()
    item["content_img"] = ["http://wz.sun0769.com" + i for i in item["content_img"]]
    # print(item["content_img"])
    # print(item)
    yield item
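parse_detail expects the partially filled item in response.meta, so the list-page callback has to pass it along when scheduling the detail request. A minimal sketch of that hand-off (the href field comes from the item definition below; the surrounding list-page code is not shown in this excerpt):

yield scrapy.Request(
    item["href"],                # detail-page URL collected on the list page
    callback=self.parse_detail,
    meta={"item": item},         # hand the partial item over to parse_detail
)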
items.py:
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class SunItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    publish_date = scrapy.Field()
    href = scrapy.Field()
    content = scrapy.Field()
    content_img = scrapy.Field()
pipelines.py:

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import re
def process_content(self, content):
    content = [re.sub(r"\xa0|\s", "", i) for i in content]  # strip \xa0 (non-breaking spaces) and whitespace
    content = [i for i in content if len(i) > 0]  # drop empty strings from the list
    return content
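Since process_content takes self, it presumably sits on the item pipeline class and gets called from process_item. A minimal sketch of that wiring, assuming a pipeline class named SunPipeline (the name and the choice to clean only the content field are assumptions, not from the original):

class SunPipeline:
    # the process_content method shown above would sit inside this class

    def process_item(self, item, spider):
        item["content"] = self.process_content(item["content"])  # clean the scraped text before passing the item on
        return item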
settings.py:

# Scrapy settings for sun project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

LOG_LEVEL = 'WARNING'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
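One gap worth flagging: the pipelines.py header above says to register the pipeline in ITEM_PIPELINES, but that setting does not appear in this excerpt. The registration would look roughly like this, assuming the SunPipeline class name sketched earlier (300 is an arbitrary priority):

# assumed registration for the cleaning pipeline; class name is a guess based on the project name
ITEM_PIPELINES = {
    'sun.pipelines.SunPipeline': 300,
}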