diff --git a/avito_parse/__init__.py b/avito_parse/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/avito_parse/items.py b/avito_parse/items.py
new file mode 100644
index 0000000..02810e6
--- /dev/null
+++ b/avito_parse/items.py
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class AvitoParseItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    pass
diff --git a/avito_parse/loaders.py b/avito_parse/loaders.py
new file mode 100644
index 0000000..ddacc83
--- /dev/null
+++ b/avito_parse/loaders.py
@@ -0,0 +1,40 @@
+# from scrapy import Selector
+from scrapy.loader import ItemLoader
+from itemloaders.processors import TakeFirst, MapCompose, Compose
+from urllib.parse import urljoin
+
+
+def get_url(itm):
+    return urljoin("https://www.avito.ru", itm)
+
+
+def get_price(itm):
+    # strip thousands separators (Avito may render them as non-breaking spaces)
+    itm = itm.replace("\xa0", "").replace(" ", "")
+    return float(itm)
+
+
+def get_addr(itm):
+    # drop the newline-plus-indent that wraps the address text
+    return itm.replace("\n ", "")
+
+
+def get_params(data):
+    # drop whitespace-only entries and the ": " key separators
+    data = [itm.replace(": ", "") for itm in data if itm not in (' ', '\n ')]
+    data = [itm[:-1] if itm.endswith(' ') else itm for itm in data]
+    # the cleaned list alternates key, value, key, value, ...
+    return dict(zip(data[::2], data[1::2]))
+
+
+class FlatLoader(ItemLoader):
+    default_item_class = dict
+    url_out = TakeFirst()
+    title_out = TakeFirst()
+    seller_url_in = MapCompose(get_url)
+    seller_url_out = TakeFirst()
+    price_in = MapCompose(get_price)
+    price_out = TakeFirst()
+    address_in = MapCompose(get_addr)
+    address_out = TakeFirst()
+    parameters_out = Compose(get_params)
diff --git a/avito_parse/middlewares.py b/avito_parse/middlewares.py
new file mode 100644
index 0000000..08629ce
--- /dev/null
+++ b/avito_parse/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class AvitoParseSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class AvitoParseDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/avito_parse/pipelines.py b/avito_parse/pipelines.py
new file mode 100644
index 0000000..abd1b30
--- /dev/null
+++ b/avito_parse/pipelines.py
@@ -0,0 +1,21 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+from .settings import BOT_NAME
+from pymongo import MongoClient
+
+
+class AvitoParsePipeline:
+    def __init__(self):
+        client = MongoClient()  # default connection: localhost:27017
+        self.db = client[BOT_NAME]
+
+    def process_item(self, item, spider):
+        # insert a copy; insert_one() would otherwise add an "_id" field to the item itself
+        self.db[spider.name].insert_one(ItemAdapter(item).asdict())
+        return item
diff --git a/avito_parse/settings.py b/avito_parse/settings.py
new file mode 100644
index 0000000..607b95f
--- /dev/null
+++ b/avito_parse/settings.py
@@ -0,0 +1,108 @@
+# Scrapy settings for avito_parse project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'avito_parse'
+
+SPIDER_MODULES = ['avito_parse.spiders']
+NEWSPIDER_MODULE = 'avito_parse.spiders'
+
+LOG_ENABLED = True
+LOG_LEVEL = "DEBUG"
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0'
+# USER_AGENT = ['Mozilla/5.0 '
+#               '(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 '
+#               '(Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 '
+#               '(Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 '
+#               '(Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 '
+#               '(Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 '
+#               '(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
+#               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0']
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Keep cookies enabled (the Scrapy default)
+COOKIES_ENABLED = True
+
+# Disable Telnet Console (enabled by default)
+TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+DEFAULT_REQUEST_HEADERS = {
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+    "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'avito_parse.middlewares.AvitoParseSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+# DOWNLOADER_MIDDLEWARES = {
+#     'avito_parse.middlewares.AvitoParseDownloaderMiddleware': 543,
+#     'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
+#     'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
+# }
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'avito_parse.pipelines.AvitoParsePipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+# AUTOTHROTTLE_ENABLED = False
+# The initial download delay
+# AUTOTHROTTLE_START_DELAY = 2
+# The maximum download delay to be set in case of high latencies
+# AUTOTHROTTLE_MAX_DELAY = 10
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+# AUTOTHROTTLE_DEBUG = True
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+# ROTATING_PROXY_LIST_PATH = 'good_proxies'
diff --git a/avito_parse/spiders/__init__.py b/avito_parse/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/avito_parse/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/avito_parse/spiders/avito.py b/avito_parse/spiders/avito.py
new file mode 100644
index 0000000..7cc8b57
--- /dev/null
+++ b/avito_parse/spiders/avito.py
@@ -0,0 +1,44 @@
+import scrapy
+from ..loaders import FlatLoader
+
+
+class AvitoSpider(scrapy.Spider):
+    name = 'avito'
+    allowed_domains = ['www.avito.ru']
+    start_urls = ['https://www.avito.ru/moskva/kvartiry/prodam']
+
+    _xpath_selectors = {
+        "pagination": '//div[contains(@class, "pagination-hidden")]//a[@class="pagination-page"]/@href',
+        "flat_url": '//div[@data-marker="item"]'
+                    '//div[contains(@class, "iva-item-body")]'
+                    '//a[@data-marker="item-title"]/@href',
+    }
+
+    _xpath_data_selectors = {
+        "title": '//h1[@class="title-info-title"]/span[@class="title-info-title-text"]/text()',
+        "price": '//div[@class="item-price-wrapper"]/div[@id="price-value"]//span[@itemprop="price"]/text()',
+        "address": '//div[@itemprop="address"]/span[@class="item-address__string"]/text()',
+        "parameters": '//div[@class="item-params"]//li[@class="item-params-list-item"]//text()',
+        "seller_url": '//div[@data-marker="seller-info/name"]/a/@href'  # relative link; get_url() in the loader prepends the domain
+    }
+
+    def _get_follow(self, response, selector_str, callback):
+        # yield a follow-up request for every URL matched by the XPath selector
+        for itm in response.xpath(selector_str):
+            yield response.follow(itm, callback=callback)
+
+    def parse(self, response, *args, **kwargs):
+        # recurse through pagination, and send every listing to flat_parse()
+        yield from self._get_follow(
+            response, self._xpath_selectors["pagination"], self.parse
+        )
+        yield from self._get_follow(
+            response, self._xpath_selectors["flat_url"], self.flat_parse
+        )
+
+    def flat_parse(self, response):
+        flat_loader = FlatLoader(response=response)
+        flat_loader.add_value("url", response.url)
+        for key, xpath in self._xpath_data_selectors.items():
+            flat_loader.add_xpath(key, xpath)
+        yield flat_loader.load_item()
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..82a5383
--- /dev/null
+++ b/main.py
@@ -0,0 +1,11 @@
+from scrapy.crawler import CrawlerProcess
+from scrapy.settings import Settings
+from avito_parse.spiders.avito import AvitoSpider
+
+
+if __name__ == "__main__":
+    crawler_settings = Settings()
+    crawler_settings.setmodule("avito_parse.settings")
+    crawler_process = CrawlerProcess(settings=crawler_settings)
+    crawler_process.crawl(AvitoSpider)
+    crawler_process.start()
diff --git a/scrapy.cfg b/scrapy.cfg
new file mode 100644
index 0000000..e444dff
--- /dev/null
+++ b/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = avito_parse.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = avito_parse
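
Run notes: the pipeline connects with a bare MongoClient(), so it assumes a MongoDB
instance listening on the default localhost:27017. Start the crawl either with the
bundled entry point (python main.py) or, from the project root next to scrapy.cfg,
with the Scrapy CLI (scrapy crawl avito). Each scraped flat is stored as one document
in the "avito_parse" database (BOT_NAME), in a collection named after the spider
("avito"). The XPath selectors match Avito's markup at the time the spider was
written and may need updating if the site's layout changes.

A minimal pymongo sketch for inspecting the stored results, assuming the default
connection above; the "title", "price", and "url" fields come from the spider's
_xpath_data_selectors and FlatLoader:

    from pymongo import MongoClient

    client = MongoClient()  # same defaults the pipeline relies on
    flats = client["avito_parse"]["avito"]  # db = BOT_NAME, collection = spider name

    print(flats.count_documents({}))  # how many flats were scraped
    for doc in flats.find().limit(3):
        print(doc.get("title"), doc.get("price"), doc.get("url"))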