From 1512320f0e1d3bcfd8641f48144efc44a3a6f5cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=98=D0=BB=D1=8C=D1=8F?=
Date: Wed, 5 May 2021 00:35:57 +0300
Subject: [PATCH 1/2] add homework4

---
 gb_parse/__init__.py          |   0
 gb_parse/items.py             |  16 ++++++
 gb_parse/middlewares.py       | 103 ++++++++++++++++++++++++++++++++++
 gb_parse/pipelines.py         |  22 ++++++++
 gb_parse/settings.py          |  93 ++++++++++++++++++++++++++++++
 gb_parse/spiders/__init__.py  |   4 ++
 gb_parse/spiders/autoyoula.py |  66 ++++++++++++++++++++++
 main.py                       |  11 ++++
 scrapy.cfg                    |  11 ++++
 9 files changed, 326 insertions(+)
 create mode 100644 gb_parse/__init__.py
 create mode 100644 gb_parse/items.py
 create mode 100644 gb_parse/middlewares.py
 create mode 100644 gb_parse/pipelines.py
 create mode 100644 gb_parse/settings.py
 create mode 100644 gb_parse/spiders/__init__.py
 create mode 100644 gb_parse/spiders/autoyoula.py
 create mode 100644 main.py
 create mode 100644 scrapy.cfg

diff --git a/gb_parse/__init__.py b/gb_parse/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/gb_parse/items.py b/gb_parse/items.py
new file mode 100644
index 0000000..8bb8322
--- /dev/null
+++ b/gb_parse/items.py
@@ -0,0 +1,16 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class GbParseItem(scrapy.Item):
+    # define the fields for your item here like:
+    url = scrapy.Field()
+    title = scrapy.Field()
+    img_links = scrapy.Field()
+    specifications = scrapy.Field()
+    description = scrapy.Field()
+    author = scrapy.Field()
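For orientation, here is a minimal sketch of what a filled-in GbParseItem looks like by the time it reaches the pipeline. All values are hypothetical; the point is that scrapy.Item supports the dict-style access the spider and pipeline below rely on:

    from gb_parse.items import GbParseItem

    item = GbParseItem()
    item["url"] = "https://auto.youla.ru/advert/example"   # hypothetical ad URL
    item["title"] = "Ford Focus, 2017"                     # hypothetical title
    item["img_links"] = ["https://example.org/photo1.jpg"] # gallery image URLs
    item["specifications"] = {"Mileage": "35 000 km"}      # label -> value pairs
    item["description"] = "Seller's free-form text."
    item["author"] = "https://youla.ru/user/abc123"        # profile URL, see get_author_id
    print(dict(item))                                      # a plain dict, which is what gets stored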
diff --git a/gb_parse/middlewares.py b/gb_parse/middlewares.py
new file mode 100644
index 0000000..bc98872
--- /dev/null
+++ b/gb_parse/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class GbParseSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class GbParseDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/gb_parse/pipelines.py b/gb_parse/pipelines.py
new file mode 100644
index 0000000..5ef6307
--- /dev/null
+++ b/gb_parse/pipelines.py
@@ -0,0 +1,22 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+
+from pymongo import MongoClient
+from itemadapter import ItemAdapter
+from gb_parse import settings
+
+
+class GbParsePipeline:
+
+    def __init__(self):
+        db = MongoClient()[settings.MONGODB_DB]
+        self.collection = db[settings.MONGODB_COLLECTION]
+
+    def process_item(self, item, spider):
+        self.collection.insert_one(ItemAdapter(item).asdict())
+        return item
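GbParsePipeline opens one MongoClient at instantiation, never closes it, and reads the database names by importing the settings module directly. The more conventional Scrapy arrangement pulls configuration from crawler.settings and manages the client in open_spider/close_spider. A sketch of that variant, assuming the same MONGODB_DB and MONGODB_COLLECTION settings (the class name MongoSettingsPipeline is invented for illustration):

    from itemadapter import ItemAdapter
    from pymongo import MongoClient


    class MongoSettingsPipeline:
        def __init__(self, db_name, collection_name):
            self.db_name = db_name
            self.collection_name = collection_name

        @classmethod
        def from_crawler(cls, crawler):
            # read the names from crawler settings instead of importing gb_parse.settings
            return cls(
                db_name=crawler.settings.get("MONGODB_DB"),
                collection_name=crawler.settings.get("MONGODB_COLLECTION"),
            )

        def open_spider(self, spider):
            # one client per crawl, released when the spider finishes
            self.client = MongoClient()
            self.collection = self.client[self.db_name][self.collection_name]

        def close_spider(self, spider):
            self.client.close()

        def process_item(self, item, spider):
            self.collection.insert_one(ItemAdapter(item).asdict())
            return item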
diff --git a/gb_parse/settings.py b/gb_parse/settings.py
new file mode 100644
index 0000000..fcdb489
--- /dev/null
+++ b/gb_parse/settings.py
@@ -0,0 +1,93 @@
+# Scrapy settings for gb_parse project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = "gb_parse"
+
+SPIDER_MODULES = ["gb_parse.spiders"]
+NEWSPIDER_MODULE = "gb_parse.spiders"
+
+LOG_ENABLED = True
+LOG_LEVEL = "DEBUG"
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0"
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+# CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 1.2
+# The download delay setting will honor only one of:
+# CONCURRENT_REQUESTS_PER_DOMAIN = 16
+# CONCURRENT_REQUESTS_PER_IP = 16
+
+# Keep cookies enabled (this is the default)
+COOKIES_ENABLED = True
+
+# Disable Telnet Console (enabled by default)
+TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+DEFAULT_REQUEST_HEADERS = {
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+    "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+# SPIDER_MIDDLEWARES = {
+#     'gb_parse.middlewares.GbParseSpiderMiddleware': 543,
+# }
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+# DOWNLOADER_MIDDLEWARES = {
+#     'gb_parse.middlewares.GbParseDownloaderMiddleware': 543,
+# }
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+# EXTENSIONS = {
+#     'scrapy.extensions.telnet.TelnetConsole': None,
+# }
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'gb_parse.pipelines.GbParsePipeline': 300,
+}
+
+MONGODB_DB = "autoyoula_parse_03_05_21"
+MONGODB_COLLECTION = "autoyoula_parse"
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+# AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+# AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+# AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+# AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+# HTTPCACHE_ENABLED = True
+# HTTPCACHE_EXPIRATION_SECS = 0
+# HTTPCACHE_DIR = 'httpcache'
+# HTTPCACHE_IGNORE_HTTP_CODES = []
+# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/gb_parse/spiders/__init__.py b/gb_parse/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/gb_parse/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
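Custom keys such as MONGODB_DB live alongside the stock settings and are read back through the same Settings object. A quick sanity check that the module loads, assuming it is run from the project root so scrapy.cfg points the loader at gb_parse.settings:

    from scrapy.utils.project import get_project_settings

    settings = get_project_settings()
    print(settings.get("BOT_NAME"))             # gb_parse
    print(settings.getfloat("DOWNLOAD_DELAY"))  # 1.2
    print(settings.get("MONGODB_DB"))           # autoyoula_parse_03_05_21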
diff --git a/gb_parse/spiders/autoyoula.py b/gb_parse/spiders/autoyoula.py
new file mode 100644
index 0000000..39727ee
--- /dev/null
+++ b/gb_parse/spiders/autoyoula.py
@@ -0,0 +1,66 @@
+import scrapy
+import re
+from gb_parse.items import GbParseItem
+
+
+class AutoyoulaSpider(scrapy.Spider):
+    name = "autoyoula"
+    allowed_domains = ["auto.youla.ru"]
+    start_urls = ["https://auto.youla.ru/"]
+
+    def _get_follow(self, response, selector_str, callback):
+        for itm in response.css(selector_str):
+            url = itm.attrib["href"]
+            yield response.follow(url, callback=callback)
+
+    def parse(self, response, *args, **kwargs):
+        yield from self._get_follow(
+            response,
+            ".TransportMainFilters_brandsList__2tIkv .ColumnItemList_column__5gjdt a.blackLink",
+            self.brand_parse,
+        )
+
+    def brand_parse(self, response):
+        yield from self._get_follow(
+            response, ".Paginator_block__2XAPy a.Paginator_button__u1e7D", self.brand_parse
+        )
+        yield from self._get_follow(
+            response,
+            "article.SerpSnippet_snippet__3O1t2 a.SerpSnippet_name__3F7Yu.blackLink",
+            self.car_parse,
+        )
+
+    def car_parse(self, response):
+        data = GbParseItem()
+        data["url"] = response.url
+        data["title"] = response.css(".AdvertCard_advertTitle__1S1Ak::text").extract_first()
+        data["img_links"] = [itm.attrib['src'] for itm in
+                             response.css("figure.PhotoGallery_photo__36e_r img.PhotoGallery_photoImage__2mHGn")]
+        specifications_dict = {}
+        for itm in response.css(".AdvertCard_specs__2FEHc .AdvertSpecs_row__ljPcX"):
+            key = itm.css(".AdvertSpecs_label__2JHnS::text").extract_first()
+            value = itm.css(".AdvertSpecs_data__xK2Qx a.blackLink::text").extract_first()
+            if not value:
+                value = itm.css(".AdvertSpecs_data__xK2Qx::text").extract_first()
+            specifications_dict.update({key: value})
+        data["specifications"] = specifications_dict
+        data["description"] = response.css(".AdvertCard_descriptionWrap__17EU3 "
+                                           ".AdvertCard_descriptionInner__KnuRi::text").extract_first()
+        data["author"] = AutoyoulaSpider.get_author_id(response)
+        return data
+
+    @staticmethod
+    def get_author_id(resp):
+        marker = "window.transitState = decodeURIComponent"
+        for script in resp.css("script"):
+            try:
+                if marker in script.css("::text").extract_first():
+                    re_pattern = re.compile(r"youlaId%22%2C%22([a-zA-Z\d]+)%22%2C%22avatar")
+                    result = re.findall(re_pattern, script.css("::text").extract_first())
+                    return (
+                        resp.urljoin(f"/user/{result[0]}").replace("auto.", "", 1)
+                        if result
+                        else None
+                    )
+            except TypeError:
+                pass
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..cddc3fa
--- /dev/null
+++ b/main.py
@@ -0,0 +1,11 @@
+from scrapy.crawler import CrawlerProcess
+from scrapy.settings import Settings
+from gb_parse.spiders.autoyoula import AutoyoulaSpider
+
+
+if __name__ == "__main__":
+    crawler_settings = Settings()
+    crawler_settings.setmodule("gb_parse.settings")
+    crawler_process = CrawlerProcess(settings=crawler_settings)
+    crawler_process.crawl(AutoyoulaSpider)
+    crawler_process.start()
diff --git a/scrapy.cfg b/scrapy.cfg
new file mode 100644
index 0000000..6a444a0
--- /dev/null
+++ b/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = gb_parse.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = gb_parse
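The spider never decodes the transit-state blob; get_author_id exploits the fact that window.transitState holds URL-encoded JSON, so the pair "youlaId","<id>" followed by "avatar" appears literally as youlaId%22%2C%22<id>%22%2C%22avatar (%22 is an encoded quote, %2C a comma). A toy check of the pattern against a fabricated fragment:

    import re

    pattern = re.compile(r"youlaId%22%2C%22([a-zA-Z\d]+)%22%2C%22avatar")
    # made-up slice of an encoded transit-state string, not real site data
    sample = "sellerLink%22%2C%22youlaId%22%2C%225a7cd04f%22%2C%22avatar%22"
    print(pattern.findall(sample))  # ['5a7cd04f']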
From 36641c09b2c5a64d4f8816908fd132045ff0c248 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=98=D0=BB=D1=8C=D1=8F?=
Date: Wed, 5 May 2021 12:09:25 +0300
Subject: [PATCH 2/2] add phone

---
 gb_parse/items.py             |  3 ++-
 gb_parse/spiders/autoyoula.py | 17 ++++++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/gb_parse/items.py b/gb_parse/items.py
index 8bb8322..361c7cf 100644
--- a/gb_parse/items.py
+++ b/gb_parse/items.py
@@ -13,4 +13,5 @@ class GbParseItem(scrapy.Item):
     img_links = scrapy.Field()
     specifications = scrapy.Field()
     description = scrapy.Field()
-    author = scrapy.Field()
+    author_id = scrapy.Field()
+    author_phone = scrapy.Field()
diff --git a/gb_parse/spiders/autoyoula.py b/gb_parse/spiders/autoyoula.py
index 39727ee..c26d14d 100644
--- a/gb_parse/spiders/autoyoula.py
+++ b/gb_parse/spiders/autoyoula.py
@@ -1,5 +1,6 @@
 import scrapy
 import re
+from base64 import b64decode
 from gb_parse.items import GbParseItem
 
 
@@ -46,7 +47,8 @@ def car_parse(self, response):
         data["specifications"] = specifications_dict
         data["description"] = response.css(".AdvertCard_descriptionWrap__17EU3 "
                                            ".AdvertCard_descriptionInner__KnuRi::text").extract_first()
-        data["author"] = AutoyoulaSpider.get_author_id(response)
+        data["author_id"] = AutoyoulaSpider.get_author_id(response)
+        data["author_phone"] = AutoyoulaSpider.get_author_phone(response)
         return data
 
     @staticmethod
@@ -64,3 +66,16 @@ def get_author_id(resp):
                     )
             except TypeError:
                 pass
+
+    @staticmethod
+    def get_author_phone(resp):
+        marker = "window.transitState = decodeURIComponent"
+        for script in resp.css("script"):
+            try:
+                if marker in script.css("::text").extract_first():
+                    re_pattern = re.compile(r"phone%22%2C%22([a-zA-Z\d]+)Xw%3D%3D%22%2C%22time")
+                    result = re.findall(re_pattern, script.css("::text").extract_first())
+                    result = b64decode(b64decode(result[0])).decode('UTF-8')
+                    return result
+            except TypeError:
+                pass
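get_author_phone rests on the assumption that the phone number sits in the same URL-encoded blob, base64-encoded twice; the capture group deliberately stops before a literal Xw%3D%3D ("Xw==") tail that the author evidently found constant in the encoded token. A round trip of the double-encoding assumption with a made-up number:

    from base64 import b64decode, b64encode

    phone = "+79161234567"                        # hypothetical number
    token = b64encode(b64encode(phone.encode()))  # encode twice, as the page appears to
    print(b64decode(b64decode(token)).decode("UTF-8"))  # +79161234567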