diff --git a/insta_parse/__init__.py b/insta_parse/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/insta_parse/items.py b/insta_parse/items.py
new file mode 100644
index 0000000..1b237ac
--- /dev/null
+++ b/insta_parse/items.py
@@ -0,0 +1,20 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class TagParseItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    date_parse = scrapy.Field()
+    data = scrapy.Field()
+
+
+class PostParseItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    date_parse = scrapy.Field()
+    data = scrapy.Field()
diff --git a/insta_parse/loaders.py b/insta_parse/loaders.py
new file mode 100644
index 0000000..c6c5b28
--- /dev/null
+++ b/insta_parse/loaders.py
@@ -0,0 +1,14 @@
+from scrapy.loader import ItemLoader
+from itemloaders.processors import TakeFirst
+
+
+class TagLoader(ItemLoader):
+    default_item_class = dict
+    date_parse_out = TakeFirst()
+    data_out = TakeFirst()
+
+
+class PostLoader(ItemLoader):
+    default_item_class = dict
+    date_parse_out = TakeFirst()
+    data_out = TakeFirst()
\ No newline at end of file
diff --git a/insta_parse/middlewares.py b/insta_parse/middlewares.py
new file mode 100644
index 0000000..135662e
--- /dev/null
+++ b/insta_parse/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class InstaParseSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class InstaParseDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/insta_parse/pipelines.py b/insta_parse/pipelines.py
new file mode 100644
index 0000000..e61bbe8
--- /dev/null
+++ b/insta_parse/pipelines.py
@@ -0,0 +1,48 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+from scrapy import Request
+from scrapy.pipelines.images import ImagesPipeline
+from .settings import BOT_NAME
+from pymongo import MongoClient
+
+
+class InstaParsePipeline:
+    def __init__(self):
+        client = MongoClient()
+        self.db = client[BOT_NAME]
+
+    def process_item(self, item, spider):
+        # one collection per spider/item-class pair, e.g. "instagramPostParseItem"
+        self.db[spider.name + type(item).__name__].insert_one(ItemAdapter(item).asdict())
+        return item
+
+
+class InstaImageDownloadPipeline(ImagesPipeline):
+    def get_media_requests(self, item, info):
+        if item['data'].get("carousel_media"):
+            for media in item['data'].get("carousel_media"):
+                # download only the first photo out of the set of
+                # differently sized candidates
+                yield Request(media["image_versions2"]["candidates"][0]['url'])
+        else:
+            if item['data'].get("image_versions2"):
+                yield Request(item['data']["image_versions2"]["candidates"][0]['url'])
+
+    def item_completed(self, results, item, info):
+        # results holds one (success, file_info) tuple per request yielded
+        # above, in the same order
+        if item['data'].get("carousel_media"):
+            # pair each carousel entry with its own download result instead of
+            # writing the whole results list into every entry
+            for media, (ok, file_info) in zip(item['data']["carousel_media"], results):
+                media["image_versions2"]["candidates"] = [file_info]
+        else:
+            if item['data'].get("image_versions2"):
+                item['data']["image_versions2"]["candidates"] = [itm[1] for itm in results]
+        return item
diff --git a/insta_parse/settings.py b/insta_parse/settings.py
new file mode 100644
index 0000000..1e8d11a
--- /dev/null
+++ b/insta_parse/settings.py
@@ -0,0 +1,92 @@
+# Scrapy settings for insta_parse project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'insta_parse'
+
+SPIDER_MODULES = ['insta_parse.spiders']
+NEWSPIDER_MODULE = 'insta_parse.spiders'
+
+LOG_ENABLED = True
+LOG_LEVEL = "DEBUG"
+IMAGES_STORE = 'images'
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+DOWNLOAD_DELAY = 0.05
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Keep cookies enabled (the spider depends on the logged-in session)
+COOKIES_ENABLED = True
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+DEFAULT_REQUEST_HEADERS = {
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+    "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
+}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'insta_parse.middlewares.InstaParseSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'insta_parse.middlewares.InstaParseDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'insta_parse.pipelines.InstaParsePipeline': 300,
+    'insta_parse.pipelines.InstaImageDownloadPipeline': 250,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+AUTOTHROTTLE_START_DELAY = 2
+# The maximum download delay to be set in case of high latencies
+AUTOTHROTTLE_MAX_DELAY = 10
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+AUTOTHROTTLE_DEBUG = True
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/insta_parse/spiders/__init__.py b/insta_parse/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/insta_parse/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/insta_parse/spiders/instagram.py b/insta_parse/spiders/instagram.py
new file mode 100644
index 0000000..c82fa78
--- /dev/null
+++ b/insta_parse/spiders/instagram.py
@@ -0,0 +1,102 @@
+import json
+import datetime
+import scrapy
+from ..items import TagParseItem, PostParseItem
+from ..loaders import TagLoader, PostLoader
+
+
+class InstagramSpider(scrapy.Spider):
+    name = "instagram"
+    allowed_domains = ["www.instagram.com", "i.instagram.com"]
+    start_urls = ["https://www.instagram.com/accounts/login/"]
+    _login_url = "https://www.instagram.com/accounts/login/ajax/"
+    _tags_path = "/explore/tags/"
+    pagin_post_url = 'https://i.instagram.com/api/v1/tags/'
+    ig_app_id = '936619743392459'
+
+    def __init__(self, login, password, tags, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.login = login
+        self.password = password
+        self.tags = tags
+
+    def parse(self, response, *args, **kwargs):
+        try:
+            js_data = self.js_data_extract(response)
+            yield scrapy.FormRequest(
+                self._login_url,
+                method="POST",
+                callback=self.parse,
+                formdata={"username": self.login, "enc_password": self.password},
+                headers={"X-CSRFToken": js_data["config"]["csrf_token"]},
+            )
+        except AttributeError:
+            # the login endpoint answers with JSON that carries no _sharedData
+            # script tag, so js_data_extract raises AttributeError once we are
+            # past the login page
+            data = response.json()
+            if data['authenticated']:
+                for tag in self.tags:
+                    yield response.follow(f"{self._tags_path}{tag}/", callback=self.tag_page_parse)
+            else:
+                yield response.follow(self.start_urls[0], callback=self.parse)
+
+    def tag_page_parse(self, response):
+        data = self.js_data_extract(response)
+        item = TagParseItem()
+        tag_loader = TagLoader(item=item)
+        tag_name = data['entry_data']['TagPage'][0]['data']['name']
+        token = data["config"]["csrf_token"]
+        top_posts = data['entry_data']['TagPage'][0]['data'].pop('top')
+        yield from self.post_parse(top_posts, token, tag_name, 'top')
+        recent_posts = data['entry_data']['TagPage'][0]['data'].pop('recent')
+        yield from self.post_parse(recent_posts, token, tag_name, 'recent')
+        tag_loader.add_value("date_parse", datetime.datetime.now())
+        tag_loader.add_value("data", data['entry_data']['TagPage'][0]['data'])
+        yield tag_loader.load_item()
+
+    def post_parse(self, data, token, tag_name, type_post):
+        sections = data.pop('sections')
+        pagination = data  # renamed: once 'sections' is popped, the rest is pagination metadata
+        for section in sections:
+            for media in section['layout_content']['medias']:
+                result = {}
+                result.update(media['media'])
+                result.update(pagination)
+                item = PostParseItem()
+                post_loader = PostLoader(item=item)
+                post_loader.add_value("date_parse", datetime.datetime.now())
+                post_loader.add_value("data", result)
+                yield post_loader.load_item()
+        if pagination['more_available'] and (type_post == 'recent'):
+            formdata = {"include_persistent": "0",
+                        "max_id": pagination['next_max_id'],
+                        "page": str(pagination['next_page']),
+                        "surface": 'grid',
+                        "tab": type_post}
+            if pagination['next_media_ids']:
+                formdata.update({"next_media_ids": list(map(str, pagination['next_media_ids']))})
+            yield scrapy.FormRequest(
+                f'{self.pagin_post_url}{tag_name}/sections/',
+                method="POST",
+                callback=self.pagination_follow,
+                formdata=formdata,
+                headers={"X-CSRFToken": token, "X-IG-App-ID": self.ig_app_id},
+                meta={'token': token, 'tag_name': tag_name, 'type_post': type_post},
+            )
+
+    def pagination_follow(self, response):
+        data = response.json()
+        token = response.meta['token']
+        tag_name = response.meta['tag_name']
+        type_post = response.meta['type_post']
+        yield from self.post_parse(data, token, tag_name, type_post)
+
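+    # Instagram embeds the page state in the HTML as a JS assignment of the
+    # form "window._sharedData = {...};"; the helper below pulls that script
+    # tag out and parses the JSON payload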
+    def js_data_extract(self, response):
+        script = response.xpath(
+            "//script[contains(text(), 'window._sharedData =')]/text()"
+        ).extract_first()
+        return json.loads(script.replace("window._sharedData = ", "")[:-1])  # [:-1] drops the trailing ';'
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..242f406
--- /dev/null
+++ b/main.py
@@ -0,0 +1,20 @@
+import os
+import dotenv
+from scrapy.crawler import CrawlerProcess
+from scrapy.settings import Settings
+from insta_parse.spiders.instagram import InstagramSpider
+
+
+if __name__ == "__main__":
+    dotenv.load_dotenv(".env")
+    crawler_settings = Settings()
+    crawler_settings.setmodule("insta_parse.settings")
+    crawler_process = CrawlerProcess(settings=crawler_settings)
+    tags = ["fpga", "xilinx", "altera"]
+    crawler_process.crawl(
+        InstagramSpider,
+        login=os.getenv("INST_LOGIN"),
+        password=os.getenv("INST_PSWORD"),
+        tags=tags,
+    )
+    crawler_process.start()
\ No newline at end of file
diff --git a/scrapy.cfg b/scrapy.cfg
new file mode 100644
index 0000000..70ed6fc
--- /dev/null
+++ b/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = insta_parse.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = insta_parse
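Usage note: main.py reads the Instagram credentials from a .env file via python-dotenv, under the variable names INST_LOGIN and INST_PSWORD (spelled as in the source). A minimal .env sketch:

    INST_LOGIN=your_instagram_login
    INST_PSWORD=your_instagram_password

With a MongoDB server reachable on the default localhost port (InstaParsePipeline connects with a bare MongoClient()), running python main.py starts the crawl: items land in the insta_parse database, one collection per spider/item-class pair, and images are saved under the images/ directory configured by IMAGES_STORE.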