#!/usr/bin/env node

/**
 * Example demonstrating the ScrapeGraphAI Crawler with sitemap functionality.
 *
 * This example shows how to use the crawler with sitemap enabled for better page discovery:
 * - The sitemap helps the crawler discover pages efficiently
 * - Better coverage of website content
 * - More comprehensive crawling results
 *
 * Requirements:
 * - Node.js 14+
 * - scrapegraph-js
 * - dotenv
 * - A valid API key (set in a .env file as SGAI_APIKEY=your_key, or as an environment variable)
 *
 * Usage:
 *   node crawl_sitemap_example.js
 */

import { crawl, getCrawlRequest } from '../index.js';
import 'dotenv/config';

// Example .env file:
// SGAI_APIKEY=your_sgai_api_key

const apiKey = process.env.SGAI_APIKEY;

/**
 * Poll for crawl results with intelligent backoff to avoid rate limits.
 * @param {string} crawlId - The crawl ID to poll for
 * @param {number} maxAttempts - Maximum number of polling attempts
 * @returns {Promise<Object>} The final result or throws an exception on timeout/failure
 */
async function pollForResult(crawlId, maxAttempts = 20) {
  console.log("⏳ Starting to poll for results with rate-limit protection...");

  // Initial wait to give the job time to start processing
  await new Promise(resolve => setTimeout(resolve, 15000));

  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const result = await getCrawlRequest(apiKey, crawlId);
      const status = result.status;

      if (status === "success") {
        return result;
      } else if (status === "failed") {
        throw new Error(`Crawl failed: ${result.error || 'Unknown error'}`);
      } else {
        // Calculate progressive wait time: start at 15s, increase gradually
        const baseWait = 15000;
        const progressiveWait = Math.min(60000, baseWait + (attempt * 3000)); // Cap at 60s

        console.log(`⏳ Status: ${status} (attempt ${attempt + 1}/${maxAttempts}) - waiting ${progressiveWait/1000}s...`);
        await new Promise(resolve => setTimeout(resolve, progressiveWait));
      }
    } catch (error) {
      // A "Crawl failed" status is terminal, so surface it instead of retrying
      if (error.message.startsWith('Crawl failed')) {
        throw error;
      }

      if (error.message.toLowerCase().includes('rate') || error.message.includes('429')) {
        const waitTime = Math.min(90000, 45000 + (attempt * 10000));
        console.log(`⚠️ Rate limit detected in error, waiting ${waitTime/1000}s...`);
        await new Promise(resolve => setTimeout(resolve, waitTime));
        continue;
      } else {
        console.log(`❌ Error polling for results: ${error.message}`);
        if (attempt < maxAttempts - 1) {
          await new Promise(resolve => setTimeout(resolve, 20000)); // Wait before retry
          continue;
        }
        throw error;
      }
    }
  }

  throw new Error(`⏰ Timeout: Job did not complete after ${maxAttempts} attempts`);
}

/**
 * Sitemap-enabled Crawling Example
 *
 * This example demonstrates how to use sitemap for better page discovery.
 * Sitemap helps the crawler find more pages efficiently by using the website's sitemap.xml.
 */
async function sitemapCrawlingExample() {
  console.log("=".repeat(60));
  console.log("SITEMAP-ENABLED CRAWLING EXAMPLE");
  console.log("=".repeat(60));
  console.log("Use case: Comprehensive website crawling with sitemap discovery");
  console.log("Benefits: Better page coverage, more efficient crawling");
  console.log("Features: Sitemap-based page discovery, structured data extraction");
  console.log();

  // Target URL - using a website that likely has a sitemap
  const url = "https://www.giemmeagordo.com/risultati-ricerca-annunci/?sort=newest&search_city=&search_lat=null&search_lng=null&search_category=0&search_type=0&search_min_price=&search_max_price=&bagni=&bagni_comparison=equal&camere=&camere_comparison=equal";

  // Schema for real estate listings
  const schema = {
    "type": "object",
    "properties": {
      "listings": {
        "type": "array",
        "items": {
          "type": "object",
          "properties": {
            "title": { "type": "string" },
            "price": { "type": "string" },
            "location": { "type": "string" },
            "description": { "type": "string" },
            "features": { "type": "array", "items": { "type": "string" } },
            "url": { "type": "string" }
          }
        }
      }
    }
  };

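  // The prompt below works together with the schema above to guide the AI extraction on each crawled page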
  const prompt = "Extract all real estate listings with their details including title, price, location, description, and features";

  console.log(`🌐 Target URL: ${url}`);
  console.log("🤖 AI Prompt: Extract real estate listings");
  console.log("📊 Crawl Depth: 1");
  console.log("📄 Max Pages: 10");
  console.log("🗺️ Use Sitemap: true (enabled for better page discovery)");
  console.log("🏠 Same Domain Only: true");
  console.log("💾 Cache Website: true");
  console.log("💡 Mode: AI extraction with sitemap discovery");
  console.log();

  // Start the sitemap-enabled crawl job
  console.log("🚀 Starting sitemap-enabled crawl job...");

  try {
    // Call crawl with sitemap=true for better page discovery
    const response = await crawl(apiKey, url, prompt, schema, {
      extractionMode: true, // AI extraction mode
      depth: 1,
      maxPages: 10,
      sameDomainOnly: true,
      cacheWebsite: true,
      sitemap: true, // Enable sitemap for better page discovery
    });

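    // The crawl ID field name can vary between responses, so check the common variants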
    const crawlId = response.id || response.task_id || response.crawl_id;

    if (!crawlId) {
      console.log("❌ Failed to start sitemap-enabled crawl job");
      return;
    }

    console.log(`📋 Crawl ID: ${crawlId}`);
    console.log("⏳ Polling for results...");
    console.log();

    // Poll for results with rate-limit protection
    const result = await pollForResult(crawlId, 20);

    console.log("✅ Sitemap-enabled crawl completed successfully!");
    console.log();

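    // Unpack the crawl result payload; this example reads llm_result, crawled_urls,
    // credits_used, and pages_processed from result.result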
    const resultData = result.result || {};
    const llmResult = resultData.llm_result || {};
    const crawledUrls = resultData.crawled_urls || [];
    const creditsUsed = resultData.credits_used || 0;
    const pagesProcessed = resultData.pages_processed || 0;

    // Prepare JSON output
    const jsonOutput = {
      crawl_results: {
        pages_processed: pagesProcessed,
        credits_used: creditsUsed,
        cost_per_page: pagesProcessed > 0 ? creditsUsed / pagesProcessed : 0,
        crawled_urls: crawledUrls,
        sitemap_enabled: true
      },
      extracted_data: llmResult
    };

    // Print JSON output
    console.log("📊 RESULTS IN JSON FORMAT:");
    console.log("-".repeat(40));
    console.log(JSON.stringify(jsonOutput, null, 2));

    // Print summary
    console.log("\n" + "=".repeat(60));
    console.log("📈 CRAWL SUMMARY:");
    console.log("=".repeat(60));
    console.log(`✅ Pages processed: ${pagesProcessed}`);
    console.log(`💰 Credits used: ${creditsUsed}`);
    console.log(`🔗 URLs crawled: ${crawledUrls.length}`);
    console.log(`🗺️ Sitemap enabled: Yes`);
    console.log(`📊 Data extracted: ${llmResult.listings ? llmResult.listings.length : 0} listings found`);

  } catch (error) {
    console.log(`❌ Sitemap-enabled crawl failed: ${error.message}`);
  }
}

/**
 * Main function to run the sitemap crawling example.
 */
async function main() {
  console.log("🌐 ScrapeGraphAI Crawler - Sitemap Example");
  console.log("Comprehensive website crawling with sitemap discovery");
  console.log("=".repeat(60));

  // Check if API key is set
  if (!apiKey) {
    console.log("⚠️ Please set your API key in the environment variable SGAI_APIKEY");
    console.log("   Option 1: Create a .env file with: SGAI_APIKEY=your_api_key_here");
    console.log("   Option 2: Set environment variable: export SGAI_APIKEY=your_api_key_here");
    console.log();
    console.log("   You can get your API key from: https://dashboard.scrapegraphai.com");
    return;
  }

  console.log(`🔑 Using API key: ${apiKey.substring(0, 10)}...`);
  console.log();

  // Run the sitemap crawling example
  await sitemapCrawlingExample();

  console.log("\n" + "=".repeat(60));
  console.log("🎉 Example completed!");
  console.log("💡 This demonstrates sitemap-enabled crawling:");
  console.log("   • Better page discovery using sitemap.xml");
  console.log("   • More comprehensive website coverage");
  console.log("   • Efficient crawling of structured websites");
  console.log("   • Perfect for e-commerce, news sites, and content-heavy websites");
}

// Run the example
main().catch(console.error);