mirror of https://github.com/aykhans/PostScrape.git, synced 2025-04-16 06:33:12 +00:00
first commit
commit 6cabf7acd5
3 .gitignore vendored Normal file
@@ -0,0 +1,3 @@
__pycache__
.venv
cars.jl
7 README.md Normal file
@@ -0,0 +1,7 @@
## Installation
pip install scrapy<br />
Set last_page in post_scrape/spiders/car_spider.py

## Run
cd src/<br />
scrapy crawl car -O cars.jl
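The -O cars.jl flag exports one JSON object per line (JSON Lines). A minimal sketch of loading the export back in Python, assuming the field names yielded by car_spider.py below:

import json

# Each line of cars.jl is one scraped listing serialized as JSON.
with open('cars.jl', encoding='utf-8') as f:
    cars = [json.loads(line) for line in f]

print(len(cars), 'listings')
print(cars[0]['brand'], cars[0]['model'], cars[0]['price'], cars[0]['currency'])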
0 src/post_scrape/__init__.py Normal file

12 src/post_scrape/items.py Normal file
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class PostScrapeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
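The spider below yields plain dicts rather than populating this placeholder Item. A hypothetical sketch of declaring the same data as typed fields (the class name CarItem and this field subset are assumptions, mirroring the dict yielded in car_spider.py):

import scrapy


class CarItem(scrapy.Item):
    # Hypothetical typed item mirroring the dict yielded by car_spider.py.
    url = scrapy.Field()
    brand = scrapy.Field()
    model = scrapy.Field()
    year = scrapy.Field()
    price = scrapy.Field()
    currency = scrapy.Field()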
103 src/post_scrape/middlewares.py Normal file
@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class PostScrapeSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class PostScrapeDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
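car_spider.py below passes headers=self.headers to every Request by hand. A hypothetical sketch of centralizing that in a downloader middleware instead (the class name UserAgentHeaderMiddleware is an assumption, and it would still need an entry in DOWNLOADER_MIDDLEWARES):

class UserAgentHeaderMiddleware:
    # Hypothetical: set a browser-like User-Agent on every outgoing request
    # so individual Requests no longer need headers=self.headers.
    USER_AGENT = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36')

    def process_request(self, request, spider):
        request.headers.setdefault('User-Agent', self.USER_AGENT)
        return None

Scrapy's built-in USER_AGENT setting (commented out in settings.py below) achieves much the same effect with no custom code.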
13 src/post_scrape/pipelines.py Normal file
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class PostScrapePipeline:
    def process_item(self, item, spider):
        return item
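The committed pipeline is a pass-through. A minimal sketch of a validation step that drops listings without a price (the class name PriceValidationPipeline is hypothetical, and it would need its own ITEM_PIPELINES entry in settings.py):

from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class PriceValidationPipeline:
    # Hypothetical: discard any scraped listing that is missing a price.
    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        if not adapter.get('price'):
            raise DropItem(f"Missing price in {adapter.get('url')}")
        return item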
88 src/post_scrape/settings.py Normal file
@@ -0,0 +1,88 @@
# Scrapy settings for post_scrape project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'post_scrape'

SPIDER_MODULES = ['post_scrape.spiders']
NEWSPIDER_MODULE = 'post_scrape.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'post_scrape (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'post_scrape.middlewares.PostScrapeSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'post_scrape.middlewares.PostScrapeDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'post_scrape.pipelines.PostScrapePipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
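The commented-out blocks above double as a checklist. A hypothetical sketch of the polite-crawling values one might uncomment for a longer run against turbo.az (all numbers are illustrative, not from the commit):

# Illustrative values only; the committed settings keep these commented out.
DOWNLOAD_DELAY = 1
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 5
AUTOTHROTTLE_MAX_DELAY = 60
ITEM_PIPELINES = {
    'post_scrape.pipelines.PostScrapePipeline': 300,
}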
4 src/post_scrape/spiders/__init__.py Normal file
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
118 src/post_scrape/spiders/car_spider.py Normal file
@@ -0,0 +1,118 @@
import scrapy


class ToScrapeCSSSpider(scrapy.Spider):
    name = "car"
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
    }
    # Index of the last listing page to crawl (see README: set last_page
    # before running).
    last_page = 1

    def start_requests(self):
        urls = [
            'https://turbo.az/autos',
        ]

        for url in urls:
            yield scrapy.Request(url=url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        # The third "products" block on the listing page holds the regular
        # (non-promoted) listings.
        posts = response.xpath('//div[@class="products-container"]//div[@class="products"]')[2].xpath('./div/a[@class="products-i__link"]/@href')

        for p in posts:
            yield response.follow(f"https://turbo.az{p.get()}", callback=self.parse_detail_url, headers=self.headers)

        # Follow pagination until the page number in the "?page=N" query
        # string exceeds last_page.
        next_page = response.xpath('//a[@rel="next"]/@href').get()
        if next_page is not None and int(next_page[next_page.rfind('=')+1:]) <= self.last_page:
            yield response.follow(f"https://turbo.az{next_page}", callback=self.parse, headers=self.headers)

    def parse_detail_url(self, r):
        # Dealer ("avto salon") pages list several phone numbers; private
        # listings have a single phone link.
        if r.xpath('//div[@class="shop-container"]'):
            avto_salon = True
            phone = r.xpath('//div[@class="shop-contact--phones-list"]//a[@class="shop-contact--phones-number"]/text()').getall()
        else:
            avto_salon = False
            phone = r.xpath('//a[@class="phone"]/text()').get()

        barter, loan = False, False

        if r.xpath('//li[@class="product-properties-i product-properties-i_loan"]'):
            loan = True
        if r.xpath('//li[@class="product-properties-i product-properties-i_barter"]'):
            barter = True

        seats_count = r.xpath('//label[@for="ad_seats_count"]')
        prior_owners_count = r.xpath('//label[@for="ad_prior_owners_count"]')

        price = r.xpath('//li[@class="product-properties-i product-properties_price"]//div[@class="product-price"]/text()').get().replace(' ', '')
        currency = r.xpath('//li[@class="product-properties-i product-properties_price"]//div[@class="product-price"]/span/text()').get()
        extra_fields = r.xpath('//p[@class="product-extras-i"]/text()').getall()
        description = r.xpath('//div[@class="product-description"]/p/text()').getall()
        market = r.xpath('//li[@class="product-properties-i product-properties-market"]//div/text()').get()
        data = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/text()').getall()
        data2 = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/a/text()').getall()

        # Seats and prior-owner counts appear either as plain text in the
        # shared "data" list ('4 və daha çox' = "4 or more" owners, '8+'
        # seats) or inside <span> elements; pop them out of "data" so the
        # positional indices used below stay stable.
        if seats_count and prior_owners_count:
            if '4 və daha çox' in data and '8+' in data:
                prior_owners_count = data.pop(data.index('4 və daha çox'))
                seats_count = data.pop(data.index('8+'))
            elif '4 və daha çox' in data:
                prior_owners_count = data.pop(data.index('4 və daha çox'))
                seats_count = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/span/text()').get()
            elif '8+' in data:
                seats_count = data.pop(data.index('8+'))
                prior_owners_count = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/span/text()').get()
            else:
                seats_count, prior_owners_count = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/span/text()').getall()

        elif seats_count:
            if '8+' in data:
                seats_count = data.pop(data.index('8+'))
            else:
                seats_count = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/span/text()').get()
            prior_owners_count = None
        elif prior_owners_count:
            if '4 və daha çox' in data:
                prior_owners_count = data.pop(data.index('4 və daha çox'))
            else:
                prior_owners_count = r.xpath('//li[@class="product-properties-i"]/div[@class="product-properties-value"]/span/text()').get()
            seats_count = None
        else:
            seats_count = None
            prior_owners_count = None

        # data[10] holds the damage/paint status, e.g. 'Vuruğu yoxdur,
        # rənglənməyib' ("no crash damage, not repainted").
        if len(data) < 11:
            crashed, painted = None, None
        else:
            crashed = 'Vuruğu yoxdur' not in data[10]
            painted = 'rənglənməyib' not in data[10]

        yield {
            'url': r.url,
            'avto_salon': avto_salon,
            'phone': phone,
            'extra_fields': extra_fields,
            'description': description,
            'city': data[0],
            'brand': data2[0],
            'model': data2[1],
            'year': int(data2[2]),
            'category': data[1],
            'color': data[2],
            'engine_volume': int(float(data[3][:-2])*1000),  # e.g. "2.0 L" -> 2000 (cc)
            'engine_power': int(data[4][:-5]),  # strips a 5-char unit suffix (e.g. " a.g.", horsepower)
            'fuel_type': data[5],
            'mileage': int(data[6][:-3].replace(' ', '')),  # e.g. "150 000 km" -> 150000
            'mileage_type': data[6][-2:],  # unit suffix, e.g. "km"
            'transmission': data[7],
            'gear': data[8],
            'price': int(price),
            'currency': currency,
            'loan': loan,
            'barter': barter,
            'market': market,
            'seats_count': seats_count,
            'prior_owners_count': prior_owners_count,
            'crashed': crashed,
            'painted': painted,
        }
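The pagination guard above slices the page number out of the href by hand. A hypothetical, more explicit alternative using only the standard library (the function name page_number is an assumption):

from urllib.parse import urlparse, parse_qs

def page_number(href: str) -> int:
    # Extract N from a link like '/autos?page=3'; default to 1 if absent.
    query = parse_qs(urlparse(href).query)
    return int(query.get('page', ['1'])[0])

# e.g. page_number('/autos?page=3') == 3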
11 src/scrapy.cfg Normal file
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = post_scrape.settings

[deploy]
#url = http://localhost:6800/
project = post_scrape