Scraping Online Rental Listings with Scrapy
I. Background
II. Creating the Project
1. Create the project: scrapy startproject ziroom
2. Enter the project directory: cd ziroom
3. Generate the spider from the crawl template (Rule/LinkExtractor requires a CrawlSpider): scrapy genspider -t crawl ziroom_spider "www.ziroom.com"
Then define the crawl rules in the generated spider (a full skeleton is sketched below):
rules = (
    # regex matching the per-city listing pages we want to crawl
    Rule(LinkExtractor(allow=r'http://.*\.ziroom.com/z\/.*/\?isOpen=0'), follow=True),
    # follow=True is required; without it the crawl stops at page 4 instead of following the pagination
    Rule(LinkExtractor(allow=r'http://.*\.ziroom.com/z\/.*d\d+-p\d+\/'), callback="parse_page", follow=True),
)
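For context, here is a minimal sketch of the CrawlSpider that holds these rules. The start URL and the city attribute are assumptions (the Host header in the settings section suggests Nanjing); the rules themselves come from above.

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

class ZiroomSpider(CrawlSpider):
    name = "ziroom_spider"
    allowed_domains = ["ziroom.com"]
    # assumed starting point: one city's listing page (nj = Nanjing)
    start_urls = ["http://nj.ziroom.com/z/?isOpen=0"]
    city = "nj"  # assumption: a city tag, stored into item['city'] later

    rules = (
        Rule(LinkExtractor(allow=r'http://.*\.ziroom.com/z\/.*/\?isOpen=0'), follow=True),
        Rule(LinkExtractor(allow=r'http://.*\.ziroom.com/z\/.*d\d+-p\d+\/'),
             callback="parse_page", follow=True),
    )

    def parse_page(self, response):
        ...  # field extraction, shown in the next section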
III. Scraping the Data
The listing data sits in a list on the page, so I extract it with XPath; a sketch of the extraction follows.
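The page markup isn't reproduced in this post, so the XPath expressions in this sketch are assumptions; only the field names come from the item defined later.

def parse_page(self, response):
    # every XPath here is an assumption about the listing markup;
    # adjust the expressions to the real page structure
    for house in response.xpath("//ul[@id='houseList']/li"):
        title = house.xpath(".//h3/a/text()").get(default="").strip()
        desc = house.xpath(".//h4/a/text()").get(default="").strip()
        location = house.xpath(".//div[@class='detail']/p[1]/span/text()").get()
        region = house.xpath(".//div[@class='detail']/p[2]/span/text()").get()
        room_url = response.urljoin(house.xpath(".//h3/a/@href").get(default=""))
        # the price is an image: keep its URL plus each digit's
        # background-position offset for decoding later
        prices_url = house.xpath(".//p[@class='price']/span/@style").re_first(r"url\((.*?)\)")
        offsets = house.xpath(".//p[@class='price']/span/@style").re(r"(-[\d.]+px)")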
Notice that the code above does not yet extract the price. That's because ziroom hides a small trap here: the price is served as an image whose digits are in scrambled order, and the frontend crops single digits out of that image by pixel position to display the price.
Copy the image URL and open it, and you will see a strip of 10 digits.
My first thought was Baidu's image-recognition API, but its free tier allows only 200 calls, and posts online claim the image URL is random; if that were true, OCR on every listing would cost money, and the alternatives were pytesseract or writing the recognition myself. I didn't want to reinvent the wheel, and installing pytesseract is a hassle, so I hoped the URL was not actually random. I crawled every rental listing in Beijing and found that, contrary to what those posts say, there are only 10 fixed image URLs in total. That makes things simple.
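If you want to reproduce that check, a one-off sketch is enough (prices_urls.txt is a hypothetical dump of every price-image URL collected during the crawl, one per line):

# count the distinct price-image URLs seen across a full crawl
with open("prices_urls.txt") as f:
    distinct = {line.strip() for line in f if line.strip()}
print(len(distinct), "distinct price-image URLs")  # came out as 10 for Beijing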
First I ran Baidu's OCR once over the 10 images to read out their digits. Then, for each displayed digit, the background-position offset tells me which slot of the image is being cropped, and concatenating those digits yields the price. For example, -171.2px corresponds to slot 8 (counting from 0), which in the image above is the digit 2. Observation shows the possible offsets are:
['-0px', '-21.4px', '-42.8px', '-64.2px', '-85.6px', '-107px', '-128.4px', '-149.8px', '-171.2px', '-192.6px']
So it's enough to match a listing's price-image URL against the 10 known URLs to get the image's digit string, then index into it by offset. A sketch of the code follows.
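This is a minimal sketch of that comparison, assuming the one-off OCR results have been written into the mapping below; decode_price is a hypothetical helper name.

# offsets in slot order: an offset's index in this list is the index of
# the corresponding digit inside the image's digit string
POSITIONS = ['-0px', '-21.4px', '-42.8px', '-64.2px', '-85.6px',
             '-107px', '-128.4px', '-149.8px', '-171.2px', '-192.6px']

# map each of the 10 fixed image URLs to the digit string OCR'd from it;
# the entry below is a placeholder, fill in the real URLs and digits
PRICE_IMAGE_DIGITS = {
    # 'http://static8.ziroom.com/.../price_image.png': '6402987315',
}

def decode_price(prices_url, offsets):
    """Rebuild the displayed price from the price-image URL and the
    background-position offset of each rendered digit."""
    digits = PRICE_IMAGE_DIGITS[prices_url]
    return int(''.join(digits[POSITIONS.index(off)] for off in offsets))

In parse_page, price = decode_price(prices_url, offsets) then fills the price field of the item built in the next section.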
IV. Storing the Data
import scrapy

class ZiroomItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    desc = scrapy.Field()
    location = scrapy.Field()
    region = scrapy.Field()
    prices_url = scrapy.Field()  # URL of the price image
    price = scrapy.Field()       # decoded numeric price
    room_url = scrapy.Field()    # link to the listing detail page
    city = scrapy.Field()
from ziroom.items import ZiroomItem

# in parse_page: assemble the item and hand it to the pipeline
item = ZiroomItem(
    title=title,
    desc=desc,
    location=location,
    region=region,
    prices_url=prices_url,
    price=price,
    room_url=room_url,
    city=self.city,
)
yield item
from pymysql import cursors
from twisted.enterprise import adbapi

# asynchronous inserts via Twisted's connection pool: faster, because
# database writes no longer block the crawl
class TwistedPipeline(object):
    def __init__(self):
        dbparams = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '1234',
            'database': 'ziroom',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor  # the cursor class must be specified
        }
        # set up the connection pool
        self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
                insert into city(Id, title, area, location, city, region, price, room_url,
                price_url)
                values(null, %s, %s, %s, %s, %s, %s, %s, %s)
            """
        return self._sql

    def process_item(self, item, spider):
        # handing insert_item to runInteraction makes the insert asynchronous
        defer = self.dbpool.runInteraction(self.insert_item, item)
        # add error handling; item and spider are passed along so we can
        # tell which item (and which spider) failed
        defer.addErrback(self.handle_error, item, spider)
        # return the item so later pipelines still receive it
        return item

    # item is the one passed in from process_item
    def insert_item(self, cursor, item):
        cursor.execute(self.sql, (item['title'], item['desc'], item['location'],
                                  item['city'], item['region'], item['price'],
                                  item['room_url'], item['prices_url']))

    # error handling
    def handle_error(self, error, item, spider):
        print("=" * 10)
        print(error)
        print("=" * 10)
import random

class UserAgentDownloadMiddleware(object):
    # pool of User-Agent strings to rotate through; a class attribute,
    # so the list isn't rebuilt on every request
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
        "Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0",
        "Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:63.0) Gecko/20100101 Firefox/63.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
        "Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; TencentTraveler 4.0; Trident/4.0; SLCC1; Media Center PC 5.0; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30618)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; QQDownload 1.7; GTB6.6; TencentTraveler 4.0; SLCC1; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.5.30729; .NET CLR 3.0.30729)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; iCafeMedia; TencentTraveler 4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko, Safari) Cheshire/1.0.UNOFFICIAL",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en) AppleWebKit/418.9 (KHTML, like Gecko, Safari) Cheshire/1.0.UNOFFICIAL"
    ]

    def process_request(self, request, spider):
        # pick a random User-Agent for every outgoing request
        request.headers['User-Agent'] = random.choice(self.USER_AGENTS)
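To sanity-check the rotation, a throwaway spider (hypothetical, not part of the project) can hit httpbin.org, which echoes back the User-Agent header it receives:

import scrapy

class UACheckSpider(scrapy.Spider):
    name = "ua_check"

    def start_requests(self):
        # dont_filter=True lets us request the same URL several times
        for _ in range(3):
            yield scrapy.Request("http://httpbin.org/user-agent", dont_filter=True)

    def parse(self, response):
        self.logger.info(response.text)  # logs the UA actually sent; it varies

Run it with scrapy crawl ua_check and the logged User-Agent values should vary across requests.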
V. Basic Settings
1. ROBOTSTXT_OBEY = False  # do not obey robots.txt
2. Set the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Host': 'nj.ziroom.com',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Upgrade-Insecure-Requests': '1',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
}
3. Enable the downloader middleware:
DOWNLOADER_MIDDLEWARES = {
'ziroom.middlewares.UserAgentDownloadMiddleware': 543
}
4. Enable the pipeline:
ITEM_PIPELINES = {
'ziroom.pipelines.TwistedPipeline': 300
}
VI. Running the Spider
The spider must be run from inside the project directory:
scrapy crawl ziroom_spider
While it runs, the console logs each scraped item; when the crawl finishes, Scrapy prints its closing stats.