
Commit

微博.spider (Weibo spider)
tianting12 committed Apr 30, 2020
1 parent 6726454 commit 5c2d931
Showing 19 changed files with 497 additions and 0 deletions.
6 changes: 6 additions & 0 deletions weibo/.idea/inspectionProfiles/profiles_settings.xml
(generated file; diff not rendered)

7 changes: 7 additions & 0 deletions weibo/.idea/misc.xml
(generated file; diff not rendered)

8 changes: 8 additions & 0 deletions weibo/.idea/modules.xml
(generated file; diff not rendered)

11 changes: 11 additions & 0 deletions weibo/.idea/weibo.iml
(generated file; diff not rendered)

104 changes: 104 additions & 0 deletions weibo/.idea/workspace.xml
(generated file; diff not rendered)

11 changes: 11 additions & 0 deletions weibo/scrapy.cfg
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = weibo.settings

[deploy]
#url = http://localhost:6800/
project = weibo
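
The [deploy] section above is only needed for scrapyd; for local runs, Scrapy's command line can also be driven from Python. A minimal launcher sketch, assuming the project's spider is registered under the name 'weibo' (the spider module itself is not rendered in this commit):

# run.py -- convenience launcher placed next to scrapy.cfg
# Sketch only: assumes a spider registered with name 'weibo'.
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'weibo'])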
Empty file added weibo/weibo/__init__.py
Empty file.
Binary file added weibo/weibo/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary file added weibo/weibo/__pycache__/items.cpython-37.pyc
Binary file not shown.
Binary file added weibo/weibo/__pycache__/pipelines.cpython-37.pyc
Binary file not shown.
Binary file added weibo/weibo/__pycache__/settings.cpython-37.pyc
Binary file not shown.
24 changes: 24 additions & 0 deletions weibo/weibo/items.py
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class WeiboItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()          # nickname
    jianjie = scrapy.Field()       # bio / introduction
    gender = scrapy.Field()        # gender
    weibodengji = scrapy.Field()   # Weibo level
    weiboshu = scrapy.Field()      # number of posts
    fensishu = scrapy.Field()      # follower count
    guanzhuzhe = scrapy.Field()    # following count
    chushengriqi = scrapy.Field()  # date of birth
    location = scrapy.Field()      # region
    xueli = scrapy.Field()         # education level
    # hangye = scrapy.Field()      # industry
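
For reference, a sketch of how a spider callback might populate WeiboItem. The spider below is hypothetical — its name, start URL, and selectors are illustrative placeholders, since the actual spider file is not rendered in this commit:

# Hypothetical usage sketch; URL and selectors are placeholders.
import scrapy

from weibo.items import WeiboItem

class ProfileSpider(scrapy.Spider):
    name = 'profile_sketch'
    start_urls = ['https://weibo.cn/example']  # placeholder URL

    def parse(self, response):
        item = WeiboItem()
        item['name'] = response.xpath('//title/text()').get()  # nickname
        item['location'] = 'unknown'  # region; real selector not shown in this commit
        yield item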
103 changes: 103 additions & 0 deletions weibo/weibo/middlewares.py
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class WeiboSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WeiboDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
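
Both classes above are Scrapy's unmodified stubs. If per-request customization is needed later (sites like Weibo commonly rate-limit repeated identical clients), process_request is the natural hook. A sketch of a rotating User-Agent downloader middleware, assuming a USER_AGENTS list in settings.py — a key this commit does not define:

import random

class RandomUserAgentMiddleware:
    # Sketch only: USER_AGENTS is an assumed settings key, not part of this commit.
    def __init__(self, user_agents):
        self.user_agents = user_agents

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        # Pick a random User-Agent for each outgoing request.
        if self.user_agents:
            request.headers['User-Agent'] = random.choice(self.user_agents)
        return None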
39 changes: 39 additions & 0 deletions weibo/weibo/pipelines.py
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

from pymongo import MongoClient

class WeiboPipeline:
    def process_item(self, item, spider):
        return item


class WeiboMongoPipeline:

    def __init__(self, mongodb_url, mongodb_db, mongodb_table_name):
        self.mongodb_url = mongodb_url
        self.mongodb_db = mongodb_db
        self.table_name = mongodb_table_name

    @classmethod
    def from_crawler(cls, crawler):
        # Pull MongoDB connection settings from settings.py.
        return cls(
            mongodb_url=crawler.settings.get('MONGODB_URL'),
            mongodb_db=crawler.settings.get('MONGODB_DB'),
            mongodb_table_name=crawler.settings.get('MONGODB_TABLE_NAME')
        )

    def open_spider(self, spider):
        self.client = MongoClient(self.mongodb_url)
        self.db = self.client[self.mongodb_db]

    def process_item(self, item, spider):
        # insert_one() replaces the deprecated Collection.insert().
        self.db[self.table_name].insert_one(dict(item))
        spider.logger.debug('Item inserted into MongoDB')
        return item

    def close_spider(self, spider):
        self.client.close()
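
WeiboMongoPipeline.from_crawler reads three settings keys, and the pipeline must also be enabled in ITEM_PIPELINES. The project's settings.py is not rendered in this commit, so the entries below are a sketch with placeholder values:

# settings.py (sketch; values are placeholders)
MONGODB_URL = 'mongodb://localhost:27017'
MONGODB_DB = 'weibo'
MONGODB_TABLE_NAME = 'users'

ITEM_PIPELINES = {
    'weibo.pipelines.WeiboMongoPipeline': 300,
}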