My understanding of Scrapy's request-deduplication module.
Let's go straight to the source code.

# site-packages/scrapy/core/scheduler.py
# The scheduler object is built via the class's from_crawler() factory method.
class Scheduler(object):

    def __init__(self, dupefilter, jobdir=None, dqclass=None, mqclass=None,
                 logunser=False, stats=None, pqclass=None, crawler=None):
        """
        :param dupefilter: the deduplication component
        :param dqclass: disk queue class -- a LIFO disk queue whose main job
            is to let a stopped crawl pick up where it left off on restart
        :param mqclass: memory queue class, which exists in support of pqclass
        :param pqclass: priority queue class (from the third-party queuelib
            package) that orders requests by priority
        """
        self.df = dupefilter
        # ... (remaining attributes omitted)

    @classmethod
    def from_crawler(cls, crawler):
        # Abridged: the real method loads DUPEFILTER_CLASS from the settings
        # and instantiates it through create_instance().
        dupefilter = create_instance(dupefilter_cls, crawler.settings, crawler)
        return cls(dupefilter, dqclass=dqclass, mqclass=mqclass, pqclass=pqclass)

    def enqueue_request(self, request):
        # Unless the request opts out with dont_filter=True, ask the
        # dupefilter whether this fingerprint has been seen before.
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        # ... (otherwise the request is pushed onto the queue)
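
A quick way to see that filtering branch from the spider side: dont_filter=True makes enqueue_request skip the request_seen() check entirely. A minimal sketch (the URL and callback names are placeholders):

import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'example'
    start_urls = ['http://www.example.com/']

    def parse(self, response):
        # Dropped: same fingerprint as the start request, and
        # dont_filter defaults to False.
        yield scrapy.Request(response.url, callback=self.parse_item)
        # Queued anyway: dont_filter=True short-circuits the
        # `not request.dont_filter and self.df.request_seen(request)` test.
        yield scrapy.Request(response.url, callback=self.parse_item,
                             dont_filter=True)

    def parse_item(self, response):
        self.logger.info('fetched %s again', response.url)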

# scrapy/dupefilters.py
class RFPDupeFilter(BaseDupeFilter):

    def __init__(self, path=None, debug=False):
        self.file = None           # set when a JOBDIR path is given, for persistence
        self.fingerprints = set()  # in-memory set of seen fingerprints

    def request_seen(self, request):
        fp = self.request_fingerprint(request)
        if fp in self.fingerprints:
            return True            # duplicate: the scheduler will drop it
        self.fingerprints.add(fp)
        if self.file:
            self.file.write(fp + '\n')

    def request_fingerprint(self, request):
        return request_fingerprint(request)  # delegates to scrapy.utils.request
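
To check this behavior outside a crawl, RFPDupeFilter can be driven directly. A small sketch (the URLs are made up) showing that the second request, with its query parameters reordered, is reported as already seen:

from scrapy import Request
from scrapy.dupefilters import RFPDupeFilter

df = RFPDupeFilter()
r1 = Request('http://www.example.com/query?id=111&cat=222')
r2 = Request('http://www.example.com/query?cat=222&id=111')

print(df.request_seen(r1))  # None (falsy): first time this fingerprint appears
print(df.request_seen(r2))  # True: canonicalize_url() sorts the query string,
                            # so r1 and r2 share one fingerprint

Swapping in a custom filter is just a settings change: point the DUPEFILTER_CLASS setting at your own subclass and from_crawler() will instantiate it instead.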

# scrapy/utils/request.py
import hashlib
import weakref

from w3lib.url import canonicalize_url
from scrapy.utils.python import to_bytes

_fingerprint_cache = weakref.WeakKeyDictionary()

def request_fingerprint(request, include_headers=None, keep_fragments=False):
    """
    to_bytes(request.method)
        The HTTP method (GET, POST, ...) is part of the fingerprint.

    canonicalize_url()
        Normalizes the URL, so that
            http://www.example.com/query?id=111&cat=222
            http://www.example.com/query?cat=222&id=111
        hash to the same fingerprint: the parameters appear in a different
        order but carry the same values, so both URLs address the same
        resource, which matches real-world intent.

    request.body is bytes:
        For a GET request the body is usually empty and contributes nothing;
        for a POST request the form data (a dict) is serialized with
        urllib.parse.urlencode() before it becomes request.body, so
        different payloads yield different fingerprints.

    Hashing
        fp = hashlib.sha1() -- SHA-1 produces a 160-bit digest, 32 bits
        longer than MD5's 128, making it more collision-resistant but
        somewhat slower.
    Caching
        cache[cache_key] = fp.hexdigest() stores the digest in an in-memory
        cache (a WeakKeyDictionary keyed on the request object).
    """
    if include_headers:
        include_headers = tuple(to_bytes(h.lower())
                                for h in sorted(include_headers))
    cache = _fingerprint_cache.setdefault(request, {})
    cache_key = (include_headers, keep_fragments)
    if cache_key not in cache:
        fp = hashlib.sha1()
        fp.update(to_bytes(request.method))
        fp.update(to_bytes(canonicalize_url(request.url, keep_fragments=keep_fragments)))
        fp.update(request.body or b'')
        if include_headers:
            for hdr in include_headers:
                if hdr in request.headers:
                    fp.update(hdr)
                    for v in request.headers.getlist(hdr):
                        fp.update(v)
        cache[cache_key] = fp.hexdigest()
    return cache[cache_key]
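
Two consequences of the hashing steps above, sketched by calling request_fingerprint directly (the URLs are placeholders; note that newer Scrapy releases deprecate this function in favor of request fingerprinter components):

from urllib.parse import urlencode

import scrapy
from scrapy.utils.request import request_fingerprint

# 1. The body is hashed, so two POSTs to the same URL with different
#    form data get different fingerprints.
r1 = scrapy.Request('http://www.example.com/api', method='POST',
                    body=urlencode({'page': '1'}))
r2 = scrapy.Request('http://www.example.com/api', method='POST',
                    body=urlencode({'page': '2'}))
assert request_fingerprint(r1) != request_fingerprint(r2)

# 2. Headers are ignored by default; they only count when explicitly
#    listed via include_headers.
r3 = scrapy.Request('http://www.example.com/', headers={'Cookie': 'sid=1'})
r4 = scrapy.Request('http://www.example.com/', headers={'Cookie': 'sid=2'})
assert request_fingerprint(r3) == request_fingerprint(r4)
assert (request_fingerprint(r3, include_headers=['Cookie']) !=
        request_fingerprint(r4, include_headers=['Cookie']))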