Source code for scrapy.statscollectors

"""
Scrapy extension for collecting scraping stats
"""
import logging
import pprint

logger = logging.getLogger(__name__)


class StatsCollector:
    """Base stats collector, keeping all stats in a single in-memory dict."""

    def __init__(self, crawler):
        # STATS_DUMP controls whether stats are logged when a spider closes.
        self._dump = crawler.settings.getbool('STATS_DUMP')
        self._stats = {}

    def get_value(self, key, default=None, spider=None):
        return self._stats.get(key, default)

    def get_stats(self, spider=None):
        return self._stats

    def set_value(self, key, value, spider=None):
        self._stats[key] = value

    def set_stats(self, stats, spider=None):
        self._stats = stats

    def inc_value(self, key, count=1, start=0, spider=None):
        # Initialize the key at `start` if missing, then add `count`.
        d = self._stats
        d[key] = d.setdefault(key, start) + count

    def max_value(self, key, value, spider=None):
        self._stats[key] = max(self._stats.setdefault(key, value), value)

    def min_value(self, key, value, spider=None):
        self._stats[key] = min(self._stats.setdefault(key, value), value)

    def clear_stats(self, spider=None):
        self._stats.clear()

    def open_spider(self, spider):
        pass

    def close_spider(self, spider, reason):
        if self._dump:
            logger.info("Dumping Scrapy stats:\n" + pprint.pformat(self._stats),
                        extra={'spider': spider})
        self._persist_stats(self._stats, spider)

    def _persist_stats(self, stats, spider):
        # Hook for subclasses; the base class discards stats on close.
        pass
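

# Usage sketch (not part of the module; spider name and URL are
# hypothetical): spiders reach the configured collector through
# self.crawler.stats, so recording custom stats looks like this.
import scrapy


class StatsDemoSpider(scrapy.Spider):
    name = "stats_demo"
    start_urls = ["https://quotes.toscrape.com"]

    def parse(self, response):
        # First call creates the key at start=0; later calls add count=1.
        self.crawler.stats.inc_value("demo/pages_seen")
        # Overwrites only when the new value beats the stored maximum.
        self.crawler.stats.max_value("demo/max_body_bytes", len(response.body))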


class MemoryStatsCollector(StatsCollector):

    def __init__(self, crawler):
        super().__init__(crawler)
        self.spider_stats = {}

    def _persist_stats(self, stats, spider):
        # Retain each spider's final stats in memory, keyed by spider name.
        self.spider_stats[spider.name] = stats
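

# Sketch (assumption, not part of the module; spider name and URL are
# hypothetical): because MemoryStatsCollector keeps each spider's final
# stats after close_spider(), code holding the Crawler can read them once
# the crawl finishes. MemoryStatsCollector is the default STATS_CLASS, so
# a plain CrawlerProcess run uses it out of the box.
from scrapy import Spider
from scrapy.crawler import CrawlerProcess


class OneShotSpider(Spider):
    name = "one_shot"
    start_urls = ["https://quotes.toscrape.com"]

    def parse(self, response):
        self.crawler.stats.inc_value("demo/pages_seen")


process = CrawlerProcess()
crawler = process.create_crawler(OneShotSpider)
process.crawl(crawler)
process.start()  # blocks until the crawl is over

# spider_stats maps spider name -> final stats dict for that run.
print(crawler.stats.spider_stats["one_shot"])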


class DummyStatsCollector(StatsCollector):
    """No-op collector: writes are discarded and reads return the default."""

    def get_value(self, key, default=None, spider=None):
        return default

    def set_value(self, key, value, spider=None):
        pass

    def set_stats(self, stats, spider=None):
        pass

    def inc_value(self, key, count=1, start=0, spider=None):
        pass

    def max_value(self, key, value, spider=None):
        pass

    def min_value(self, key, value, spider=None):
        pass
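

# Sketch (assumption, not part of the module): to switch collectors, point
# the STATS_CLASS setting at the desired class in the project's settings.py.
# DummyStatsCollector turns every stats call into a no-op, which can shave
# a usually marginal amount of overhead when stats are not needed.
STATS_CLASS = "scrapy.statscollectors.DummyStatsCollector"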