
Merge branch 'develop' into feature/executable

Conflicts:
	sourceloader.py
Jip J. Dekker 2014-06-04 18:05:20 +02:00
commit d8fcd70d72
8 changed files with 404 additions and 33 deletions


@@ -1,6 +1,4 @@
# Define here the models for your scraped items
#
# See documentation in:
# For more information on item definitions, see the Scrapy documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field


@@ -1,10 +1,25 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# For more information on item pipelines, see the Scrapy documentation in:
# http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
from scrapy.exceptions import DropItem
class RemoveNonePipeline(object):
def __init__(self):
self.known_values = set()
def process_item(self, item, spider):
"""
Processes the item so that None values are replaced by empty strings
:param item: The incoming item
:param spider: The spider which scraped the item
:return: The item with None values replaced by empty strings
"""
for key in item:
if item[key] is None:
item[key] = ""
return item
class DuplicatePipeline(object):
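The diff is truncated at the DuplicatePipeline class header, so its body is not shown here. As a rough sketch only, assuming it uses the DropItem import and a known_values set the way the surrounding code suggests (not necessarily the committed implementation), such a pipeline could look like this:

from scrapy.exceptions import DropItem

class DuplicatePipeline(object):
    """Sketch of a duplicate-dropping pipeline; not the committed implementation."""
    def __init__(self):
        self.known_values = set()

    def process_item(self, item, spider):
        # Treat the attribute/value/conditions triple as the identity of a result
        # and drop any item that has already been seen.
        key = (item['attribute'], item['value'], item['conditions'])
        if key in self.known_values:
            raise DropItem("Duplicate result: %s" % (key,))
        self.known_values.add(key)
        return item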


@@ -11,8 +11,9 @@ BOT_NAME = 'FourmiCrawler'
SPIDER_MODULES = ['FourmiCrawler']
NEWSPIDER_MODULE = 'FourmiCrawler'
ITEM_PIPELINES = {
'FourmiCrawler.pipelines.AttributeSelectionPipeline': 100,
'FourmiCrawler.pipelines.DuplicatePipeline': 200,
"FourmiCrawler.pipelines.RemoveNonePipeline": 100,
'FourmiCrawler.pipelines.AttributeSelectionPipeline': 200,
'FourmiCrawler.pipelines.DuplicatePipeline': 300,
}
FEED_URI = 'results.json'
FEED_FORMAT = 'jsonlines'


@@ -0,0 +1,273 @@
from source import Source
from scrapy import log
from scrapy.http import Request
from scrapy.selector import Selector
from FourmiCrawler.items import Result
import re
# [TODO]: values can be '128.', perhaps remove the dot in that case?
# [TODO]: properties have references and comments which do not exist in the
# Result item, but should be included eventually.
class NIST(Source):
"""NIST Scraper plugin
This plugin manages searching for a chemical on the NIST website
and parsing the resulting page if the chemical exists on NIST.
"""
website = "http://webbook.nist.gov/*"
search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on'
ignore_list = set()
def __init__(self):
Source.__init__(self)
def parse(self, response):
sel = Selector(response)
title = sel.xpath('head/title/text()').extract()[0]
if title == 'Name Not Found':
log.msg('NIST: Chemical not found!', level=log.ERROR)
return
if title not in self.ignore_list:
self.ignore_list.add(title)
log.msg('NIST emit synonym: %s' % title, level=log.DEBUG)
self._spider.get_synonym_requests(title)
requests = []
requests.extend(self.parse_generic_info(sel))
symbol_table = {}
tds = sel.xpath('//table[@class="symbol_table"]/tr/td')
for (symbol_td, name_td) in zip(tds[::2], tds[1::2]):
symbol = ''.join(symbol_td.xpath('node()').extract())
name = name_td.xpath('text()').extract()[0]
symbol_table[symbol] = name
log.msg('NIST symbol: |%s|, name: |%s|' % (symbol, name),
level=log.DEBUG)
for table in sel.xpath('//table[@class="data"]'):
summary = table.xpath('@summary').extract()[0]
if summary == 'One dimensional data':
log.msg('NIST table: Aggregate data', level=log.DEBUG)
requests.extend(
self.parse_aggregate_data(table, symbol_table))
elif table.xpath('tr/th="Initial Phase"').extract()[0] == '1':
log.msg('NIST table: Enthalpy/entropy of phase transition',
level=log.DEBUG)
requests.extend(self.parse_transition_data(table, summary))
elif table.xpath('tr[1]/td'):
log.msg('NIST table: Horizontal table', level=log.DEBUG)
elif summary == 'Antoine Equation Parameters':
log.msg('NIST table: Antoine Equation Parameters',
level=log.DEBUG)
requests.extend(self.parse_antoine_data(table, summary))
elif len(table.xpath('tr[1]/th')) == 5:
log.msg('NIST table: generic 5 columns', level=log.DEBUG)
# Symbol (unit) Temperature (K) Method Reference Comment
requests.extend(self.parse_generic_data(table, summary))
elif len(table.xpath('tr[1]/th')) == 4:
log.msg('NIST table: generic 4 columns', level=log.DEBUG)
# Symbol (unit) Temperature (K) Reference Comment
requests.extend(self.parse_generic_data(table, summary))
else:
log.msg('NIST table: NOT SUPPORTED', level=log.WARNING)
continue #Assume unsupported
return requests
def parse_generic_info(self, sel):
"""Parses: synonyms, chemical formula, molecular weight, InChI,
InChIKey, CAS number
"""
ul = sel.xpath('body/ul[li/strong="IUPAC Standard InChI:"]')
li = ul.xpath('li')
raw_synonyms = ul.xpath('li[strong="Other names:"]/text()').extract()
for synonym in raw_synonyms[0].strip().split(';\n'):
log.msg('NIST synonym: %s' % synonym, level=log.DEBUG)
self.ignore_list.add(synonym)
self._spider.get_synonym_requests(synonym)
data = {}
raw_formula = ul.xpath('li[strong/a="Formula"]//text()').extract()
data['Chemical formula'] = ''.join(raw_formula[2:]).strip()
raw_mol_weight = ul.xpath('li[strong/a="Molecular weight"]/text()')
data['Molecular weight'] = raw_mol_weight.extract()[0].strip()
raw_inchi = ul.xpath('li[strong="IUPAC Standard InChI:"]//tt/text()')
data['IUPAC Standard InChI'] = raw_inchi.extract()[0]
raw_inchikey = ul.xpath('li[strong="IUPAC Standard InChIKey:"]'
'/tt/text()')
data['IUPAC Standard InChIKey'] = raw_inchikey.extract()[0]
raw_cas_number = ul.xpath('li[strong="CAS Registry Number:"]/text()')
data['CAS Registry Number'] = raw_cas_number.extract()[0].strip()
requests = []
for key, value in data.iteritems():
result = Result({
'attribute': key,
'value': value,
'source': 'NIST',
'reliability': 'Unknown',
'conditions': ''
})
requests.append(result)
return requests
def parse_aggregate_data(self, table, symbol_table):
"""Parses the table(s) which contain possible links to individual
data points
"""
results = []
for tr in table.xpath('tr[td]'):
extra_data_url = tr.xpath('td[last()][a="Individual data points"]'
'/a/@href').extract()
if extra_data_url:
request = Request(url=self.website[:-1] + extra_data_url[0],
callback=self.parse_individual_datapoints)
results.append(request)
continue
data = []
for td in tr.xpath('td'):
data.append(''.join(td.xpath('node()').extract()))
name = symbol_table[data[0]]
condition = ''
m = re.match(r'(.*) at (.*)', name)
if m:
name = m.group(1)
condition = m.group(2)
result = Result({
'attribute': name,
'value': data[1] + ' ' + data[2],
'source': 'NIST',
'reliability': 'Unknown',
'conditions': condition
})
log.msg('NIST: |%s|' % data, level=log.DEBUG)
results.append(result)
return results
@staticmethod
def parse_transition_data(table, summary):
"""Parses the table containing properties regarding phase changes"""
results = []
tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
m = re.search(r'\((.*)\)', tr_unit)
unit = '!'
if m:
unit = m.group(1)
for tr in table.xpath('tr[td]'):
tds = tr.xpath('td/text()').extract()
result = Result({
'attribute': summary,
'value': tds[0] + ' ' + unit,
'source': 'NIST',
'reliability': 'Unknown',
'conditions': '%s K, (%s -> %s)' % (tds[1], tds[2], tds[3])
})
results.append(result)
return results
@staticmethod
def parse_generic_data(table, summary):
"""Parses the common tables of 4 and 5 rows. Assumes they are of the
form:
Symbol (unit)|Temperature (K)|Method|Reference|Comment
Symbol (unit)|Temperature (K)|Reference|Comment
"""
results = []
tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
m = re.search(r'\((.*)\)', tr_unit)
unit = '!'
if m:
unit = m.group(1)
for tr in table.xpath('tr[td]'):
tds = tr.xpath('td/text()').extract()
result = Result({
'attribute': summary,
'value': tds[0] + ' ' + unit,
'source': 'NIST',
'reliability': 'Unknown',
'conditions': '%s K' % tds[1]
})
results.append(result)
return results
@staticmethod
def parse_antoine_data(table, summary):
"""Parse table containing parameters for the Antione equation"""
results = []
for tr in table.xpath('tr[td]'):
tds = tr.xpath('td/text()').extract()
result = Result({
'attribute': summary,
'value': 'A=%s, B=%s, C=%s' % (tds[1], tds[2], tds[3]),
'source': 'NIST',
'reliability': 'Unknown',
'conditions': '%s K' % tds[0]
})
results.append(result)
return results
def parse_individual_datapoints(self, response):
"""Parses the page linked from aggregate data"""
sel = Selector(response)
table = sel.xpath('//table[@class="data"]')[0]
results = []
name = table.xpath('@summary').extract()[0]
condition = ''
m = re.match(r'(.*) at (.*)', name)
if m:
name = m.group(1)
condition = m.group(2)
tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
m = re.search(r'\((.*)\)', tr_unit)
unit = '!'
if m:
unit = m.group(1)
for tr in table.xpath('tr[td]'):
tds = tr.xpath('td/text()').extract()
uncertainty = ''
m = re.search('Uncertainty assigned by TRC = (.*?) ', tds[-1])
if m:
uncertainty = '+- %s ' % m.group(1)
# [TODO]: get the plusminus sign working in here
result = Result({
'attribute': name,
'value': '%s %s%s' % (tds[0], uncertainty, unit),
'source': 'NIST',
'reliability': 'Unknown',
'conditions': condition
})
results.append(result)
return results
def new_compound_request(self, compound):
if compound not in self.ignore_list:
self.ignore_list.add(compound)
return Request(url=self.website[:-1] + self.search % compound,
callback=self.parse)
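To illustrate how new_compound_request assembles its URL: website[:-1] strips the trailing * from the website pattern and the compound name is substituted into search. The compound below is a hypothetical example, not taken from the commit:

website = "http://webbook.nist.gov/*"
search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on'
compound = 'benzene'  # hypothetical example compound
url = website[:-1] + search % compound
print(url)  # http://webbook.nist.gov/cgi/cbook.cgi?Name=benzene&Units=SI&cTP=on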


@@ -7,15 +7,32 @@ class Source:
_spider = None
def __init__(self):
"""
Initialization of a new Source
"""
pass
def parse(self, reponse):
log.msg("The parse function of the empty parser was used.", level=log.WARNING)
def parse(self, response):
"""
This function should be able to parse all Scrapy Response objects with a URL matching the website Regex.
:param response: A Scrapy Response object
:return: A list of Result items and new Scrapy Requests
"""
log.msg("The parse function of the empty source was used.", level=log.WARNING)
pass
def new_compound_request(self, compound):
"""
This function should return a Scrapy Request for the given compound name.
:param compound: A compound name.
:return: A new Scrapy Request
"""
# return Request(url=self.website[:-1] + compound, callback=self.parse)
pass
def set_spider(self, spider):
"""
A function to save the associated spider.
:param spider: A FourmiSpider object
"""
self._spider = spider
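For reference, a minimal sketch of a new source plugin built on this base class; the class name and URL are hypothetical and not part of the commit, and the file is assumed to sit next to source.py in FourmiCrawler/sources. A plugin only needs a website pattern plus parse and new_compound_request overrides:

from scrapy.http import Request
from source import Source  # assumes the file lives in FourmiCrawler/sources/

class ExampleSource(Source):
    """Sketch of a source plugin; ExampleSource and example.org are made up."""
    website = "http://www.example.org/*"  # regex the spider matches response URLs against
    search = "search?q=%s"

    def parse(self, response):
        # Return a list of Result items and/or follow-up Requests here.
        return []

    def new_compound_request(self, compound):
        return Request(url=self.website[:-1] + self.search % compound,
                       callback=self.parse)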


@@ -1,43 +1,75 @@
import re
from scrapy.spider import Spider
from scrapy import log
import re
class FourmiSpider(Spider):
"""
A spider written for the Fourmi project that calls upon all available sources to request and scrape data.
"""
name = "FourmiSpider"
__parsers = []
__sources = []
synonyms = []
def __init__(self, compound=None, selected_attributes=[".*"], *args, **kwargs):
"""
Initialization of the Spider
:param compound: The compound that will be searched.
:param selected_attributes: A list of regular expressions that the attributes should match.
"""
super(FourmiSpider, self).__init__(*args, **kwargs)
self.synonyms.append(compound)
self.selected_attributes = selected_attributes
def parse(self, reponse):
for parser in self.__parsers:
if re.match(parser.website, reponse.url):
log.msg("Url: " + reponse.url + " -> Source: " + parser.website, level=log.DEBUG)
return parser.parse(reponse)
def parse(self, response):
"""
The function that is called when a response to a request is available. It dispatches the response to a
source which should be able to parse the data.
:param response: A Scrapy Response object that should be parsed
:return: A list of Result items and new Request to be handled by the scrapy core.
"""
for source in self.__sources:
if re.match(source.website, response.url):
log.msg("Url: " + response.url + " -> Source: " + source.website, level=log.DEBUG)
return source.parse(response)
return None
def get_synonym_requests(self, compound):
"""
A function that generates a new Scrapy Request for each source, given a new synonym of a compound.
:param compound: A compound name
:return: A list of Scrapy Request objects
"""
requests = []
for parser in self.__parsers:
for parser in self.__sources:
parser_requests = parser.new_compound_request(compound)
if parser_requests is not None:
requests.append(parser_requests)
return requests
def start_requests(self):
"""
The function called by Scrapy for its first Requests
:return: A list of Scrapy Requests generated from the known synonyms using the available sources.
"""
requests = []
for synonym in self.synonyms:
requests.extend(self.get_synonym_requests(synonym))
return requests
def add_parsers(self, parsers):
for parser in parsers:
self.add_parser(parser)
def add_sources(self, sources):
"""
A function to add new Source objects to the list of available sources.
:param sources: A list of Source objects.
"""
for parser in sources:
self.add_source(parser)
def add_parser(self, parser):
self.__parsers.append(parser)
parser.set_spider(self)
def add_source(self, source):
"""
A function to add a new Source object to the list of available sources.
:param source: A Source object
"""
self.__sources.append(source)
source.set_spider(self)
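The dispatch in parse treats each source's website attribute as a regular expression and hands the response to the first source whose pattern matches the response URL. A quick illustration using the NIST pattern from above; the URL is an example, not taken from the commit:

import re

website = "http://webbook.nist.gov/*"  # the NIST source's website attribute
url = "http://webbook.nist.gov/cgi/cbook.cgi?Name=benzene&Units=SI&cTP=on"
print(re.match(website, url) is not None)  # True: this response would be routed to the NIST source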


@@ -33,9 +33,16 @@ from FourmiCrawler.spider import FourmiSpider
from sourceloader import SourceLoader
def setup_crawler(searchable, settings, source_loader, attributes):
spider = FourmiSpider(compound=searchable, selected_attributes=attributes)
spider.add_parsers(source_loader.sources)
def setup_crawler(compound, settings, source_loader, attributes):
"""
This function prepares and starts the crawler, which performs the actual search on the internet
:param compound: The compound which should be searched
:param settings: A scrapy settings object
:param source_loader: A fully functional SourceLoader object which contains only the sources that should be used.
:param attributes: A list of regular expressions which the attribute names should match.
"""
spider = FourmiSpider(compound=compound, selected_attributes=attributes)
spider.add_sources(source_loader.sources)
crawler = Crawler(settings)
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
@@ -44,8 +51,13 @@ def setup_crawler(searchable, settings, source_loader, attributes):
def scrapy_settings_manipulation(docopt_arguments):
"""
This function manipulates the Scrapy settings that normally would be set in the settings file. In the Fourmi
project these are command line arguments.
:param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
"""
settings = get_project_settings()
# [todo] - add at least a warning for files that already exist
if docopt_arguments["--output"] != 'result.*format*':
settings.overrides["FEED_URI"] = docopt_arguments["--output"]
elif docopt_arguments["--format"] == "jsonlines":
@@ -60,6 +72,10 @@ def scrapy_settings_manipulation(docopt_arguments):
def start_log(docopt_arguments):
"""
This function starts the logging functionality of Scrapy using the settings given by the CLI.
:param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
"""
if docopt_arguments["--log"] is not None:
if docopt_arguments["--verbose"]:
log.start(logfile=docopt_arguments["--log"], logstdout=False, loglevel=log.DEBUG)
@@ -73,14 +89,20 @@ def start_log(docopt_arguments):
def search(docopt_arguments, source_loader):
"""
The function that facilitates the search for a specific compound.
:param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
:param source_loader: An initialized SourceLoader object pointed at the directory with the sources.
"""
start_log(docopt_arguments)
settings = scrapy_settings_manipulation(docopt_arguments)
setup_crawler(docopt_arguments["<compound>"], settings, source_loader, docopt_arguments["--attributes"].split(','))
reactor.run()
# The entry point for the Fourmi command line interface.
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='Fourmi - V0.3.0')
arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.0')
loader = SourceLoader()
if arguments["--include"]:


@@ -2,24 +2,25 @@ import inspect
import sys
import os
import re
from FourmiCrawler.sources.source import Source
class SourceLoader:
sources = []
def __init__(self, rel_dir="FourmiCrawler\\sources"):
def __init__(self, rel_dir="FourmiCrawler/sources"):
if hasattr(sys,'frozen'):
path = os.path.dirname(sys.executable)
else:
path = os.path.dirname(os.path.abspath(__file__))
path += "\\" + rel_dir
path += "/" + rel_dir
known_parser = set()
for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']:
mod = __import__('.'.join([rel_dir.replace('\\', "."), py]), fromlist=[py])
mod = __import__('.'.join([rel_dir.replace('/', "."), py]), fromlist=[py])
classes = [getattr(mod, x) for x in dir(mod) if inspect.isclass(getattr(mod, x))]
for cls in classes:
if issubclass(cls, Source) and cls not in known_parser:
@@ -27,18 +28,30 @@ class SourceLoader:
# known_parser.add(cls)
def include(self, source_names):
"""
This function removes all sources whose class names do not match any of the given regular expressions.
:param source_names: A list of regular expressions (strings)
"""
new = set()
for name in source_names:
new.update([src for src in self.sources if re.match(name, src.__class__.__name__)])
self.sources = list(new)
def exclude(self, source_names):
"""
This function removes all sources whose class names match any of the given regular expressions.
:param source_names: A list of regular expressions (strings)
"""
exclude = []
for name in source_names:
exclude.extend([src for src in self.sources if re.match(name, src.__class__.__name__)])
self.sources = [src for src in self.sources if src not in exclude]
def __str__(self):
"""
This function returns a string with all sources currently available in the SourceLoader.
:return: a string with all available sources.
"""
string = ""
for src in self.sources:
string += "Source: " + src.__class__.__name__
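Taken together, a typical use of the loader before starting a search could look like the sketch below (run from the project root; the 'NIST' filter is just an example):

from sourceloader import SourceLoader

loader = SourceLoader()   # discovers every Source subclass in FourmiCrawler/sources
loader.include(['NIST'])  # keep only sources whose class name matches this regex
print(loader)             # lists the sources that remain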