
Merge branch 'release/v0.3.0'

Jip J. Dekker 2014-05-13 23:35:34 +02:00
commit 708ce44f67
8 changed files with 153 additions and 34 deletions

FourmiCrawler/pipelines.py

@@ -2,10 +2,11 @@
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+import re
from scrapy.exceptions import DropItem

-class FourmiPipeline(object):
+class DuplicatePipeline(object):

    def __init__(self):
        self.known_values = set()
@@ -17,9 +18,27 @@ class FourmiPipeline(object):
        :param spider: The spider which scraped the item
        :return: :raise DropItem: Returns the item if it is unique, drops it if it is already known
        """
-        value = item['attribute'], item['value']
+        value = (item['attribute'], item['value'], item['conditions'])
        if value in self.known_values:
            raise DropItem("Duplicate item found: %s" % item)  # [TODO] append sources of first item
        else:
            self.known_values.add(value)
            return item

+class AttributeSelectionPipeline(object):
+
+    def __init__(self):
+        pass
+
+    def process_item(self, item, spider):
+        """
+        The items are processed using the selected attribute list available in the spider;
+        items that don't match the selected attributes are dropped.
+        :param item: The incoming item
+        :param spider: The spider which scraped the item. Should have an attribute "selected_attributes".
+        :return: :raise DropItem: Returns the item if it matches a selected attribute, else it is dropped.
+        """
+        if [x for x in spider.selected_attributes if re.match(x, item["attribute"])]:
+            return item
+        else:
+            raise DropItem("Attribute not selected by user: %s" % item)

FourmiCrawler/settings.py

@@ -11,7 +11,8 @@ BOT_NAME = 'FourmiCrawler'

SPIDER_MODULES = ['FourmiCrawler']
NEWSPIDER_MODULE = 'FourmiCrawler'

ITEM_PIPELINES = {
-    'FourmiCrawler.pipelines.FourmiPipeline': 100
+    'FourmiCrawler.pipelines.AttributeSelectionPipeline': 100,
+    'FourmiCrawler.pipelines.DuplicatePipeline': 200,
}

FEED_URI = 'results.json'
FEED_FORMAT = 'jsonlines'
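
Note that Scrapy runs item pipelines in ascending order of these integers, so `AttributeSelectionPipeline` (100) filters items before `DuplicatePipeline` (200) ever sees them; attributes the user did not select never enter the duplicate set.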

FourmiCrawler/sources/ChemSpider.py

@@ -47,7 +47,6 @@ class ChemSpider(Source):
        properties = []

        # Predicted - ACD/Labs tab
-        # [TODO] - test if tab contains data, some chemicals do not have data here
        td_list = sel.xpath('.//table[@id="acdlabs-table"]//td').xpath(
            'normalize-space(string())')
        prop_names = td_list[::2]
@@ -58,6 +57,12 @@ class ChemSpider(Source):
            prop_value = prop_value.extract().encode('utf-8')
            prop_conditions = ''

+            # Test for properties without values, with one hardcoded exception
+            if (not re.match(r'^\d', prop_value) or
+                    (prop_name == 'Polarizability' and
+                     prop_value == '10-24cm3')):
+                continue

            # Match for condition in parentheses
            m = re.match(r'(.*) \((.*)\)', prop_name)
            if m:
@@ -192,7 +197,8 @@ class ChemSpider(Source):
                'reliability': 'Unknown',
                'conditions': ''
            })
-            properties.append(result)
+            if result['value']:
+                properties.append(result)
        return properties

    def parse_searchrequest(self, response):
@@ -200,8 +206,14 @@ class ChemSpider(Source):
        sel = Selector(response)
        log.msg('chemspider parse_searchrequest', level=log.DEBUG)
        sel.register_namespace('cs', 'http://www.chemspider.com/')
-        csid = sel.xpath('.//cs:int/text()').extract()[0]
+        # [TODO] - handle multiple csids in case of vague search term
+        csids = sel.xpath('.//cs:int/text()').extract()
+        if len(csids) == 0:
+            log.msg('ChemSpider found nothing', level=log.ERROR)
+            return
+        elif len(csids) > 1:
+            log.msg('ChemSpider found multiple substances, taking first '
+                    'element', level=log.DEBUG)
+        csid = csids[0]
        structure_url = self.website[:-1] + self.structure % csid
        extendedinfo_url = self.website[:-1] + self.extendedinfo % csid
        log.msg('chemspider URL: %s' % structure_url, level=log.DEBUG)
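
The two regular expressions used above are easy to sanity-check in isolation; a quick sketch with made-up property data (real names and values come from the scraped ACD/Labs table):

```python
import re

prop_name = 'Boiling Point (at 760 mmHg)'  # hypothetical example
prop_value = '100 deg C'

# New guard: skip properties whose value does not start with a digit.
if not re.match(r'^\d', prop_value):
    pass  # the real loop would `continue` here

# Parenthesis match: split "Name (conditions)" into its two parts.
m = re.match(r'(.*) \((.*)\)', prop_name)
if m:
    prop_name = m.group(1)        # 'Boiling Point'
    prop_conditions = m.group(2)  # 'at 760 mmHg'
```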

FourmiCrawler/sources/WikipediaParser.py

@@ -36,8 +36,8 @@ class WikipediaParser(Source):
        """ scrape data from infobox on wikipedia. """
        items = []

-        # be sure to get both chembox (wikipedia template) and drugbox (wikipedia template) to scrape
-        tr_list = sel.xpath('.//table[@class="infobox bordered" or @class="infobox"]//td[not(@colspan)]').\
+        # be sure to get chembox (wikipedia template)
+        tr_list = sel.xpath('.//table[@class="infobox bordered"]//td[not(@colspan)]'). \
            xpath('normalize-space(string())')
        prop_names = tr_list[::2]
        prop_values = tr_list[1::2]
@@ -46,11 +46,31 @@ class WikipediaParser(Source):
                'attribute': prop_name.extract().encode('utf-8'),
                'value': prop_values[i].extract().encode('utf-8'),
                'source': "Wikipedia",
-                'reliability': "",
+                'reliability': "Unknown",
                'conditions': ""
            })
            items.append(item)
            log.msg('Wiki prop: |%s| |%s| |%s|' % (item['attribute'], item['value'], item['source']), level=log.DEBUG)

+        # scrape the drugbox (wikipedia template)
+        tr_list2 = sel.xpath('.//table[@class="infobox"]//tr')
+        log.msg('this: %s' % tr_list2, level=log.DEBUG)
+        for tablerow in tr_list2:
+            log.msg('item: %s' % tablerow.xpath('./th').xpath('normalize-space(string())'), level=log.DEBUG)
+            if tablerow.xpath('./th').xpath('normalize-space(string())') and tablerow.xpath('./td').xpath(
+                    'normalize-space(string())'):
+                item = Result({
+                    'attribute': tablerow.xpath('./th').xpath('normalize-space(string())').extract()[0].encode('utf-8'),
+                    'value': tablerow.xpath('./td').xpath('normalize-space(string())').extract()[0].encode('utf-8'),
+                    'source': "Wikipedia",
+                    'reliability': "Unknown",
+                    'conditions': ""
+                })
+                items.append(item)
+                log.msg(
+                    'Wiki prop: |attribute: %s| |value: %s| |%s|' % (item['attribute'], item['value'], item['source']),
+                    level=log.DEBUG)

        items = filter(lambda a: a['value'] != '', items)  # remove items with an empty value
        item_list = self.clean_items(items)
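
The new drugbox loop pairs each row's `th` (attribute name) with its `td` (value) and skips rows that lack either. The pattern can be tried against an inline snippet; the miniature infobox below is a made-up stand-in for a real page:

```python
from scrapy.selector import Selector

html = ('<table class="infobox">'
        '<tr><th>Boiling point</th><td>100 &#176;C</td></tr>'
        '<tr><th colspan="2">Hazards</th></tr>'  # no <td>: skipped
        '</table>')

sel = Selector(text=html)
for tablerow in sel.xpath('.//table[@class="infobox"]//tr'):
    name = tablerow.xpath('./th').xpath('normalize-space(string())').extract()
    value = tablerow.xpath('./td').xpath('normalize-space(string())').extract()
    if name and value:
        print('%s = %s' % (name[0], value[0]))  # Boiling point = 100 °C
```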

FourmiCrawler/spider.py

@@ -8,9 +8,10 @@ class FourmiSpider(Spider):
    __parsers = []
    synonyms = []

-    def __init__(self, compound=None, *args, **kwargs):
+    def __init__(self, compound=None, selected_attributes=[".*"], *args, **kwargs):
        super(FourmiSpider, self).__init__(*args, **kwargs)
        self.synonyms.append(compound)
+        self.selected_attributes = selected_attributes

    def parse(self, response):
        for parser in self.__parsers:
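
With the new keyword argument, attribute selection can also be exercised when the spider is created directly; a hypothetical instantiation (the compound name is chosen arbitrarily):

```python
from FourmiCrawler.spider import FourmiSpider

# Only attributes starting with "Melting" or "Boiling" will survive
# the AttributeSelectionPipeline for this crawl.
spider = FourmiSpider(compound="methane",
                      selected_attributes=["^Melting", "^Boiling"])
```

One caveat worth noting: `selected_attributes=[".*"]` is a mutable default argument; it is harmless here because the list is only read, never mutated, but it is worth keeping in mind.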

README.md (new file, 81 lines)

@@ -0,0 +1,81 @@
# Fourmi

Fourmi is a web scraper for chemical substances. The program is designed to be
used as a search engine to search multiple chemical databases for a specific
substance. The program will produce all available attributes of the substance
and the conditions associated with those attributes. Fourmi also attempts to
estimate the reliability of each data point to assist the user in deciding
which data should be used.
The Fourmi project is an open source project licensed under the MIT license.
Feel free to contribute!

Fourmi is based on the [Scrapy framework](http://scrapy.org/), an open source
web scraping framework for Python. Most of the functionality of this project
can be traced to this framework. Should the documentation for this application
fall short, we suggest you take a close look at the
[Scrapy architecture](http://doc.scrapy.org/en/latest/topics/architecture.html)
and the [Scrapy documentation](http://doc.scrapy.org/en/latest/index.html).
### Installing

If you're installing Fourmi, please take a look at our [installation guide](...)
on our wiki. When you've installed the application, make sure to check our
[usage guide](...).
### Using the Source

To use the Fourmi source code, multiple dependencies are required. Take a look
at the [wiki page](...) on using the application source code for a step-by-step
installation guide.
When developing for the Fourmi project, keep in mind that code readability is a
must. To maintain the readability, code should conform to the
[PEP-8](http://legacy.python.org/dev/peps/pep-0008/) style guide for Python
code. More information about the different structures and principles of the
Fourmi application can be found on our [wiki](...).
### To Do

The Fourmi project has the following goals for the near future:

__Main goals:__

- Improve our documentation and guides. (Assignee: Dekker)
- Build a graphical user interface (GUI) as an alternative to the command line
interface (CLI). (Assignee: Harmen)
- Compile the source into a Windows executable. (Assignee: Bas)
- Create a configuration file to hold logins and API keys.
- Determine the reliability of our data points.
- Create a module to gather data from NIST. (Assignee: Rob)
- Create a module to gather data from PubChem. (Assignee: Rob)
__Side goals:__

- Clean and unify data.
- Extensive reliability analysis using statistical tests.
- Test data with Descartes 1.
### Project Origin

The Fourmi project was started in February of 2014 as part of a software
engineering course at the Radboud University for students studying Computer
Science, Information Science or Artificial Intelligence. Students participate
in a real software development project as part of the
[Giphouse](http://www.giphouse.nl/).

This particular project was started on behalf of Ivo B. Rietveld. As a chemist,
he was in need of an application to automatically search information on
chemical substances and create a phase diagram. The so-called "Descartes"
project was split into two teams, each creating a different application that
covers part of the functionality. We are team Descartes 2, and as we were
responsible for creating a web crawler, we've named our application Fourmi
(English: "ant").
The following people were part of the original team:

- [Jip J. Dekker](http://jip.dekker.li)
- Rob ten Berge
- Harmen Prins
- Bas van Berkel
- Nout van Deijck
- Michail Kuznetcov

(deleted file)

@@ -1,16 +0,0 @@
We are the team Descartes 2.
----------------------------
Our team members are:
+ Rob ten Berge
+ Bas van Berkel
+ Nout van Deijck
+ Jip J. Dekker
+ Michail Kuznetcov
+ Harmen Prins

fourmi.py

@@ -12,14 +12,15 @@ Usage:
    fourmi --version

Options:
+    --attributes=<regex>           Include only attributes that match these regular expressions split by a comma. [default: .*]
    -h --help                      Show this screen.
    --version                      Show version.
    --verbose                      Verbose logging output.
    --log=<file>                   Save log to a file.
    -o <file> --output=<file>      Output file [default: result.*format*]
    -f <format> --format=<format>  Output formats (supported: csv, json, jsonlines, xml) [default: jsonlines]
-    --include=<sourcenames>        Include only sources that match the regular these expressions split by a comma.
-    --exclude=<sourcenames>        Exclude the sources that match the regular these expressions split by a comma.
+    --include=<regex>              Include only sources that match these regular expressions split by a comma.
+    --exclude=<regex>              Exclude the sources that match these regular expressions split by a comma.
"""
from twisted.internet import reactor
@@ -32,8 +33,8 @@ from FourmiCrawler.spider import FourmiSpider
from sourceloader import SourceLoader

-def setup_crawler(searchable, settings, source_loader):
-    spider = FourmiSpider(compound=searchable)
+def setup_crawler(searchable, settings, source_loader, attributes):
+    spider = FourmiSpider(compound=searchable, selected_attributes=attributes)
    spider.add_parsers(source_loader.sources)
    crawler = Crawler(settings)
    crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
@@ -74,12 +75,12 @@ def start_log(docopt_arguments):

def search(docopt_arguments, source_loader):
    start_log(docopt_arguments)
    settings = scrapy_settings_manipulation(docopt_arguments)
-    setup_crawler(docopt_arguments["<compound>"], settings, source_loader)
+    setup_crawler(docopt_arguments["<compound>"], settings, source_loader, docopt_arguments["--attributes"].split(','))
    reactor.run()
if __name__ == '__main__':
-    arguments = docopt.docopt(__doc__, version='Fourmi - V0.2.6')
+    arguments = docopt.docopt(__doc__, version='Fourmi - V0.3.0')
    loader = SourceLoader()

    if arguments["--include"]: