  1. # Natural Language Toolkit: Relation Extraction
  2. #
  3. # Copyright (C) 2001-2019 NLTK Project
  4. # Author: Ewan Klein <ewan@inf.ed.ac.uk>
  5. # URL: <http://nltk.org/>
  6. # For license information, see LICENSE.TXT
  7. """
  8. Code for extracting relational triples from the ieer and conll2002 corpora.
  9. Relations are stored internally as dictionaries ('reldicts').
  10. The two serialization outputs are "rtuple" and "clause".
  11. - An rtuple is a tuple of the form ``(subj, filler, obj)``,
  12. where ``subj`` and ``obj`` are pairs of Named Entity mentions, and ``filler`` is the string of words
occurring between ``subj`` and ``obj`` (with no intervening NEs). Strings are printed via ``repr()`` to
  14. circumvent locale variations in rendering utf-8 encoded strings.
  15. - A clause is an atom of the form ``relsym(subjsym, objsym)``,
  16. where the relation, subject and object have been canonicalized to single strings.
  17. """
  18. from __future__ import print_function
  19. # todo: get a more general solution to canonicalized symbols for clauses -- maybe use xmlcharrefs?
  20. from collections import defaultdict
  21. import re
  22. from six.moves import html_entities
  23. # Dictionary that associates corpora with NE classes
  24. NE_CLASSES = {
  25. 'ieer': [
  26. 'LOCATION',
  27. 'ORGANIZATION',
  28. 'PERSON',
  29. 'DURATION',
  30. 'DATE',
  31. 'CARDINAL',
  32. 'PERCENT',
  33. 'MONEY',
  34. 'MEASURE',
  35. ],
  36. 'conll2002': ['LOC', 'PER', 'ORG'],
  37. 'ace': [
  38. 'LOCATION',
  39. 'ORGANIZATION',
  40. 'PERSON',
  41. 'DURATION',
  42. 'DATE',
  43. 'CARDINAL',
  44. 'PERCENT',
  45. 'MONEY',
  46. 'MEASURE',
  47. 'FACILITY',
  48. 'GPE',
  49. ],
  50. }
  51. # Allow abbreviated class labels
  52. short2long = dict(LOC='LOCATION', ORG='ORGANIZATION', PER='PERSON')
  53. long2short = dict(LOCATION='LOC', ORGANIZATION='ORG', PERSON='PER')
  54. def _expand(type):
  55. """
  56. Expand an NE class name.
  57. :type type: str
  58. :rtype: str
  59. """
  60. try:
  61. return short2long[type]
  62. except KeyError:
  63. return type
  64. def class_abbrev(type):
  65. """
  66. Abbreviate an NE class name.
  67. :type type: str
  68. :rtype: str
  69. """
  70. try:
  71. return long2short[type]
  72. except KeyError:
  73. return type
  74. def _join(lst, sep=' ', untag=False):
  75. """
  76. Join a list into a string, turning tags tuples into tag strings or just words.
  77. :param untag: if ``True``, omit the tag from tagged input strings.
  78. :type lst: list
  79. :rtype: str
  80. """
  81. try:
  82. return sep.join(lst)
  83. except TypeError:
  84. if untag:
  85. return sep.join(tup[0] for tup in lst)
  86. from nltk.tag import tuple2str
  87. return sep.join(tuple2str(tup) for tup in lst)
  88. def descape_entity(m, defs=html_entities.entitydefs):
  89. """
  90. Translate one entity to its ISO Latin value.
  91. Inspired by example from effbot.org
  92. """
  93. # s = 'mcglashan_&amp;_sarrail'
  94. # l = ['mcglashan', '&amp;', 'sarrail']
  95. # pattern = re.compile("&(\w+?);")
  96. # new = list2sym(l)
  97. # s = pattern.sub(descape_entity, s)
  98. # print s, new
  99. try:
  100. return defs[m.group(1)]
  101. except KeyError:
  102. return m.group(0) # use as is
  103. def list2sym(lst):
  104. """
  105. Convert a list of strings into a canonical symbol.
  106. :type lst: list
  107. :return: a Unicode string without whitespace
  108. :rtype: unicode
  109. """
  110. sym = _join(lst, '_', untag=True)
  111. sym = sym.lower()
  112. ENT = re.compile("&(\w+?);")
  113. sym = ENT.sub(descape_entity, sym)
  114. sym = sym.replace('.', '')
  115. return sym
  116. def tree2semi_rel(tree):
  117. """
  118. Group a chunk structure into a list of 'semi-relations' of the form (list(str), ``Tree``).
  119. In order to facilitate the construction of (``Tree``, string, ``Tree``) triples, this
  120. identifies pairs whose first member is a list (possibly empty) of terminal
  121. strings, and whose second member is a ``Tree`` of the form (NE_label, terminals).
  122. :param tree: a chunk tree
  123. :return: a list of pairs (list(str), ``Tree``)
  124. :rtype: list of tuple
  125. """
  126. from nltk.tree import Tree
  127. semi_rels = []
  128. semi_rel = [[], None]
  129. for dtr in tree:
  130. if not isinstance(dtr, Tree):
  131. semi_rel[0].append(dtr)
  132. else:
  133. # dtr is a Tree
  134. semi_rel[1] = dtr
  135. semi_rels.append(semi_rel)
  136. semi_rel = [[], None]
  137. return semi_rels
  138. def semi_rel2reldict(pairs, window=5, trace=False):
  139. """
  140. Converts the pairs generated by ``tree2semi_rel`` into a 'reldict': a dictionary which
  141. stores information about the subject and object NEs plus the filler between them.
  142. Additionally, a left and right context of length =< window are captured (within
  143. a given input sentence).
  144. :param pairs: a pair of list(str) and ``Tree``, as generated by
  145. :param window: a threshold for the number of items to include in the left and right context
  146. :type window: int
  147. :return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', objclass', objtext', 'objsym' and 'rcon'
  148. :rtype: list(defaultdict)
  149. """
  150. result = []
  151. while len(pairs) > 2:
  152. reldict = defaultdict(str)
  153. reldict['lcon'] = _join(pairs[0][0][-window:])
  154. reldict['subjclass'] = pairs[0][1].label()
  155. reldict['subjtext'] = _join(pairs[0][1].leaves())
  156. reldict['subjsym'] = list2sym(pairs[0][1].leaves())
  157. reldict['filler'] = _join(pairs[1][0])
  158. reldict['untagged_filler'] = _join(pairs[1][0], untag=True)
  159. reldict['objclass'] = pairs[1][1].label()
  160. reldict['objtext'] = _join(pairs[1][1].leaves())
  161. reldict['objsym'] = list2sym(pairs[1][1].leaves())
  162. reldict['rcon'] = _join(pairs[2][0][:window])
  163. if trace:
  164. print(
  165. "(%s(%s, %s)"
  166. % (
  167. reldict['untagged_filler'],
  168. reldict['subjclass'],
  169. reldict['objclass'],
  170. )
  171. )
  172. result.append(reldict)
  173. pairs = pairs[1:]
  174. return result
  175. def extract_rels(subjclass, objclass, doc, corpus='ace', pattern=None, window=10):
  176. """
  177. Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern.
  178. The parameters ``subjclass`` and ``objclass`` can be used to restrict the
  179. Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION',
  180. 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE').
  181. :param subjclass: the class of the subject Named Entity.
  182. :type subjclass: str
  183. :param objclass: the class of the object Named Entity.
  184. :type objclass: str
  185. :param doc: input document
  186. :type doc: ieer document or a list of chunk trees
  187. :param corpus: name of the corpus to take as input; possible values are
  188. 'ieer' and 'conll2002'
  189. :type corpus: str
  190. :param pattern: a regular expression for filtering the fillers of
  191. retrieved triples.
  192. :type pattern: SRE_Pattern
  193. :param window: filters out fillers which exceed this threshold
  194. :type window: int
  195. :return: see ``mk_reldicts``
  196. :rtype: list(defaultdict)
  197. """
  198. if subjclass and subjclass not in NE_CLASSES[corpus]:
  199. if _expand(subjclass) in NE_CLASSES[corpus]:
  200. subjclass = _expand(subjclass)
  201. else:
  202. raise ValueError(
  203. "your value for the subject type has not been recognized: %s"
  204. % subjclass
  205. )
  206. if objclass and objclass not in NE_CLASSES[corpus]:
  207. if _expand(objclass) in NE_CLASSES[corpus]:
  208. objclass = _expand(objclass)
  209. else:
  210. raise ValueError(
  211. "your value for the object type has not been recognized: %s" % objclass
  212. )
  213. if corpus == 'ace' or corpus == 'conll2002':
  214. pairs = tree2semi_rel(doc)
  215. elif corpus == 'ieer':
  216. pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline)
  217. else:
  218. raise ValueError("corpus type not recognized")
  219. reldicts = semi_rel2reldict(pairs)
  220. relfilter = lambda x: (
  221. x['subjclass'] == subjclass
  222. and len(x['filler'].split()) <= window
  223. and pattern.match(x['filler'])
  224. and x['objclass'] == objclass
  225. )
  226. return list(filter(relfilter, reldicts))
  227. def rtuple(reldict, lcon=False, rcon=False):
  228. """
  229. Pretty print the reldict as an rtuple.
  230. :param reldict: a relation dictionary
  231. :type reldict: defaultdict
  232. """
  233. items = [
  234. class_abbrev(reldict['subjclass']),
  235. reldict['subjtext'],
  236. reldict['filler'],
  237. class_abbrev(reldict['objclass']),
  238. reldict['objtext'],
  239. ]
  240. format = '[%s: %r] %r [%s: %r]'
  241. if lcon:
  242. items = [reldict['lcon']] + items
  243. format = '...%r)' + format
  244. if rcon:
  245. items.append(reldict['rcon'])
  246. format = format + '(%r...'
  247. printargs = tuple(items)
  248. return format % printargs
  249. def clause(reldict, relsym):
  250. """
  251. Print the relation in clausal form.
  252. :param reldict: a relation dictionary
  253. :type reldict: defaultdict
  254. :param relsym: a label for the relation
  255. :type relsym: str
  256. """
  257. items = (relsym, reldict['subjsym'], reldict['objsym'])
  258. return "%s(%r, %r)" % items
  259. #######################################################
  260. # Demos of relation extraction with regular expressions
  261. #######################################################
  262. ############################################
  263. # Example of in(ORG, LOC)
  264. ############################################
def in_demo(trace=0, sql=True):
    """
    Select pairs of organizations and locations whose mentions occur with an
    intervening occurrence of the preposition "in".

    If the sql parameter is set to True, then the entity pairs are loaded into
    an in-memory database, and subsequently pulled out using an SQL "SELECT"
    query.
    """
    from nltk.corpus import ieer

    if sql:
        try:
            import sqlite3

            connection = sqlite3.connect(":memory:")
            # NOTE(review): sqlite3.OptimizedUnicode was deprecated and then
            # removed in Python 3.12 -- confirm the target runtime.
            connection.text_factory = sqlite3.OptimizedUnicode
            cur = connection.cursor()
            cur.execute(
                """create table Locations
            (OrgName text, LocationName text, DocID text)"""
            )
        except ImportError:
            import warnings

            warnings.warn("Cannot import sqlite; sql flag will be ignored.")
    # Match a filler containing "in", but reject forms like "including"
    # via the negative lookahead.
    IN = re.compile(r'.*\bin\b(?!\b.+ing)')
    print()
    print("IEER: in(ORG, LOC) -- just the clauses:")
    print("=" * 45)
    for file in ieer.fileids():
        for doc in ieer.parsed_docs(file):
            if trace:
                print(doc.docno)
                print("=" * 15)
            for rel in extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern=IN):
                print(clause(rel, relsym='IN'))
                if sql:
                    try:
                        # Local name shadows the module-level rtuple()
                        # function for the rest of this loop body.
                        rtuple = (rel['subjtext'], rel['objtext'], doc.docno)
                        cur.execute(
                            """insert into Locations
                        values (?, ?, ?)""",
                            rtuple,
                        )
                        connection.commit()
                    except NameError:
                        # `cur` is unbound when the sqlite import failed
                        # above; silently skip database inserts.
                        pass
    if sql:
        try:
            cur.execute(
                """select OrgName from Locations
                where LocationName = 'Atlanta'"""
            )
            print()
            print("Extract data from SQL table: ORGs in Atlanta")
            print("-" * 15)
            for row in cur:
                print(row)
        except NameError:
            # Same guard: no database was created.
            pass
  322. ############################################
  323. # Example of has_role(PER, LOC)
  324. ############################################
def roles_demo(trace=0):
    """
    Demo: find PER-ORG pairs in the IEER corpus whose filler matches a set of
    role words, and print them as rtuples.

    :param trace: if nonzero, print each document number and include
        left/right context in the output.
    """
    from nltk.corpus import ieer

    # Verbose regex matched against the filler between the PER and ORG
    # mentions. NOTE(review): the alternatives from "manager" onwards sit
    # outside the "(.*(...).*)" group, so they must match the entire filler
    # on their own -- presumably unintended, but preserved as-is; confirm
    # before changing.
    roles = """
    (.*(                   # assorted roles
    analyst|
    chair(wo)?man|
    commissioner|
    counsel|
    director|
    economist|
    editor|
    executive|
    foreman|
    governor|
    head|
    lawyer|
    leader|
    librarian).*)|
    manager|
    partner|
    president|
    producer|
    professor|
    researcher|
    spokes(wo)?man|
    writer|
    ,\sof\sthe?\s* # "X, of (the) Y"
    """
    ROLES = re.compile(roles, re.VERBOSE)
    print()
    print("IEER: has_role(PER, ORG) -- raw rtuples:")
    print("=" * 45)
    for file in ieer.fileids():
        for doc in ieer.parsed_docs(file):
            lcon = rcon = False
            if trace:
                print(doc.docno)
                print("=" * 15)
                # With tracing on, also show left/right context.
                lcon = rcon = True
            for rel in extract_rels('PER', 'ORG', doc, corpus='ieer', pattern=ROLES):
                print(rtuple(rel, lcon=lcon, rcon=rcon))
  366. ##############################################
  367. ### Show what's in the IEER Headlines
  368. ##############################################
  369. def ieer_headlines():
  370. from nltk.corpus import ieer
  371. from nltk.tree import Tree
  372. print("IEER: First 20 Headlines")
  373. print("=" * 45)
  374. trees = [
  375. (doc.docno, doc.headline)
  376. for file in ieer.fileids()
  377. for doc in ieer.parsed_docs(file)
  378. ]
  379. for tree in trees[:20]:
  380. print()
  381. print("%s:\n%s" % tree)
  382. #############################################
  383. ## Dutch CONLL2002: take_on_role(PER, ORG
  384. #############################################
def conllned(trace=1):
    """
    Find the copula+'van' relation ('of') in the Dutch tagged training corpus
    from CoNLL 2002.

    :param trace: if nonzero, include left/right context in the output.
    """
    from nltk.corpus import conll2002

    # Verbose regex over the tagged filler: a form of zijn/worden followed
    # eventually by "van/Prep".
    vnv = """
    (
    is/V|    # 3rd sing present and
    was/V|   # past forms of the verb zijn ('be')
    werd/V|  # and also present
    wordt/V  # past of worden ('become)
    )
    .*       # followed by anything
    van/Prep # followed by van ('of')
    """
    VAN = re.compile(vnv, re.VERBOSE)
    print()
    print("Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:")
    print("=" * 45)
    for doc in conll2002.chunked_sents('ned.train'):
        lcon = rcon = False
        if trace:
            lcon = rcon = True
        for rel in extract_rels(
            'PER', 'ORG', doc, corpus='conll2002', pattern=VAN, window=10
        ):
            print(rtuple(rel, lcon=lcon, rcon=rcon))
  413. #############################################
  414. ## Spanish CONLL2002: (PER, ORG)
  415. #############################################
  416. def conllesp():
  417. from nltk.corpus import conll2002
  418. de = """
  419. .*
  420. (
  421. de/SP|
  422. del/SP
  423. )
  424. """
  425. DE = re.compile(de, re.VERBOSE)
  426. print()
  427. print("Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:")
  428. print("=" * 45)
  429. rels = [
  430. rel
  431. for doc in conll2002.chunked_sents('esp.train')
  432. for rel in extract_rels('ORG', 'LOC', doc, corpus='conll2002', pattern=DE)
  433. ]
  434. for r in rels[:10]:
  435. print(clause(r, relsym='DE'))
  436. print()
  437. def ne_chunked():
  438. print()
  439. print("1500 Sentences from Penn Treebank, as processed by NLTK NE Chunker")
  440. print("=" * 45)
  441. ROLE = re.compile(
  442. r'.*(chairman|president|trader|scientist|economist|analyst|partner).*'
  443. )
  444. rels = []
  445. for i, sent in enumerate(nltk.corpus.treebank.tagged_sents()[:1500]):
  446. sent = nltk.ne_chunk(sent)
  447. rels = extract_rels('PER', 'ORG', sent, corpus='ace', pattern=ROLE, window=7)
  448. for rel in rels:
  449. print('{0:<5}{1}'.format(i, rtuple(rel)))
if __name__ == '__main__':
    # Binds `nltk` in the module namespace for the duration of the demos.
    import nltk
    # NOTE(review): `relextract` is not referenced below -- presumably kept
    # for interactive use; confirm before removing.
    from nltk.sem import relextract

    # Run all demos with tracing off.
    in_demo(trace=0)
    roles_demo(trace=0)
    conllned()
    conllesp()
    ieer_headlines()
    ne_chunked()