# Natural Language Toolkit: Parser Utility Functions
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
#
# Copyright (C) 2001-2019 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
Utility functions for parsers.
"""
from __future__ import print_function

from nltk.grammar import CFG, FeatureGrammar, PCFG
from nltk.data import load
from nltk.parse.chart import Chart, ChartParser
from nltk.parse.pchart import InsideChartParser
from nltk.parse.featurechart import FeatureChart, FeatureChartParser


def load_parser(
    grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args
):
    """
    Load a grammar from a file, and build a parser based on that grammar.
    The parser depends on the grammar format, and might also depend
    on properties of the grammar itself.

    The following grammar formats are currently supported:
      - ``'cfg'`` (CFGs: ``CFG``)
      - ``'pcfg'`` (probabilistic CFGs: ``PCFG``)
      - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``)

    :type grammar_url: str
    :param grammar_url: A URL specifying where the grammar is located.
        The default protocol is ``"nltk:"``, which searches for the file
        in the NLTK data package.
    :type trace: int
    :param trace: The level of tracing that should be used when
        parsing a text. ``0`` will generate no tracing output;
        higher numbers will produce more verbose tracing output.
    :param parser: The class used for parsing; should be ``ChartParser``
        or a subclass. If None, the class depends on the grammar format.
    :param chart_class: The class used for storing the chart; should be
        ``Chart`` or a subclass. Only used for CFGs and feature CFGs.
        If None, the chart class depends on the grammar format.
    :type beam_size: int
    :param beam_size: The maximum length for the parser's edge queue.
        Only used for probabilistic CFGs.
    :param load_args: Keyword parameters used when loading the grammar.
        See ``data.load`` for more information.
    """
    grammar = load(grammar_url, **load_args)
    if not isinstance(grammar, CFG):
        raise ValueError("The grammar must be a CFG, or a subclass thereof.")
    if isinstance(grammar, PCFG):
        if parser is None:
            parser = InsideChartParser
        return parser(grammar, trace=trace, beam_size=beam_size)
    elif isinstance(grammar, FeatureGrammar):
        if parser is None:
            parser = FeatureChartParser
        if chart_class is None:
            chart_class = FeatureChart
        return parser(grammar, trace=trace, chart_class=chart_class)
    else:  # Plain CFG.
        if parser is None:
            parser = ChartParser
        if chart_class is None:
            chart_class = Chart
        return parser(grammar, trace=trace, chart_class=chart_class)
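

# A minimal usage sketch (not part of the original module). The grammar
# resource below is one of the grammars shipped with the NLTK data package;
# any ``cfg``/``pcfg``/``fcfg`` resource works the same way, and the sample
# sentence is illustrative.
def _demo_load_parser():
    # Because the resource is a feature-based grammar ('.fcfg'),
    # load_parser builds a FeatureChartParser over a FeatureChart.
    parser = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)
    for tree in parser.parse('Kim likes children'.split()):
        print(tree)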


def taggedsent_to_conll(sentence):
    """
    Convert a single POS-tagged sentence into CoNLL format.

    >>> from nltk import word_tokenize, pos_tag
    >>> text = "This is a foobar sentence."
    >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))):
    ...     print(line, end="")
    1	This	_	DT	DT	_	0	a	_	_
    2	is	_	VBZ	VBZ	_	0	a	_	_
    3	a	_	DT	DT	_	0	a	_	_
    4	foobar	_	JJ	JJ	_	0	a	_	_
    5	sentence	_	NN	NN	_	0	a	_	_
    6	.	_	.	.	_	0	a	_	_

    :param sentence: A single input sentence to parse
    :type sentence: list(tuple(str, str))
    :rtype: iter(str)
    :return: a generator yielding a single sentence in CoNLL format.
    """
    for (i, (word, tag)) in enumerate(sentence, start=1):
        input_str = [str(i), word, '_', tag, tag, '_', '0', 'a', '_', '_']
        input_str = "\t".join(input_str) + "\n"
        yield input_str


def taggedsents_to_conll(sentences):
    """
    Convert a POS-tagged document stream (i.e. a list of sentences, each
    a list of (word, tag) tuples) into CoNLL format, yielding one line
    per word and two newlines at the end of each sentence.

    >>> from nltk import word_tokenize, sent_tokenize, pos_tag
    >>> text = "This is a foobar sentence. Is that right?"
    >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]
    >>> for line in taggedsents_to_conll(sentences):
    ...     if line:
    ...         print(line, end="")
    1	This	_	DT	DT	_	0	a	_	_
    2	is	_	VBZ	VBZ	_	0	a	_	_
    3	a	_	DT	DT	_	0	a	_	_
    4	foobar	_	JJ	JJ	_	0	a	_	_
    5	sentence	_	NN	NN	_	0	a	_	_
    6	.	_	.	.	_	0	a	_	_
    <BLANKLINE>
    <BLANKLINE>
    1	Is	_	VBZ	VBZ	_	0	a	_	_
    2	that	_	IN	IN	_	0	a	_	_
    3	right	_	NN	NN	_	0	a	_	_
    4	?	_	.	.	_	0	a	_	_
    <BLANKLINE>
    <BLANKLINE>

    :param sentences: Input sentences to parse
    :type sentences: list(list(tuple(str, str)))
    :rtype: iter(str)
    :return: a generator yielding sentences in CoNLL format.
    """
    for sentence in sentences:
        for input_str in taggedsent_to_conll(sentence):
            yield input_str
        yield '\n\n'
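

# A minimal sketch (not part of the original module): tag raw text and
# stream it to a file in CoNLL format. Requires the 'punkt' and
# 'averaged_perceptron_tagger' NLTK data packages; the filename is
# illustrative.
def _demo_conll_export(path='sample.conll'):
    from nltk import word_tokenize, sent_tokenize, pos_tag

    text = "This is a foobar sentence. Is that right?"
    tagged = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]
    with open(path, 'w') as outfile:
        for line in taggedsents_to_conll(tagged):
            outfile.write(line)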


######################################################################
# { Test Suites
######################################################################


class TestGrammar(object):
    """
    Unit tests for CFG.
    """

    def __init__(self, grammar, suite, accept=None, reject=None):
        self.test_grammar = grammar
        self.cp = load_parser(grammar, trace=0)
        self.suite = suite
        self._accept = accept
        self._reject = reject

    def run(self, show_trees=False):
        """
        Sentences in the test suite are divided into two classes:
         - grammatical (``accept``) and
         - ungrammatical (``reject``).

        If a sentence should parse according to the grammar, the value of
        ``trees`` will be a non-empty list. If a sentence should be rejected
        according to the grammar, then the value of ``trees`` will be None.
        """
        for test in self.suite:
            print(test['doc'] + ":", end=' ')
            # Initialize the flags so the final check below cannot hit an
            # unbound variable when one of the sentence lists is empty.
            accepted = rejected = False
            for key in ['accept', 'reject']:
                for sent in test[key]:
                    tokens = sent.split()
                    trees = list(self.cp.parse(tokens))
                    if show_trees and trees:
                        print()
                        print(sent)
                        for tree in trees:
                            print(tree)
                    if key == 'accept':
                        if trees == []:
                            raise ValueError("Sentence '%s' failed to parse" % sent)
                        else:
                            accepted = True
                    else:
                        if trees:
                            raise ValueError("Sentence '%s' received a parse" % sent)
                        else:
                            rejected = True
            if accepted and rejected:
                print("All tests passed!")
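

# A minimal sketch (not part of the original module): run TestGrammar over
# a small suite. The grammar resource and the sentences are illustrative;
# use sentences covered by your grammar's lexicon.
def _demo_test_grammar():
    suite = [
        {
            'doc': 'transitive sentences',
            'accept': ['the dog chased the cat'],
            'reject': ['the dog the cat'],
        }
    ]
    TestGrammar('grammars/sample_grammars/toy.cfg', suite).run()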


def extract_test_sentences(string, comment_chars="#%;", encoding=None):
    """
    Parses a string with one test sentence per line.
    Lines can optionally begin with:
     - a bool, saying if the sentence is grammatical or not, or
     - an int, giving the number of parse trees it should have.
    The result information is followed by a colon, and then the sentence.
    Empty lines and lines beginning with a comment char are ignored.

    :return: a list of tuples of sentences and expected results,
        where a sentence is a list of str,
        and a result is None, or bool, or int
    :param comment_chars: ``str`` of possible comment characters.
    :param encoding: the encoding of the string, if it is binary
    """
    if encoding is not None:
        string = string.decode(encoding)
    sentences = []
    for sentence in string.split('\n'):
        if sentence == '' or sentence[0] in comment_chars:
            continue
        split_info = sentence.split(':', 1)
        result = None
        if len(split_info) == 2:
            if split_info[0] in ['True', 'true', 'False', 'false']:
                result = split_info[0] in ['True', 'true']
                sentence = split_info[1]
            else:
                result = int(split_info[0])
                sentence = split_info[1]
        tokens = sentence.split()
        if tokens == []:
            continue
        sentences += [(tokens, result)]
    return sentences


# nose thinks it is a test
extract_test_sentences.__test__ = False
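

# A minimal sketch (not part of the original module): the result parsed
# from each line is a bool (grammatical or not), an int (the expected
# number of parses), or None when no prefix is given.
def _demo_extract_test_sentences():
    suite_str = (
        "# comment lines and blank lines are skipped\n"
        "True: the dog barks\n"
        "2: the old man the boats\n"
        "the weather is nice\n"
    )
    for tokens, result in extract_test_sentences(suite_str):
        print(tokens, result)
    # Prints:
    #   ['the', 'dog', 'barks'] True
    #   ['the', 'old', 'man', 'the', 'boats'] 2
    #   ['the', 'weather', 'is', 'nice'] None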