# Natural Language Toolkit: TextTiling
#
# Copyright (C) 2001-2019 NLTK Project
# Author: George Boutsioukis
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

import re
import math

try:
    import numpy
except ImportError:
    pass

from nltk.tokenize.api import TokenizerI
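
# Flags selecting the similarity method, the boundary cutoff policy
# (low/high cutoff), and the smoothing method used by TextTilingTokenizer.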
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1
LC, HC = 0, 1
DEFAULT_SMOOTHING = [0]


class TextTilingTokenizer(TokenizerI):
    """Tokenize a document into topical sections using the TextTiling algorithm.

    This algorithm detects subtopic shifts based on the analysis of lexical
    co-occurrence patterns.

    The process starts by tokenizing the text into pseudosentences of
    a fixed size w. Then, depending on the method used, similarity
    scores are assigned at sentence gaps. The algorithm proceeds by
    detecting the peak differences between these scores and marking
    them as boundaries. The boundaries are normalized to the closest
    paragraph break and the segmented text is returned.

    :param w: Pseudosentence size
    :type w: int
    :param k: Size (in sentences) of the block used in the block comparison method
    :type k: int
    :param similarity_method: The method used for determining similarity scores:
       `BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
    :type similarity_method: constant
    :param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
    :type stopwords: list(str)
    :param smoothing_method: The method used for smoothing the score plot:
       `DEFAULT_SMOOTHING` (default)
    :type smoothing_method: constant
    :param smoothing_width: The width of the window used by the smoothing method
    :type smoothing_width: int
    :param smoothing_rounds: The number of smoothing passes
    :type smoothing_rounds: int
    :param cutoff_policy: The policy used to determine the number of boundaries:
       `HC` (default) or `LC`
    :type cutoff_policy: constant

    >>> from nltk.corpus import brown
    >>> tt = TextTilingTokenizer(demo_mode=True)
    >>> text = brown.raw()[:4000]
    >>> s, ss, d, b = tt.tokenize(text)
    >>> b
    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]
    """

    def __init__(
        self,
        w=20,
        k=10,
        similarity_method=BLOCK_COMPARISON,
        stopwords=None,
        smoothing_method=DEFAULT_SMOOTHING,
        smoothing_width=2,
        smoothing_rounds=1,
        cutoff_policy=HC,
        demo_mode=False,
    ):
        if stopwords is None:
            from nltk.corpus import stopwords

            stopwords = stopwords.words('english')
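        # Bind every constructor argument as an instance attribute in one
        # step, then drop the spurious 'self' entry picked up by locals().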
        self.__dict__.update(locals())
        del self.__dict__['self']

    def tokenize(self, text):
        """Return a tokenized copy of *text*, where each "token" represents
        a separate topic."""

        lowercase_text = text.lower()
        paragraph_breaks = self._mark_paragraph_breaks(text)
        text_length = len(lowercase_text)

        # Tokenization step starts here

        # Remove punctuation
        nopunct_text = ''.join(
            c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c)
        )
        nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)

        tokseqs = self._divide_to_tokensequences(nopunct_text)

        # The morphological stemming step mentioned in the TextTile
        # paper is not implemented. A comment in the original C
        # implementation states that it offers no benefit to the
        # process. It might be interesting to test the existing
        # stemmers though.
        # words = _stem_words(words)

        # Filter stopwords
        for ts in tokseqs:
            ts.wrdindex_list = [
                wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords
            ]

        token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
        # End of the Tokenization step

        # Lexical score determination
        if self.similarity_method == BLOCK_COMPARISON:
            gap_scores = self._block_comparison(tokseqs, token_table)
        elif self.similarity_method == VOCABULARY_INTRODUCTION:
            raise NotImplementedError("Vocabulary introduction not implemented")
        else:
            raise ValueError(
                "Similarity method {} not recognized".format(self.similarity_method)
            )

        if self.smoothing_method == DEFAULT_SMOOTHING:
            smooth_scores = self._smooth_scores(gap_scores)
        else:
            raise ValueError(
                "Smoothing method {} not recognized".format(self.smoothing_method)
            )
        # End of Lexical score Determination

        # Boundary identification
        depth_scores = self._depth_scores(smooth_scores)
        segment_boundaries = self._identify_boundaries(depth_scores)

        normalized_boundaries = self._normalize_boundaries(
            text, segment_boundaries, paragraph_breaks
        )
        # End of Boundary Identification

        segmented_text = []
        prevb = 0

        for b in normalized_boundaries:
            if b == 0:
                continue
            segmented_text.append(text[prevb:b])
            prevb = b

        if prevb < text_length:  # append any text that may be remaining
            segmented_text.append(text[prevb:])

        if not segmented_text:
            segmented_text = [text]

        if self.demo_mode:
            return gap_scores, smooth_scores, depth_scores, segment_boundaries
        return segmented_text

    def _block_comparison(self, tokseqs, token_table):
        """Implements the block comparison method"""

        def blk_frq(tok, block):
            ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences)
            freq = sum([tsocc[1] for tsocc in ts_occs])
            return freq

        gap_scores = []
        numgaps = len(tokseqs) - 1

        for curr_gap in range(numgaps):
            score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
            score = 0.0
            # adjust window size for boundary conditions
            if curr_gap < self.k - 1:
                window_size = curr_gap + 1
            elif curr_gap > numgaps - self.k:
                window_size = numgaps - curr_gap
            else:
                window_size = self.k

            b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]]
            b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]]
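            # The gap score is a cosine-style similarity between the two
            # blocks: the inner product of their per-token frequencies divided
            # by the product of the blocks' vector norms.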
            for t in token_table:
                score_dividend += blk_frq(t, b1) * blk_frq(t, b2)
                score_divisor_b1 += blk_frq(t, b1) ** 2
                score_divisor_b2 += blk_frq(t, b2) ** 2
            try:
                score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2)
            except ZeroDivisionError:
                pass  # score += 0.0

            gap_scores.append(score)

        return gap_scores

    def _smooth_scores(self, gap_scores):
        "Wraps the smooth function from the SciPy Cookbook"
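        # NOTE: only smoothing_width is applied here; the smoothing_rounds
        # parameter accepted by the constructor is not used by this default
        # smoothing method.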
        return list(
            smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1)
        )

    def _mark_paragraph_breaks(self, text):
        """Identifies indented text or line breaks as the beginning of
        paragraphs"""
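        # A paragraph break is a blank line (two newlines separated only by
        # whitespace); breaks closer than MIN_PARAGRAPH characters to the
        # previous one are ignored.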
        MIN_PARAGRAPH = 100
        pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
        matches = pattern.finditer(text)

        last_break = 0
        pbreaks = [0]
        for pb in matches:
            if pb.start() - last_break < MIN_PARAGRAPH:
                continue
            else:
                pbreaks.append(pb.start())
                last_break = pb.start()

        return pbreaks

    def _divide_to_tokensequences(self, text):
        "Divides the text into pseudosentences of fixed size"
        w = self.w
        wrdindex_list = []
        matches = re.finditer(r"\w+", text)
        for match in matches:
            wrdindex_list.append((match.group(), match.start()))
        return [
            TokenSequence(i // w, wrdindex_list[i : i + w])
            for i in range(0, len(wrdindex_list), w)
        ]

    def _create_token_table(self, token_sequences, par_breaks):
        "Creates a table of TokenTableFields"
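        # For each word the table records its first position, total count,
        # the number of paragraphs it occurs in, and a per-token-sequence
        # occurrence list of [token_sequence_index, count] pairs.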
        token_table = {}
        current_par = 0
        current_tok_seq = 0
        pb_iter = par_breaks.__iter__()
        current_par_break = next(pb_iter)
        if current_par_break == 0:
            try:
                current_par_break = next(pb_iter)  # skip break at 0
            except StopIteration:
                raise ValueError(
                    "No paragraph breaks were found (text too short perhaps?)"
                )
        for ts in token_sequences:
            for word, index in ts.wrdindex_list:
                try:
                    while index > current_par_break:
                        current_par_break = next(pb_iter)
                        current_par += 1
                except StopIteration:
                    # hit bottom
                    pass

                if word in token_table:
                    token_table[word].total_count += 1

                    if token_table[word].last_par != current_par:
                        token_table[word].last_par = current_par
                        token_table[word].par_count += 1

                    if token_table[word].last_tok_seq != current_tok_seq:
                        token_table[word].last_tok_seq = current_tok_seq
                        token_table[word].ts_occurences.append([current_tok_seq, 1])
                    else:
                        token_table[word].ts_occurences[-1][1] += 1
                else:  # new word
                    token_table[word] = TokenTableField(
                        first_pos=index,
                        ts_occurences=[[current_tok_seq, 1]],
                        total_count=1,
                        par_count=1,
                        last_par=current_par,
                        last_tok_seq=current_tok_seq,
                    )

            current_tok_seq += 1

        return token_table

    def _identify_boundaries(self, depth_scores):
        """Identifies boundaries at the peaks of similarity score
        differences"""

        boundaries = [0 for x in depth_scores]

        avg = sum(depth_scores) / len(depth_scores)
        stdev = numpy.std(depth_scores)
        # NOTE: both branches currently compute the same cutoff, so the
        # cutoff_policy setting has no effect at this point.
        if self.cutoff_policy == LC:
            cutoff = avg - stdev / 2.0
        else:
            cutoff = avg - stdev / 2.0

        depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
        depth_tuples.reverse()
        hp = list(filter(lambda x: x[0] > cutoff, depth_tuples))
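        # Mark a boundary at every gap whose depth score clears the cutoff,
        # then undo any mark that falls within four gaps of another marked gap.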
        for dt in hp:
            boundaries[dt[1]] = 1
            for dt2 in hp:  # undo if there is a boundary close already
                if (
                    dt[1] != dt2[1]
                    and abs(dt2[1] - dt[1]) < 4
                    and boundaries[dt2[1]] == 1
                ):
                    boundaries[dt[1]] = 0

        return boundaries

    def _depth_scores(self, scores):
        """Calculates the depth of each gap, i.e. the sum of the differences
        between a gap's score and the scores of the peaks to its left and
        right"""
        depth_scores = [0 for x in scores]
        # clip boundaries: this holds on the rule of thumb (my thumb)
        # that a section shouldn't be smaller than at least 2
        # pseudosentences for small texts and around 5 for larger ones.
        clip = min(max(len(scores) // 10, 2), 5)
        index = clip
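        # For each interior gap, walk left and then right along the score
        # curve until it stops rising; the depth is how far the gap's score
        # sits below those two local peaks (lpeak + rpeak - 2 * gapscore).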

        for gapscore in scores[clip:-clip]:
            lpeak = gapscore
            for score in scores[index::-1]:
                if score >= lpeak:
                    lpeak = score
                else:
                    break
            rpeak = gapscore
            for score in scores[index:]:
                if score >= rpeak:
                    rpeak = score
                else:
                    break
            depth_scores[index] = lpeak + rpeak - 2 * gapscore
            index += 1

        return depth_scores

    def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
        """Normalize the boundaries identified to the original text's
        paragraph breaks"""
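        # Walk through the original text counting words; each time a marked
        # pseudosentence gap is passed, snap the boundary to the nearest
        # paragraph break recorded in paragraph_breaks.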
        norm_boundaries = []
        char_count, word_count, gaps_seen = 0, 0, 0
        seen_word = False

        for char in text:
            char_count += 1
            if char in " \t\n" and seen_word:
                seen_word = False
                word_count += 1
            if char not in " \t\n" and not seen_word:
                seen_word = True
            if gaps_seen < len(boundaries) and word_count > (
                max(gaps_seen * self.w, self.w)
            ):
                if boundaries[gaps_seen] == 1:
                    # find closest paragraph break
                    best_fit = len(text)
                    for br in paragraph_breaks:
                        if best_fit > abs(br - char_count):
                            best_fit = abs(br - char_count)
                            bestbr = br
                        else:
                            break
                    if bestbr not in norm_boundaries:  # avoid duplicates
                        norm_boundaries.append(bestbr)
                gaps_seen += 1

        return norm_boundaries


class TokenTableField(object):
    """A field in the token table holding parameters for each token,
    used later in the process"""

    def __init__(
        self,
        first_pos,
        ts_occurences,
        total_count=1,
        par_count=1,
        last_par=0,
        last_tok_seq=None,
    ):
        self.__dict__.update(locals())
        del self.__dict__['self']


class TokenSequence(object):
    "A token list with its original length and its index"

    def __init__(self, index, wrdindex_list, original_length=None):
        original_length = original_length or len(wrdindex_list)
        self.__dict__.update(locals())
        del self.__dict__['self']


# Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x, window_len=11, window='flat'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.

    :param x: the input signal
    :param window_len: the dimension of the smoothing window; should be an odd integer
    :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
        a flat window will produce a moving average smoothing.
    :return: the smoothed signal

    example::

        t = linspace(-2, 2, 100)
        x = sin(t) + randn(len(t)) * 0.1
        y = smooth(x)

    :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError(
            "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )
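
    # Pad both ends of the signal with reflected copies so that the
    # convolution does not distort the first and last window_len samples.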
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]

    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = getattr(numpy, window)(window_len)

    y = numpy.convolve(w / w.sum(), s, mode='same')

    return y[window_len - 1 : -window_len + 1]


def demo(text=None):
    from nltk.corpus import brown
    from matplotlib import pylab

    tt = TextTilingTokenizer(demo_mode=True)
    if text is None:
        text = brown.raw()[:10000]
    s, ss, d, b = tt.tokenize(text)
    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(s)), s, label="Gap Scores")
    pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
    pylab.plot(range(len(d)), d, label="Depth scores")
    pylab.stem(range(len(b)), b)
    pylab.legend()
    pylab.show()
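

# A small convenience entry point so the module can be run directly for a
# quick visual check; it assumes matplotlib and the Brown corpus are available
# locally and is not part of the tokenizer API.
if __name__ == "__main__":
    demo()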