# coding: utf-8
#
# Natural Language Toolkit: Sentiment Analyzer
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
Utility methods for Sentiment Analysis.
"""

from __future__ import division

import codecs
import csv
import json
import pickle
import random
import re
import sys
import time
from copy import deepcopy
from itertools import tee

import nltk
from nltk.corpus import CategorizedPlaintextCorpusReader
from nltk.data import load
from nltk.tokenize.casual import EMOTICON_RE

# ////////////////////////////////////////////////////////////
# { Regular expressions
# ////////////////////////////////////////////////////////////

# Regular expression for negation by Christopher Potts
NEGATION = r"""
    (?:
        ^(?:never|no|nothing|nowhere|noone|none|not|
            havent|hasnt|hadnt|cant|couldnt|shouldnt|
            wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint
        )$
    )
    |
    n't"""

NEGATION_RE = re.compile(NEGATION, re.VERBOSE)

CLAUSE_PUNCT = r'^[.:;!?]$'
CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT)
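
# A quick sketch of how these patterns behave:
#   NEGATION_RE.search("didn't")  -> matches (via the trailing "n't" alternative)
#   NEGATION_RE.search("never")   -> matches (whole-word negation)
#   CLAUSE_PUNCT_RE.match(".")    -> matches; clause punctuation ends the negation
#                                    scope in mark_negation() below.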
# Happy and sad emoticons

HAPPY = set(
    [
        ':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)',
        '=)', ':}', ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D',
        'XD', '=-D', '=D', '=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*',
        '>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p',
        ':-b', ':b', '>:)', '>;)', '>:-)', '<3',
    ]
)

SAD = set(
    [
        ':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L',
        ':<', ':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(",
        ':\\', ':-c', ':c', ':{', '>:\\', ';(',
    ]
)
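
# These sets are used by json2csv_preprocess() to drop "ambiguous" tweets that
# contain both a happy and a sad emoticon, e.g.:
#   ':)' in HAPPY  -> True
#   ':(' in SAD    -> True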


def timer(method):
    """
    A timer decorator to measure execution performance of methods.
    """

    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        end = time.time()
        tot_time = end - start
        hours = tot_time // 3600
        mins = tot_time // 60 % 60
        # in Python 2.x round() will return a float, so we convert it to int
        secs = int(round(tot_time % 60))
        if hours == 0 and mins == 0 and secs < 10:
            print('[TIMER] {0}(): {1:.3f} seconds'.format(method.__name__, tot_time))
        else:
            print(
                '[TIMER] {0}(): {1}h {2}m {3}s'.format(
                    method.__name__, hours, mins, secs
                )
            )
        return result

    return timed
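
# Example usage (a sketch): decorating a function prints its running time just
# before the result is returned.
#
#   @timer
#   def slow_sum(n):
#       return sum(range(n))
#
#   slow_sum(10**7)   # prints e.g. "[TIMER] slow_sum(): 0.350 seconds"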


def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)
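
# For example:
#   list(pairwise(['a', 'b', 'c', 'd']))  ->  [('a', 'b'), ('b', 'c'), ('c', 'd')]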


# ////////////////////////////////////////////////////////////
# { Feature extractor functions
# ////////////////////////////////////////////////////////////
"""
Feature extractor functions are declared outside the SentimentAnalyzer class.
Users should have the possibility to create their own feature extractors
without modifying SentimentAnalyzer.
"""


def extract_unigram_feats(document, unigrams, handle_negation=False):
    """
    Populate a dictionary of unigram features, reflecting the presence/absence in
    the document of each of the tokens in `unigrams`.

    :param document: a list of words/tokens.
    :param unigrams: a list of words/tokens whose presence/absence has to be
        checked in `document`.
    :param handle_negation: if `handle_negation == True`, apply `mark_negation`
        method to `document` before checking for unigram presence/absence.
    :return: a dictionary of unigram features {unigram : boolean}.

    >>> words = ['ice', 'police', 'riot']
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_unigram_feats(document, words).items())
    [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)]
    """
    features = {}
    if handle_negation:
        document = mark_negation(document)
    for word in unigrams:
        features['contains({0})'.format(word)] = word in set(document)
    return features


def extract_bigram_feats(document, bigrams):
    """
    Populate a dictionary of bigram features, reflecting the presence/absence in
    the document of each of the tokens in `bigrams`. This extractor function only
    considers contiguous bigrams obtained by `nltk.bigrams`.

    :param document: a list of words/tokens.
    :param bigrams: a list of bigrams whose presence/absence has to be
        checked in `document`.
    :return: a dictionary of bigram features {bigram : boolean}.

    >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')]
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_bigram_feats(document, bigrams).items()) # doctest: +NORMALIZE_WHITESPACE
    [('contains(global - warming)', True), ('contains(love - you)', False),
    ('contains(police - prevented)', False)]
    """
    features = {}
    for bigr in bigrams:
        features['contains({0} - {1})'.format(bigr[0], bigr[1])] = bigr in nltk.bigrams(
            document
        )
    return features


# ////////////////////////////////////////////////////////////
# { Helper Functions
# ////////////////////////////////////////////////////////////


def mark_negation(document, double_neg_flip=False, shallow=False):
    """
    Append _NEG suffix to words that appear in the scope between a negation
    and a punctuation mark.

    :param document: a list of words/tokens, or a tuple (words, label).
    :param shallow: if True, the method will modify the original document in place.
    :param double_neg_flip: if True, double negation is considered affirmation
        (we activate/deactivate negation scope every time we find a negation).
    :return: if `shallow == True` the method will modify the original document
        and return it. If `shallow == False` the method will return a modified
        document, leaving the original unmodified.

    >>> sent = "I didn't like this movie . It was bad .".split()
    >>> mark_negation(sent)
    ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.']
    """
    if not shallow:
        document = deepcopy(document)
    # check if the document is labeled. If so, do not consider the label.
    labeled = document and isinstance(document[0], (tuple, list))
    if labeled:
        doc = document[0]
    else:
        doc = document
    neg_scope = False
    for i, word in enumerate(doc):
        if NEGATION_RE.search(word):
            if not neg_scope or (neg_scope and double_neg_flip):
                neg_scope = not neg_scope
                continue
            else:
                doc[i] += '_NEG'
        elif neg_scope and CLAUSE_PUNCT_RE.search(word):
            neg_scope = not neg_scope
        elif neg_scope and not CLAUSE_PUNCT_RE.search(word):
            doc[i] += '_NEG'

    return document


def output_markdown(filename, **kwargs):
    """
    Write the output of an analysis to a file.
    """
    with codecs.open(filename, 'at') as outfile:
        text = '\n*** \n\n'
        text += '{0} \n\n'.format(time.strftime("%d/%m/%Y, %H:%M"))
        for k in sorted(kwargs):
            if isinstance(kwargs[k], dict):
                dictionary = kwargs[k]
                text += '  - **{0}:**\n'.format(k)
                for entry in sorted(dictionary):
                    text += '    - {0}: {1} \n'.format(entry, dictionary[entry])
            elif isinstance(kwargs[k], list):
                text += '  - **{0}:**\n'.format(k)
                for entry in kwargs[k]:
                    text += '    - {0}\n'.format(entry)
            else:
                text += '  - **{0}:** {1} \n'.format(k, kwargs[k])
        outfile.write(text)
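
# Example usage (a sketch with made-up values): appends a timestamped section
# to "results.md", rendering dicts and lists as nested markdown bullet lists.
#
#   output_markdown('results.md', Dataset='movie_reviews',
#                   Classifier='NaiveBayesClassifier',
#                   Results={'Accuracy': 0.85, 'Precision [pos]': 0.81})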


def save_file(content, filename):
    """
    Store `content` in `filename`. Can be used to store a SentimentAnalyzer.
    """
    print("Saving", filename)
    with codecs.open(filename, 'wb') as storage_file:
        # The protocol=2 parameter is for python2 compatibility
        pickle.dump(content, storage_file, protocol=2)


def split_train_test(all_instances, n=None):
    """
    Randomly split `n` instances of the dataset into train and test sets.

    :param all_instances: a list of instances (e.g. documents) that will be split.
    :param n: the number of instances to consider (in case we want to use only a
        subset).
    :return: two lists of instances. Train set is 8/10 of the total and test set
        is 2/10 of the total.
    """
    random.seed(12345)
    random.shuffle(all_instances)
    if not n or n > len(all_instances):
        n = len(all_instances)
    train_set = all_instances[: int(0.8 * n)]
    test_set = all_instances[int(0.8 * n) : n]

    return train_set, test_set
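
# For example, split_train_test(list(range(10))) returns 8 training instances
# and 2 test instances, shuffled with a fixed seed (12345) for reproducibility.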


def _show_plot(x_values, y_values, x_labels=None, y_labels=None):
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError(
            'The plot function requires matplotlib to be installed. '
            'See http://matplotlib.org/'
        )

    plt.locator_params(axis='y', nbins=3)
    axes = plt.axes()
    axes.yaxis.grid()
    plt.plot(x_values, y_values, 'ro', color='red')
    plt.ylim(ymin=-1.2, ymax=1.2)
    plt.tight_layout(pad=5)
    if x_labels:
        plt.xticks(x_values, x_labels, rotation='vertical')
    if y_labels:
        plt.yticks([-1, 0, 1], y_labels, rotation='horizontal')
    # Pad margins so that markers are not clipped by the axes
    plt.margins(0.2)
    plt.show()


# ////////////////////////////////////////////////////////////
# { Parsing and conversion functions
# ////////////////////////////////////////////////////////////


def json2csv_preprocess(
    json_file,
    outfile,
    fields,
    encoding='utf8',
    errors='replace',
    gzip_compress=False,
    skip_retweets=True,
    skip_tongue_tweets=True,
    skip_ambiguous_tweets=True,
    strip_off_emoticons=True,
    remove_duplicates=True,
    limit=None,
):
    """
    Convert json file to csv file, preprocessing each row to obtain a suitable
    dataset for tweets Sentiment Analysis.

    :param json_file: the original json file containing tweets.
    :param outfile: the output csv filename.
    :param fields: a list of fields that will be extracted from the json file and
        kept in the output csv file.
    :param encoding: the encoding of the files.
    :param errors: the error handling strategy for the output writer.
    :param gzip_compress: if True, create a compressed GZIP file.
    :param skip_retweets: if True, remove retweets.
    :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P"
        emoticons.
    :param skip_ambiguous_tweets: if True, remove tweets containing both happy
        and sad emoticons.
    :param strip_off_emoticons: if True, strip off emoticons from all tweets.
    :param remove_duplicates: if True, remove tweets appearing more than once.
    :param limit: an integer to set the number of tweets to convert. After the
        limit is reached the conversion will stop. It can be useful to create
        subsets of the original tweets json data.
    """
    # outf_writer_compat and extract_fields come from nltk.twitter.common (they
    # are also imported in the __main__ block below); importing them here keeps
    # this function usable when the module is imported rather than run as a script.
    from nltk.twitter.common import outf_writer_compat, extract_fields

    with codecs.open(json_file, encoding=encoding) as fp:
        (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
        # write the list of fields as header
        writer.writerow(fields)

        if remove_duplicates == True:
            tweets_cache = []
        i = 0
        for line in fp:
            tweet = json.loads(line)
            row = extract_fields(tweet, fields)
            try:
                text = row[fields.index('text')]
                # Remove retweets
                if skip_retweets == True:
                    if re.search(r'\bRT\b', text):
                        continue
                # Remove tweets containing ":P" and ":-P" emoticons
                if skip_tongue_tweets == True:
                    if re.search(r'\:\-?P\b', text):
                        continue
                # Remove tweets containing both happy and sad emoticons
                if skip_ambiguous_tweets == True:
                    all_emoticons = EMOTICON_RE.findall(text)
                    if all_emoticons:
                        if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD):
                            continue
                # Strip off emoticons from all tweets
                if strip_off_emoticons == True:
                    row[fields.index('text')] = re.sub(
                        r'(?!\n)\s+', ' ', EMOTICON_RE.sub('', text)
                    )
                # Remove duplicate tweets
                if remove_duplicates == True:
                    if row[fields.index('text')] in tweets_cache:
                        continue
                    else:
                        tweets_cache.append(row[fields.index('text')])
            except ValueError:
                pass
            writer.writerow(row)
            i += 1
            if limit and i >= limit:
                break
    outf.close()
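
# Example usage (a sketch, mirroring demo_tweets() below; assumes the
# twitter_samples corpus has been downloaded):
#
#   from nltk.corpus import twitter_samples
#   positive_json = twitter_samples.abspath('positive_tweets.json')
#   json2csv_preprocess(positive_json, 'positive_tweets.csv', ['id', 'text'],
#                       limit=100)
#
# This writes an "id,text" header followed by up to 100 cleaned tweets.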


def parse_tweets_set(
    filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True
):
    """
    Parse csv file containing tweets and output a list of (text, label) tuples.

    :param filename: the input csv filename.
    :param label: the label to be appended to each tweet contained in the csv file.
    :param word_tokenizer: the tokenizer instance that will be used to tokenize
        each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()).
        If no word_tokenizer is specified, tweets will not be tokenized.
    :param sent_tokenizer: the tokenizer that will be used to split each tweet into
        sentences.
    :param skip_header: if True, skip the first line of the csv file (which usually
        contains headers).

    :return: a list of (text, label) tuples.
    """
    tweets = []
    if not sent_tokenizer:
        sent_tokenizer = load('tokenizers/punkt/english.pickle')

    # If we use Python3.x we can proceed using the 'rt' flag
    if sys.version_info[0] == 3:
        with codecs.open(filename, 'rt') as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            for tweet_id, text in reader:
                # text = text[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [
                        w
                        for sent in sent_tokenizer.tokenize(text)
                        for w in word_tokenizer.tokenize(sent)
                    ]
                else:
                    tweet = text
                tweets.append((tweet, label))
    # If we use Python2.x we need to handle encoding problems
    elif sys.version_info[0] < 3:
        with codecs.open(filename) as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            for row in reader:
                unicode_row = [x.decode('utf8') for x in row]
                text = unicode_row[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [
                        w.encode('utf8')
                        for sent in sent_tokenizer.tokenize(text)
                        for w in word_tokenizer.tokenize(sent)
                    ]
                else:
                    tweet = text
                tweets.append((tweet, label))
    print("Loaded {0} tweets".format(i))
    return tweets
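
# Example usage (a sketch, reading the csv produced by json2csv_preprocess()):
#
#   from nltk.tokenize import TweetTokenizer
#   pos_docs = parse_tweets_set('positive_tweets.csv', label='pos',
#                               word_tokenizer=TweetTokenizer())
#
# Each element of pos_docs is a (tokenized_text, 'pos') tuple.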


# ////////////////////////////////////////////////////////////
# { Demos
# ////////////////////////////////////////////////////////////


def demo_tweets(trainer, n_instances=None, output=None):
    """
    Train and test a classifier on 10000 tweets, tokenized using TweetTokenizer.
    Features are composed of:

        - 1000 most frequent unigrams
        - 100 top bigrams (using BigramAssocMeasures.pmi)

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total tweets that have to be used for
        training and testing. Tweets will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.tokenize import TweetTokenizer
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import twitter_samples, stopwords

    # Different customizations for the TweetTokenizer
    tokenizer = TweetTokenizer(preserve_case=False)
    # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True)
    # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True)

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    fields = ['id', 'text']
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances)

    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances)

    neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer)
    pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer)

    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_tweets = train_pos_docs + train_neg_docs
    testing_tweets = test_pos_docs + test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    # stopwords = stopwords.words('english')
    # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords]
    all_words = [word for word in sentim_analyzer.all_words(training_tweets)]

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Add bigram collocation features
    bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats(
        [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12
    )
    sentim_analyzer.add_feat_extractor(
        extract_bigram_feats, bigrams=bigram_collocs_feats
    )

    training_set = sentim_analyzer.apply_features(training_tweets)
    test_set = sentim_analyzer.apply_features(testing_tweets)

    classifier = sentim_analyzer.train(trainer, training_set)
    # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(
            output,
            Dataset='labeled_tweets',
            Classifier=type(classifier).__name__,
            Tokenizer=tokenizer.__class__.__name__,
            Feats=extr,
            Results=results,
            Instances=n_instances,
        )


def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train a classifier on all instances of the Movie Reviews dataset.
    The corpus has been preprocessed using the default sentence tokenizer and
    WordPunctTokenizer.
    Features are composed of:

        - most frequent unigrams

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total reviews that have to be used for
        training and testing. Reviews will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.corpus import movie_reviews
    from nltk.sentiment import SentimentAnalyzer

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    pos_docs = [
        (list(movie_reviews.words(pos_id)), 'pos')
        for pos_id in movie_reviews.fileids('pos')[:n_instances]
    ]
    neg_docs = [
        (list(movie_reviews.words(neg_id)), 'neg')
        for neg_id in movie_reviews.fileids('neg')[:n_instances]
    ]
    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_docs = train_pos_docs + train_neg_docs
    testing_docs = test_pos_docs + test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words = sentim_analyzer.all_words(training_docs)

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(
            output,
            Dataset='Movie_reviews',
            Classifier=type(classifier).__name__,
            Tokenizer='WordPunctTokenizer',
            Feats=extr,
            Results=results,
            Instances=n_instances,
        )


def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjectivity Dataset by Pang
    and Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between subjective
        and objective.
    :param output: the output file where results have to be reported.
    """
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    subj_docs = [
        (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]
    ]
    obj_docs = [
        (sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]
    ]

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)

    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words(
        [mark_negation(doc) for doc in training_docs]
    )

    # Add simple unigram word features handling negation
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = sentim_analyzer.evaluate(test_set)

    if save_analyzer == True:
        save_file(sentim_analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(
            output,
            Dataset='subjectivity',
            Classifier=type(classifier).__name__,
            Tokenizer='WhitespaceTokenizer',
            Feats=extr,
            Instances=n_instances,
            Results=results,
        )

    return sentim_analyzer


def demo_sent_subjectivity(text):
    """
    Classify a single sentence as subjective or objective using a stored
    SentimentAnalyzer.

    :param text: a sentence whose subjectivity has to be classified.
    """
    from nltk.classify import NaiveBayesClassifier
    from nltk.tokenize import regexp

    word_tokenizer = regexp.WhitespaceTokenizer()
    try:
        sentim_analyzer = load('sa_subjectivity.pickle')
    except LookupError:
        print('Cannot find the sentiment analyzer you want to load.')
        print('Training a new one using NaiveBayesClassifier.')
        sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True)

    # Tokenize and convert to lower case
    tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)]
    print(sentim_analyzer.classify(tokenized_text))


def demo_liu_hu_lexicon(sentence, plot=False):
    """
    Basic example of sentiment classification using the Liu and Hu opinion lexicon.
    This function simply counts the number of positive, negative and neutral words
    in the sentence and classifies it depending on which polarity is more represented.
    Words that do not appear in the lexicon are considered as neutral.

    :param sentence: a sentence whose polarity has to be classified.
    :param plot: if True, plot a visual representation of the sentence polarity.
    """
    from nltk.corpus import opinion_lexicon
    from nltk.tokenize import treebank

    tokenizer = treebank.TreebankWordTokenizer()
    pos_words = 0
    neg_words = 0
    tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)]

    x = list(range(len(tokenized_sent)))  # x axis for the plot
    y = []

    for word in tokenized_sent:
        if word in opinion_lexicon.positive():
            pos_words += 1
            y.append(1)  # positive
        elif word in opinion_lexicon.negative():
            neg_words += 1
            y.append(-1)  # negative
        else:
            y.append(0)  # neutral

    if pos_words > neg_words:
        print('Positive')
    elif pos_words < neg_words:
        print('Negative')
    elif pos_words == neg_words:
        print('Neutral')

    if plot == True:
        _show_plot(
            x, y, x_labels=tokenized_sent, y_labels=['Negative', 'Neutral', 'Positive']
        )


def demo_vader_instance(text):
    """
    Output polarity scores for a text using the Vader approach.

    :param text: a text whose polarity has to be evaluated.
    """
    from nltk.sentiment import SentimentIntensityAnalyzer

    vader_analyzer = SentimentIntensityAnalyzer()
    print(vader_analyzer.polarity_scores(text))


def demo_vader_tweets(n_instances=None, output=None):
    """
    Classify 10000 positive and negative tweets using the Vader approach.

    :param n_instances: the number of total tweets that have to be classified.
    :param output: the output file where results have to be reported.
    """
    from collections import defaultdict
    from nltk.corpus import twitter_samples
    from nltk.sentiment import SentimentIntensityAnalyzer
    from nltk.metrics import (
        accuracy as eval_accuracy,
        precision as eval_precision,
        recall as eval_recall,
        f_measure as eval_f_measure,
    )

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    fields = ['id', 'text']
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(
        positive_json,
        positive_csv,
        fields,
        strip_off_emoticons=False,
        limit=n_instances,
    )

    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(
        negative_json,
        negative_csv,
        fields,
        strip_off_emoticons=False,
        limit=n_instances,
    )

    pos_docs = parse_tweets_set(positive_csv, label='pos')
    neg_docs = parse_tweets_set(negative_csv, label='neg')

    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_tweets = train_pos_docs + train_neg_docs
    testing_tweets = test_pos_docs + test_neg_docs

    vader_analyzer = SentimentIntensityAnalyzer()

    gold_results = defaultdict(set)
    test_results = defaultdict(set)
    acc_gold_results = []
    acc_test_results = []
    labels = set()
    num = 0
    for i, (text, label) in enumerate(testing_tweets):
        labels.add(label)
        gold_results[label].add(i)
        acc_gold_results.append(label)
        score = vader_analyzer.polarity_scores(text)['compound']
        if score > 0:
            observed = 'pos'
        else:
            observed = 'neg'
        num += 1
        acc_test_results.append(observed)
        test_results[observed].add(i)

    metrics_results = {}
    for label in labels:
        accuracy_score = eval_accuracy(acc_gold_results, acc_test_results)
        metrics_results['Accuracy'] = accuracy_score
        precision_score = eval_precision(gold_results[label], test_results[label])
        metrics_results['Precision [{0}]'.format(label)] = precision_score
        recall_score = eval_recall(gold_results[label], test_results[label])
        metrics_results['Recall [{0}]'.format(label)] = recall_score
        f_measure_score = eval_f_measure(gold_results[label], test_results[label])
        metrics_results['F-measure [{0}]'.format(label)] = f_measure_score

    for result in sorted(metrics_results):
        print('{0}: {1}'.format(result, metrics_results[result]))

    if output:
        output_markdown(
            output,
            Approach='Vader',
            Dataset='labeled_tweets',
            Instances=n_instances,
            Results=metrics_results,
        )


if __name__ == '__main__':
    from nltk.classify import NaiveBayesClassifier, MaxentClassifier
    from nltk.classify.scikitlearn import SklearnClassifier
    from sklearn.svm import LinearSVC
    from nltk.twitter.common import outf_writer_compat, extract_fields

    naive_bayes = NaiveBayesClassifier.train
    svm = SklearnClassifier(LinearSVC()).train
    maxent = MaxentClassifier.train

    demo_tweets(naive_bayes)
    # demo_movie_reviews(svm)
    # demo_subjectivity(svm)
    # demo_sent_subjectivity("she's an artist , but hasn't picked up a brush in a year . ")
    # demo_liu_hu_lexicon("This movie was actually neither that funny, nor super witty.", plot=True)
    # demo_vader_instance("This movie was actually neither that funny, nor super witty.")
    # demo_vader_tweets()