# -*- coding: utf-8 -*-
# Natural Language Toolkit: Distance Metrics
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
#         Steven Bird <stevenbird1@gmail.com>
#         Tom Lippincott <tom@cs.columbia.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
  11. """
  12. Distance Metrics.
  13. Compute the distance between two items (usually strings).
  14. As metrics, they must satisfy the following three requirements:
  15. 1. d(a, a) = 0
  16. 2. d(a, b) >= 0
  17. 3. d(a, c) <= d(a, b) + d(b, c)
  18. """

from __future__ import print_function
from __future__ import division

import warnings
import operator


def _edit_dist_init(len1, len2):
    lev = []
    for i in range(len1):
        lev.append([0] * len2)  # initialize 2D array to zero
    for i in range(len1):
        lev[i][0] = i  # column 0: 0,1,2,3,4,...
    for j in range(len2):
        lev[0][j] = j  # row 0: 0,1,2,3,4,...
    return lev


def _edit_dist_step(lev, i, j, s1, s2, substitution_cost=1, transpositions=False):
    c1 = s1[i - 1]
    c2 = s2[j - 1]

    # skipping a character in s1
    a = lev[i - 1][j] + 1
    # skipping a character in s2
    b = lev[i][j - 1] + 1
    # substitution
    c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0)

    # transposition
    d = c + 1  # never picked by default
    if transpositions and i > 1 and j > 1:
        if s1[i - 2] == c2 and s2[j - 2] == c1:
            d = lev[i - 2][j - 2] + 1

    # pick the cheapest
    lev[i][j] = min(a, b, c, d)
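
# A worked illustration of the transposition branch (the values follow from
# the update rule above): for s1="ab", s2="ba" with transpositions=True, the
# final cell takes d = lev[0][0] + 1, so lev[2][2] == 1 instead of 2.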


def edit_distance(s1, s2, substitution_cost=1, transpositions=False):
    """
    Calculate the Levenshtein edit-distance between two strings.
    The edit distance is the number of characters that need to be
    substituted, inserted, or deleted, to transform s1 into s2. For
    example, transforming "rain" to "shine" requires three steps,
    consisting of two substitutions and one insertion:
    "rain" -> "sain" -> "shin" -> "shine". These operations could have
    been done in other orders, but at least three steps are needed.

    Allows specifying the cost of substitution edits (e.g., "a" -> "b"),
    because sometimes it makes sense to assign greater penalties to
    substitutions.

    This also optionally allows transposition edits (e.g., "ab" -> "ba"),
    though this is disabled by default.

    :param s1, s2: The strings to be analysed
    :param transpositions: Whether to allow transposition edits
    :type s1: str
    :type s2: str
    :type substitution_cost: int
    :type transpositions: bool
    :rtype: int
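
    A quick check of the behaviour described above (the values follow from
    the definition: plain Levenshtein vs. the transposition-aware variant):

    >>> edit_distance("rain", "shine")
    3
    >>> edit_distance("ab", "ba")
    2
    >>> edit_distance("ab", "ba", transpositions=True)
    1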
  69. """
    # set up a 2-D array
    len1 = len(s1)
    len2 = len(s2)
    lev = _edit_dist_init(len1 + 1, len2 + 1)

    # iterate over the array
    for i in range(len1):
        for j in range(len2):
            _edit_dist_step(
                lev,
                i + 1,
                j + 1,
                s1,
                s2,
                substitution_cost=substitution_cost,
                transpositions=transpositions,
            )
    return lev[len1][len2]


def _edit_dist_backtrace(lev):
    i, j = len(lev) - 1, len(lev[0]) - 1
    alignment = [(i, j)]

    while (i, j) != (0, 0):
        directions = [
            (i - 1, j),  # skip s1
            (i, j - 1),  # skip s2
            (i - 1, j - 1),  # substitution
        ]
        direction_costs = (
            (lev[i][j] if (i >= 0 and j >= 0) else float('inf'), (i, j))
            for i, j in directions
        )

        # Add the least costly direction.
        _, (i, j) = min(direction_costs, key=operator.itemgetter(0))
        alignment.append((i, j))
    return list(reversed(alignment))


def edit_distance_align(s1, s2, substitution_cost=1):
    """
    Calculate the minimum Levenshtein edit-distance based alignment
    mapping between two strings. The alignment finds the mapping
    from string s1 to s2 that minimizes the edit distance cost.
    For example, mapping "rain" to "shine" would involve 2
    substitutions, 2 matches and an insertion resulting in
    the following mapping:
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]
    NB: (0, 0) is the start state without any letters associated.

    See more: https://web.stanford.edu/class/cs124/lec/med.pdf

    In case of multiple valid minimum-distance alignments, the
    backtrace has the following operation precedence:
    1. Skip s1 character
    2. Skip s2 character
    3. Substitute s1 and s2 characters
    The backtrace is carried out in reverse string order.

    This function does not support transposition.

    :param s1, s2: The strings to be aligned
    :type s1: str
    :type s2: str
    :type substitution_cost: int
    :rtype: List[Tuple[int, int]]
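
    For example (the mapping stated above, as a doctest):

    >>> edit_distance_align("rain", "shine")
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]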
  125. """
    # set up a 2-D array
    len1 = len(s1)
    len2 = len(s2)
    lev = _edit_dist_init(len1 + 1, len2 + 1)

    # iterate over the array
    for i in range(len1):
        for j in range(len2):
            _edit_dist_step(
                lev,
                i + 1,
                j + 1,
                s1,
                s2,
                substitution_cost=substitution_cost,
                transpositions=False,
            )

    # backtrace to find alignment
    alignment = _edit_dist_backtrace(lev)
    return alignment


def binary_distance(label1, label2):
    """Simple equality test.

    0.0 if the labels are identical, 1.0 if they are different.

    >>> from nltk.metrics import binary_distance
    >>> binary_distance(1, 1)
    0.0

    >>> binary_distance(1, 3)
    1.0
    """
    return 0.0 if label1 == label2 else 1.0


def jaccard_distance(label1, label2):
    """Distance metric comparing set-similarity: one minus the Jaccard
    coefficient, i.e. |union - intersection| / |union| of the two label sets.
  157. """
  158. return (len(label1.union(label2)) - len(label1.intersection(label2))) / len(
  159. label1.union(label2)
  160. )


def masi_distance(label1, label2):
    """Distance metric that takes into account partial agreement when multiple
    labels are assigned.

    >>> from nltk.metrics import masi_distance
    >>> masi_distance(set([1, 2]), set([1, 2, 3, 4]))
    0.665
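
    Identical label sets count as full agreement (weight m = 1 below) and
    give distance 0.0:

    >>> masi_distance(set([1, 2]), set([1, 2]))
    0.0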

    Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI)
    for Semantic and Pragmatic Annotation.
    """
    len_intersection = len(label1.intersection(label2))
    len_union = len(label1.union(label2))
    len_label1 = len(label1)
    len_label2 = len(label2)
    if len_label1 == len_label2 and len_label1 == len_intersection:
        m = 1
    elif len_intersection == min(len_label1, len_label2):
        m = 0.67
    elif len_intersection > 0:
        m = 0.33
    else:
        m = 0

    return 1 - len_intersection / len_union * m


def interval_distance(label1, label2):
    """Krippendorff's interval distance metric

    >>> from nltk.metrics import interval_distance
    >>> interval_distance(1, 10)
    81

    Krippendorff 1980, Content Analysis: An Introduction to its Methodology
    """
    try:
        return pow(label1 - label2, 2)
        # return pow(list(label1)[0]-list(label2)[0],2)
    except TypeError:
        print("non-numeric labels not supported with interval distance")


def presence(label):
    """Higher-order function to test presence of a given label
  197. """
  198. return lambda x, y: 1.0 * ((label in x) == (label in y))


def fractional_presence(label):
    """Higher-order function: distance based on the fractional presence of
    ``label``, weighting each side by 1/len of its label set.
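
    A small worked example (the set contents are illustrative; the value
    follows from the lambda below, where ``label`` is in both sets, so the
    distance is abs(1/2 - 1/4)):

    >>> fractional_presence('a')(set('ab'), set('acde'))
    0.25
    """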
    return (
        lambda x, y:
        # label in both sets: difference of the fractional weights
        abs(((1.0 / len(x)) - (1.0 / len(y))))
        * (label in x and label in y)
        # label in neither set: 0.0
        or 0.0 * (label not in x and label not in y)
        # label only in x
        or abs((1.0 / len(x))) * (label in x and label not in y)
        # label only in y
        or ((1.0 / len(y))) * (label not in x and label in y)
    )


def custom_distance(file):
    r"""Build a distance function from a file of tab-separated triples,
    one ``label_a<TAB>label_b<TAB>distance`` triple per line.
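
    A minimal usage sketch (the temporary file and its contents here are
    illustrative only, not part of the original module):

    >>> import os, tempfile
    >>> fd, path = tempfile.mkstemp(text=True)
    >>> with os.fdopen(fd, 'w') as f:
    ...     _ = f.write("a\tb\t0.5\n")
    >>> custom_distance(path)(frozenset(["a"]), frozenset(["b"]))
    0.5
    >>> os.remove(path)
    """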
    data = {}
    with open(file, 'r') as infile:
        for l in infile:
            labelA, labelB, dist = l.strip().split("\t")
            labelA = frozenset([labelA])
            labelB = frozenset([labelB])
            data[frozenset([labelA, labelB])] = float(dist)
    return lambda x, y: data[frozenset([x, y])]


def jaro_similarity(s1, s2):
    """
    Computes the Jaro similarity between two sequences from:

        Matthew A. Jaro (1989). Advances in record linkage methodology
        as applied to the 1985 census of Tampa Florida. Journal of the
        American Statistical Association. 84 (406): 414-20.

    The Jaro distance between two words is the minimum number of
    single-character transpositions required to change one word into the
    other. The Jaro similarity formula, from
    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance :

        jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/|s_2| + (m-t)/m)

    where:
    - |s_i| is the length of string s_i
    - m is the number of matching characters
    - t is half the number of transposed characters
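
    For example, "MARTHA" and "MARHTA" have m = 6 matching characters and
    one transposed pair (T/H), so jaro_sim = 1/3 * (6/6 + 6/6 + 5/6):

    >>> round(jaro_similarity('MARTHA', 'MARHTA'), 3)
    0.944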
  230. """
    # First, store the lengths of the strings
    # because they will be re-used several times.
    len_s1, len_s2 = len(s1), len(s2)

    # The upper bound of the distance for being a matched character.
    match_bound = max(len_s1, len_s2) // 2 - 1

    # Initialize the counts for matches and transpositions.
    matches = 0  # no. of matched characters in s1 and s2
    transpositions = 0  # no. of transpositions between s1 and s2
    flagged_1 = []  # positions in s1 which are matches to some character in s2
    flagged_2 = []  # positions in s2 which are matches to some character in s1

    # Iterate through sequences, check for matches and compute transpositions.
    for i in range(len_s1):  # Iterate through each character.
        upperbound = min(i + match_bound, len_s2 - 1)
        lowerbound = max(0, i - match_bound)
        for j in range(lowerbound, upperbound + 1):
            if s1[i] == s2[j] and j not in flagged_2:
                matches += 1
                flagged_1.append(i)
                flagged_2.append(j)
                break
    flagged_2.sort()
    for i, j in zip(flagged_1, flagged_2):
        if s1[i] != s2[j]:
            transpositions += 1

    if matches == 0:
        return 0
    else:
        return (
            1
            / 3
            * (
                matches / len_s1
                + matches / len_s2
                + (matches - transpositions // 2) / matches
            )
        )


def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4):
    """
    The Jaro Winkler distance is an extension of the Jaro similarity in:

        William E. Winkler. 1990. String Comparator Metrics and Enhanced
        Decision Rules in the Fellegi-Sunter Model of Record Linkage.
        Proceedings of the Section on Survey Research Methods.
        American Statistical Association: 354-359.

    such that:

        jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) )

    where,
    - jaro_sim is the output from the Jaro similarity, see jaro_similarity()
    - l is the length of the common prefix at the start of the strings
        - this implementation provides an upperbound for the l value
          to keep the prefixes. A common value of this upperbound is 4.
    - p is the constant scaling factor to overweigh common prefixes.
      The Jaro-Winkler similarity will fall within the [0, 1] bound,
      given that max(p) <= 0.25; the default is p=0.1 in Winkler (1990)

    Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf
    from "Table 5 Comparison of String Comparators Rescaled between 0 and 1"

    >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"),
    ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"),
    ... ("dixon", "dickson"), ("billy", "susan")]

    >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000]
    >>> jaro_scores = [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000]

    # One way to match the values on the Winkler's paper is to provide a different
    # p scaling factor for different pairs of strings, e.g.
    >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1]

    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore

    Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from
    "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names"

    >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'),
    ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'),
    ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'),
    ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'),
    ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'),
    ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'),
    ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'),
    ... ('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')]

    >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926,
    ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905,
    ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000]

    >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926,
    ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943,
    ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000]

    # One way to match the values on the Winkler's paper is to provide a different
    # p scaling factor for different pairs of strings, e.g.
    >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20,
    ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]

    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]:
    ...         continue  # Skip bad examples from the paper.
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore

    This test-case proves that the output of Jaro-Winkler similarity depends on
    the product l * p and not on the product max_l * p. Here the product
    max_l * p > 1, but the product l * p <= 1.

    >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3)
    0.88
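
    The prefix boost for a single pair works out as follows: 'MARTHA' and
    'MARHTA' share the 3-character prefix 'MAR' and have Jaro similarity
    17/18, so with the default p=0.1 the score is
    17/18 + 3 * 0.1 * (1 - 17/18), matching the table above:

    >>> round(jaro_winkler_similarity('MARTHA', 'MARHTA'), 3)
    0.961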
  328. """
    # To ensure that the output of the Jaro-Winkler's similarity
    # falls between [0,1], the product of l * p needs to
    # also fall between [0,1].
    if not 0 <= max_l * p <= 1:
        warnings.warn(
            "The product `max_l * p` might not fall between [0,1]. "
            "Jaro-Winkler similarity might not be between 0 and 1."
        )

    # Compute the Jaro similarity
    jaro_sim = jaro_similarity(s1, s2)

    # Compute the length of the common prefix, capped at max_l.
    l = 0
    # zip() will automatically loop until the end of the shorter string.
    for s1_i, s2_i in zip(s1, s2):
        if s1_i == s2_i:
            l += 1
        else:
            break
        if l == max_l:
            break
    # Return the similarity value as described in the docstring.
    return jaro_sim + (l * p * (1 - jaro_sim))


def demo():
    string_distance_examples = [
        ("rain", "shine"),
        ("abcdef", "acbdef"),
        ("language", "lnaguaeg"),
        ("language", "lnaugage"),
        ("language", "lngauage"),
    ]
    for s1, s2 in string_distance_examples:
        print("Edit distance btwn '%s' and '%s':" % (s1, s2), edit_distance(s1, s2))
        print(
            "Edit dist with transpositions btwn '%s' and '%s':" % (s1, s2),
            edit_distance(s1, s2, transpositions=True),
        )
        print("Jaro similarity btwn '%s' and '%s':" % (s1, s2), jaro_similarity(s1, s2))
        print(
            "Jaro-Winkler similarity btwn '%s' and '%s':" % (s1, s2),
            jaro_winkler_similarity(s1, s2),
        )
        print(
            "Jaro-Winkler distance btwn '%s' and '%s':" % (s1, s2),
            1 - jaro_winkler_similarity(s1, s2),
        )

    s1 = set([1, 2, 3, 4])
    s2 = set([3, 4, 5])
    print("s1:", s1)
    print("s2:", s2)
    print("Binary distance:", binary_distance(s1, s2))
    print("Jaccard distance:", jaccard_distance(s1, s2))
    print("MASI distance:", masi_distance(s1, s2))


if __name__ == '__main__':
    demo()